spatstat/0000755000176000001440000000000012252364235012151 5ustar ripleyusersspatstat/inst/0000755000176000001440000000000012237642736013136 5ustar ripleyusersspatstat/inst/CITATION0000755000176000001440000000311412237642736014275 0ustar ripleyuserscitHeader("To cite spatstat in publications use:") citEntry(entry = "Article", title = "{spatstat}: An {R} Package for Analyzing Spatial Point Patterns", author = personList(as.person("Adrian Baddeley"), as.person("Rolf Turner")), journal = "Journal of Statistical Software", year = "2005", volume = "12", number = "6", pages = "1--42", url = "http://www.jstatsoft.org/v12/i06/", textVersion = paste("Adrian Baddeley, Rolf Turner (2005).", "spatstat: An R Package for Analyzing Spatial Point Patterns.", "Journal of Statistical Software 12(6), 1-42.", "URL http://www.jstatsoft.org/v12/i06/.") ) citEntry(entry = "Article", title = "Hybrids of Gibbs Point Process Models and Their Implementation", author = personList(as.person("Adrian Baddeley"), as.person("Rolf Turner"), as.person("Jorge Mateu"), as.person("Andrew Bevan")), journal = "Journal of Statistical Software", year = "2013", volume = "55", number = "11", pages = "1--43", url = "http://www.jstatsoft.org/v55/i11/", textVersion = paste("Adrian Baddeley, Rolf Turner, Jorge Mateu, Andrew Bevan (2013).", "Hybrids of Gibbs Point Process Models and Their Implementation.", "Journal of Statistical Software, 55(11), 1-43.", "URL http://www.jstatsoft.org/v55/i11/."), header = "If you use hybrid models, please also cite:" ) spatstat/inst/doc/0000755000176000001440000000000012252324024013663 5ustar ripleyusersspatstat/inst/doc/getstart.pdf0000644000176000001440000034637512252324027016240 0ustar ripleyusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3018 /Filter /FlateDecode /N 79 /First 642 >> stream xZYs۶~om @ɌxIı,䁖h,8@J\D^r3wl|g!&bd°)aHf8`d1"aR)ˤd]\@Zt+D'щ)iadt@Db5:(]b,,D0%Kw˜H4bH@7%Y1c)qX`&48U,&ȔbVcJ㎊ e5d#ZC,, AOV Q"X,4,1Ae6n8 -& ~6"GeB`x,",2D=0RL!> mpTL 
Rx`6Q#@YJpTX!*`eZ"RTe$)MXQ#42`(! p?(-IR$6xAr.IWy2e| ={^ٚIH TOBK/K%S" m/8ۻy(yvY]VLAakt(/˩Wt& p[YM5j_ }[kƷA*t\1f9֬HgłEg(df~ai< x=+j>%"=*0<hL$.@ٳ)m/)oFLKV̯Ҫjd拝u E8d3?d"[=b<4lS/\ë42~tuRiC6)IT&FOÈf 'Yl z e;)A?WXIN& {ivv^U|'|S~yg|_Ib$˛&g n0\y!}N6MaJ-}ha6ޘ@([, ?7<9*ҋ.Ɇ,Wa:?;޽rַ=oR=n\W3`6o|ĀuP 9 ?;y:㰅_,84󼁥 XbIuDn Ēnc%6 Т'Zp*"Ktus|K2keiAabUԨG6p&.A8z`eZ˞*˔OCiJ8l<;+)'wd,Eu؄m4e7 4}|5R+xȮ㑺=P<ދ5,uPMEu+魡W*A-J0Mm%x #iuFiGe|9i^ޕ:TgaNU0?֎)3.C-iUUŷs(._jrE9$"'dJ:UyNc1|ߋ anl^L9OZwSq. (-zA)/ =wF>_Ll=ndWoU^Օ|+"{B(Nk5ot<·cnhP## =7>{$z&CeU^75wfw֣z0'0MݼM4/NuR"MNwQN_Oo !֡{/bmSIE`MuӇwhzEAW{;3HAT}Ոvk2XZ1ؑ7cg5o1aR&ooo͏(VHX/]ֺ~bi/TvZFۿ`9Bbp@1}\"dR|tQnGHB4" x7L>LqT÷}elrXtArK z?+|m/BP7kŭF!\k~AhQX+gڔh&78XB=UQ(d ewUeYP- QeԙFx%qCu흥4H}wg!->!,y*.lxD/lfNhóQ\糎> wyvKPP?Eۥ4v15RnW"~[ƀ2hrZ9ZPW/߂ִy]?nnK<Pָ4jzFe^TRsO⮒Kq1T(b\nIR,jJ}2?e蓲LOD[?vmU(eseY6LU9ϔOe9ϓe},ˆջAD$}9b>@j zR ޿~sP+gKتۍO+O!sեr)jajJ!E'2y4Uulqz-RAg=\=HسuE^x%*`-S (ޭ쩐.aY\ǦjcS5,LԱ0+i&f&TXLe 2~AgZ=Ȉz?zA«eδSoff݌Xtq=ٱfWӇd^,9J3:UiX<89N1!2#ɈIQt{qw:!|$Q>`e8间ɓ|:Q={й8!}*TzLDXHÇEKL Cm$z]DZkA(SܸeY  ]4m-Q?93,\ɯ;Gygi]am睘c3T ex@N'SECt`@>K6u1wn'C"=4.t2qՔiLvb^*^ Zop-zp2b?BC'tֹvs&??;.*H)Y%dybzr}N`endstream endobj 81 0 obj << /Subtype /XML /Type /Metadata /Length 1337 >> stream 2013-12-12T20:12:06+08:00 2013-12-12T20:12:06+08:00 David M. 
Jones CMR17 endstream endobj 82 0 obj << /Filter /FlateDecode /Length 2808 >> stream xYKo$r_ѷ6o& #/^ 08;Wj$fW?5 ANX^SW_}&tNu/CbGalwnWRGڻΫ08_j)iӿݫ%2M͠@cb§$`<6L|ꏼR!O&|Ls,W:o D,V9g'oCR𯫿C2VΗ̅ 4}̏yR0yݾvP*Tr 9͗HA"ğo >Qfj2WDF6 2[f7MJ9C&^R.Wt?Q9,CJ~ς<'MBx,g+BCsQ7 MLt"]j,%뻩^?N2uQO.'@.3>ad.BNv W銉eoQːqѼ3EjedVR"W,o4/;K%Dzw;tD\+i@Tƽ) h-!XFViAwe;0b ?tԦӡIӹGN|T 1Jd6Nb#{^YwNM7jMFMJ}QP1e4%J-n-2he53nA!ODSX#7p8-yZ^6wg[#2VT?MB(<BiBYe`tY$zEAdQ|r&y* EW7r2Da։1Krp>عhxxֲUb[0HJս `;芍QlS9U+Nqf Y+rEte2P3r~ւkRb`@^빱odZ/DfkǓRQKcD$?q,NuyF}As,& [Lf>խ"vWAEp~r˔.ӗ rL%ڡAr1c4M"v |*҇X_X-[uǧ5tLrH^y8:!5Թx[Y-F_ۘ36{~1/&HeQޛs.%Vup4(?bC/?wE'yMHW8.GvψͲREl󶛶7Y,M3R,,s1w.eY&Du 1v{zT`_gR~)]6a.?dTW) k:;YnZq~d#|VOObkq@ߜh)'#ϲkh+Ϛ#*l1- 53_ef#*sц)5̀[f|yQ!2KGF 3hvLZOM;ﺇoŸqv #;,vpo6zg:9QC!h=Q/|J{"}#̀i"oRA 6[}lRVSmİ?8W]bZp ~taZ7=MQT0P_(!L csOэf.#a9;ɦ_smՑxCCʖ=9dx;+2Kʰ2ZUl[?o*ڬN.*zu[\}Å>G88D5+-AZL!)~M[:^ Z<-BD@Я 69uY-2u ,?B!fř< Wٙ#zVt#ԏR |ҡxq\hy5I/ bM%}9IoOendstream endobj 83 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1615 >> stream xTmL[ۄu[zjڐ& ISR YԂm\7Ɓe}}lll d#,*s!AQS 5U,딭Q}/{KYk:J>9! i A,pkYIRX"<%\{`*I M:b~yF%? $$Ymi5f]֪ToPlޤxa˖+zYVV+ VF*EAXZո=+noV- 抝6):VQh5rEڪ8kkmVYQ`(ט H6[YrMVG"$oD*OX%$;ėdS = V+9[: RZΤHp`x".^&. 4'l<ޓ.qZݮʱ'[Ԝ>~Ԑp;" TJˑnv0n:vz(۫1Ė rI$h)G +PCƭn1GQ)sQDTVb“W 3\Gd 42T{ꄫq};ӥl)u,ҿu -9=5z5JG@'G!ulP/'AG/N$e OĒE(:%Av o-8?笯 )*SZ:?Owv^V t=.~bԀ"s=Qr:jQ(.Ge?~Ə N|< \kt1}ź '`~A{{Ag&LbRqY/zsN/2̑tlʸIЫs'Ɓ9׏#g.p<-.:["=p>^֚ ~"\)f_K*ͬ^ *hSX3VzJ Gܻ%HRM}LiU ԌyD_^ m.m,!_=CaQ } `sq^z.&?xDSvd juXmځD_l̄?!K0!+Ȩ BQ.ɕ ÐH4"gendstream endobj 84 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1177 >> stream xELgZ,'T[e[CMiOd1*ZB[RVR ?[ˆ9-Ӹ,.E7gs1}7|%AdhrzFFlv RU6d`as8w!$ɕNlޔ/Q9llBBR6nvm7lA7(`q9&`j2c(,vn5Rc. 
FC^B[`t.[  7b"-DH""\"xxH!D1"fmjQjZe^8oAl1KioA.u&դ{=٠,sOW51)v)keq5"ɫ"jiFgr|_ҔtC[Ug߬ j)?&{ֵ@̮{-J>4ùvqn(LԶ2r01z'rTt(C%j%KOt;YkU0e{͙Gח:*o93yQZt~k4^oS> stream xUyPga`UdW=Gc4hQHpE,^:\B0 2300݀0ăl, <lcQVuuW}{ՅH$0_l"&t];'%OGߠmD%e|>w ErZ!_!eBV<7*bZ%KRSSG&$-ViW+Sc10eR/RqZyhdR g@U:YCT k&()9%2}2*,vK"6[mv"A|H눥D06Bx W(*ɑ Ñ /p&!t(\'ӈ·pCp/x$FP> U*|''*x*YnU,n$ó X|Ns/mȄ~?_tPg? QWm f~ =|lOQX+e2.C;?|TWe hd&O0hI[oKX|E ,tv5ULsʣQgKl(!ARtvDKRefT,[:O^XhJ?"Ey9b/Dk#ߊֲu.Aw aS,Έ2GEEK.?md0Fn<&$c% &@ 9XSx4᪭<=<=d]t!:/k7QRa%zHeA=n\y*/}Y`Nunh(;ʢRDp4Eթ!VA≈>PTޱAirl&N}vdقㆦà Y-[7+hqheRS؀E3l<@Py>J19ANhLkl63bO\){WQNt>ߔ^Vp%=ہw\I=$zY+F~`WZf0eX)hNa~*.^ :&P < MҚ3u &(~:N@LcX6X)v!J2YN򓇧0]ml5S &}endstream endobj 86 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 7878 >> stream xywXĂ:b=v%&cbo;+{]X R,D[L/11;r{wN`ݙy}cBLLLKo9x~ ? k^˺`5Ƃ_: FۇPAQ>^&YϜ?f3[/qs ^N~e=qwxxGӧGDDLs ɤ>֛=g) ]!NO{F 8\*liq?Yto&U[ \mS87>|uc=Xl趑DUZZC/V542RRɕ UjKuuΜZL:jMУ=inա:k} YTFM2(mwT|P(FϹ} $Nބ dm)I؜Z~^E.3 lrGYmڝXv[huHN^LO&Rؒ SA]Zv吲>&}ݳ]ؒ̈_L5J!@F7إ* !%&=j]!tcX>VuNO*Gz}# {8\?HZɁ"1ZBg|AHSU@v<2zF)^K:)%1 h&i~ۉ`7yxv.h6E}YЄrG~ (]̏cX=օ '1q?IhZ{"w:t+M*xTȇzOoA?gy7*[| ƬzNOŷonסш0#`P'.oBj%d@hH~-v&YGx D oح\bɡ{&gdz!@S9Q)8X[w1GQHb_w6d z3oiz3Kp!o[Vo!1*i?BoR 9>SYrAG+}֬ HHRIٖ pV-PJܢ`pS7}ehFq(\\ 2Mڐ)t_N][V/3ouSPy'iUϯ]'Vect[!\ OtRx( ě_ ޛУgOZ[zT_hHKBҒ@ 8#{jRj2P9Q}ZҰkZ<#PSؗU3+r3dyYsqTTOKﻢi]sQȗ! eF&PufFo9nN>*MOmRmi||Xk~1|l[K0 oOn7ʤi28dOa޲+WGWזW685GZ9r}ؓ8S\.S|4Gr u0)Cv=unƣ'dK!?a*8;"#ӧY3! bS%ir)W̤xH p 7F+Nm'=aeV$W P+J`ZVaE"@B] Ev<&1 -doVPTSnBZ{^+RvYM{ "ZWUC+ztDREk8,1|IIuWuJ%E/(uxc$Z'bĢ+jH(OtJ]Xfd+f^1QDRȕ2P:])ʣU\Saw8WUX3QY(%;njp[9pP8" ~/ B/ rKwMRAVhKBGl2 EZ8D"5a*4yH5R}U7U͚Kh~{ !ʔ(}֒Tt0I')H(_J\30yY3GDP%IAVqY记5q IocZAݫc!'4<6ı?\p10ELDd_Ct 0.4B`(q6:lH4u*Nh;E8vj5MW"Ƀ>AHY)W)!.N*e2'A:^z+w+|v{@݋T%Z f.Ag2L4n0/rHw1NâzVn{J>̍x9CHPNMRȒmoxsgp H֥.j^e8l#٢9&h)E gAJLxqDM]QyUg0W:]0E,Z(Du/bQ *{L! 
wنZ6WҢ3:ZrpR>eU}N_Y 0!6Qh?gpcwlg-vl | )*=z c2_fǯ8vn[kOx:4X,N=#fMh{,T?\^n+ǦE3Y~&ѣrS~G75 R鹬O_|gᢺZMyH?=h OsDSXo̩DN-*d2du,IDX%!t_E|'&="\};ڳ'hx̻%ZZԩ@}Df^\=\W,ʧ\uW}'ųOnXxOP0D4-[1>Vpkh:) ͒;/ReQ=2ef5|4W/ =A ,6$E0L*£}eZ"gC+%!}NkFũꐋ請IA%3VWN>39 dy9gRyosmEf/,K;p%=IٛRYKYkMࢌ#۹M'Mdv{kR8Pi4 >w~b5xM9)"MÌUq{r%C3= ujˍM (j򠠨N_?]Lªꪪꈮ$U_74PtMv#a$Z5)PJ`H)3>7sia1F}N0VXG]F | \iG*F!&. ^BuZ?}[ *2:-|V:e]*&(H+cBؕ 8BA~VM( &,&ߙx/L"8> rc4Ԝ>^Yxy#S]ǖ[rA"~<ެ!4$$8ʗkSŪ*4`}HӨDg~4 >~po;{A s.X֛Ʋ_7KYgLh(c=#4bS-G~%2MC\dELVf / V?1w_'*q$k#D_tqXMu%"IL@kNFOLhYfm̹: $dʉ8mg<6BT3#p:*k/h$ S}&挅FAL?߯gk/4^F/ |3EEd 2:!^IJPvB~~KZ1ރOyc}Ti SŶ8 vQd| PF6]ٜ(gφr  ) n*/$YRQC\_u Dəߢ${Kx3tdKPd6Вh FCdid3WbFn'(A'Y<\`25Y?obȽۑR4h+2E~,-M&EYlyex U+$;#3Lִj P6ְ_O}ڊ{,X oAк!pDZ]Ÿ?^@c%VJFL{9W,b⽟om_< ؉M8,snІ<;YA:NO'TŰ6FgfrjJ[^,ҌM̿u [@1+=n=9lϘRC\6JϠDۺSp˞(0Kʑƥ%&9?_OdJW1DsFΈr4АV~ [](qJ 볾p^ T釋p*5FvYjMZ]c_qy~ ’i~+Ri̶E>a o+T9&bƺF]ҳ>p)Dp=G p)lbX$Y)];usY7V:[v6O[uV3Ed % ^2CWgɊEaj# ^Moq%rzst zf xf3lY[1:;YJ9Wʼn3<:w-غ^b_,'JhY05 )16%y5|uW{OYv072}xы"L%{R;OMv6N_;jȥ&`NJf&(68|de")JHdDRipwWN£V:9Z򹷸1ULSpy`_ӌ' 29G w[Su4Bϻw eG9;cΝ2^{3ގ4$3&Inxvtz˅Hοcֳ:iGӣ 9hgs\֐ROv+ 2)JBD∤ +5 RicɅYjMf:W|6t!;SkYm\=C|j5lXx ٰfh 4jhX7pi9هxYBV,.G̲'HD~NԽ> stream xX XS׶>!prD CVT CE " I1a "*u@*AVkzc+vw׾{/_6{^׿ 07#x<`{ӏcF7 dGVuրǓ'-(~"xOl8:~fg;Y+u|2ndZ+S؎#BfM,'y&I~>D||mKe˽ž]mD.ںKJ HdK#K#)|_$CǏnIL6}LXA|A$Vb-XOvbXH,"KWbFL'wb91Eф1$y`b1&l!1b +aNm?6Wۄ06#==6O a! TøR$FQ7ida5h £/n8yPu5uS/o/Awvnyg Pe Iϱ964[c;0' /_v$w'gK0YtͅpfA }7ϹLm\ k}8Q{0d6y*YڴBw~& QrALzX QMܗ*yɻlF*HTٻ}V "!Hb>u=#/$2x"FW(L߃SA Ya ^ 0anaDjiLd(b/%"}۾?6u1z^Gjwb .Heha?\fe傢QgUq *7zA Qc8"gV׼ ilUգ.?[" kzġ娆u;Eu7J3=}]@A0Ȁ3|K@ ^iUra$ nelR"!M#ln~: ?o"&%y/;IATdJY6l$b'CF!(&u{)U(=+Ґ(E>GadK7 7 F;ILrlfgD\p:{ڃ(MT"R[R{#䉝Va`=+a{M'{b1Ř72l+MB 6t@L'! 
hgN'徉3[P^Mƻw7 l'ﹾeAdo'H[0^y7N~{gx~ >w8N#hEY..]@~,|.v44WF18FSUYo4ȩm%r4Q»HnJrؤ1T`W/!Q1($ɂ&.Ҍ THedrdcQx$-DQY7!C[6G 0LS(ѶSmǿsvӺU^.Aw5G[Y;x lάU6|uXOOky-Zfn\ s~tvŮLw'{xr OΟyL=kd23BPViz `^c;4q4;\(Əމ?% buVV6**P~fGخؕܛIʫ6=tA#jl8_#DH$jD yݬpnWJvԅ6wh7zB#K*KIdXB(q(JSS'N \0aͰ8<008\zXxkx~O8< [~ ֿrpx6+29...>2-Z$49,{APƾ[wynAb(ۇG]V U)apy/v`l9q]|DW'!):RQnohJW[.'x+: cR<#mugWM+XmZP1mV{W&S_v感X) gCi~iRS_MfO.Y|2{y-lVVN6P,\8JNiF1*uFhxWA?X1"„{?'=w kroR?#!P*VNo)tFv=MZ:(Z6IW7]5 $k(UgejKq{G]f/\di jnj8B)Np{9ojGNuSB}8>+JF *T^])kS9E$s{vց96C> stream xcd`ab`dddw 641H3a!O/nn߷}O=J19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU8OB9)槤1000103012)ٽgf|;?̿ܥ;~3>,}} |g|Waq6;7<_s/wt\7Xp9|zaendstream endobj 89 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2655 >> stream xV PSg1!H[(XjS[RR@@"$ /a\_Yń0Zf3+ g62 ELY,e1˘L0(f(1ZdR?߮~733^# |d @.;;Q4u{[qJP܌x;_G&Ǿ-ONf`/!Z E5O]ס|w;5,2]9IBX?3%uJKW*֡V/OL!3$ɐw8'VZAz9 g*]BuK{.kAmU pn&(x?qKG~DNјpqz;wd@:8tyc74ATNfMq=܆ޜӜW:EeCP***# ;<ӮI[\ybJQ3:sBKvK29ϭg &@Xv;٭P^l=p_*ŀSIδ !ƶxyɪ8](-JB(5ʺqnZmRG+pP2.|8mƉ7s҄UA X nlwc[o[=D'C.RAnaE͛#zO'p8-7,%x=W*~H;wo7, zV|rgr9p\edQ1!I<=}oN5:Vua)/J<TC^\'Ft(%0ӺxVqe2<g͕9%*U=KbT2m "vm'=!wB#v +j8"xuFzH;D}Us+4R1`wŒeso[282pb{g>!Nb-dɜh!Gwk[GQ3aɰ'X҈SP'=O5MRD1fQ߽NV: Y zK%{ 0 "U7 ;m5 'ҝ[hUs]Zsl2T $E 5tRP_Mw|m4H?ulfZL\!ip1ڂ/_eWeߪݴo=؍:^f6nG˝%V؝\TSjWى֘D&?z^ w\lj©o6ᨤlw8)N%]2GbvAhz`*MT>6(Y_٦2Na ru R>v9Ld{V3U~a~Y~;?M hp+dXWރ2s㘍  T!A?CPZ\Rt l9 BFJkM{rf,#c{kOIʖ$߾z۝;> fBYW T|ч2{⷇}{x/.R[>ϨZG%1o-3?[\&p߬$Ͼw:eH~dlGwq_j fԈ _N FX-3O5+O!aH sOO[2&Bg(1:r/qAʖo%kwpuFNe9z;Vlwo+& ! 
^$a{}S]9NU!z쒕J k ”I0\k]>A*Z⚢⢢ZZZgshZZEE/)n)qAZ۠$nkHM!r2QCcYdq{f'~YbHn*.Mv탒lF"__c`Iendstream endobj 90 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4289 >> stream xX teN ?ZF_AP@QGuYR"v/mhfiMI%I-mi,Rp\Aq\n/9~i 9|}yc2&Ob0)nXD\fI`?JIa0'?8{  )(TKxQ SE-[QO>U9yd~TtrAfzNr~'HF-|!@Nj&/eab^AfԖWM9Qwti GXX-HK3yᚵy n(NN)MNK߳9#s /nkVvΊ%KfF,w8VB6"vƣkKk011360bld<=5F4cc.#0.c2njcOJt;y/NQL#L枆iaӮOwN{ 'ofPD9:k32 v=,q^JRgje:]A6AСi6!R3 si#|iL %R'3>tP璃hڬlOgWjRFd,N}-Ngee*C9&kNq8dH(NS) OS EDbk/"] hդ\(\ D]do6R~4v-=DB*ړ  D9%͠%*`r>YwfDZIj:h2kYk;t9f| Cϣ? 8hzʹ͈q9ھc gP;vJoW\b}Nȳ4Q0,T nQ+t@?^e"gwn4WPh)$pMC Ѣ9+)ǧC μعD 5\vzd!=*%KZ؊VhRA5L?3ZGS!a\ؠBg(!*DPEPo2 hbl>bt5O<k˚灘lW`0.fpc+r C J2&37OA߱2j =j r9V Bjh"O#ah[`:HI))\ Y;C7E3وєFh)JWUu56+c (X3J(XD_@>zōq\E1 ]g!)?0tBD00WQ,aP^~RldM9zF赖4 k@OMyl#ᢸUI HD~ Gs~jS% <\$H0&X @nvAΥj:rAZY )m4BܡFGE~ӹ,Pb9*rUz3Tq :kVJUIckKYt!-Chil X`6:pt׏V{0ϲ+0s]d<-2SE^E4ge5jOV@̍L=͉[B'],}=u TcK+5<@cZACaPP鐢ITH1 # 70})'r~)=wNT;HBq?Afh%4P V[K) 7О_T*7輥e#=0c0M`$rwq/ג/RVJ%~N" :G:?o^S5f \1TWVC@8 8] vOO v7u*Mn]s`7:]Ǖ"Zo&7Y9c]ƍ뷚JdM @1%[qjJt~p_REP*h-T0C?(4 P ̡*Ѳtecf8 Ą,k]שwz.O? ל!⑴~½O8 g@R$IEaV,,YR wdDM8D@/}>LP !nİ"c42'0DZh]w`5A,O##ddͥY=bz3׾GE~ }LjSMʳWlnpq?Zwj9|ۑZJM2sXLpgb6Q4BT'A WfEF#pi*R uh*z=鼢, wȶ )K#ܞޮ_R:=x/0#E}fNMlW%פFOM!"G4Mw|&Ro6`2q$ӑ+Rc6Wya7\xPՁ C|X|"{,տf}n!WtrrՒ'e@ll(}p֝B8koǙ?UFlS(z4 +;w\`HB>{/z֘S.Mr2]iYcڡ3zT`ĄA@zw2PL 2CcQ{`?vX 5=L7Vԯ_,-) [NA!>)67R ~F+4v%ѩ[MHBr$%p,=_G?^3_r7kPf z;!!z90쫿(%z}̼vUx$̮jwu+`7_%Q(ij;Ґ(SE !\bsǦJ+RE\9O#*(sCJ̄E==8A{ftbK2=Fhhuh&E~˔Ÿغ-+'r_-._Heŗ`CX.|p9? 
r+1T2yNW\qHkL9U'z*Pi*łGY{/r&<2*KkXo!!en1'˭RQ/>ʕ7Z zRmh;tBAJiR-zJ:vW϶ 'fPD?K[KL6.~|uru u!N"DfP 1jhZ oGKb3KIclGU82VqCSnݯYcUۛZD V*3++'N0Q'`xN)͉lN8**g ۠QUxa{~˕\>raƿDvK@@= E5b<8^>sY }#8ܞZ%pj< V2yA.]򚭡m8fE%+T ھֺҮIɩBrD/;+3OD1z `62Sށ)DTNXPG(PcɎ){FL~3}6}:lZ=endstream endobj 91 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 368 >> stream x%KKQueһ+!LkF![Ԫ0Rh fR):iNQѦںl@vP,qp ּZ({,mCswiv:ŒhprLE%1"2Z hGJZ2(!Kt iL hi3EHvceEUՐ Br\Fj>a ]9CО a/_1Y VPB>J]|U?w^ dY^5ccͬ%꽾.eN^ҲW/޷ 4؉hFEf5h {EY皭LJfn4n[h[]o;FƏxa _( iendstream endobj 92 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 945 >> stream x%RoLufΤ̲dl_ܢ.ψA h\+)׾kKBGO c aa32 웛}%Y~n,Û}☶ q|hNyX9+/U)/k@5=޻F54?>Cϡ cg&踽T.F;-΋7@|6*Jrծ;[[YsE#5tIoKn/˸fKL !Sd$MxdŘI!?r,G{|F{6aendstream endobj 93 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O10 N( ]Z2D! }I[.%Mů F0Lu@v=|MUVBoh )jHUZk 6ἢ*h.O%Gs&#q*MK\1 >l_iSendstream endobj 94 0 obj << /Filter /FlateDecode /Length 12272 >> stream x}[%7v{;?<~~A81$@?|ִQe|ѿZ{,V'!@}>? k?|}xoyAQMoF7#xK>\,o>oj~y+/?৳Ƨ{?'o_7:7£:yf>KS%]$*3]A_?`L ᷯc-"4ZKln1Gie,*ԐTcj[Y`0W.? Zsv\`#zS_ mJ/~ki]5ؗߎj4 )`|f0m&z'Ų6~3"ײO>G<שW-yE0ގe啼>u*U1hC4Gr%p^[.hUnZSԂqY'LX?J*X} ~ZRjq7*EK. 
*3LǶ3{ qCfG* PrXqw1h 2 @5UY%ki3Ґ 6,9bh?My)\%Lk{.[Zrٸ|tHO=L_H{"XqnA\ש* F&we感?VcH2J?Qg˿gw~Tʓ&z?!8OM ?)AYI%}S/&Z[юwګxisʤT'?3ѻBTTcք/hi*LWCTgZs tEr鿽L/obܔ ӷα9$Lդ (W=kUnL+Av' 3;LC‣![`^3@!_yʢlw(YOIl&{e$g˜2%=*iG0ޟŽ'9K!F(ymǨ0UIծ:ڳp:h[Q["EEEաGU:ɺ|w= g_~ !l:Ap'}»75y,`lÁ ND9V< Vɶk|XFGLٗoXqŔl0,L*MDZ﯒Gq#,łJ~7PuK,yҶ:69z:hC09:'zj wB^| 5m )δp/D8e1"S7Q'"%P4h ( j6i1UgŢ搃Ix- 0O}+ 2lo3v:4~Iv д ev?4NO0*Ă;pb_-!%iT'eᅔI;j?m2AsԅYkMOu1M6dQ"PҨa$܊x4b/E=$H +@0 Ͱ2M1*8A߇d/d4fՙR9%y,(bZA%Uxڹ obJLZ/`^Ur`GTи1U@AJ/ $xi(}cn#$`ȞuAB裵ג ŔxC]MwMWoBVjsdžKjl/`lE`xn U(5ZFVy_OqB'RWb x=%KVcBZTAu*blPm": Səc&JX4Y M,>61JwOF/a^X5jau!t7<"Kgf(m"}Ǎ ?@$@,EmQq .'줨 Dq +C:tRMg0ʟ(Jv^CL|Ԧ4MQǨ9߶f3 "i},$tg'Q:J'._v hWh4B*ʼn:#smd%\;Fi .B~YI M#膵1Gu9ƪQJ ༴N$-wHy•'IKU'bk?7|1ّX}#m #-``Q!I~1ׂJSl_plf@`p(tǕ#$dZv:cY` FbuLB$ZӬ ؆ژ%K!hii݆k#kYNZOap EPSÓ jiԨtY4mt|ʔq(zsݜ3aV,MzkN!AJv5ƜS1`/14;\@Jv;[= [ f=&yk& ϰ{jTçWpiuRc8rO,did@pp=Q[Z0utU(T "-YXGX5 ,CׁnYp˘aݞ.~_vys^.7~vR/N%Lk6kQlt>EEq3k@X:si${UzN%{Xu=: )n\#dz$,YFO3Mˎ ;b~H\o%HN]mՔq:dB˹<)c+as6Xz$$ $¤YN}jb1M9Q^:ڍ)f/`6<\ܙCAl{ƚW V}VbS L۴~"vzoCf>gnMfn!u]x$CN\ŵEt3CwN TqDKש!jtniJ j^Ll EbV-JR<`ɪUwizfVQAM@+%b3 ^&(xK?"h1hdޅZB<@s{bptÀdwv݌ˉvguyr!+2<Ԅ1QD0l-+p̸[D,]E !,a ( >Vw˴狤u8cGYBJWw6&ӛXUoR"eG!xFL5Jx+Gm[9EAW6>OVhluah]5!,ĺ8Efğe=*h(i5gm_7n5영;hϜ֋^pǟ\?c3R -~9Cib 'k ޖ~sԣzE~QDz\S}~ļD0MXS=j^Tbthd)(9=ބf#{lЃK>t/h*ʝrEq& f1K,+㚭6w'm ;p #䃻\g+-iFO3l8̺K/paOvB\nyj3K6+^8: fhYr}bYc)!ڎ 9ACjv ReΚ^z[CxΠM9ۢU jB'T,ͰXkJJl}a̔?ơ - 9)%xM=O& ZӶN /1˄1eJDh`TfTݔ/esC YaZ ]l fp`iּʨv3x1ͩ6ș%[ۢY(sb%l1DzPd*e# ޟT"2~߬Ԣ푁] ?ced>9&# vt<t}!wګy{  .~jOLwDɲ$lRGN:C•׸gm*yx /j =eUR⊝xߢ'뻋"u˨QB(&AN{rH]lu#AnP_;:=wҤE|m޶_PX{u.k_5 ѢMvٴE. fͱvX9 Trx$yXE7G3myNh_5qF-Nf6n_ۍc+^pT)r*L<&,J+dB $BByd0$c{YoAwcKZɾy}|#֙K2"WYœܕNĞAǝPfJ>M gQ86Qc(&l%5:ʹ]@#9 laJju?}jvuNvKݓu[ZfRi@ڶߟd{) $k XTt̩cτhYom}^4VnvP Zzf765OMOٌL]SW+;#l[mvٵpGѶy6[)0$hӭJE[)JX"Blwe)V-SveΜt+3]qNڍ.7#.DsM=")wWL3Rxx`UobnZD$ s+ 3Zny? 
eZR!kV@a$)uC Lo4p*Ni9I2`JNG*nm\V7bn{!s8ZEw`/dRl^Cm'*۔ ܠu.^LTps5"| g3rLf'͔T&^*IxK)mxm[ui zDBe@^YVƮ6 gͥd8L;gs 30uP`}b=:'׷0.A"Z,VWŴEGIvdņI;MܳEYE!aMOJ3v NEd֚]$l]*&GU/]AC qƨz"uTH#O0dwG1NY[up矢}opUnwv^*N[?p=qx'y,;`[`E.nђzDۖĎ k?EZA9čaw#hlZ"_Jz<$}r&*gcRu;c2 Z9owۥbw3 h0z؉EXM濪 Fͧ,`5RaAe~P?,Lt㵤5u)$N;\ ӄ^L$яq,k{cC\5CM'>vw~rdM\!uwgҾf9.|jr9E%4SM6d!lzZcB5"B57 p-Hۋ(M4Nit{1?Qqy4 4s 4W@nwKILOQ",XYSغx|aK6armfیڀR2nl,~$аVj95oWӇ[[!ּH]4}e\=m ϒG-7U[d4LOSz~Zr3U{.jhf<YٹjJ& Ib^kgrG~x\T7s\B(kF=n垻uN=qԝ&+Ϝ̭`=\C y]`y덳d8>]Mð?AV.cr":};߮/o׷M.E8t@&wA[Ηa'4? .wǝax=|traQ')ޡ{H_E mл7}C"qon|1͒13lwo]e y%oh=~z}{*170'8OrAw y& &zr=,ئ;ׯ}L?Jr}raN]N{UgyoTT˴P:$+w}wۤza*𓌫ȔoU:B5_s :¼p5L"`=28Io׻OaEIK6v.;ɺʧbɧ vw\[NqP`:B.Do矡veJlK$gM`ɼ;7>< -W ^w"qsl:N_{|?i*g*vr퓦^MNDvn.[zOPH/&mVz\Ha[Rn.~_;= Pyh3n'83?OVyxG?gj껰-zQ~?=~~޾=}±4ÞRDmCw-?=n&lNœ q1r\9 : @8lw?b*9AY =hJ 0|bBdL"L3G(AETC j) ؍-v8Ά:uaS~r4L5>qJWEK/':H#E2U>-ݓBdҼq7b(Foy7 ~-g^"Q$A V{-L%?> a}xxE R`g z{^<ĮÉ_cy{I0j4۹")z;`y'y:Lm>khO:47ҘRg |Do$罒h 7VIK3zdMo-L Gu89SOKi4 _C䯒&~&XUdi_ P/Up<4?91'9<ڋUü+_>K ,ӥ1}kŝl[UН < gFsLD9+5|_+oY@> stream xmkRa߳<!S8NGqt'O:TSvo w#T@MjdJǮx?}x2Dz8SJ&5ViEĮyz^//:x*YraI5m<`1U:N\Wϐ 2<>A8*8/\]Izs܃sfv$0o-&߼VE0WM?`ݘendstream endobj 96 0 obj << /Filter /FlateDecode /Length 2638 >> stream xZn}p~y H6`c ȃ7y$ʒ jiˁE5f_N:UԛFO}ߜ.tB?nʯ8|qߔlc8+{eYϗE!vM !?nu+u鍗+=DocuAɴ?^ܳi«woo0+-@}%ŝԡFmv KO~!Dfn-3itӐv7kȎ6 x"yd:#TCoJwW̬h-T_ >#^:G+T#MY*h՝yIGwAu]2SbwMB{]mWal{㔍@n^ nƺZgjNj#ڋ=)Tr NabtӉd/,Xf"]`::MR &bdKQq]].qXvJY;\NkE6r}~/1 2/GӛA ţ`o nHLQ[89"8o،oS|-{9^]1Il 1$^W簃94|z dz00yFMSj?$;k 5FXѻ uwC"m- %zmA)Zil@e6-Q '0ɿmpG`9Wwv@"JU]ni/uW+`{B ߧ/m8&q c$ӓ ¬R'œ{[X jN8v,=SV=UI׎VLx)L?u^1v^[;G2ά M1xK}kjdW Od3&Tt>V̺$w0LwG{s* gT"" zRcjT1*bv$i*NstC}Z)0Ƥ>^p{O-?5_. DR6@SwϧCKkS;o}6 $%Q$JZIT>cȌ\lD7cIIwۚP>*][)7!u&5@#Ԃim}j G9]A)b vڛG!wup4|IgSR羬pm_!#/ٴbGɥf1e/ f IM+#cU?cIĘwJT MOX]C5|@.W[Uw,&3~iz1KIK-UfM;uNZi*s+[}XX*TR=e(F.gH$#C~tЩ \,_D^  ED4̩Mڦ+AK[AY ϛ` 91:'1|J]+@iZ\2"bVSqh9-'i͒S٨(EI-Cލ۞aGBj5>ry2ޫ:|^u_e lXHʕ(+ݏh v]^g6 g:PjbeuU2//讠!Vb[qSͤ 24|̴$wZ? 
,o Lv//U \endstream endobj 97 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10  ]ZUm?eB:gׁm_Xց$i, x`ES{endstream endobj 99 0 obj << /Filter /FlateDecode /Length 3281 >> stream xZko.GE-|7|/-"ER $%Y-*gpnRiKgΜypz}O苗ݑ:8HNr3.GK}׽m2&tUWJw?mvpٞ}J_䍎f6w]o]LC)RvNỔ-.I%M mTX:ߟw]SnwG-Uw|h(EӇ;W魦}SF8J`>)ڭOC:l`}tJDg iFTIlgJA r)[$%7xRz=N:߽t $b _zLyukSChP%>&#?7[40%Vq fݞ T`@ݞpDkVuMj"ZBjևQE;ݦ สTT>'Ƹ $[b9j{7[g">  [sŬ]GAaY(i`]^8K o/cR\]tӬ4U0wbv3 ; wo\}$%l`?*+;Ӓ QzH{^vS0:<9p{> cC*=t :Ah@`5~~CxnoFi>+Ky>?8@0Ť:K֐i nk 2'Xmv76Z C)ⴢ`|[Dlzz]^ Y6tLI`^N䠼!=<Ns(1Xր,mp8.΄V\em πF9p6"Ω.]8r/EȱNG[JP\ \JL_*%bRpB%o 2ԙR  MDXNo/1Aij|Ô45AM8RE!hf7T}>1`(AIIb8 HpٰE$f Z`7p `H9Ӑ ("3ZQP~o@ބ9y Kol vUIDloÐ1ɳLn.CHʴUylfԉJe4keEbR:.r/+Dz)D6)˱}^@EcϑƸuwK) wRAÄ HDPaSB"]갯:7KpP'!`0d: slSKI Gܦ8kPdy DyBMNjò=R[FTv 2&+9GWEqU#`q %'07cXBפic`Aq`bA/ \aQ K׺{Oqto?4p(&G0MCil!v:3;4Sƿ r.1-YqTx6:s <"@+-(@l@Si (Ӡ-LΆF0b*岀 E|7\<`?Y*. ɉ<ԬUyE .5:WDq$JFN4E17Vp˂^WIt J] BkQ|W4)͚eBSɦ|:c5|Zp^0C#Fj6l{03Xz\jnR(v9bu9EDPӳ 2 -O_jkdwRۜqA3ڧZU>nf-שC$biCSVG8/~[b__b;xTS,TV(M^vS~~}؝UP'ߜ-rj~Pk-νC ǧ%~ڄ|qEDU/ei1=y1r<'=!DTb:YCeUCU^T'SeRf%!C_:|?wyB`R濢 qTkSa1֔WpE*,Lkn]8R_X]|D]tGs#׽ ƋÖqC:@˗ =G et Ky\{yM_GZC}̈T*ko*pftݕSR(˯5"y`ەWW97yS~ݽ=m*!'iE(*R n!wF_5?TMHϮ?BVtpOBi="?sy$4ѿ翝|endstream endobj 100 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3627 >> stream xW TSgھ1zQuzh(GpZRSD@@ @B|ـ@H’aSVikVZ;uƺ97D ϙsrrO}}0a_~˺[gF{.oㇹdk8 ǖA0a4f T+aFZ(tJЙ ̏ t7U ]$JO&-TQAE"`aTT~~~d7'/L[25"4?C95'UnH⦆ >W+J秤 yMQn^ZqArJ޴-Y,Z2ej\D虳fŰ&=l ۆmv`˱,{[Vc`1ػl[m16:&Fcl6 [bAl,5l6!ÌuVaر>H>1|,^?#qDˆ#&8~5w|ԅC׃ QѼ܎@%9v\ 2yN` ud)j+Acȍ]ّK  j9.wsD54R+]knk9q GżrMFA3X.xź{v]bDB%Wg1(F4"`  ($CdL?n}c[WX6 ]0ʛ6\~txgG'@ע7gV`I=EWBa0LFyQڀӀ8wh ͜5zł]R~$j'[{EN)j+ptXfYCru,,3<۸Mto\l5m6}DsQfI4:sCkq'jD"\Ey]rĂK q.eft5@ t!&Uc&W]$QQEQh_!S\U@5dֵwܲ8Z`;1\;V8>{% F#P ͹rw* K=q$@_ ]O\| ~oȀZSR=e%'I14U} kepQBLNgfEQD'o- 9c.~ϲ98gِ"@1(\p^ר6y0 ~saQ@[:Qh/3nouo%Upw S& &͙f)iHc c͍-ĪkLIc)N=wѾ׿qC?6hnmfpU&%G/ <x'#?Z:^,3_N=f'ϓ-b?KfLGC"f2)#;]'pr.BE}U@GXK,bBE 
PK{R;هhǡQutۇ->OufQ'oȩ>ˁ~_DfTG> >P7hgٰE s~?DO}U;/DEVQ1=̜WVw|06q[$K)Z!Hٖ٦2A;<_eJYhV!@[/uZlz3Sՠ ՠ9[PHU'3ΚXa8qV2Ͱr6vZ-De\CRZ6"O v\p2Ivo2@l1fPGإMl.p! 0lՕp PQ')^MɽחPF1F⵫v=8 N M,wԀQPW(SSJ@ JJx[Kj r#p-6Uk*$L șf&2Q5]t+QHK@AHnCq}Io4! H`)&au쀱 ULUBlɨ&gM'ʦjj;GAзP_&VKK0bQ,-R̘e^ {`zmQ$`s@" \*bTz2gÊp po*|1}2Ӝy]:y|PR[52FUoa=cp>Oae?lwl!='ݳ5_e =X6sgdpS ypFBr-dMBf/N ߀[a6nʭ`sgih}7MYYJyr㗭PH4yj%䕅޾G'^@eT#kwA]?b] 9>?!zْɅuib̈́3ߚvLG'~?q'qv%䏦o[3\QԁZ+  RxycytR1 mŁ%c&HPQZ |VnQAp׈~?/]endstream endobj 101 0 obj << /Filter /FlateDecode /Length 3503 >> stream xZn}W[vA$ &kd\Y#9{fz+2 lFO_Nޟ5Aop}w볟t8.o/.0@;LYe=\<3cTCz2> 7got1sьoms:3qpR4vB\JEWb{ qrSCvJ쾿%Rd /Ə?>8\K!-ϣLf|6:Q %9YCъw\=`|_W+xkeUŕXf=~8I9 .VmU2eJN9n?ʚ XOvS``ƢocrR{uX°v.D{VCs~1WbY 2!iqM**~`'suW/)83Sy~dSs#"~UVu"PXu0(r>ܸVxӬJtʍo.bޡҴ.~voqCVecܳ Cq!?!n3 ":gq eORboLژIB_WG(&[lwJ/xz՟\𺪄{Y0)TY#d7uydҕorrǩ؄8;|JF)HFzC&{(<9>zJ.E )S C7cs)/qZt(Pm\i?v"l]1+ZT-) VُvBrm]/I`:=+G,]z/+ ( {2Z~:NͤF[;# q O5h5ߵ&⍅ֻko&ߍ}\I)zִJ+lk\7Y[Sm=cc;DuH Bct[v-~2FyAp[BU T24 `8Y^VUfXc[sj<m |-] O; ٻ6^Q_oSkk}k> ա%/N˅CYHzrMZ#N$ kMe鈥?qף [QCx:E(}Fe-39?}@-{yD%d2`_]4>)uD}29's@Nތ\M^rZ/y"JSE<VLiGʌ˽p/Nds: ZLzh~`4 N\׫ m~ݤ;ˉyׯh+Eb0)Gَ] Gps4_ IkrJ Z?~{l]&lzz>vR0H&d"݋ 绊7EiQ ȵ)d; J΍-Y7i*"/ҴBd$}uGrHθO's]_Ӓ8xX '=y-8w*'Mc~pe~V%D U)@ 9m,բufC&*i$e>kyhh'\moi<֜3Ҿi}Ik hM|q5KɹXI͙ e<{wH\Np89;BAʻ8ГR G4ǥ)O:x Ĩ_`JfЈ9Ȋ"&"’g>L.G{g`J|pnri rB*O (h[M: d`:Q]԰Z~!kQ o<5/D~Uy1ORQ{ *:'tL "aPXX%i h-k;\HOn|/B0IS260W9=-F:+)4ױ mL߫c!vׅFr /z#{☽a03O@hatj+tA]@NϺg +HxYAH6(y!7^hL;ALeO~TC$1FB1OvYOQ 7[g2`tn2IVd@WX_%vtpABg>C7$p>A:^q!Kɕp ,tG> s"F.`- FL5n5[8_A)Ӟ3h\=w)tD4-2lwT#f4Hx} 8K2T<5Qzٺ VuDP'(q * ^.47(|7K{i1r$&e$#N1Ѡ۩qj =IK A g*.:NJ b1޹HݥP#)1||x#Հ~\i` ȟ$%@6)82hZ+gJLm{3ՖϑK;Ȳ7 9B.T]r,͔|0Bg*sSMS]zKc'ŴJ&UfFc7i4<k4 8br?emNR\oxIJ:Oe .O^>kM;-+QPhקټ/E-ze0ddѲu5OK uW[OJz~qߊb\u/ΓZ_{_ܔ7? 
ՂrZ +=Q~iZq]&K}qOl/JU=7eќ5&+i;0˔7;/kѷHoE޵ /}l$5T[g'ZF^Qt¥[ϥRa}Ӽ誡DXw>T^5/?4+蝀Zq9۵r%Hh9yWU/Oin+Q=:ACmcxz߶$uendstream endobj 102 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 0 0~ 8@0%!tpw'0^Gk"GpXhu[@fcA剋7OH.O!.y%:EHAڙXWU}uȪ?&]mgM*)J747I3 DWSyendstream endobj 103 0 obj << /Filter /FlateDecode /Length 26133 >> stream xˎ.Ir;OT=\ P3!YbWru( ǗZE8!N?.f--\?|?46K9|̜eѷo>~*oOOOh/uV7~ay_Զ6TMWzSa9>5gz{ 3%繇s^׸(^?G/mn)~K˷v}V\Geh?~OZ'|w[-=ۜ~uYTKtg>>%Sjom񩻎8-tSa4=ʺuw{]MKmk5kZگv|}9q ~Y[Z~f9_j@uՕk{qsZ;~~Yj~]Fiyuf{K}ן^|YGַwuҵP[e%k 8/_z׵(v&8lݰGyI{>^J , +,<,cX[s˘oy>,kX୺aM[βLV^vq&*i]/| mk}e]C7KZOK+̐ӛtP1U ǹԛ/qjeuiY#:ӚO#lҿۺz a%H[f2zoO˚qk7zV_?'L~6:,;rR|]şS߽uN˴}Xzm8S׹^ܞoﲔekLLpkq5ð脛N=Ѳ`u˴QR&moY=Ya.a_-Ͱ-R0<%*ar4͂)٬; Na]. ̰& }pdzk.dsi0-k[/43]p.R|Z[Z::׳2O0uiX:.q7nԵpafo7Ng~5ͷ)7lk,'jCa03 g{ZKcTOۉFkmk⢪& Ų\WSqnq]ŅkZ?p7fLZlkZZ#|x_֜PRWvb__) ~͝8ח}2\o?e[MgֈÛ>OL~:\/]OwYzawhlsZ)n5NѶḍ2"rՉokV޴,k b aX=MLv0k,E^Cj uuO{q`^*,YlX#UqqnY&e}]74uja2gt8"kHzk/VgzXljC:-Vˢu˞eoz 5eU8tKńْM ·^ٚ6gÏ|B_Í1W{$ج gasDJWа9I^ %bտ^ #v$bU GފK8JE^ў4U.yRZCײƥ-5ƺ>ktբfc,qKY]S]_Z#JMA>2@]C8q.xafrSyY‰b,x,MJ kr Rr߻,|Hl]BV+݉ Ymɝ{ښnk>x՗`dsfY?@]#wxMzeMq3XŔ?u:V k28ui'Y1P[kki݃ukfJC5Oڜ_‡Ԗeͬx|%k%%m9d> I J,l2oREbYn Ix3S_kP2mpele C_Ou2lt(ʱ8eŭ'.\Lǭgn{0~3P6쵒͒,Nnfw,Ŝ YfLǣ)n~fGdVe\-˰1л."k9n}`Mvg)~z6Ngn}V\\s)aG;f;"52c90[xaoCac߲fbi4lvnf ņjg|q54[F8%xFNekhXC?8њ`` rEA!0zY sG}yk*;kck&?ZWfcn 水|;1*p` k.[|m#Nl *-Η<[~8ongo9as9gvwLpְyouD<8Ϗؐm!Qq:3=f WG78p}̖8̚.ц?|yKkt`qcFʚO ؆l-؋6S:yף [0_Zk+>.(>˚\_4n_oʴ4N\+-nhKIG ~=E  sp]4+l8bw=_M_ou4^( {ae:5giڲ!,e:'GRCi]8~#@̿jJjn~Os˚t?pHlˇg]mR7<9r?_)A=0Xb[覺ksarWX:0HX0@ aO˚Ve`̥O8n-O,±&ׄ-lwd:{./>b$h5^z5ٕ?LE&e!b0:mLOb|Tt6SAF`*Ϙ Y6SeS*XE%2 YUd&?*-*nF6V-*ǔO8P{cBjv`"BU([X"Xdec"Ba*XwV-*#辋X6Pbxs*A\E&smW$*Ź #=<"!1dd4ȊYdE}A`EU7*8Ȋm\12N8,+;pEf9m\QpŶ=zl^uZVlŶPyd 9nb[V`h8 ⰐFBlE&lE;' h&f9Ȁ+@@F( z>W *VeMb)B+&)WLn.7_13׏+&7Û8,+Fx6h\a6]q BT!v r )iyMW4Ƽ7]qt` tEp "kmanb[Wlx|EI+@~W! 
epmS8m!^Q+ M+hxE}SzX '^Ϝ5'Ec 8+r5<|H_k_^`Nj_WKy}2 ƃ7`RXX$Wlu') dGWy+W H ^D'~U+`̺<-q"^Vh$ăd\,IpEtEta!]b7)b ; dhH[dW܌nC "2WL Ȁ+M/ ,)~+EFpiq{FLኬaR4 (ϭ H^E땑taq"D I7k@ֆ銖h+nqWl @" +@H]} b0r7G+xX/) qB+&)+fv|W6ϙx@Wt1+% ۉ`H۷W4+Z+^Y{ X0s~ `1`,I_Q2+_ǃ4pW4BEX݀ Xqnb[XTnK plB|E܀,nf, Xp %?Nb~XTF7`A,b2^EJ̶aA ,na",rOaQ:F,EIXKH0՗탱PZtCaeQzޔa躅,(eq?, ,2sW(zY 6dљ/ؐŶE>d †,ېG,+OȢ2!O(oƢs =0 !BB y,Ge^ٔEg,ݔ{eQf ؔE,zPFE` YT݌,.]EL1| SAY WE_"ܔEݔd&9( ,bT)NvS MY N,; ew (e1}MeMQ ZDYtAY mJBצ,&YAf*HYr`; HVd}Xh! [&?`, Ă.X0xrAXtS ¢ۄ-% Tt IōXt^F,Xp0Z6c!$ $ Ȣ$Y7b,? l=3b|XtXGXK Ƣ~mb'E!b^|6bec=~X3߈MXoBPIX7 Ă "(z d! YܜHN‰MY Y ʢ ʢ@,D$mBC4( e1)$1eqٔ,WG27emʂ% e5EY`sM2C,XD!()^@A q$7h3) zzZ\z2-A$Aj,D48+8$685Yr>W7X ÂQMY[8(U+(vѵ ʢu( E!DY 9,"@")rrl1K{`*ޜE.&8mg+ bQYdob:9qpТ dТ2)A eZ\/ z@X@V6iiFARdNKt,^rl\G>p Z\5QאE=pz1b'A-5$0$0"ne~-8nA1K ͧ.aHc0 e61-8t1T Ό&824]h0$tPNxs7CcP8ibGarsE*8}q >wPǶ,>f SlnCUxA}l[>Eԇ 6-> #£A}$bH!Xԇj6d㰐"Dh!qX~` ab61 > }}b1鿹rU> *>ǟ> ce:P'ߎuQ S 9,DC2- im2ibzXfғ2D̷!Ȑ^$CF dPw!wȆȐ56 2d\ 2'zb!L(\j9K Xb!Tp,dY&.ʉ i`{ߑŘ:<4 g>Bœd> :ueȍ.5 /QTG,|lbBǚTGH[qX|;(r1g#%@1"#%p`< &ŀ`<溁ѕ ģWa 1B<6xz"<אlc ֓ ΢Dx܌9lPxLa+xLxD@c8qx p](&x<USౡ 9SGdbwp!$OG$QiQiM 㰐R #S& SAH- \&? G(!8RpY cp< Af8 n8Lo2nXhSm~m9p<M/ W3 eTpÀhHQUTm&i'8hd8-Np4+o0a 5LFJQF àtaГ2"8K$il$8B4K%+hv@`j*npebӢ[a dnq-._l[1dsI sYO%q |8'c^G1!DP{p #w=@ie"c XĹBTP 8O0q$ Eqv{%8ȣA֎J/q]8lVL&-]sAq 9i6q8 ù a8A[3؊z4%Zԙ- G.lR䨑M;lݎF6I*.p_$ ϫp;)9&p-($$n$ Dep1Y!?cb8H 1$1pK ##AF 1*tpLϹݧ0Ϳ A-g2hq0o1hGX x.@sf\`z  8_̹.i̹sE9kSo.c$rB'`Ap9j@r"0!.'"! 
q9U\\ 7C?0 |AI.Y!#AOE:"_B)R#R'I(HNAAI:bz0@TL:r7S|#@'i7STONj"VJn Sw+4x:9:Ցfug,\FN:]5 ɇ3XY1E8tSbuztS~L )bl7saٰU,sI+`:Te8)BX'w:JW#pu$Q )`JYv$(v;xQ[ع!#qMt5@%pEN(m3n;^AN?z7"Snb=l\5:ٹFف@!;I@!"%NoK(bvt&q`vj2;ԩK'iC0!ӛ=H ic˴BADJ-AjB7h7}7L)"f纤D"fSGj> f'u,S` !;TNM"DCbJ'4&v !ӛ;,ז z =n AH؆ vLЈ `碖@:ʹ}:)Q lZh<Q|,bDhq@Zp?tٵ?sxk:^˷GHg?oӚXZ˷ء5C!;[ õ x߬C]^O5(ּ|Z5~8Ywe9&e?_;7 U\}rkg\+uv`ݙ_ym:>k7cfoƽ^^S^܋EpGܣL @Ep ֿy4`_7*Ͼ:6"ȿ]9:?uׯ~c kcoy9ڗ~(Q~ìuJ3ms^#>>*KpBpp9Gx^9Npo>ػ|z`?~g "O;4"; O6-yEsn|S~,?5ݤuvk|7q!Nsϸu o\8o!Q?|v%}c>n 1%ZY040bz6εXܔ]~67C^).=v__}n<ա~'G1IhM?HL-*v&{X)'ҷo>~r_皲d[vFlAGRw , Kh86+;>ES)Zn8?S.8wZzW}BلnLs)9wAKӄOKA",OeCJdy| ]OE)X<>s=>uvqG(^{DSlw}MA@|򚖹k77սkw8"m_5Q8gFӟ# ױ~iݝ~q3]{w\sQֲ_w?^3Y}?eqͯT?}ӏ}QlHh/|(hRNԿIWݞσЦl9-AFefyhnqnPiEfQT_X2ailQft*`>,FW*^?gb0e(0 z@Fv0Y.`X@(!0r Ve^.`G]V p [# #7I8i+b2`d?-hW-?[BAI3njWMF3ftbf'!&ԘO8QcڳaĮ g$i ?,gWDB}۩oS#UZ K0 U Z5Ƿ#oll7LNQC?llnB՗.Q'@NѨ@$YX[w .-!Nf~{-?,2H=VhGOei1xJXa`0pr)xPYy2gh`@-}s M` htGI/Q| d9E Q|)N?[Dʐ$9Q|;^pb@iֹ&k褸,4y˒87r:k#gQcWNC+$(6bF*ZDQ¤l%q}Gut,bi\L~Ҕ ݄=ieƱNg@>ϐ{#o4~.L߼T2:(,pX:~ݧ ?4D1 QSC͉%/~%9W7= b ~H"X@\Bğ ~IA|/ Z3Q6Fu?JBL%[=Cė ~.g(6Uᛁ~F3F :A٫s9\Cj ~G?~:\998 7Hf72g%I9\}qQhA`O%o}n.h8O\]S/bڝ7_^R2ϐ˻N,,m#KD͑uڋ}X?}Ry%q$85vYPN<7;n0~p, _]ܛIߦ M=@ .}9ݗjd]^=%2ͻLDyl꼽,p>Se952͗|t<5ϑ3$dቝ!non":n,Sdz$GCvt>U9ϐcKUt0H?-zw};h!u!7k[Ffʏc6.3L:3dց MFf wzl]V}"︽yY&3;p>C _ò/6y ,u*_6w"$Yr@ۉ<6}6 Ǻ7O![K2 Nˉy> fU'y7$o}z3z_L9Ao i?n4j @y{stx~gH{>3pYgy,ܾ@Buܾ@wy{XH;oo 㼽9(t,Ht/hƓO޾@/㼽~ۯy|ۛq7@ @Ee}A῿̤;Aھlh{+_$݋#l,UpOCa`%JԾ"LԾK ~C -ӴP>͑%NÔi_'iojt<<^R$}~&H{[ ;i_PKMqteR $/}u= [|#ggz  B4+HIhoWs/j^\-%+}!Ҿ AIGg/0TwҾSOI{𲜴/HwؾX/ 4~ͭTioScNNPGj,Q{)騽f@mr%W,Glo826iG3|z\PNԓrފ]Vzʏڐo$[|.U#jg/)j0L}*n?f Qi*CB k%L|+'?J&DzǶќ|j1 _sVwN霦$0>a|Nv5(x aN4mjI{k>?#*>j){5^9}\ rTrL*hGEJ},Le}nНOYi9s!͞:[Lȏ%G-Hq.쀀-.7lx>!lcuJsC!s"؎JK$ncNїc]'L ;SeʵgnZvpt,Kt|k>=_)YjAzZ!~nZ.FԸtj9ޣ)7ϼ~gyX sPPofQܓ]ls%Yks6ćCOA]? zCpقuڎ(*bo Խ·l}Qt+7,Pco-Zr`]lS. 
Ožq[)r->߭m3rjď7Љ*|wM=鹃Q?0w>*Hl-8,(`I3v1Z(YE<bZg+K_4~8 +$1ES!Cjzu 18)RxG;Tg(uМP"">͂xYGP 5Dh\cK鐨 yu`ԙ5< h2aa`|) ~: ,h ׶x 9T+E&]S"\g{EҗW툗H]+gx0HQ r#ldGuM2( |&ѵ*D?0a) `P/1? @fZ!-[3e" ňSY k[C 3Xd-Wd~?R*e,yt^_Zg"\1dAY &_#SHݲ W5fR_Ui&n%UN}/.ϸ*G<јGF?r$N:sh$,چQs@8@yʰ78GRD=0F&ٙ˷8-IlV CI؛%VRDZ ?/H' ! Dl΄ @$a{ZzPXї7kY@tVM2GC8!L)*eq"rwZHh~#rG{]1&$pl%BIn>@v~d~6PP &d&C:f'ZRU&E6؃*CAjALsy@UPg"4$月 mxgR~!xx}"Kt*Z`;a3) kzR m |fٸ9#?q0v3 l#Uz4_`A" AvJ"#m1N:vU֛ !yI"H%xqRam؊<}QC,K8EsPl^|>PR`d$3Di [qX[}%f Ӹ"M-q3#GMXDϓI&[l8\[=36&bIt;|tF0i" XTT -Ztvx- MyOA7ɲ!JYD-28m%DyqC} ~MXJ]t\V ̇UM#=*'RqBj]EJ !,b D%)rHH(8- ,Z0C(͕|pxq bl_HG lB7xSYrZ&Si+ x%j*Z'2 *t B}B-MUސ)(@B?;-[YaSCLM9QV5EF,jq3Iy]UԁUJ[QMvXXqvUikQir ?pn`E6hwŊ.(u{CHuإU`,:BVg}̩@So0vQئ/Ea%E%Mov]Uifon*]-]UPoQ(.IgRaTM|X8cfd 떣7_6G@?gJuY*qOS~:{w]b Nj^*grV>JCVvM +%VEu<V|kab]-Yh%r~+?DWx( ,,<((Fno% Kc;LJ43e!VVzM*Mg,64__SxpQ0Alr-O7Il5z~,^g¾2+[OCocCCCCC,nЮЮЮxЮЮЮЮЮЮ8O/>]ơSP|Q}/pg9̡b|>9X|}Z|LK1IYxJ;^97^ÚR;iO/a Z0|l{&k<^7Z5 * Mr$+hV{ZH8Jh;,SKgSq!\뢽FLۢi#H< m*ZöAa.M;XRPa¼.Zp? *o8,ThS#H 0Dx6 ~؁Ccp~  ቧ5~rO_П(|w/<,׌џ¾A ԻӃ_ "[}XzO0l|*<+>D`JvaTU^hqZ|77iN{ *F3VJm sZa>- /6C<@{q@(oI}ZaP@X80+R{ʜ֪GG5\CEv>G?}hH/mykk0FRrmJ( IHEg}ʎktFcD#J$NYnT(shGkn8#Bw#ꃠG1:,@?'Am q //R3[ \m[gU^00LTH;-lC 419,4>Ba\DL?eݡPxРIJxrr<숵bC@@%,`jAP)3ư)فAOXmGJY;5\j`bZFaacVO ύQ(ݵ]Up_ޏt努c =4;4?, "?",@,)bL& GJ 3VN"#H~ʈ\$HTI`Pu_ tҩ>G3CXd;SE,u旰>RPo{ v@U5Jvm oRf|ikqfjIʈ1W=G|jD TT!8w>)~iaVJABmPni!gaaiKIԦ5M"=,L I߮,^$GT0ȃ{;< ij'do2ޏd/rS@Ck:3|GHcG 1)An^S[-"|X"nUF]$e!:ijLj$#.zR4"iqT &!wJ,6%BsH.<Q/L{CU o6_L}v[WwA$*s 'Wr]rpSk%̡+WԘduü^" (&L_x!)&PwË-Q/E33? i] Rt^AoYbBCe(mgXҳ {XқE/"X%Úu3G5N7w|Jt! 
+ {X3P9Z).]U -C=/*ńgGkYH*e)/`Z@yhqXE AaeaKJWʩd)[ժ:d[+*j죰q_.q"2E +,= Uݡ6jv}fuvQΒL^k<̝,zc=Yz*qJGK5xj~!ű,M*Q* YXu-U ٥~5!u}!Ł+?8QgQʟSJդ(/AqUp{൭ySCAș]k[ gȨ-T[u].}KUQi5ġu6j{T^׷ v8{Y}S|fL>*兀V_rK:*.d|Di( IU*m-%?Z F22vPM#b9*%|ElQv7zQ.ߟrϣP` 5[,A r`m E(P慌ºo>C@H6I9T!p= u$  [eb[Da~}sG6,]*EK,F?Ptk"Ip:P/xkν_\/P}#" Df$_3V"N<ɗr1ܵ1I P!}AAMbp9dI!Zr9;~?PlpHp憋$$q.(-((:q9`h,Xs*e}^E(WYGeҢ0 bG9G#B.f6 r*YD^rqϸ+ ]-ZȬ\<ŬpZ"_.x .RZ X ~!rqEFQh`]tI).) QթDK*W/B]RU. Qe`J%qި:&6K E z6`fT FEPHn̘T1H?|sd% <G<\J1QVxLQB,B퀍`c15v )#,c< ^H2 *`4^'BEYTJnyC b9H: ުJq_)ſhrr0D[\/€־hQ_Ш]kN# ,eJXУCkϊ^GUET^UgD뗨+ G7hMoƣt@1xJDR7(Aڴ䌮N5j3lR3MD`.9wW"uY+% Y%gN]rb]r0̠&9XWMrJUK|%\r.&6s%*)) PtnDPcRuYMkgݹF|gϨ *UR|G聉EztCԱV0*ur>E|aDY}ĈTy8A{~mGEP#T骕9G$'`32gh3DF1tdɹ69w'69w yvʹUtr)Crf:LD iuA19Dp\j)PecN9w1FIϡ(jodmC#k9|3k(.RTZ"E~T IZyR-=NlR%'.DZ'je(krԀQ>((QHpaH!M5ʹ/@ ƺ}rX73%>9`A tK>$u"hjAu.]v:8)<.B,( BP(D2KKie!lo@*%LRRJ5II}r"OΖRa}rT$QN]$voħ9Un8/BC猸J57蛃M0s75Q,=q7g֐bߜh2fdv.w5DIR`d'mƍHAdClV.IR p&ɮIXlKUePBSxC7pd℗n%du钔9=3$0I]sA9H6aל;IN]snQ*5CO 5gr Q`)sxCN*vTɌo`S?<M,IZV,JbhD:/B" C#,do8gQ6&sN9}89bo:g`x:gW@tY3O{e39LDF眩V?j^EII;"\]ruuRRj4 ]7HCl^,mߢ-|ZhKTCQ0DVk&tXFp8:s7&{8WuQsEl7͹~X}snu93NW}s$^@uil%ޛAoCpNRLpx+ gta$ONP.m 9@؈sfKFf5F}shHY挦4Bչzꛃt:S}sP!#t+NU_^vbokjgYm.!T CBuF!sƎ8$uHK&cNͦ9H+iNWKh1iΐFt4͹EFӜ;4g9;;APRu'Ci).4ρoI>w鳆}iBJ$w_YNcYB9`9 c:[t,\1[t)LǢLtf_͙z7g2.9Kӹr:%r:*Km"t!or9t fr:J$wlKȅҼl@NaJqNfR9dp CA8>8 ͊~K9vι!ΙuTۃu.}?)AuLu,#_"ڒ8Bmr:h - 6t@=?a:4|)bqLiI6!c$?21 w>Lw;1̾Ar׃Af4Q:!S; r\Z5gZړ/,,:G'?M˻ɻ.;#篾~e엟ox&ѹk]pH/~O?wLmglo?|لxǿ/`fmr_^?uBm >#?r |<̽^o0]4?e}Dvi?}^6o+_Jlx`/ϸ2 #>&}zؘCڃ_>4xht']l2hxk7r_rx}_yMqC> stream x]O10 . 2D! 
}I.|D/J`H_#490Lu@vw=|*ՔC -A#EN?p6MWP_.Xyp8I.~r 6/ES{endstream endobj 105 0 obj << /Filter /FlateDecode /Length 2914 >> stream xW peVS 8JPJH$t=鿏B !r "BIHB&3C `qHla9X#[It-H$@vr![r{àe55o޼Aӏ8(LAt E91O1%(` O'r:1~Pe0i9p,3#H03ס[A9Q 1)q`ᢲ3@8]P)+g:9rYTI$d qD2O`DN dp ֭3hDT N Fx&=&"b^f^&w)G#*% ;6Edo1!,Mpfy* {<W̱pg3|wzfJ;3W- @ Kaz@(̈ LŻ9=2qzm~ oۘ?hC]ߘꬼU.($|əg_l9xwZ_z^ύ˳/uv~=W\\cn|]{}whbB•g\8s}ݹ=k׎WOx>;{ɓ'{~vݾ9t֭WosS۷oo0<U/?VYk{r5];0_?"99aɒJg>[Ĺ}8!}Tԅ;fݷ, IL|jx!5kڲiX_]iYT__eo/oHMKro:&<OLHҘE!֭[ǎp«vy+ۙ藔Z5daUקO'6jYIn;ooX}}dW_=ߝ^ҍG](Bڿolޑٹ2>>~O┖]޾}Zr`ݻ ޥMMM{ʻ䣏&MQC3uC]ɥ7='x.e'nϒ>cEqFv^cց'==yקN yg WgO͇?\'q>kF}lذt\&^{7WTm+c)mOs[_]gmnzL݂-6U//]9>8T[zcʺa~ȑ#jr#d#Y._w|7M;V,Ye4on񝾇Gߞ7ի@TJr( Nơl!%vw5UyfM )XdN-RMJ`^YtM4JE,5 # U 0Hui*1D`OR`9]#+,HYde 6f[OY$p':!O/"NRu2 <@E-hwQUL@XU6"DUOeN \`KO¢"Y45[d"eN&oQYbK.ʪL 9Ѐ95r39yl! DVU$KMr&ky1M2lE&Yf Ly]lXz'ԅ$T. ^dNcdB«䂅 ϥ\aYD6PD)8Xj=yfiDUzY_%ˊU@\!GPESy f4SKl9 , GlR؀ 3jHUEPy&#R"*׼T,!ȸO5: Y!DRq0[iFwuCJzt̺&Y NZQʁ$HV9TFjΗiDk9A<+Fazp L˼Hx½; X[@SSXG꠵Lbڤ(/l[V%,*$3!K]"/bd`SUulJҡQU^>a4&T|m$0X孅&͂v )I! p `Nil-צZVY>fwň/tZl3@0 - ~Z"bqfMK\EIC@tȲ8i*0e^& PQMv䌜h֫0ڋw }4FMڟ,#YwX1LkI6z#[窽ham1ugSaUX+_$xL2d&6Ich bM<1 iYB1&:tU6dLnI *猰pgd{ϓQ,ҹ1W3)mi塵Hޛ vOU@1bL1+ NQ6vLA%m3V{N#05ړɊyOw.]JwB5J(`썒V &)t)gҡDn~l:fk+,w,LYQfYL2`e1;feL8ZHK%z^cD8QP-̙y?endstream endobj 106 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5293 >> stream xuO7܍+AE%(.5j\!QUt}79[=мr>f];:gXћљQl6c>nlmmE;ݰ/w_OG矃XPv [s^+ '4}jQY jXl5+zԌB)cT_0$ZB &4_=oU/] Οd,":82.t!:)6__50̝*` W-FD {FB/Reu ʂ۩&,pƪb5?uZǩ duj?;0k$zH}'7)hO?-``)4k© Y Ѵ|d *^20`+U @ h9q%9/!4 CĀv`j~&EXT:G\Q{@;8r @]VZ ֠bX&& BIIz}9wDmI;1yUz`4D_o@4^7zA dꀎyVP5>"~pfFS5/Z L)&bMB0\PWKgOBS}DNbt7#z2ER+P>sQk. 
bP D}@WիW49;.=@P')Ͱ 42UVrTߑEb&GG P?*4'E|MEIbAD`c# ?conQ.]B@ "(Z[nS 7 w}F@ŋ]g3 >0T~ mpH( *; D *(Z fO?|DXGӸ[ ]"GeEP2% USR>cEKi.Ǫo1w'@B4 ;}FPǵ_\WX&ByS I\6>݂qy :"Gy"%pGsYxMSF P~p)QsRbSO]xwARiz-&*@D3p!$htB:sCgjYۿB@ox4CnȀJ}K|ЍÔj6Ό(Vdžӟ+w#:6( >%Q`kalO't 0J3U}`kIڊN"\!~;R?ՀMCO6@K$+oVaMhƍ6OP!$&Tx_/~P /P& IJ(5K{nV0/O*mcd"}\ia !ˀ MwqV[uR||`7c`7 nDc`Dc`S ko$׭P f"Ł{jc7B_Oُ_KGPPP}hD {l|oY7^{ ρMТgԍN 4Kb!v#^y\o5wI =fgI V1{Ic9uT 'P> 6zv.aFcZh}"e_ Pbk8 t:"TH.pjx(/Rm=Àzra^}BqyT28T~I1QQNO1D+9Fg[Uǵ` nD0@Qz ~RzqE߃?xǪJ!CG 'y߽3:;z.ⲿYG0PQ}T\U5.D?ꞿl&tDa ʽ+H!@a#tL6wє^nI NV)'_؄4G"!D$6~i-?] zy7xPq 2hz?+k!K1\Y`8 (ztU2]mAQe`4E0h@#-kFW^` +z4 ZUmZD9Jo* Q`,dY *HtEց Yi2GnKPPF(ëj&: ##z?wn D+v VWNP'I_'xHGӑ_9[#v'xvm'}<m.mwCQC?4>u#P-fST8z= &,|S(_ ؕk]wE`c#kFjxwV;zFg@3R>LL?~G*l ufD>Vׯ EDp1wT}aH}:/,#>`轇@TW=pgO 0l`9,xPwb6|k׬O?m(J ?~@F}0gY;{ @珵 ?<a|0qH}͟ᣯ{KڿK^$Sܑw,1|v.>۬&ny=o1h#AS}t}g\.?;~`%O<ңVwɣ?Xw._u{\u?~< ;qХۯ?RǦ}0F x7K{k9G[qɣ3G{oRU@s } ,N↏P ԩSBzǠi)  k;ac̩VW_r'?@KhGo_6=`_ӵf7RW>Z' >:d9}4+7>ń ܏qתY ދW鑁qͅ_K4|O8ۿN\^ڿGc^LR%˙3g"#ݩt@U\ % i÷jS3;N}lWԓDDG 9{Ӕ]L@b^6ܢ^G@3V}=5s(k®K܁7/W\siM3G"7p  4{oW[ Ο?b[Tz `!nB+~mHF|ť"6P.&u`6%6~k}.\б{v8P(l]uvn͟_ŏKDӋ/D׋YtAˉ55U_7OmV m;l|D\K.aF ̀:fnrWtZUvG'ΑM/_=d* ]i\FDԁ) 0EPlx$DWwLu5Tz*Ea~E^DDנ@{>nM\ > stream xY{UřWrCB!pdssO*Ke`23 3sgAPJb`@V\o#%A:̖{ w?Eu}>SuluHb-KG"Py.P"Jס& l #Fk"NLWGu."bcNipjDہ%8ʸg;VG[cS Vu 1b6PZc3CV̈́XpDH K ,\fSTe%_}?z7CmE&~X%Riv4mh]T`M5rRcBE`  ii%sTX6r ݆=T I;61ģQI 1$7#UĐ$FחT7꣈TUcuMZne>%F|hkYa ?.x1b-~zOOY^z__}?o.(|]^oydOڲy9_޽Çػws=v줏Ockʭ=RUQo|oYo֬v^uRgxx_^s̙5WUVV?ǎ}b޽my}s=W/=vʶn7hkٟeK&m5nĉKJKK>hd~w#o= y晙>)ڔ>7dYl;ux|/ud[of~kŊSF]w~}բ͏:e۶! c[ME&n3Qɺ{_Xd+WZ/޶paMnz~}nz֏ Hyc%D|nϿ7kN'w_Pu ܷtzz^{Q8㧋};w^Wш=o,}-w*>yÆ +_|񉑿[i/]N8aOv-o]^|iӖ57w~yժ糺#VXќ>-"}FݾWQ?ғL]S_~ۼc_ݱ6 ;N?~8pȳN%,: :KZPGOzn+[Խ{w^bح߾zܱu^Kӣ%]ZwPy}rK'N+?q.yny~ֳ3 ͛gE?rK]syTтH{$\lTe%Qq7JIklǰ*#1܋95s#SJ#BZ$%03%sSZIH y%!-IH2"q//r\Q3 fAzX &< ĔSr@P17\Zn@ 5AyiEYy5( jݎ@ (bݯi״kZ5-ӂ $vccg$ RY=F>0=cʚZإYؔZIH 6¦/ iVZ]_}HSBl! 
L쐖0j"'3,!ZBOf٘Wl8.$YNN ^,N=q1!G[XH}Yb(blSGXɊX,XR8ajM.pHpʭРTڂZSbgLŰ[3!.Rp2( T#J6kNF-Bb&~6v [r W0j 2U =}${ &a|Cך*悂2JR!lU3J\n3BhHz!_@WPb 6% |8BUY1LEc@ Z9RB$JRr`ZR~WDٔW0ؠu89 |+W~Pbyd#UPdXgI4TVp;`ꩌjL@l\cI1t}!4D.GE /= ƅ_2 b΀nQ)$`h1\ %U](d1(] .,f1mbiC1Ԟ2 [WrC P̧lV,!XsJCIPo4PcحDAPhh"?wn;8.8L( !mʧ }q1SJnExDr+\`?fˈ̮/&$<> stream x]O10 P ]ZUm?eB:gׁm_Xց$i, x`CSxendstream endobj 109 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 4764 >> stream xEŻ71泈""QxW  jB4Wfͬ9uzvcXzggί.l믥Zk1M:O<ߚqی_ ~z1-O30`}2 *ATHyJPmҭ?4e$q mKPhX7~8W'h]^e@ 72o*c@+<0tLy :h'VIV@8=3(=1 ` uvh7a 4?]8Ge"ңdbPS2 `74.W4RjO :x8&L rNm( A7L úJP|֭[4D9-X++6@[ "e oܸ{[' P8]ö6s 7B ^$A ! ic `6@y8HD^~] ͨu`Nh*S )p5gDV2(H}S_m%(J%2Zwo hs[" Nn.XW6tQ>@2%`_D#d[W (qr\;y+u?b?Av^(€*bɡO+DyȀv(֜ h .{UdP=YFE'~L@c`s.[qTU_ ￟` k;ؚeK 0;w! (?+AMu 2{"k *+o6^"a M̥I n2IPE]O2_  tl%N;eE>/Bd0 [ևIBB *S ]/_[һǠ,}H4H<4||V҃=- CEi ;wţ@6W%hk|!:&#wޡPtt vS3O"O^0~.#&cPXr!l%^݊Q2"׷z{" b{UYbq~A Cpo+iy@&D* U~V?o+8 M]{ak+GUb0Aa$@J&0 I1^u\7" [v#Gf]Qĸq\_{5QX %(AM8Hpr$0 ` ? :#e8 AbTzneST2wZm1 CS&YfE tc W=lji7+yѺ@PFzeufԏ׼>5 }B0dp jGGI*vP1%0QNaɫ?wm/Z#CSG$(3MB/#Yh λyzE\2Ec+0%AڍZDB뿫 VH2*b?fڙ)8'W/A@@lGQ#1/HfDg˗ōs;Bid+togn?$=*%ɿ Ωm?C.EfP0S Ou?ruB#eЭBcj~;9" ttwB Y6n TT?B$ߺj" *ZidM̀Jn:`/6 &>(6-Lk>^v\ L -ꨟu yU{Z8%hEmXԫ?=7"ѓ0{ G= ˕ DNGa՗ΤbGY;O 𧩰:#[[SJA[I2G bH>PeیΒI+rkY7I 0c'] Oe\,~rChm Ż=vnjw`uS%d݅t·%jI Plq(wxA<*jH6 A3~q֍wnV}!=bꎧEDN1,.o?΍"$R_3#0i028((9vİڣ?АBD>~:tUo=B4&R_`(A@"۰o)R_dyJGp6*{f(ȃI?`ƚuG䭊¿;hξQh$GdU$d`Juʠ 8ap# |%Όa:#6TYz + k~a ܰȀ.TJ0  1F\%aXdچmJ&tcyԽ7QIDY> j*H^U!)ja#tCw1|6 h,oyLLрvҫ?mь.p੧aP)[). 
2 q{cߪ?n'6;3u'.ip p q 5͑v47>g_3΂'x@(k7ۤ8!ϴ$A^}ZQ6 'vG+H}#Ǣr $S|h 1tir1}4"n/Tبo[T8d< ?1g4t!v.pg6G5 CB{hvA*z;cxgX X(+S܎D]:I&%oSO,>6݃}Ey>endstream endobj 110 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5049 >> stream xŜ=E_1`Yʲ, aHH,@l{=uw`T3S]u{zjZ)ezmȴ>x`zǴYkNO#lθpQ{tm:x8}ܭ+Z]}ڎmfO:M.}wvP&1ocZSj$]R9&k[>G&c_mn I٦>eژyȠS9EƁ 6LSfk݋b՟:S""g@4i +2f!"[~ cN?2XaS k P!]SPu?u- ̋3e`9yI5TSKH"+)f(t>KwAu8:ր*SPԺ2+Ӯ}O&Ym ՘ +R՗3AW`\t3H?vրn ]1(2?2Ov(эYȮpw@?Qqw%WʼXT._5ߕ_w[FuAtk{*=Htg6@^}`MU`jHA]p@~Wt8up&pN.h4bs+aէG@@@N"] r(f#$`@ayEt#PM+E۱w8JA7$dM+2 a&+vD8g*@c2?GD*XQ<a"8AmV_&.Z^&p Z w/F/tQڕ0f]|%k@Ġ᩵ܕ) 1 D#o.Gv(ԅ@ D ~DO_(xxBNz k`|8@ߧGZFKA (>Q ?(.ǜ!X44{ˆ QݑA$kCtw,L'-Y3e@a-DJ(yg[l" -lD *`spETjoF}|J/sݻM;"|)h{(#g6?QopY( E) BQ=JT?o25 "8$0}B]ݻGgݺZ_@p'+eLj@A4Z_$jْFqrh(w8Qr;,4|GO^RQ_Zu[i:Bȥ ] fUPmE_8/U(DN}Jh&~4:pnyϭQZ jX7p-VJ"*4EgOAV>3zPᒘ./YՀ L9)zIA~hZX)D[U1ި؏[]'#q&)Tw'|&|"y*f)*G;2>q޺&Z6u @J܏R Gd>hQ.Ha@͕:1A?D G@#'z ʲMG`ԠS+`Rԩ|AE[?b5eR >e۠ӉH0@`ލ`ؕ7]82Ha5dPܹcuG+ (rvWhZ{dg!aw;9&"!,MDIoOGV[otN>9)#aZ @1XWXA7o:SD ֥#:OA&ȸ!c޸q6 f!P0ĕ  .+u~}5cEUaj3Cb畠^v _E JG.!d`Y*huYP2>W^3ZP<@]%`Q,ˑQ @.,%$D@P." I{W\[A3ґv@Uc]y+\|9:B2X/X"4!ҥK(coOnr&hqsmu&8#IkeE%A2mL _}PM)^,v+he} 2M1)2d$Š@%zER7tDd2gV3Cp 2&W \?zhvȤ'z饗]cnw. fm40ui t(%mz[҈AT4|`n_! gݠM086B~e^q / `VR]D[ ZT8?'CAWW<*JO? RR=z1Q'`[Dw1>q>;> i} @Eq!OP)+.&J"];gs %NGJ 0~RLPRQkU~( kS* ];yRuHu_ `f맞zj%)}긜.0X7'Yv z @O[NRZ \c)$,H_ f'x'ŌmgVe@wڀvUF&(8Ae8@7jdEwpɻ%`t+"2 l ՉȽk JgNXH">e^tG1! HY+wyeb r'b|:\xj!!py{&HzHnԁqHVXJ9… G+UE ٓ"L4|f=/:T-_EE}: ;IA=ꫯ wٳLU $Utӕ["4].$1hQwc.;;!:6D@ilE/p^0Bwa !Z=)ep|| y`dĩ_HEHP ]yQCZ\3h5H4nX$ m Dwe]=R k7,.N@_ݙRГ| ~:^P!S;6T? J*N!! 
HqF>+H1 p(h]v_GqU4m?vZza @~{|;OA7o\tI鑁IDRAs|M;]ݜpT?€Mc>{i@էx?@& _tq\nBEwPq"D K"1 }v$=u,VnS)`e* xκ؏t8|w$#(dfD3xwo2t 0T~& B}T)_5y~-pCDwG}jJS͒(sDB5g?u-LwQ#自 5lR}f{G:x_,AYu^2&IણG  0J^S pѢGAu ˸ 6ׁ !eM;]ϲ/P0z2J|@e`X`WdgNc[Vmǧb`*A$]87AOcELu&1=Yȭ_,ALT}4;UlP;"M"nIn1(ึ QF.31Ӆ~.̕ KAv4 ZB"n> Uyh4*w´u1OAb$?ԏ>;,7b@S9 [92@vJ2OfJuJ6D5`1C5ȭ'ƎP? ;P}s`rU̝KDt͇N}P>eP/)mS3:$WC4"]CI9իW;i gqu~;h`'#DgnE 6hI]>pş8>>dk} %qZ#: #: (7߈" +S9R@yZ*>5I]*/_tדg-)"и Gs/žS`cNP. P+Uր >wͼ@沐=2/!0߂,6B~v1Q'x1)h_~QD rոۙDy9N1 &[?fMendstream endobj 111 0 obj << /Filter /FlateDecode /Length 3508 >> stream xZ[G~WDt~!E!ثD(D7;ٝm/gs.]zm/ \:uwڨ^o+ݜ|,n'jsuӉ?ng7OOav0g7:^E ^Ƨw˭Ꝏ6bSE]ôQ>enw&>ԽYlH~k&RQ9h;,*Rwmupp)峣AmΈ9woa6)v~xN1ԝmMpw(lk9ia_|d} U2I9 wxl5)Sh2fL*Llu$/DAhL(92) Iux:nq_:1ގ5.-xͤN W@{ zӶ.^`^Y(|n h@GUu)soPF] W`Rt,8pqС;gБ@Qz0>k3ǐ1֗ eJE@/\ ݳ/*-.d  Z1A8.wqukSߜ~yrCHZi W"e k1_;,:5$w7ɷzywc$A MZӊƏ<`i8GPbgݻ`J,5 fgLK'&]U4k< ?!1e?hCKsMQ5Ia6 ¹h`7[ɥ#uD /4|9k\c)'@9tBN8[ W0Auw[ryqlu0([\D%7uxY@0 5Bh[T|pqĿ}SsK@Y2 lBB&ǃZdʰ}S1C(WdX1`%p. pT) :ÏP钃g=eR=wGmo2"֔W cnZ}[{X 1@j mb/s܎17#`\w9Mu4I- N#\M.3V3˛6-5( vm}؍nӊp`D&T'󥧼%L g#!)G8k@A& )чd| THP(}$򒠍 Cc3R9i)~X|/H5^Tޣqc%_8n M ˆ; 'h{-gݕp l"U49q`1œ=*r-ԼS*%ؔs3zBPiP@`ppyfwhl]>\'*Hx MHq ||YȦaorh6N?X0IT*.1?s~EGŽ,;o,$+\b !׋kˠL-Ey#rSI1PD^'T9p VYoQkr?ޱxr-B܃ODvqP_]]05G4=]pUS`|%fX(Ɉ`+:\{(.GeSRٻЀ@ˮ /*:Όu:y،GB5Rv} *h׊Y$H;)m =tGA8&LZ[pW'n5i:kLk<@D^\]6-z([K|N54o+z#\Qw?Un\x44 a_#t< I7!?K ְIRgz@!-.Z҃SR2Gez#cc|8x4lm@L-Yن<_ 0/K.)*.`Gသ$quLD}!=ֲK,qAwi&Y̑,m/--h3gуG# ٯDrX=2|m33n]7:qF!.r=Srn6r 0 GA~,xg h^[!&B3nngp)ZB1M6,yr$]`čqXO)݃w=IyaLۄC!Y?`D*AC$(*EZ)uA&V4%oǦ|:{Fh]IO#o@lo rخ򥈹4~agArKYrwSs(j.,:#l.CF\&ËI#>B˜M' S婐㶞~z=Kwh+lP^UEr!e(*:2-k:i-YJGIVSB\W<%뵂IyXg83$8{rQlEU~Ԣ=-jZ&G)R;cz])B}/Ϯ>?Ɨ&j\ 1J{)} W1˗e]pg A 9d(*:teB7CVv/0TYbOde<{6,~me]ke<炏Ћuȯ.˼&.5 fFǘ07J6{c|J  $ϴ-=ʞ,_W O;0*#XE:_. ڲL*Dz?d> iX ?398 Ėn 8aکiaM+1FK &r\- e _W NrD{jKnv1}ώÄ%ك}E%\Jh$"udNw?Ɂ1 ho ]goQ,ZH;6_f?1^]>rCG? E|> stream x]O10 . 2D! 
}I.|D/J`H_#490Lu@vw=|*J!􆖠D[U]km'͟tF{8+/,S\ HJ$pLg9AAStendstream endobj 113 0 obj << /Filter /FlateDecode /Length 2891 >> stream xZrÿ{"8H6-aCP0 -qˬa5ٵeP^(g߼gjq}/vad*YyS/գv<}OOK5lHyr9I˕C6a$œXzYY& }G~`S޶5k>a}k(9lhPVJ$~I*'yP'iTc*) S:a}C2Ii_M$ Ͽƌ4h7$m:l~IaDgMʇ`qiBY|Zމy;a5: c4{:RQ%Vjǣ*\pXb8\C"m>BSpbӖ~MQAWM`I$%%dE߈moIa)P7I3Zlxf#bK'3vIze-Jѻl22Pvi[G '" [g&X՝| -M3l:ƕazOgƑitx{sx4s,QNN)99b5Jؑq}9qy:]ruE; #n2f:s yw G'  B(Ffx#6*,yDRox jPO9/p=MusIն(-{V%bU;_ٰa.'v6[=iL3֏9]Ȝ j%=^G!QixF2sQm$\,~r/ nAANFgH)u0]aQX7ĉ$Nu97 ,(4#Z>BFc\ [?&FDg% )_ُla |vwӂ8W$Olὖ!%Bf{ҒD oE\I`U#%-(TDIa#TͺUvFR CIi3"BDDUL1Ǧ"/` jdil|.&st\Pt.ۋL+^.5тH {;Ȧ8/ 6.c56uyCt2r"WFw%df{=kYNq0Hֺͨ^MH{0XS"ZK %(inbR%Tr%a E -; .^zBY$'3CL)1CIn^DcYSr6(1B7s+oZI">BE͔KĄ`폇 6geTl;WX$:v'=Mlb5[ MZh(iqVD6|$36?GPkXEc5T`N˄+`SKDP<U!(_.CHl^s=CR/Ѣ"hgcg up8"Ft7K&E,f^TN"kGFDl*x)Y@J.%@FD|B#XXȘJ%(Ö3iRL2)"E}IeܓCpTX r' 0ԃrNQAI]'PbCٍPT niLQDVI>Q6s8پiC`yq9tD n'aiUxXd%!ebқb@}I%њ!2} -Q4qmP bA$h !#s7^Zo }S4_IsgX6ƠH/uV!&YCgtK!. |cK~-@㜰ƞSk]1w4I\Lmj,S8QwZE`9WU'ms (9/vs>7C/E1t&GH cRSuxJƳ(DC>70[,ElN4$4%`Deww}`VFR٬KyR|TPYcě“^ԹrRH#I`)ݵLw:qN!0.`lxEaո 좎8*: Ck"@N;ۋ^dJ+Y12n^k*ZwYt/uዛ# 5sAa~>ě 9Y$ B#fȩ,jţ5:D&Q+6.fe,EK/ PҶUZ&3`1$bu,uZђYW \Ac eYͲoNL@D&WŻk ^D칽VCi?6kS^?XLX@:/4EOO, iՀfZ%gendstream endobj 114 0 obj << /Type /XRef /Length 143 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 115 /ID [<2b880d2503d6d0ed8b49d1c0ad1054ad>] >> stream xcb&F~0 $8J+?& < m%d "9ׁHy)tD2.;A4d: "Y/@dN\++"@$Ok ɘPýD5U0^>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage[colorlinks=true,urlcolor=blue]{hyperref} \usepackage{color} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \begin{document} %\bibliographystyle{plain} \thispagestyle{empty} <>= library(spatstat) options(useFancyQuotes=FALSE) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", 
package = "spatstat"), fields = "Version") @ \title{Handling shapefiles in the \texttt{spatstat} package} \author{Adrian Baddeley} \date{ \Sexpr{sdate} \\ \pkg{spatstat} version \texttt{\Sexpr{sversion}} } \maketitle This vignette explains how to read data into the \pkg{spatstat} package from files in the popular `shapefile' format. This vignette is part of the documentation included in \pkg{spatstat} version \texttt{\Sexpr{sversion}}. The information applies to \pkg{spatstat} versions \texttt{1.18-0} and above. \section{Shapefiles} A shapefile represents a list of spatial objects --- a list of points, a list of lines, or a list of polygonal regions --- and each object in the list may have additional variables attached to it. A dataset stored in shapefile format is actually stored in a collection of text files, for example \begin{verbatim} mydata.shp mydata.prj mydata.sbn mydata.dbf \end{verbatim} which all have the same base name \texttt{mydata} but different file extensions. To refer to this collection you will always use the filename with the extension \texttt{shp}, for example \texttt{mydata.shp}. \section{Helper packages} \label{S:helpers} We'll use two other packages% \footnote{In previous versions of \pkg{spatstat}, the package \pkg{gpclib} was also needed for some tasks. This is no longer required.} to handle shapefile data. The \pkg{maptools} package is designed specifically for handling file formats for spatial data. It contains facilities for reading and writing files in shapefile format. The \pkg{sp} package supports a standard set of spatial data types in \R. These standard data types can be handled by many other packages, so it is useful to convert your spatial data into one of the data types supported by \pkg{sp}. 
\section{How to read shapefiles into \pkg{spatstat}} To read shapefile data into \pkg{spatstat}, you follow two steps: \begin{enumerate} \item using the facilities of \pkg{maptools}, read the shapefiles and store the data in one of the standard formats supported by \pkg{sp}. \item convert the \pkg{sp} data type into one of the data types supported by \pkg{spatstat}. \end{enumerate} \subsection{Read shapefiles using \pkg{maptools}} Here's how to read shapefile data. \begin{enumerate} \item ensure that the package \pkg{maptools} is installed. You will need version \texttt{0.7-16} or later. \item start R and load the package: <>= library(maptools) @ \item read the shapefile into an object in the \pkg{sp} package using \texttt{readShapeSpatial}, for example <>= x <- readShapeSpatial("mydata.shp") @ \item To find out what kind of spatial objects are represented by the dataset, inspect its class: <>= class(x) @ The class may be either \texttt{SpatialPoints} indicating a point pattern, \texttt{SpatialLines} indicating a list of polygonal lines, or \texttt{SpatialPolygons} indicating a list of polygons. It may also be \texttt{SpatialPointsDataFrame}, \texttt{SpatialLinesDataFrame} or \texttt{SpatialPolygonsDataFrame} indicating that, in addition to the spatial objects, there is a data frame of additional variables. \end{enumerate} Here are some examples, using the example shapefiles supplied in the \pkg{maptools} package itself. 
% fake data because we don't want spatstat to depend on maptools <>= baltim <- columbus <- fylk <- list() class(baltim) <- "SpatialPointsDataFrame" class(columbus) <- "SpatialPolygonsDataFrame" class(fylk) <- "SpatialLinesDataFrame" @ <>= setwd(system.file("shapes", package="maptools")) baltim <- readShapeSpatial("baltim.shp") columbus <- readShapeSpatial("columbus.shp") fylk <- readShapeSpatial("fylk-val.shp") @ <<>>= class(baltim) class(columbus) class(fylk) @ \subsection{Convert data to \pkg{spatstat} format} To convert the dataset to an object in the \pkg{spatstat} package, the procedure depends on the type of data, as explained below. \subsubsection{Objects of class \texttt{SpatialPoints}} An object \texttt{x} of class \texttt{SpatialPoints} represents a spatial point pattern. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert it to a spatial point pattern in \pkg{spatstat}. The window for the point pattern will be taken from the bounding box of the points. You will probably wish to change this window, usually by taking another dataset to provide the window information. Use \verb![.ppp! to change the window: if \texttt{X} is a point pattern object of class \verb!"ppp"! and \texttt{W} is a window object of class \verb!"owin"!, type <>= X <- X[W] @ \subsubsection{Objects of class \texttt{SpatialPointsDataFrame }} An object \texttt{x} of class \texttt{SpatialPointsDataFrame} represents a pattern of points with additional variables (`marks') attached to each point. It includes an object of class \texttt{SpatialPoints} giving the point locations, and a data frame containing the additional variables attached to the points. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert an object \texttt{x} of class \texttt{SpatialPointsDataFrame} to a spatial point pattern in \pkg{spatstat}. In this conversion, the data frame of additional variables in \texttt{x} will become the \texttt{marks} of the point pattern \texttt{z}. 
<>= y <- as(x, "ppp") @ Before the conversion you can extract the data frame of auxiliary data by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. After the conversion you can extract these data by \verb!df <- marks(y)!. For example: <>= balt <- as(baltim, "ppp") bdata <- slot(baltim, "data") @ \subsubsection{Objects of class \texttt{SpatialLines}} \label{spatiallines.2.psp} A ``line segment'' is the straight line between two points in the plane. In the \pkg{spatstat} package, an object of class \texttt{psp} (``planar segment pattern'') represents a pattern of line segments, which may or may not be connected to each other (like matches which have fallen at random on the ground). In the \pkg{sp} package, an object of class \texttt{SpatialLines} represents a \textbf{list of lists} of \textbf{connected curves}, each curve consisting of a sequence of straight line segments that are joined together (like several pieces of a broken bicycle chain.) So these two data types do not correspond exactly. The list-of-lists hierarchy in a \texttt{SpatialLines} object is useful when representing internal divisions in a country. For example, if \texttt{USA} is an object of class \texttt{SpatialLines} representing the borders of the United States of America, then \verb!USA@lines! might be a list of length 52, with \verb!USA@lines[[i]]! representing the borders of the \texttt{i}-th State. The borders of each State consist of several different curved lines. Thus \verb!USA@lines[[i]]@Lines[[j]]! would represent the \texttt{j}th piece of the boundary of the \texttt{i}-th State. If \texttt{x} is an object of class \texttt{SpatialLines}, there are several things that you might want to do: \begin{enumerate} \item collect together all the line segments (all the segments that make up all the connected curves) and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a spatial line segment pattern. 
\end{quote} \item convert each connected curve to an object of class \texttt{psp}, keeping different connected curves separate. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) @ The result will be a \textbf{list of lists} of objects of class \texttt{psp}. Each one of these objects represents a connected curve, although the \pkg{spatstat} package does not know that. The list structure will reflect the list structure of the original \texttt{SpatialLines} object \texttt{x}. If that's not what you want, then use \verb!curvelist <- do.call("c", out)! or <>= curvegroup <- lapply(out, function(z) { do.call("superimposePSP", z)}) @ to collapse the list-of-lists-of-\texttt{psp}'s into a list-of-\texttt{psp}'s. In the first case, \texttt{curvelist[[i]]} is a \texttt{psp} object representing the \texttt{i}-th connected curve. In the second case, \texttt{curvegroup[[i]]} is a \texttt{psp} object containing all the line segments in the \texttt{i}-th group of connected curves (for example the \texttt{i}-th State in the \texttt{USA} example). \end{enumerate} The window for the spatial line segment pattern can be specified as an argument \texttt{window} to the function \texttt{as.psp}. \subsubsection{Objects of class \texttt{SpatialLinesDataFrame}} An object \texttt{x} of class \texttt{SpatialLinesDataFrame} is a \texttt{SpatialLines} object with additional data. The additional data is stored as a data frame \verb!x@data! with one row for each entry in \verb!x@lines!, that is, one row for each group of connected curves. In the \pkg{spatstat} package, an object of class \texttt{psp} (representing a collection of line segments) may have a data frame of marks. Note that each \emph{line segment} in a \texttt{psp} object may have different mark values. 
If \texttt{x} is an object of class \texttt{SpatialLinesDataFrame}, there are two things that you might want to do: \begin{enumerate} \item collect together all the line segments that make up all the connected lines, and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a marked spatial line segment pattern. \end{quote} \item keep each connected curve separate, and convert each connected curve to an object of class \texttt{psp}. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) dat <- x@data for(i in seq(nrow(dat))) out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE]) @ The result is a list-of-lists-of-\texttt{psp}'s. See the previous subsection for explanation on how to change this using \texttt{c()} or \texttt{superimposePSP}. \end{enumerate} In either case, the mark variables attached to a particular \emph{group of connected lines} in the \texttt{SpatialLinesDataFrame} object, will be duplicated and attached to each \emph{line segment} in the resulting \texttt{psp} object. \subsubsection{Objects of class \texttt{SpatialPolygons}} First, so that we don't go completely crazy, let's introduce some terminology. A \emph{polygon} is a closed curve that is composed of straight line segments. You can draw a polygon without lifting your pen from the paper. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(chorley) plot(as.owin(chorley), lwd=3, main="polygon") @ \end{center} A \emph{polygonal region} is a region in space whose boundary is composed of straight line segments. A polygonal region may consist of several unconnected pieces, and each piece may have holes. The boundary of a polygonal region consists of one or more polygons. To draw the boundary of a polygonal region, you may need to lift and drop the pen several times. 
\setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(demopat) plot(as.owin(demopat), col="blue", main="polygonal region") @ \end{center} An object of class \texttt{owin} in \pkg{spatstat} represents a polygonal region. It is a region of space that is delimited by boundaries made of lines. An object \texttt{x} of class \texttt{SpatialPolygons} represents a \textbf{list of polygonal regions}. For example, a single object of class \texttt{SpatialPolygons} could store information about every State in the United States of America (or the United States of Malaysia). Each State would be a separate polygonal region (and it might contain holes such as lakes). There are two things that you might want to do with an object of class \texttt{SpatialPolygons}: \begin{enumerate} \item combine all the polygonal regions together into a single polygonal region, and convert this to a single object of class \texttt{owin}. \begin{quote} For example, you could combine all the States of the USA together and obtain a single object that represents the territory of the USA. To do this, use \verb!as(x, "owin")! or \texttt{as.owin(x)}. The result is a single window (object of class \texttt{"owin"}) in the \pkg{spatstat} package. \end{quote} \item keep the different polygonal regions separate; convert each one of the polygonal regions to an object of class \texttt{owin}. \begin{quote} For example, you could keep the States of the USA separate, and convert each State to an object of class \texttt{owin}. \end{quote} To do this, type the following: <>= regions <- slot(x, "polygons") regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) }) windows <- lapply(regions, as.owin) @ The result is a list of objects of class \texttt{owin}. 
Often it would make sense to convert this to a tessellation object, by typing <>= te <- tess(tiles=windows) @ \end{enumerate} The conversion process may generate an error message, saying that some of the polygons intersect each other, or are self-intersecting, or violate other geometrical conditions. This happens because an object of class \texttt{SpatialPolygons} is just a list of lists of polygons, possibly self-intersecting or mutually intersecting, but an object of class \texttt{"owin"} is intended to specify a well-defined region of space. If you chose option 1, the conversion process will check whether any of the polygons in \texttt{x} intersect each other. This often generates an error with a shapefile representing a division of space into states or counties or administrative regions, like the D\'epartements of France, because two adjacent regions have boundaries that intersect (even though the intersection has zero area). If you chose option 2, the conversion process will only check whether, for each polygonal region in \texttt{x}, the component polygons intersect each other. This will \emph{usually} avoid the checking problem. If an error occurs, the error message will usually specify which component polygons fail the test. The best strategy is usually just to plot the object \texttt{x} (using the plot facilities in \pkg{sp}) to identify the problem. It is possible to suppress the stringent checking of polygons in \pkg{spatstat} during the conversion: <>= spatstat.options(checkpolygons=FALSE) y <- as(x, "owin") spatstat.options(checkpolygons=TRUE) @ The resulting object \texttt{y} should be inspected carefully and used circumspectly; it has not passed the stringent tests required for many algorithms in \pkg{spatstat}. \subsubsection{Objects of class \texttt{SpatialPolygonsDataFrame}} What a mouthful! An object \texttt{x} of class \texttt{SpatialPolygonsDataFrame} represents a list of polygonal regions, with additional variables attached to each region. 
It includes an object of class \texttt{SpatialPolygons} giving the spatial regions, and a data frame containing the additional variables attached to the regions. The regions are extracted by <>= y <- as(x, "SpatialPolygons") @ and you then proceed as above to convert the curves to \pkg{spatstat} format. The data frame of auxiliary data is extracted by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. For example: <>= cp <- as(columbus, "SpatialPolygons") cregions <- slot(cp, "polygons") cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) }) cwindows <- lapply(cregions, as.owin) @ There is currently no facility in \pkg{spatstat} for attaching marks to an \texttt{owin} object directly. However, \pkg{spatstat} supports objects called \textbf{hyperframes}, which are like data frames except that the entries can be any type of object. Thus we can represent the \texttt{columbus} data in \pkg{spatstat} as follows: <>= ch <- hyperframe(window=cwindows) ch <- cbind.hyperframe(ch, columbus@data) @ Then \texttt{ch} is a hyperframe containing a column of \texttt{owin} objects followed by the columns of auxiliary data. 
\end{document} spatstat/inst/doc/replicated.R0000644000176000001440000003773012252324024016134 0ustar ripleyusers### R code from vignette source 'replicated.Rnw' ################################################### ### code chunk number 1: replicated.Rnw:29-30 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: replicated.Rnw:35-42 ################################################### library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) ################################################### ### code chunk number 3: replicated.Rnw:189-190 ################################################### waterstriders ################################################### ### code chunk number 4: replicated.Rnw:208-209 ################################################### getOption("SweaveHooks")[["fig"]]() plot(waterstriders, main="") ################################################### ### code chunk number 5: replicated.Rnw:216-217 ################################################### summary(waterstriders) ################################################### ### code chunk number 6: replicated.Rnw:225-226 ################################################### X <- listof(rpoispp(100), rpoispp(100), rpoispp(100)) ################################################### ### code chunk number 7: replicated.Rnw:231-233 ################################################### getOption("SweaveHooks")[["fig"]]() plot(X) X ################################################### ### code chunk number 8: replicated.Rnw:262-263 (eval = FALSE) ################################################### ## hyperframe(...) 
################################################### ### code chunk number 9: replicated.Rnw:288-290 ################################################### H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) H ################################################### ### code chunk number 10: replicated.Rnw:298-303 ################################################### G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]), W=list(rpoispp(100),rpoispp(100), rpoispp(100)), U=42, V=rpoispp(100), stringsAsFactors=FALSE) G ################################################### ### code chunk number 11: replicated.Rnw:332-333 ################################################### simba ################################################### ### code chunk number 12: replicated.Rnw:346-347 ################################################### pyramidal ################################################### ### code chunk number 13: replicated.Rnw:353-354 ################################################### ws <- hyperframe(Striders=waterstriders) ################################################### ### code chunk number 14: replicated.Rnw:361-363 ################################################### H$X H$Y ################################################### ### code chunk number 15: replicated.Rnw:373-375 ################################################### H$U <- letters[1:3] H ################################################### ### code chunk number 16: replicated.Rnw:380-384 ################################################### G <- hyperframe() G$X <- waterstriders G$Y <- 1:3 G ################################################### ### code chunk number 17: replicated.Rnw:392-396 ################################################### H[,1] H[2,] H[2:3, ] H[1,1] ################################################### ### code chunk number 18: replicated.Rnw:402-405 ################################################### H[,1,drop=TRUE] H[1,1,drop=TRUE] H[1,2,drop=TRUE] 
################################################### ### code chunk number 19: replicated.Rnw:418-419 (eval = FALSE) ################################################### ## plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL) ################################################### ### code chunk number 20: replicated.Rnw:434-435 ################################################### getOption("SweaveHooks")[["fig"]]() plot(waterstriders, pch=16, nrows=1) ################################################### ### code chunk number 21: replicated.Rnw:450-451 ################################################### getOption("SweaveHooks")[["fig"]]() plot(simba) ################################################### ### code chunk number 22: replicated.Rnw:463-465 ################################################### getOption("SweaveHooks")[["fig"]]() H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) plot(H$Y) ################################################### ### code chunk number 23: replicated.Rnw:477-478 (eval = FALSE) ################################################### ## plot(h, e) ################################################### ### code chunk number 24: replicated.Rnw:487-488 ################################################### getOption("SweaveHooks")[["fig"]]() plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) })) ################################################### ### code chunk number 25: replicated.Rnw:500-502 ################################################### getOption("SweaveHooks")[["fig"]]() H <- hyperframe(Bugs=waterstriders) plot(H, quote(plot(Kest(Bugs))), marsize=1) ################################################### ### code chunk number 26: replicated.Rnw:515-517 ################################################### df <- data.frame(A=1:10, B=10:1) with(df, A-B) ################################################### ### code chunk number 27: replicated.Rnw:530-531 (eval = FALSE) ################################################### ## 
with(h,e) ################################################### ### code chunk number 28: replicated.Rnw:541-544 ################################################### H <- hyperframe(Bugs=waterstriders) with(H, npoints(Bugs)) with(H, distmap(Bugs)) ################################################### ### code chunk number 29: replicated.Rnw:567-568 ################################################### with(simba, npoints(Points)) ################################################### ### code chunk number 30: replicated.Rnw:575-577 ################################################### H <- hyperframe(Bugs=waterstriders) K <- with(H, Kest(Bugs)) ################################################### ### code chunk number 31: replicated.Rnw:585-586 ################################################### getOption("SweaveHooks")[["fig"]]() plot(K) ################################################### ### code chunk number 32: replicated.Rnw:591-593 ################################################### H <- hyperframe(Bugs=waterstriders) with(H, nndist(Bugs)) ################################################### ### code chunk number 33: replicated.Rnw:599-600 ################################################### with(H, min(nndist(Bugs))) ################################################### ### code chunk number 34: replicated.Rnw:612-613 ################################################### simba$Dist <- with(simba, distmap(Points)) ################################################### ### code chunk number 35: replicated.Rnw:626-630 ################################################### getOption("SweaveHooks")[["fig"]]() lambda <- rexp(6, rate=1/50) H <- hyperframe(lambda=lambda) H$Points <- with(H, rpoispp(lambda)) plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4))))) ################################################### ### code chunk number 36: replicated.Rnw:636-637 ################################################### H$X <- with(H, rpoispp(50)) 
################################################### ### code chunk number 37: replicated.Rnw:666-667 ################################################### getOption("SweaveHooks")[["fig"]]() plot(simba, quote(plot(density(Points), main="")), nrows=2) ################################################### ### code chunk number 38: replicated.Rnw:686-688 ################################################### getOption("SweaveHooks")[["fig"]]() rhos <- with(demohyper, rhohat(Points, Image)) plot(rhos) ################################################### ### code chunk number 39: replicated.Rnw:705-706 (eval = FALSE) ################################################### ## mppm(formula, data, interaction, ...) ################################################### ### code chunk number 40: replicated.Rnw:716-717 (eval = FALSE) ################################################### ## mppm(Points ~ group, simba, Poisson()) ################################################### ### code chunk number 41: replicated.Rnw:750-751 ################################################### mppm(Points ~ 1, simba) ################################################### ### code chunk number 42: replicated.Rnw:758-759 ################################################### mppm(Points ~ group, simba) ################################################### ### code chunk number 43: replicated.Rnw:765-766 ################################################### mppm(Points ~ id, simba) ################################################### ### code chunk number 44: replicated.Rnw:776-777 ################################################### mppm(Points ~ Image, data=demohyper) ################################################### ### code chunk number 45: replicated.Rnw:795-796 (eval = FALSE) ################################################### ## mppm(Points ~ offset(log(Image)), data=demohyper) ################################################### ### code chunk number 46: replicated.Rnw:808-809 (eval = FALSE) 
################################################### ## mppm(Points ~ log(Image), data=demop) ################################################### ### code chunk number 47: replicated.Rnw:826-827 (eval = FALSE) ################################################### ## mppm(formula, data, interaction, ..., iformula=NULL) ################################################### ### code chunk number 48: replicated.Rnw:877-878 ################################################### radii <- with(simba, mean(nndist(Points))) ################################################### ### code chunk number 49: replicated.Rnw:885-887 ################################################### Rad <- hyperframe(R=radii) Str <- with(Rad, Strauss(R)) ################################################### ### code chunk number 50: replicated.Rnw:892-894 ################################################### Int <- hyperframe(str=Str) mppm(Points ~ 1, simba, interaction=Int) ################################################### ### code chunk number 51: replicated.Rnw:921-924 ################################################### h <- hyperframe(Y=waterstriders) g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7)) mppm(Y ~ 1, data=h, interaction=g, iformula=~str4) ################################################### ### code chunk number 52: replicated.Rnw:935-936 ################################################### fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group) ################################################### ### code chunk number 53: replicated.Rnw:954-955 ################################################### fit ################################################### ### code chunk number 54: replicated.Rnw:958-960 ################################################### co <- coef(fit) si <- function(x) { signif(x, 4) } ################################################### ### code chunk number 55: replicated.Rnw:971-972 ################################################### 
coef(fit) ################################################### ### code chunk number 56: replicated.Rnw:1029-1030 (eval = FALSE) ################################################### ## interaction=hyperframe(po=Poisson(), str=Strauss(0.07)) ################################################### ### code chunk number 57: replicated.Rnw:1035-1036 (eval = FALSE) ################################################### ## iformula=~ifelse(group=="control", po, str) ################################################### ### code chunk number 58: replicated.Rnw:1046-1047 (eval = FALSE) ################################################### ## iformula=~I((group=="control")*po) + I((group=="treatment") * str) ################################################### ### code chunk number 59: replicated.Rnw:1057-1062 ################################################### g <- hyperframe(po=Poisson(), str=Strauss(0.07)) fit2 <- mppm(Points ~ 1, simba, g, iformula=~I((group=="control")*po) + I((group=="treatment") * str)) fit2 ################################################### ### code chunk number 60: replicated.Rnw:1085-1088 ################################################### H <- hyperframe(W=waterstriders) fit <- mppm(W ~ 1, H) subfits(fit) ################################################### ### code chunk number 61: replicated.Rnw:1109-1110 (eval = FALSE) ################################################### ## subfits <- subfits.new ################################################### ### code chunk number 62: replicated.Rnw:1122-1124 ################################################### H <- hyperframe(W=waterstriders) with(H, ppm(W)) ################################################### ### code chunk number 63: replicated.Rnw:1147-1149 ################################################### fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) ################################################### ### code chunk number 64: replicated.Rnw:1159-1160 
################################################### getOption("SweaveHooks")[["fig"]]() plot(res) ################################################### ### code chunk number 65: replicated.Rnw:1165-1167 ################################################### getOption("SweaveHooks")[["fig"]]() smor <- with(hyperframe(res=res), Smooth(res, sigma=4)) plot(smor) ################################################### ### code chunk number 66: replicated.Rnw:1179-1182 ################################################### fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) totres <- sapply(res, integral.msr) ################################################### ### code chunk number 67: replicated.Rnw:1188-1195 ################################################### getOption("SweaveHooks")[["fig"]]() fit <- mppm(Points~Image, data=demohyper) resids <- residuals(fit, type="Pearson") totres <- sapply(resids, integral.msr) areas <- with(demohyper, area.owin(as.owin(Points))) df <- as.data.frame(demohyper[, "Group"]) df$resids <- totres/areas plot(resids~Group, df) ################################################### ### code chunk number 68: replicated.Rnw:1216-1219 ################################################### getOption("SweaveHooks")[["fig"]]() fit <- mppm(P ~ 1, hyperframe(P=waterstriders)) sub <- hyperframe(Model=subfits(fit)) plot(sub, quote(diagnose.ppm(Model))) ################################################### ### code chunk number 69: replicated.Rnw:1232-1240 ################################################### H <- hyperframe(P = waterstriders) fitall <- mppm(P ~ 1, H) together <- subfits(fitall) separate <- with(H, ppm(P)) Fits <- hyperframe(Together=together, Separate=separate) dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together))) dr exp(dr) ################################################### ### code chunk number 70: replicated.Rnw:1257-1266 ################################################### H <- hyperframe(X=waterstriders) # Poisson with 
constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) ################################################### ### code chunk number 71: replicated.Rnw:1295-1296 (eval = FALSE) ################################################### ## kstest.mppm(model, covariate) spatstat/inst/doc/replicated.pdf0000644000176000001440000141243112252324033016500 0ustar ripleyusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3666 /Filter /FlateDecode /N 91 /First 763 >> stream x[Ys۶~oNt:qʼnk;qNhʒ#i_ɡ%L3+aJ8fQMcYD:"a)&3iI&}Ȕt(5SF{AL+vtbRŒ6SfXYO$syJw͜J$2g0XY2MayQ)KL 8L^(*Lken7eZ4L_C *hR-@`iN- ՍB%I AŃXY6OxKvI gXRc.pǐT饴:vJBAyP-Q\` ؕdCdGvcWd6 ͜ŀb>'2qxN<_Lx 3e!`+*B.sq$<-"Rey(˲])]I]NMeYڲ,˭r2)%%<3Ǽ x3h8l1,Ǔ/ !r|&N1b|6<2v2: L:-t28ɏ jO=^g#2A/|zWUSY/~}s8Lbh<+ t6rMIdUV2~r{>&dY1_۹ M Y$˪MVЋvM[i( >V$z4%s뛃^"y/yG|'OW< GL iBs9^gyCGK/+f-']kߗAov;=[Rk[ߏ=, 1=U'~uU}UٮYAG#%#e_s~*fpM>-&Si\J-Kw 6-b.fy+RCky &vt?u_N[X%'Ket2qFNhl%tYreG 5'c"ET!RB梅m>iVo%d7RBG{"Y/z4: a6flpL 6n'^#>I_ `Szɳbݛ10S:oШ}U΍PeI{$62P$7$pO; u(B=zL |6}PU,_$Ъ*) ⶉeOȺNM NF[fĒQMQkEYAHD| q^̫4Hh%5aOY1[p~=fW<m6Z!֣+ݓو@) RoF3g2Q^m."jK^IȾJ<0"p:-uR :iJ{_mbe_mݳ"6]ҍ{4W_[pY?bV'hF->hY)PAAdAQ̋0Gh罔d4ʦKQ)(V;t§s Vc϶-yiNU[[Ҷ6 cj 7­zjEfՇ1d s~unq_5DR Ӆ)Lj[ a')32F.6ӰGxRlMDp(e\?9*nEQJ<*{)MAJM!v޳n;k?-7+NWyں-L-koxt FKU2bXʪNUѵWYLuc`紌i<V<۹W湋J`k i$4!B$yV8գң?"+@ 6,P/kQ]X1aܱR[P-};{ϛa ;X{֣kCZU רkK K\ekxȧ֟.zxܓw 'Sթz;7E.$9X)ÃPUF󜣋_Jv/Go$\z𳢒~ȈTR;t]Ql̗z ڇ^D/*Rѝtk{/=7Vx8/?jMY$i-;2zHv% ^xpٻ/Lyt.pe'󢛦IMW5}aQA)FI|R449_48r/Y!8\%2]3pxp\M~7j]̖0>bN@uz!$Ȫ:}fS+o&Y[dS7fwO^ ;ų3iӸMG=͐}]y'WImJKY)sAb rne "%Uc+TMa;y.\U;qj})—(yn5"y9FsH ݂:A3 ʫENEwݤdVHs@AWۥCMיO:t]o_#1CPMs)m="ZdHWF/C|>Gz{}OgWtq{Hn}Li+S22O[@*큼GuuYal■1)]5jI=vv1{m||9hx8=yjv$ETMۻqtKcح[ Kc/IF_vNW߁,NPA{w|ޞN[g֬>ѭ30\lb%fjGo+nn+L9uo.Q#D"nFyXSb K<Oߜ, 6oO|8;%w껉m3kbZt6s> /fCd1BhQgg3t|2d /Jm?7B/m=$kK1BD #tlȱ=zF)[6@'(/.:}w'W6v)KQL%E~18, #{ iJ: %*+eAF5zvvgEW+(-wf> stream 2013-12-12T20:12:07+08:00 2013-12-12T20:12:07+08:00 
David M. Jones CMR17 endstream endobj 94 0 obj << /Type /ObjStm /Length 1940 /Filter /FlateDecode /N 91 /First 798 >> stream xZmo6_UۑyiK%뚠US 9ʒb;tc(ǻwG2HHH^ (q m|KaVJo-,q' 9 -><#R|H)N@?RjCJ#xz(CȄQB0+C<&TJ$' 3 kȣ3ABǁCi\% 0Bk)4Z *X $5`t 0q҉0!xž0q˄1:OHX #5kQsXJNR]& XA` 90Aw B%Yf =80g)V#f! H9HyPWslΞK1! |+Oarp:bppaA;OV',;{ 3JAވϦ s3Wmx݂f a؂Ʋ5IM9V}NO`z/ E@?M:CWan6O4xцߏnhMGYћc`یy_eV-yeWjPήóL9ص@# WCɻt3 MlaPXk t?0.~g6C^*RLJQBnQj ,MUBr-[-Y8񪪅uJ^f[ 7DہL"KOC;MC`~Ig<ˠܩ|d44\U5s0AR:C4v`F/io,AZp릱:x_,"P?189?g/e3GI2缠f/ʿN'cӫ9L¯qAAs,x| S:YZuz`|:Ct<à2adv&58;FS_#ʮ2~F"G~A(k( ct0!yFMDIOQ^oDoE`8lVdZȂؠN 3P-.U# ]V0; ЮCں.aȹ3e 㺝ѵT|cGm"eq:[Yn@岨<7sL7~%KqiCpO.^]#蟾<[pdB.XB~0Dh'¢Y%zLD+& Z ¢g L]ԄiW![ oȫ&j-~3|Xib F_㇡W]_|/`+J[uO2?j"bs{mEz{;gݥY:"Pn&xr7_氭4lmCO㳥')}kYF΅z낪(NtEإ,޸CȦqvWۧ]*Xa]UU:C3[jWמ3{% xr~Z yHo:>"5'k#x aK?zUb|l6ԨiB" 52m{C[1*=FE;֞(p땴{][4 u'pQzҀ'u?L81gl.lͶInJPԫnT\CWmFή \Rjج=Ɵ?gz/$|&~IIja(> stream x[Ms#av7I9'aU(R"h̠gKJdSr@~neFZin~wGTjnV`@^Q-:8"*z7:9ڌp{lFS3xkJ)oy1d-J`!dB v<^;ÃGRl2ixpXOx]WB1?h=_3sH6q8% $mqبWud;"p;E_hb-ww\В>∷"csW<8c)9 )Xasi['sOśʰ(be{vV>lWDc k4:J+M|vOT`p_aHƸTuçŀs=* JjeA<2 Z3&PcjmC/aw5YOc[JdKTۯ'Hny>O8MtXoUu^_  Kоs2TsR6v=5We, c0mCPs Mfo '+@ͫvBYs>m1m騗ljMy}}a b>XdCŨa0dP1>7Pd6m 1 xG<*C..ZgS ;X#*`2m4PQ+ nrM-Yn<;.z{z:wz xiZ{5\CzvfB}&ѵ&J9,ϕ2z\pWټױ7UR wBz]^H%UDtVHo]Brg NFf|BQ\*lK!L(5LX\I|t)'A/<:zē(hϻ]5zJ%Ȃ5S5Z5q%JL29=Z<\ʲ@zD&`-`b1ˆ4*-k8mRnvwQ=E{<Z-"j5 4k\lnaJymz;R^0NJDdgt6n8Ë(, EϥH5VG3%0X(fB /~.P]^ui*OJ` 7h66j-] nЯ"8%\Y'9zS 8d:6dxؠ Pru\ WhrR! 
suƒSl1QDD!\ӂ$/<㨔9,XrpLmm4@op@ywow si 81xIBbCaxB/JpeʆZi,' fɍǙA\Q?nE/lojzLUO_c50V`a໇V.ATGoUts{[Fn ~ĺ4JdoAe;ov'rpu'.gN(1% O @yN:jݓ]|т|6 Eo;aށJGLͅlLyc$$ôBקP8,7*->J86~g =,b#n)M}Xn}.(ݸqɠi-\a"mB\ikcXO׽' ЁKz0{cr-qHpa~;孩oou3N7^lK9I,4(*E?i/EF|P w}?T[1֏*@4t@TM=|ݎAT6:G&CTZJZ-A 6ߡ NUUɇK)7ko5Ϥcs=ǒkgd2հ܃mq,97CBRJG ArGX= Hnߞ~} C;@B 3 ͝A:ɻMqSCN^@t-?6%_®?!rS6t !x v)ɤۓ$xڀ˞P%d/:9P5?>R Wcy;#6}m4{n6*b.;BR}3]^^KLo3FO7)Kk۠(6Gb'pK Þ[sٞ6Z!%G_b1nQhsaRO-5$,S BcKWhmi,ܱhi@ s̛z/NOBw8SY7~|fH?h@Κ0Z^1v~2xOGendstream endobj 187 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1846 >> stream x}U{PT7J8z鈴E>ƒQ |,ˆEX*v]XuV0+(XL4Lhx.=̴W;M?ߜ3s|}CQI􌤵OV?&Q1+*r",-;RE $^\~ڕK6)"T.)JNؔH2U"(ߨV ReYJ[|D_SH2erm@*IKrR]k% yJUjmNj J'b'$lb3xH%$"҉a"Qu_$$$)⦷Bus:j6 +f^hȖU+P~0t@ C},0Ujp#hB&'7Ty``5TBƞ]#iƏ2(m8a^#FPADįF/eϩPSMGr[-N׈g۬Cʢn4nby "SA &1 YHD޹c[7M|:  k;`+@}zjA D? Zu#@b`ͽ;9(SqRx)(4@+l81%b1oKXX3 >:=-qD]WQ5s<By9+~ps RY3B=0gKR\f:Oe~w8Z*f=Rcpdv%=9}l Z[PHK˲W)zZip92:u7uI$"0>nj攽 1VD['>v/+vNK&k{, ~~Z3Hnۗ*~a Pшx`̤R_'rþ'7}%3 'CyޘvwK!q>>? DZ.ibT09|<21{!Ⱥre#@&vٔF؜NfMzn֞3'Z"س'\ 8 Ϳ?BwKVÔu%ICos 5vJPxh!F-`Ɇ2{hA P>{<--hh[w~.F}fjlq^Ή6bycc#-Ŀendstream endobj 188 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1396 >> stream xMkPWwI 0j7^$\Z [ю$$, DxAn"PA[ۡⶵDZZ;∵tk۝=cB q/"99<}[s$ oS5Jgq]n&_&C~qПBQ4)W2zڵѵX@4 #Xxş`KSM6w\P*IpJ7WO7q;w8AKVT t.,|ݟ:SJȋ##l\xpBB|lƃ! 
u.[L}^%sJF@hu]_(+ꮺWvz2LdB7 &Ph|䟰6jPwK4}{$eC#8C8zIɵPc[NfLb0#yln DmV"*fW?Q(tT} th :as9`_2`!?x.Z1+8촜1SlnXGI=m4m:0n+N D]=9L4C@ӄFRIe0 0x}?LC7` 8Lt+3gHtLW펠f!7.^FNF;;mN ŢE䛀I4Dt;w&Oz+T,Wg+c8Cg՗fy`˅_dwG@3L ^~{ÝSN]imNWnvseVrS !K23<-@Ih ğ0BiJ._}` ͫ(!p$*xܯd W+ wkn=Prrƽ#gXQWEzR Gh _) <\ܖ)-f,;T= eٟ'A1h{Ǐ`%[ Vl_,omhvV433prcC;td vptM4 X NpC3tDȖ W){Zkj멞nĝ㗭^o$55uup0UQ<{N BQR0KJys]oQsEz9 endstream endobj 189 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1728 >> stream xuTkTa`gHflRsj4$5ZRoQXqewvY*{dY,n厰,]bS/xGOC=hlc1M|L9swμ><I$I*2=zNzb¤QsB 2h ] YGyK $Œ$Ψ//0(W)7lHX\40W%*TA2r6P0^/0t][^^F)Yo[,/4(3A_&S&kE2]í'i5RWi z MbV\R2 cmb%b ;8VD#,w+ԯмzN{7_|pzR vK^1VTDIAK13(w~X+ GcBxW,L S!5>: RKg+p:fng5{I-S<ٶ`_֦ _MGI&WZ!f,%e"q_cG~he.YlduSaɮq=Ҋ3TDIKooP5db~lFRwt[t |9/k+jD)>YAhtm -雖aD[QܠQ,08>Gn~>#m֢ދJ 梟rQboıW̓f@ ݔ^c]0[*Jq~]`UP0V=tlUG}L~cJ:l: jۢq8 v68V zAsZ׮F+&{m;4<w邇{H_Dp5f|-tF$\Xk$FG x\RtLIq3v`lY@ ,vDc tb[1 غ % %硹C¢cZgٯ Y+\C1Wa_fN;>uy@^GPVZ* YL-Ht?'. y>?ߋCa'ݝ|2b[#)^':GVTZRW L1&ZAO%dh/Ol7ًtn4t%JZ*Y_m/8/ -@D"k'ڠFmݴ6.| Nk+dPlp&')J؁^mqiNW7r2+-52<)gaqB_RY' z<*둒#mkshܢO<-3]K߾Uendstream endobj 190 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5491 >> stream xX XײqdUDel5=h\Q(nȾ#" ;"*+2kbKhRMwI^r}{_}ԩeLNL&cg/wcgh-I:IɁ֥&`*ε4Œ'#gFxyZ 4jɓFYl53g"PoO?PV|*'338jnb7?OӍ Z0Y8 QxHwǨMK6/Z|VILbj;nq'L40%`f)3Y e Y gV1#Ռ3cόb0[f-3q`2v+30d[d:tA.ȣ:OH*bufreT.Ozu͡[imך^O}χ:nnm^ ݩEI~ʯgY,n%֢@Vz@.v@4@rBbѵ}W0pEj,I9n:Xd5a.5GS&XHMFхϡT/o$_„D*Zͩg 樠+*,Mēs]؃jɢa˕Å3RKrWڌ\$( Ò &.r&xZ/PM' ZiDXPJX!uq.`_O]?}ݢ*TD"L X0.]'i"Li+hL*$ٙMu?co } Ŀ_˫?Qܵ9zv8Atֹqg=p8'BKoȐF/ZX` ͟MWIg~1Qz,hNHh#GV%ؘ,-HF O֪]5 |P5FXaݼqg֚LR-b)փp(o"c"(>6uJ.Y#8؊jˌ\a&ѭwghȘ̈GLcU;uMf`h'x*\/EWV 9ʈºS+W &ش&ԑ:EXoTǡD} (v?g~Tuj ѩ{HꔙFmWu;/ J9/͙ v]~oHt1BJ Xu53YBnMyMѪ:A};viC[ ;$\CUDFB|~ :(ړo= Uh@놈j*rߌi93i[7]Ei OV_-ᔋ3;7$`\-Cb]XaR8/ 4"[)ΐ)^3F1>clMKZ%tDZ3Mm:E+e&?E ]*eD~zJiÓed܄4Lh}S wzW+\zzfz3*X3MrSI'Rdge^Nd5` pqvyJVIW%[4cZx6ۗ]eqvinn '5FByk;-JO밒ݢKIQkSTH6Q`YQ]7pok_u-\BqT e\q׺Vبu!ڶu͌NY 'h$Ϫ ~".9J2{xc¥wᘢ Jc!VXG+З}RB2~#gg2EGhÏbG켩w=~K^ۢ%̍&c<._TlL!2X ɑYI\ 
y[d;hAJ?R;;ZaҲrIua|%fA5WR,_sTdQӣNVCŏ=c4ڦ x-.c77p4N<\fv& A"iU4|ݪԈ=5F#FH,.|W&UԮY?4 $}h.Rt&ll *}`tYSηȟ"v*ҡ%w*r@֚͆Us!HW#pmi_~$$EkU|hyBUY_;ZXUVW[BMC\}]B[yk~V2`ETdL6E73/_lpjҵ TS|PQ´0Ǻ`hQ&2HK.ɛ.?7Ȋk;}]L!&.!"9ȲCG '1?[~v%u"%Iv_dm[0+/҈`t2RE7YoQ0`hy7(9(G-?a:ZS\{oǩN9䄦2rșઘC{ t$M<}QeeGWqI(D΢b{{%խLzKx?I[[bw/\?peaa%0hkMm̌|Iۓ]&+iST ,XIӵ@d{bu 6ÀTJyjB*\qU41EU: 1) 﫱YMԪԞ ~q#YUOפTh{=:Qv Vy&* Jo{^i L'w?[DWvgVK FQͮϴۯpo.mX]P>PiO]x7 TZƶ}Bch"V:G7zvsvun8j8HeN4_m?{1ByKdhŦe˯!мU踰]Su3NذqYl9ɨ#-Z_jY{8e!#NĔRsAFAaovgf ͟}K|M^S孞byQʓA@{]h$Dm"*$>N]b8J|^v#%/X 9qF1FVzL!#qJ14k'f­BN|rpQÒ\sk Oב-nJKvUmRzڃ"M4m&sQxLÜ/vNʸhX`$<2,d?Y8z,S̾Vx3jך{6q['۲a̺ /4 pޮ(aȍw7 Ǔ6otՏtGyvBf@f㫣3[:n6N'=P] {'z`E|rX_a.Dޯ 7H4qCzxy;D[@/sN2hҏj3ksDΏۯ^½/ڙs :I0 /wJj׷knH}?UT7'\t?. t$-o`)d5UaZX*$6/>0)XVF&ΣT)b~M0K\Ĭ_Aـo ǰg1-.^V8Bن* 2 $;R~Oq@mmIYe+2yIfX?WEuۿa?Eman3~5&V ^3a5̲:NG]Q~WNĵOW;} ,3ozU*ˆ<{xr^aIJaslVRE>(atm32O4!SHzV -?+M#Ci˒\f`O_us?(,Cmd|RHKHf dvIKqCa.YɊ]QMyRiLSST435c yendstream endobj 191 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 8213 >> stream xz tS B +2qlз'O1)$ݱ"}yExt\-rI CB^+C<ƒ)8?xAօ(bqҝˢܖGXko)h̷fΰ9}|R vL4uiߘ1M~5ZMPoSk:uj=5@6RcM8j35B-&P[Dj5NOMQSTj 5ZJMQoP˩ Mj%5ZEESj(5ʥ8>#՗MR jLKR!K@j>5zbW(j/MC%_{}{c " =2C8>֏귷mn/]5ܠ4WnznN2E'ݱj.#+f#32qdTk^k}}ch^aۙ&/ : +"eNāB=Qҕ꽚f0AAuH5XU5h Q7mĶs YcCybn@æ77-6 %R؇%+\W*GUE(er0r{og]BJ|ŃDNʜF Fk'-yoQ[^!3S @,zy3<xf?N+C}5[aVVZ(u:oRNIn7ٖ?lD*=Tk5aj@4:J&O̐K,Ki,1s?g} `Mcreag7񋚵hVI#AiNos>)8X8M<B3@CN *.t[v#Om1ܙ_qPS Z%e#ȀTn|-ǃ^gؕo2 ޡ=h*1Xgi<ʌ 9rzov;4PGMl:Ίk%0? {K瀛KEuGӱ ^PIԠ#.w?6ueD੗F֠ux(Jwd-w[H yo00z϶uk_' ૺ#gJI]~9Rb#~X~1=|1$OmѢ@! &D̂Uwq76^Mz 5aw6#ohU'KvT`. 
Ǔ"y`ݥ)1p Y8n*JYPK(ImNt-?n}vk z}â^4magZ Z,hIi|S ʍ uYJ*HGh D+EYAc37/=HjP{ w"|MdWY'&dA!?f5W*W1_KTMqJSQ%2vweh'~ԁKSei;i8zJQ6&ԜC=ol_`f}&90Ma_m'-{7 #DRsڂk\9$.OY5}MB6 39}0zr*bT B tͫ,{VMY2X Ckg-HeF5k{yơzp+#!YҾI5G,"<\b*e; .(s*aU^u;73TEr2sl{xߘ2*`E~Ϊ3zz5D>)+=7EfYf(K IV>.lN>&Ui,'*ezXeȐ$]X@gHžh\>=1_(h9rSfJ(ibsm'NTq2JoA,uѼ*&]!D&W|u!weS r8yut(8CFhC=rj++1աH9^uAU--3h}WU=Ti18= v#T{W`&<(b8|Q$3l-="n"%;$r2w fHP}E=n]JѿCϿ8]7cXbcg`.cMmw\;Mqͩzc(M1vjYB9M)Tɍ NUTUtUI%t*SOzhVS 9])i|~NsAz`Glp z,6i̓|끹Zum6$ %=ߘpG$~esf[ b3-6 xfwgU,v4i0'M c*£M=iZG")6Ca0SQ@Fi9xuM$!cޞ/Vx}|co9gsj9 }1a1i*UR.νm1™ӇutPIAƑAMZ}Z>,P+օ3fĭ=8jV姂ǵ;Y\{ZSÁZm_zIүgHJNiR-aɊΚms^ŗ#2cyz$q%bact!!1Qaə{DtN]Q\QWWUUGt7,="h6;((9Bh,%(iD㡜2_D2~0i Y0A]N0V\Y@]A J ˶êQXYHHUժQg>2}=8DfDժ?艸SY? R:hJMcSkfHbVjB/DJH"`8#Rb[fɬ4XJ? bK#t  2Ej`$wb>s8ۅksu5^l;T0Rr/iyZםeY\{NYz9Y"(MVV1eqXŚ 8xpC?/&yp{@2PE8,Ig*?D\J8'2ןjK[q^sPcxKL%Mo?2 SW6E\>9Eheʶ`}z+ Pg!¶,ʨ`E؝44tQaNU("!T<K5&>~{,־O,7E@eFÑPL~>c1~ yn_/ZuYSF=Rƹ݉+Hj 7|6x1C?^|wf~v.ZƢF Um5c'7N6 g9I(r#ϻDBt qkc7jvrrL%Ҹ%ܿ!Ԗ4fӟO۰-ٕswty Xԛ1Wdw8cZ0cpPRsxwQF2€yotܿP3gBet? bT70ٙb u=:wsgˌo c4, ktfhgNz\c1r2ʄJn$p'K E`皸҅ɰqc@^XyxƿP iY*C6 &#Ȑ+@Y#a <=<%Z+&ο9A n"ֶ>*@}[(kq' 8,'GǷ@Wh+;JO:\왈6*-4Z岆 )maw+#+~,u:YENf Rd  (dy)řZ3`/8< (wEY*+K;UaAEs9m޷/Fڗ}5˺ /܇ =V]3B# "}˖OXl~J (h1#zIvV&[|:HWuT3]%D$M;dxH=UQBX%5Nj. \AOd;ܳWgOĝ:hW̟Rs4LuܘRo'leω7ZuGc?Lyc*cƹqD b} h*W4T dBZˏq,NeDv!{tQ6|]g=9*PZoP0N1=@FO 9qbAevnXiʽa l#mkOc,~c7Cw#2ؖ 37:"t[WQUSzH L(ƕ: &rxFt烃pEkt.e}V(00SU&Zl_z6ԜUFwruxM"R 4m+5 PpAA iSsg1NEOendstream endobj 192 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6465 >> stream xY\T+pXoKb,QcQ4Fl (ƂbQޖ-;HmcWX5q/&DhcIL5{?ywv99w(>H$/trv2YaHGY*r@fҼa[oZ vCfLDI "ޞ^2qޱ2{mNuso])rrOv\{.zks?7;ibϘIQoQPQ ʎzZI͡FS1jj,LPPk 5ZG-S w "jZLG-&SQS5rS3eLʉzZN͢)ʌN,Q՗GRy@j5$Pʍb7aR$ʜ~E^yO dWX`10^̾~rπ;A+l5 + =C;;-~cߺuuLo~|E#9#7lo&pn\* HtwE脻l>E @Q(X+ %+㒓Q _JtYj-G|0e)G~UA֠”L>9=E#/B4 Y`kkSh:kY7L% CFT[@GIB5?܌#FuwKQiɑg<] ]j5M?_wh.'ct-(}XE]N0GRW*Ex<cDGQ *+iġZQ!dj鯆eBk?? 
@\V Z8A1U8luϿMÚ@rSq8VmF%|m 2qwtؕa+dd-|AL?㤧O`ɏ';;~+o\v<7vE|z䮨4!Z.,{Zڍ,`/:`Ŏ?zpcJhD 477Fy'"?ḳ1͋:sp+FY(9|ILQ^iΈhm+P9/=ٹ#q9Barn!ޮ2En;NE(;4 {0π؆cI  f&~Jb`6֬"1=KCG+5<`9Xo6̞mxko$T n];DB-o\ G0} V% [QT^TQ=~ k^N:J:XrHa9Å`R*FLV{pֺ˜yɗ{ K>Fmv 6G+zf!Ա0~#_cGt*|L";hXBO;LNB+ːK32>*@15?8K;Atb}* ϑ|e#/yܒu;vL޴:K)KF>"xq6䖤5p-u$q6~scթxiSkb3 œ ݙ srFEIޯkPwdң#(=L[ (I3]qߌĂ2TQzIF)gZ-D0&id+!3y__-~{h_UU\ۜơ4B[K;/5c8R2 =3%"iZ5 mAڞ[ U o? 8<ϰf{0dYۺB^&Ԛǿ30R3՜#L/ވ\#/"$XBjrF6w*vGe$#l'zjEXcv$G$٨MR$H~}-*#))cΞhJALuD4(2"}ﶖgG:?땘7uA غ'q13sׁFh>C |W 1U+ 9 [0si7LFA+ խfpH`c=Hrʬ*)R fw*ľa݇i//7_19B̤ĂTrrҳLZg0/+čn &,Hfzܢ]ըQi{lrBv&WD!' +3zW{j@?'XOϙm!5u{*-iZeMfFpڊ3kHuּE\ڕk[V;qˆW 7=AȄ\;̈́l4u,*Q }U-)ȋ0T+:*ȹՔf'%2t1zhE$ OC׭IDQl g!:Isfz[VԒU< v ‡AM8Z88'}tG /.hGQSBs&rŭ[6"^{r aIj 0boRᲲܻYg(W6|.+]9ɡ y]*:Zїy VTk rEh Ū49bhE OIۙ][%̄(Xu?ܐ#{}wbET]&% &NA?:bו487kއoԇCDsj C9"rRTFҪ;"ӗPNB%[/({s3+xڄᬡz$[,p`FEK JЊк:6X%W<oZ.9θlY17pCF6ν6LU*kuWcSUԘF]& sAeσA1veMɖס_~Y\zh~Q64W)֦5Xf7!P=4jGP[#NIno~'O^0M&2|2L<^6t4r@KbIaҁnK}{.3.G:迩p֤h[~1Yux=qލ|э&yiCXqICoaF/ʄh}jmVyBeVSjiBxƺ/3?%,(AȦ^;kNB&J$f r_q3;<LO8Y{{m.uaYqil2~' مı~/U9'dIHsa"W<&":I ?r`B/_ R5 JI1z ДF;mMLtqdi3G3m2_z鞬]&+ϾzLHy?F&cł6U  UlAX/&Cg_:ψ0 c^,A0Ái>p~0޺y<s>~4P'rkcK\2JD,@־\dQf]< nClBbr R0j<28p}Oz21@o;J5u) \c"oXwEt@n%3϶~Zwan-K|jbEy;ӹݻ/׮t }JOh/qG'?^| 33.rꓕ:cRMuFK+ValItIT\rJr SS(x %G&8$?+3+k))ByDƨZTWRY.@e" }բk m5;ل$MA&LzArqVR7HUs:w9[0:P' ܒ90x+ )HX6(;OG8YKsdJ¢v1Ð|Ϥ2D f .]slCed2yMx Y`)~,ց { .%u+눶ȵ,U2AHOZ.B',G(ixwOl?aBn`\0>̧qDXOןghWki@Qendstream endobj 193 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4932 >> stream xX TW֮DĵTK(( K7l .}i]UDhD.LbL4ј[IO5R{}kcԏdb癳?'o1Mdhnc0ѱ# ` 2d{mvz{z[XM9oTY,yz-^~`1#8Ϙ:]4= IS-B,{yx[[QyXmoz/{=-=`{@e UwsXk ~N6sNn=s97a28f382 lb&3N32Ә-f)cX3˘rf3YaV12f.31gb3f3 c3<3)3a3FLsQBvߐ~m1*1ak|D1XZ.10gSnffIf)p!T ;tPÌ 7}8!#,Tnc0&zQQ&ai#oAdAJrNVn!z݄JǦc v"sLJjT.`oxFf9c246P:$|\ڨh>^bи8 #`c@2\YEZqsJH+|dxjy"0' d:OvWD&E34)NJX2L%LX K/y5›']<>Ls JU)i&׋#e/i3ivxLpzo[p̏_?$o;8);Xdg΢c]r x[U*UI_ ڷah-o^̫^}ԩֵt H('^ۖ YwՀEyW+BȤ b5v).Ta8Z@\{~yl6V;P 
{y}¾\vC.x۩Qd6N-|UÚu;-כ-Ϧ[6^<[]QCNx{cYI},N1E؀8VzqH30&Eò038P\ȗBVT⣄Ԁ@F%+kIjCkq^]4@H*ӦEJ²aX2`q3PgqH>ڣ-Q6xgn]6NqWB93ꄴl t^4{Qлġ}(1 XFGSVxB -047N OME㝿ng]Es>(|-R+訳\X&r8@ơZrJv}EX˻7n%t~kj`ZN{܀%ޑ ɿ2(i~^rK^# [#BrPDs0"?Й]}͂R %v4xǪ[AH{_2 }"SF{3cixFf"5<eFshshìG"o U} 8р+ }[Smr'Ef) u7Kz+u9et;VvYYH+kiA]!ٜMWHSCKs 8}lbGgħ'A2hR ɺQR)>X i=C3+pmySibIDϩR>ktc,2{[#N)w{]Q O: #s4p-FkG4qr?ߠm wj+]LDڐeh"? qZmL{ sSwHեIj255: 9uCDuyӁw -.%qfrY¯")sIߐvvoZ;;f 2{h~d҉E_Cd^M *u2+;X&5c-~EgWfBN*խIx2X)8(HԦF')\B#ksZ#]Kڱ#ʏ94m3-ݕH "cDI<тXnzY%z \W:.7J ΧZ>kvU)ZlA|;pDˇudDa ^(^Hj2V4x.&r n׿cr EI d'=SOħ<΀yV $J֡lYoct.ʼ+?JwOcrEUźB ,(- W\CC*gaÇK}s][a^U;#a=pe9ͺ|j̆@i&Jiy8ρO<mnqo^vZG=(sM/A򧣁8LزOri)~2Owb!cb4a2H!U0C\H,ъ ~Ck6H_= {]'*eޓr3u.>iA$'D_o t_&B2!v4௒DB;.0깲2p=OKaFV)rMd( ^|y*L ir9bӴky9_S NH_τ-vȮ.8%( pZ]ݻ\Qv^#Ȍ; _p/f1_pƚb9(?w'wY{ >4/nΏp W%(|3V_<~Tлt?p\G@Vk_6ݕ>{vEXz 5]<@"0i>G哏дQl]\.n&Kggt~.nSȸ,5&yzX=Κʦ?@d7(LE ՙZ/%[Ա %e)+L:gҔQFvw}RT.L/kFdEO&X$q 1iL>t <:0쎂g7*.FP |gIl>?qA noY7jGX)saRl>Tm' Zd ^FvZPcP4y(vx¡uƓWMdk,?[!4(qG 6ٺďSkn!e{t zom}^2x av$LȊ!rg_=FӁߒ575^/۶sxȃGJOT5lZ[>L_6xxhbMR*$B  /J\I'ILaHQ @T@5VQ.tz~FշAMo='S*:,%`N`t. 5UyRWqC~Vq¶-\'Ru@\Qي-Q *ôla,suyoe=5;vo O`4qsIQ){7DMỊ'bO]Cg*Zۏ]1*#%2vs5m>} ! @ 1G}o=7e ϤdpPgG3? 揥C=gS;ҒG[!oְ"($MO2@_Gw/F,Pq*!5[MȂXʝYL萃;rd[k0AȦ̴C)*M1̿k>endstream endobj 194 0 obj << /Filter /FlateDecode /Length 2591 >> stream x[KsS{MNxJ[|X%Hl.ߧ14fgH,+$ҁ+,׏qzX)7=>wauq{VG?\:`O*(? +{WGzk՛B/ =4=6~J+vkgD]]tBr.7&>Ny\zh0nK} /i{@yL4cJޅ%5[\wt31up荦ny+:\5u=~L3Xί?6;Ak >z/ M.7=^Œ*`ywþ_oH)f<& Y4^qUrj#E@"O ylDmCP{Q 6 _޸F޺kMQQ07&g++CvCJNZKa b)H1挍bp Pj7fڿhqFa]x|BImuC7Oy%XD^H /-?\p Uz4Nc/w5368$ʃsAjAt}I"H#j"r"o泘ӇVJ7 ϵs&Y@+ĸhlFPH }u8аxIx,+z!/j.|]64~ ]턵MȲR'U EלjFB싵HFGx[= cQ%LS U|3qh. 
g"OQgyI1  DuUM3pK,ɻuV|)5+lU  GV3Nhʎ+s#6Qd~1쓻3!Zgn4x57{ZԢ/͒ˠINȀbcw^5#}m6Id AܒC8nR`8O,C2;!Μ5óE{Z{jRq@Jܪۃar[ ?ni|ۻj;ɇ5P!y;qAS2>>Vʣ7pܾJM$zn`9] @ʡlKnpshք#1 3r7Æb9$&f)S*:?L_wJ.|Y9[90Pف|"82؂B!%+sReWFmJGÎ ;cqڶwR(a~r($夊m*BB~%(4MxXxwCWR#܇=倓m@%kr-rw%l)Zy-@< bGqH_UH׀EXvj|ͺ 䋛/W;ܖyREr [H&QO_}CmtC1>/<݉?*seM~0w"]n _fFwzhdi?BdɱM|v^d^C?\vDɼ\fW|.):)LUyBRIarctJgD_* ,6 ~' D "}N|9AzTno0nnw+UR&Wt$ LcɸT&аmb?Ve mÞ~8!%57q;kW[Il&}BP1yZ#<2eL֋ֻ |.@χPA6#CqF9*l6x2=!R$75Z1'%/u,TLFv{)Kπ!\lc@^VdF ]G4"PN4^PvR|a?y*ɡ&LGA̧qc`(n K/o]ҫ~tS݂<%)q9l]R(ÅX fasZ{ D`w'VDUjp{* rMXDo*NDc~3JVG_~Op4 -d)`# rM$J2./|JC)KFcfƻVfy;[4Hq78Y:~*IWB$qC6_Q.6ʵ1B H }r /zkm̂1oƱTlm|̅ٴٯ0LPendstream endobj 195 0 obj << /Filter /FlateDecode /Length 6199 >> stream x\YoGv~̏ {N3A` E$Feg,8n5r,;S}W>>RgJxsvRpٞ.W?\k~/suPao5C>k5va.[?.g~HH;1?r3`Ɲ|vo.ݏ0ިT}hK|v t0ĦM|}dme~w?w: Mcq%G JD[ (;I'l9e_gO;LJJ,%)akGkmJ݄dϵ=xR!ⴖZ%YJ3ꜥ^RvcVzԭ9:pS! #@$ [1'm ʫ+焩Y9Z ֿ # z4ޔd/**媜܎59`/)mytt-O5&Nʙ<(xysثǎ$bMBUĉxg(Wt:brP h0``0[nrq28LWact?; yrvoH}32\֩0)Zgg-ʆI I2iKm4gL95⑎lz@ eVe&'DǛLva|9/cH Uq ^J*YWS4% #0=`RG)E,Q܌@yR'ryU5a/sg}v wPX3 m ZnCC/y;lḛCM♶B ؎!kZ#` )F}>#;8Ĵ |n48`,]eZ8:51ŭj6VZ! 
.LrsZ")|Vk~n=Dm&gRM#༞2J#C艕|@ż%[1}WpOTryCh)M[S"^5c}iJZ^(c?Y\IЯGZLY籦_ Dh$=o-[ (Ǽ/,3WN .EDsr@eVD^y]!'-.Ǯ˒.rA" J^AH!ꖥ ͘eJJw4`+%@aLsP(KH+K>v!9.=|ݷ;Lhx C' ƪDUdF:g 0u@7J*QENC\` 85ڡ&0]{)/8DNRKWfǜ0]D 2Ɨ7,uHq=%@ʃ͸/QBQkOI])3QFW$cWH!!,:A3@Π>p B{՟ Ae'ȝ'&cmP,q,1Cns0UN5tYDooUem?`1q`n5¢"9!,G2R)KQ~E?`[JqPM$,7rgl+BŸ =/ϣ0 "{D{92 }қRhin6m mO@IG# L᧥(NVd-}Uۅ;s?u(Դ?9V4 (|[-t?se~mțR&2ZcYe\ba.JH_KKkR04\آ4nXCAmY/R !u"2a(!",GC?-_X9>NaFٹ@YQv낢bj/"OA/Haw,6$\Thy$PaNTCgkKh| W}x_o :ǰat SBcʲ!EL:)"LsBG`ߚFgD~=A+MM"R]q iëΎb00%w<{F`yߺĉ/;/0?O}0n.,D8Ɨ"v,A/]ޘGKOpf 6c[]1ջDqW]Aa9HZ(]ʊu7mW z diE3HO/ 2qk.*Wۡ%*ubb"; z)'jP @Po>l!~zjYX15?X>*g5Ao 6}OF)SkKt J?=kҹ ϼhRR ?A$Nw#{6xx^̂ˮ;GYu4\&_957IY>M4NMQ^0?9De5ܴݨ{6:Uӥnvg3푸6+);WhJ4T1z5#CƖ|Z ^)z>IG9AK%~kk))rmvZtm 1.Fg!)yۮKJ/e]^K2D58zz7 YC=K |]=y3Ō}$4tށS`f\J,=vr0 6AMW!WnVݤ?u#?@3ު2& WiD>A)tabONrb+ 7{Jk>3%J 6R2V0=vS)D;=Qx{UўauH0Vg?_F?"O $Dk?3ߜB:`lѺx`=T-S>3B/l`]&=0Qlz~sMߒjQͧl,Biw8!x $bZNG e}"@תQ"r]4~DGto\>6 L[q)\/>5вvbh I3ՁnzV S E3wm} $TxT]7x+|QV uq@D34Cr_u0j&QLN4I""uIB ^UټPLe5}"QU[zt,'-jSJ_B dӲ=^Ssh#Y2'A[ݢy18҅cvf#,M?}Td)Iee+ߗ!/[XX#D|sev_pD2-ÝDb/FdX*RDm/oQ-[ %MeFB|ϚtAo_}] a-%j)E!7ǰ 'F֮p7xvq[U.,u-jkkZv+7w>t1BX: "pfO)t&$6 IEVXx07"]N37/MIDw܈ujC_D൬e;/\mc]+#D/1ۦ?@2ጓ/8DڄyhL:4X蹆x.܉o`sMZبSk5sDixawcz-m2ohw+k&~ jWx${aR`J왽k*/!f)33y|d<~*Ms[c\INbWg}>U9я]6qAōѲ΃(^MX6/ *WI4"n$⫏[6LE"Zl#`Ʉ G ͈ :/ }f=y?^Oɐendstream endobj 196 0 obj << /Filter /FlateDecode /Length 159 >> stream x313R0P0U0S01C.=Cɹ\ &`A RN\ %E\@i.}0`ȥ 43KM V8qy(-> stream x]O10 b C+qP(/ СY:ߝ|pFcpƲ- DeQV-Ɠ剋BwOϪ9VBi )(IEѵtXIg`2i1/%ES&qMsT2Χ_FS}endstream endobj 198 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4861 >> stream xX XWH]JL1XjiGiZjUKaI f$dK!,WP h[mg:vrO{::sԌ&_z0jW;!|DAd^rG(B>ͨ6D%nBO}V un h̴Μj*kC3fIsUjT`: `=^o38F:~grAթL^ cSx̠,sPZ7EEh%l`i2,; b(hP䮦ԣ7/X䡌> o3|oyCyr5ꂍPd^AF"jtճ\4Bo?}rfiL:VN_Bah9W9yg7#z%TqQOX>;s>Eo(Y/?ywm=Iw m9䱦]ɈEn c;31_3L!D`hmC ZCPE1(ewʪh!hOh ]dFrm6=wr ^ƌ1;E*įNjq#pQ[;gdmw-r0n0J ,%l> ;biFDѨrmԖQ8%`T@C$_2296|8ڭI/Ѽ(T; z  ]EPk@n3<{ohPi)ft~5 nzZ42𵧡glS(Hu毷*O>~B9lw{XA1@nkv3ʖ]A8=- @CRCE22VSnw{гv !؊b q5#1^y(w׸F 
wT]VXj"{23f\v2UґCcI`7h2- A4n:|öԨJRU~N5t1IŰ;[DwbChsz?6*o,`э./)WWV'7k BQUոn9^Z%g?>X)*U䂔̯-nh;7q_ {քp!gy/NR+N4BT(UiUՃ4mo';\3roGPg4jl?hn$ЅArAR*VS҉W{Ӓce1جG1/LkV5pTwUl iGs%duV,)T$'u^;(a֖ydI*O#S_5+M Dv.;#d}wvWTo*Nv&dm1^vmA|G膓zquiڡMF0=u*M=r~zܥGJZZV&sVgߏqڿ vS-N?Z:-[,Qlj P?+/\dSl]!4A86CORa(Ql; +2]k0ڕJS@m2[utZZ5O4S DE2zGDp4 sc>^p5 N4^(dV1(U2:t~)z\odr}bߗmh~R& u* lWSmkk0?8F#Gv(#-&gqɤ.vnJYY>i)fqj w>{huS`1[0%SC7Ʀ&nt6a"r~OѬi%AuA̖Y^jzZ]iVOa2j'E>%8- |lR-Wtչ)gnnrl5nƽ޽& 0U>_coٔ-M͘7v wnOHRiE"rPȣܑ|fua4\M ,HljNpjSZײ=ҙ_XKU5 nH2Ҭ顥!O~R"gFG-2si5*fv={}mۉ1TMaؾGKl另h]5-)UveQvB ĥ 7 |@arVFE~$~/;#=+=(lk">F{yҦ`k0C PlM% ˭ΟVǤfuԡih ֒Ҟ$I(coefg>75碰KQN0|w9YYa6cְZSI] [\E%zgMcfYWȍ3ؐ4cCv1B Px5f-(VëC9&8bh*]]?d'%W$T°8gy/#q8U@ܚ !#;7lPF-c 2y>I E{є6E VB2bŜ|\n=*,v54m<)>^iԕB8'ԎayLO!?+!,O!<]*|?ҍr-~<<> stream xV{p0&VSvp :7@8@x5P ƖߒlK[jWo?嵅-?q(pPhI`Ltr0?94=I#~?ZxJV/-(,|N?(YT1v^Ji%Y@gh M_8.,<|8岰vp-?8z$qGZwV; D'қ e{O(Z~&Zژ=%OsLFZ. !@~b e+ʶܹ:?΍e{g{ =ݒ_%#τN'~bio`x Liъʭ5 ЄOJ<@O >|k VGOC#nk)g &1xGax91yy(cF@_O K*Ho\B'4;D Ԇ1Gh["K K\fAsv2oF.v+hpƇ`8ym`$51>rST4eF`<۱|70Tf,CVh2DŽ,4mWVΏhfH4b~[T,~.KEN~"e%⬲@=ro}AՍ,eMh0 α K'v- RRJhLubodu2e+ܤm1~o!˧p-*6(A7qjRaׂLib|p[ev-T隆B8LUͱr'; ͩi)F uEC}O[gBpE蠛: eP"e@ K VIh4Xʴ.zgda7zZ1o.QD"_*,SmKh-"PX Wg8κIxE^%}z\keM!?DEeYOut B|:0Q[ꚼboAy bbՋ.CH<5rfrQ]J\X^t X (i>b:z:us$ұ@|~iƸ*v(iN`NaB2ɡ[j{O@rlkccDg<4# f0۔⩣|7Ep!nO{% [;wnD"yΠo~b9[%XQjV[ fpDd]c\7VJr2u6`V*nwY@̶,ApYTy#CT Fa#`qqj/!ANHNj^`D'?& QA"2ÖQ6ss8 FlŞۃFL_ޡd)?xT)x-V/&y^ø5@~* II NG1Fޚ$ MmdcfjI7`=c_=N*1% 1gQ ς3|3Y|ic\&S/Y[~~ [kt3ER޾ ^Hp"U85  tԽЗ[/~Z3}l9Ak74ɩbLtƿkQ8¬Dn0vTz竴i`Nt8"OC]G|}6ÝSVE/ԧrHS'ʄ)߂5u\\&:;F:CRO]¶Y0;ߡe$B5>l6}}dnnkܺz:Bs.(uX:#, {t{Mḿ/pƄLLIjnzUfG#!ܧMAϲ٬ai 'օ01B+*Q9WeH{GZ(<|7BȘ<ꝋ)Feqa#UzA"6E+S]j֯T雚w_۴\[N/mZ/zCh@۽{܁wGN]8l饲k0K(MV1㼲N1Qlyaګgk"VހyWIKBC;7(^S^b~ $̢ (t3ջjD)x7ZAxt{>TP1;i9z/nbo,+<~&`uKmI)v+MJD5W @D(Z+ۧ>bR\d$425GdI9FKf؋"5p lyR o-=xHG8SN!əqC{CW?-fv[(0Dz9]ƑAVK~\xc"gYRQ)endstream endobj 200 0 obj << /Filter /FlateDecode /Length 5449 >> stream x\KoIrkwb9 
րwlam؃4Iq8$g;YY|hvƆY(DMDW:zW~<)<tLYe}rO|B'!ɥprVMNܞISCysRʺ5m.9tI)f6|s6r}r:~{jr3PVOJekʂޟ~5M8ټ)3;Bh`dA]naPV S~>6jN638E7yDA~xUvVɻ.['cӰɬ |ʆXw2OtRm͠#M%TܼA.1OuW4D}8Bl唓e')ٴ9N'uJ0!t!v[\PƢ<qTzQh ^Oڞ48\ jP*l,?HŽ*& =M xw ll77O!-af_Szj; 1X>UlXC!89OȆegt^&Då*GvDRPdA]$ZwE;tμ6'5xm;we8]`lt4agE< *)2B@p !S1  `ʟ)El\ \۟x5`*CE, PTwl'Jh#EP9Oy$w-؏ut ФF: ܱ',UK1YNӻb;$忬4)["罵>zFmQ-ȼEŅl`6x'OuO#MZS5LaMip>,V!xGC>IF+ qhQ ʣۗ@ߏi Za.JS!f&ړSc[LkD>c.bdu*^P ''2-6AH z5Y ^[q>  w*i!Q\E= ,l,3 Ah=iY{@Ծ-Ӛ}" v4Ub"8F׏B5ixQ'٧ÆDkhYu^#]pt:Ǝm[&)#iq'n1%HS:Tl}_%?u*?Q!Mp|42H#fI, ffAz8ɫ vv,]Q3;4ցG'ChYHR$ދ>-P9z3q:?V +| I2 -*OonrZ3b{ю;%MZ_M+uDẁ\U=<ϡ3B`!ixLwp^[cW-&ƘMW90WP4ÇC}[f +6}uogBoJM o+8$[-ɐ:<BVƙz'EtFq ^O/1/ѿi|TcԚ$N[k~=Lڙ;be#*R*w۳%";ܼ$j>&;Ky_׵yQk:JT@I?q(`b%@MRb0Q-."9Bf$\m}Ik7t: JڊY']r1Vvv|=T 2I@s+K+[Ϥe/M@n]6J!A.{l_pGeb z!SEH{͕r<Ȟ^e^j˱MrDҷarI4c G$ ^ɯ3"tiG%_vU`ۛD4,컭(6zD$+څ, SQ9;hrd".S# 66 du{F}6ݑZpe%Im~ߜޗ@V>Ro Tg#"#G% @g|Lk"}ava&%|@A$K͍ NS+(^YY_{@2 A%c9zF|uI}& dJ]G3 "TGHay!A5XZ"^I'U\&& Uѽ&V`_ }Rs@UQh|+tOWkzxpSSdIHWU>rəw\-uG&̓_rbTǝ$.ِ.] %r9(E|k󔻁qRwͳ͇Ynj[F-Qo˜zb'4T6h(X҆tnXO!8{D0+ \+cs+^j›σˀƔ#Fez"v1T߬Qdk+ #_msIL/v(c\d܏\@􄋚F!#1@.qB .I" sez*~/эbLXATzdĈ^ZMNϑ%MՋϪÅ&,-$=I`pb-7Bsꨠ'qz-3/L-bbjR_vg 8eȕr1ju%--secC\=5 Rv|:`:2,=R'kz F~3krկ5b9Ũ WlO[28,&Du%2G!v^ !fǪ2KM֍/vU¸D⾲X".&YEsoX.f5{=uŠ`ʺ3e7 rQ+ nR*BE!8qrF`g >@bbgz A@/D0o苞%F$?E*6}|SN0C{.|2*(>+LpZ"1["q*J \d\v!OYԟH^T6okɝ?/ 'eGe׬zѐݑCMkU8;U .v޲:‘a(Hrp׫ 2noXg軞o\,K.形*y~M>Y? 
ȕ?J P<&9FSQSQ=x\ٟFRjTy[/2WG0CZFa|2(f)rV 5CxBq7sÊ쮖j{ G4UH@).2]J<xͷ4lR&~s8;X]vX?/?SfV7 @v\ƀuϧ]RD@s?m[>$KoeL>,WZpDL}`s|N%J'u)+XpͲVzK~]Td[fJwդ`v9~pėPܙs8%f\dң+y(r\2z޷T:@|[oR07a.64?TMk: O~ CGFtcQ<, H5%]l =P]TCLw 4`ŵwx5 ~~apjM&˧VٗaZIv`OH`Q|%*ݎJr߮Mōs6ƌHrJo;X"74F7B7^i`esa*c=5mvfUE5C:_}i-f~:.A`,aY0I'dX!.SD2 VEbGZKrO݁9یqd ']hBl',Dc>fP o3roa.8"O@}A\ ʉ;S#+`3X)Ǖ.͜t?!Njtibgrׇ6R`#"Н^қPnCFG 7p5Zb^E ?z>-CH OD}JHZؒ(c'bӶ.hjm/ݤk{ky A؆8+cD"e-EЪ/ȱW@ԄVՍ1Q»?s^%bG*yRU|sXrMb0G꧟9.+<7CgJR~i|4kv\ޡ4MQjKNonȁw|/|c+?)2NrEI|CKPCe:*&Jo?|zW.rxJb7u\[~6C݌BۮxMpCq29"AI).jSwky]okUkWd#oe,߮_ɮt(G^endstream endobj 201 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1477 >> stream xE{lSU]˼i*[ ( Ĉ3c0e:mk}޾u -$B4j4S#x75旜{> 303󷬛-If_Gρ,d S PóH ڷBm)kj-YZ+Y~5VlKU5U I~-+h>9"٥Zҍ2}kr Z&)J-Q!JfV̮yJym=-UI*E0k.ِanl+ ێÞƝ\@Lip X%+C+|g7& л]7.Xt?3xTMԪ/lSV=gYF]:٢2@'.8WsU(2-N[lW  ѡ8b"Tv裸8wSpUSM#cHL: S,L~[ 9@ĂF6zUTDWuoecޏ%>`pX,T[T m';þ~캝KG7 zLfdzH ~ |(4u wh_L DimIDO"Imch' U@ D32H 6J ~ B1Qu|kD{g) Uitr޴M\y2,N=D֭y-qNYiR>~SԲc -k0I@FCQUqeȻv &E"\LOONN&IXIp4uo4pf""v]z Z. 
`1aՒkSŇǢ=5W(+XFޡ+tS7~o?Z6R_V :KO" {{)!=ўp§cg!a{Xl2p-=TaHia"Hl&C#ihpp .5~lvF Y/7[Q'C%FPdmRsFSƃ,\Lٹbtn Ba5rͼF+x(aX 6 mheVK`P'JsNZ X"vwMHDUWo;m?7hgeZe\93'H?l9 (vC$ &y:N[e(Ins - "Ѳkӥ')3L Wʽ-ڵ ΄'8:Y;^@"()Y S+}@><ʄR)u0-ĖWak(s*xr\nN籡٧z>9O+knғendstream endobj 202 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 504 >> stream xcd`ab`dddwu041U~H3a!nnne?2W ~/^$Ș_2as~AeQfzFFcnjQfrbobIFjnb ZRaQRR`_^^[_nPYZZTWqr-(-I-ROI-+IM/J)I().Y000000v1v3032Խ#녋|gB~choܾ+'/ɱtFEyi]nތv?zPn߷O<{>kڽsaE^{sIw-GºyX;rN}\}HuMy9~mWsfVǻ]aJ\~e-b+r3˫ns+ǖuKvwN}Gl߫Z.XfEyl2+-Rùy20Pљendstream endobj 203 0 obj << /Filter /FlateDecode /Length 163 >> stream x]O10 @+XҡUqP(/ СY:ߝ|reA>E0u- -m1,O)x@fw5|s^[+x$E b'Φ2?M%K17MR{;RB|CSxendstream endobj 204 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6680 >> stream xY xڞ2LYhg 싀 Cti5iҤi,'{M-PV *BA"uS?I {f&g~~w#z"x<^ȢVm8mHnً{ܟ|пwgpKöp 㥋5ibQRBbfgç͙3{bS焿'JN _H I6wʔ)N IL _'ʎ _"L _ H' _-ԭ Ҷ/NE$ci沬9+sǬĮ_>i7l=:b3/̛`ܳwLk)SMn,MDsb-1XG@!b,Gl"%6-Vb!1F,"&ۉdb 1XB,%ˈ b1XE"V5D('z#1x`!yD b1H ";!Pb7$apb5N~'۫g\0A'}iQB$oH?[S'~ḱ4fj 1mhO#XsXB#BoaG~Dȹ#e#o<Χ }3gGmuqtW3{j76hqR R @{,o3=~OgA6)MRA9l2@2]'J)^^(=^ɗ\o x 8tGKPv}[d `0X,>Z]lZizJp刣Nyu$7HSX8;C7L`[Qf){|6<eŨBx" a CY. "PxX)큻q Qp&釻!vv"~ 'S+u1]CM[Tζ"1t>i;UWLW=]3B {/2 F "ep#$Ge{,CJMJEŌ 8zV;4~4:jn~>4jr7XCJ^_y~& 90** X[ ./RJb4l?8@&1%PXȇj{`-fq5'}vsnǞ,쥴k[G F:S@ *7kK@%q++C̫WG~8s41=P#4wlTm)H'KX{\M%gUffExû:br[SþIJmPKw\\ I87w/ԳY n376Y8΂JYXt :%޵oqV=hq^=^j^i5 @T5pX[/ozDj| +? h&o`j 3_H yǗ.PԌ|@`0VEbZMG(:ye$(y83rn׶hDU4ag @к+%Xs܇>aNZO0`c R7߮ :){ƻ U8m~RE!TK蓍@k6KbaՠZ з˻xcr^H2U-$uAmH_@):8G;G=pL:}aX VQ<;]U mk 5F: Nk*#'-F/vӡH<(GAMM߷#7P-뺺]ԏ 9 k%lPذ@3s.BErc%| :DMT؇ޙ~¹Nw^ME?:W:;NԳM.PwWV>8)b?^p3WG$>缭gT5|ߦ?=v M}*0hzGgo;ŪbUJOjяoa)JAb+wgJYdjtMʨRP:B6zo+m[7_CjpǮ_X=.F)4@!Ah u,&? A?ApJp \w֛V`*y: uloVgMi Os =#IqԚE3rP`V;5+@ZȨ"4މd;ZŜHx7e+H|~b{~֘k5'[UdGF=8Bu(?8BR!h}9v=a? 
MXBcGp<49Ѓ( Mx~ @zN|R090֔q]4%$ d5'oosG15|7,X|֣d,Z¶;q)yx_=y0꒯ 6$~p>3g :*67lw g4 Gi;QHQvuK!>:x:yxݕ jewPE:L dͭX\Jj?8({.A_z[K(؁RLucl4FPox@yE8t3EI S|]ө+M2 Ņ(6+6N=mgቩDZY 8BԱ> Nł}Xn[ rV e-b7 FV}a,f'i}i|n՚硧`U!w\FnIpV4 FS s%Eui4b(!oM׀3/Q 7` C~yh`f".e:ZcrvLiFL?]pt58A:A{{"ג'` d aҊtU\ģ'ua)Ov(R)A@HaYVw_wa1ŷDOps_\_%11))%.&p[[Qf+:/w=trN4ƽAjɲɔy@J2_3^ٵ\17+| iI*4:-kQE/y rH"YsvЉ:`4q\:T2B,IjJPT6mzTma?ל]NElq$xJ)jg`Me^]@fT p(8azDQ\PdT~Yne $[bII{+^pqs{y\G-vPIՈ$BmQ>QtHda@\4Y,dRՕ"3|.5.pJ0⼲MWa?GiZ+T$+G@,*rPo/60.TjW`rbGv]G T qf4x75Gv*./mAyEns=NF3S0f唣ЕЋXyLS&@[KR&qipRuDyr e%*uAhJEUjeƽМp:|xm7T57;/yRiTDhU [Q J0aeQqb1#F&o+]-,K#Yʊ؟֢~*d)͹v+& ^BFBqYZkm23zf8MEuKiQٱ=# ۨ?Ԡ?tɈse&@ಠN,[j7F>؀d7ckkUfh @cGh4U@&)[v,z|xHO IT*] Pvޛp !F&)zތ_9HAYxsS\2Y`[b+7!|ͥ?2yj VEU+7H^PtosE\ASR]U[>GK|{JF(^3|9[U9 VG>,߼?<(äuҦZi}<||hMCs5mh&L)% t{ڝ*\M>JTn sa4 *&E?Y2ϕvpHҘV^kɫ ~W\Fݟr3[RPhˋTR]P HGeM {`bI3fk#msR޾  GC׈%5zʡ63}P~xॱ(dSm*Z}Rg[|'; El*+pڨ;(lYfIgaZtwu`/&/M{x_(J-ث*e3L+HѳZJQj^coެL4gL 4?p يq۰ϭwZ򂱒7DXӐL= wׅoEd{K@t oMl-*jv[9֥wxZlj޸T6>-E]p| 89σ a7keUbu@e-UI~/ʔ1%+W-)/=^m.ʬ6Fꪫhn qDŽ ,A|!yh@„d6Y21:M@^nRAri` dWFug3=OuVwnҸIT{Q?jJjպF9s Z7m0imS2n զ[ fzDu֎ˎ\sp kWp?|sUŸdRNqcMCm1+AA?pQQ2R☯iZRmEh&`09𿭧';.c(JE6bE"~Lba_yendstream endobj 205 0 obj << /Filter /FlateDecode /Length 3664 >> stream x[Y~_gw!# bA xwu;J|Eֲñ 0CXl$A+~xz؈F򟋛wX f(ΟlN9X7do6ߞa]t::!lJOB]BU^}5'VpOq] ِ6~5o q] t=xE9a8Wuu~S/ ծֳEnt>JK'0J=Y%܃t]lċzwcv|Ao|K{uӒ=uoIsc~!n#]8,kMQ8NVu~6I > `@BhSh5Ԥks磊ZEz{?F^ё"xEXM\hԐ4JQRhpf5րLYmr]KԂn"I$pTN26&THllD0s#h"rЕWIe8ƈG(DFDyV˂,63p>mv &+&v$ `Y넬¾oJ]tϕ{FQP1%1⊤dt*fZȈ2ؕ QnH]ݝ 6Iʛl8lw "n‰mַ4P@`"Qf"HۣȔIbx3J㜝UQ  ?Ƶ _ęgN#aYRXp1F&XAB*O+g)4IOxeq>~Ϭk6v2"gUɘړ|PX#)2-ypIւWH (! 
s%ī_#v]Rs8|Zvh( `ܪcu +FG`A2X6/7R3h;k’C)F\(\bUIƨ"33>o7 :%\p9#H=la;E:-Ac#?V⤝ZH=CpyHXf:S8B h,h !OT/ZpB)t|,SDz+mXc W @ZVTo{E./UDۂj.H*rm+l8:g#"vi¨Y.BOyI]Yٗ#C| FceѸY:b)"LԙoA*_:}>~*y-C{&~FKfIH*p\4hZT)_|̖vm|aNd s%90HZ6 P% [:$pcѸY i ]RJf}(tC8AesC:8ฤf[r]-CH)SXVui7e'EOxq]%ߝ-ƎE0;{PGR4TOL\dluFB.{e7Zijbzd-ؑ£%cfݧSDZ\+SD>nU+F&d[,EF)BuJs%gL]yކ%xCU|J>-T@0bbVcJwNڌѦm~{YMƦ ]f״>hZ.MgʉG!.vߟoyq N놛 .v~:iY936_ o}>@Fᄦt}>EEc 1 ^-3ˌA x 93# ɻIzӤeJC:˃EERlԎB݆I*9+RRTS*#DnVF#YU2L 9plJ Нع%ºh;Tp KjlO-;@ 7b= xs~~6'?9; ykc#r8ŁAn\EVs+$g,WL9;V25$^3c[@=v'1c nl|tГV9rp振Q!%Ba),t? ~BͷP6n̛f'Q,nB<%`" 晊r=.OziVtd6Tw|S!@WЇ#5 J1ƒq׍k+a5EEC980HjCY*򭖈Y 1$G@-ۜ`N/'\gx Ÿ w+DGz- g-.ΊH2)qYRc>uB)"JaD=<iuae#OYyEys-k(\ds>YM[kD X0â#^j6z^֣YpȎZD{\<ҡ- 9Fay~RdH[ liT))`BP fi@6=-5z.Rh f0u0mGP MCy1X,dmhMO䤮!Dt< I%l@0E%6_tj*qQd^  *G#yP(c:\7%r:,: 5! Jp YB_frDYcBS!I&*to] `W-MջCh~)Q\kєj)2.b[ˀ첉ʻQbTKt ! RG 4JYQI鴄Q.‘RR-DGfTdl~[^(fiWܚԒBIxss!Z{/-DenYԳlinӽIjhK'K7&:0pʊu[-zvkJ$Ӏ6#坣y]gGht6kٕvH'H"{5qǬTZ.aޯ[ҹ(K@+e|}m}UU!.*kڋihlѩ>浥^uCɗ)>KUWZBbQ#Fk9? 'u}:;f{sҐu #J?*)]SY,+,8v˒:|/''uqU#L`,֨ъAUbC˞{1h\  TGߋgӓ!0Za}Bendstream endobj 206 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10  ]ZUm?eB:gמ]_:6fD$ht,ô2qAVٍDyR-4R?%Kĩ4-Mr{&SB|iSendstream endobj 207 0 obj << /Filter /FlateDecode /Length 4619 >> stream x\َ%q}ﯨ;Bl6`AZJ0Zq^f>'neDRDzTVUfd,'ND-~rOQ׏Wnz}inkf_]_)8Vt}w3KJK=;}ïdX1|1/X9}_׽/<0ֱŶX;ݍٯVߌw_u12j֏wٻgPtWCʀcT.zcPj0dl;Ԙx!.-%~|ct][S/KЏ2Iu1Tq3_jDPRΫ1{{c7<Ƒ)*OױgV÷/SdEcّg|,p;7Cyo57zTn(Lx-(U{͏8PO>Oxzg3PťZO~*ۣURGʝR=Ma1> R1 C#/Ђw=w{H?`9Ԙbsߞ9[-A@TZI>H=峖OPGXX$Tp%J7Iisc*D|Lﲯ<a7X=Jjs[F^]G>{5b=|Xmo!uma}5ft1x/px:@9}i(2eP !_tb-ixZ\JXJO %r0ݪ\\\>!Go}K%&1;tUfSbiAlIf(8B= l/mq2~ɗrpyTE-<\I-ӼrrJYCj؛7A~좤˭K^>b|l 7y |# #H~P֊X96nAEq-ܦV7wk܊B*̡_y=%~Z]҇aoYJ) 7hK[H=jA`!aT;n5'專-&uN '˫bn(ͥs;f߄}拫_O_k&PXWbտ Zxßۀd^ʧ%C@qyʔ{)䥈㤺Z\sbјx7 Oem4JcPc>%6Ntw%R&RXe"`8io KQ кB RFSD-0'. 
*pC*GOUDkKŧʡȒVLğFjغ gvkʎԀ%7*&#J2Xڵ7_ _9U1it@eTiqX!wxˆ*>'<`w oUh9b5FR_| -r=hn6@2o,ЩAqP\> 0@}fP0躑z@ Y{pbj/^(B>%Q]`n6vY-@$&Jr*Ah83n ڽuj`Fީ&0G5z" C  Cx/^o  @.S `36Z\zrST1ĎAׄc4 2C:X>* ]P;>'zξ) 19GX Q$RjәhPP]ɯ"cLLtVu>d'BUĂ?bzѴ Ɔ"iVrDd$e,Œ]+A-}Dd12,ݣm̢g]V4+E%箒fJ%13X`R0J@jt6aqĤA-[,RٛB^4L ]PETl?Ml'8wQv P " ,m I1KMvAFo#j*c?c BH3`篞|'|bBt_: , qP0,SۀA4Uh>M͒=DYFKH$tMH qkڲ`:2i+oY ^dҦ*`$,@9 {ޭ3u fA'(tj@BkŒNrQ̌Ͱ i3!fmO$𢇚c̴Z A61d6)je٪|IuF~/$H@[8u> 34xhT;mA m(4#/Ilܰ'DϦ zPҩҬ N5r5e/9.1S\r?X,D lH(. $ez_öD $rPsB3E Y }_dQ{WBeMv=47 ϑjm.w5_4 =˶ V ,f/% YDe- ]Mr;¿-+A3#%b0N&7NB=P(&6 4J1v!%m%]R $ WZ[ֻ4+VOŸmn_ m`Rz,d]j!}L(Kbi^e!8 %`lz+`@ı.bLڹUj<_5o5 =x{X6Py ?؈f"7Һ BMp:&!΄`*PҐ @)B-]N1d-߱l˚> stream x\[o\q~2k&},@녱I0KRmrFˋvS>gv™>u?U[}_{~{OOO'~<-oOst'۬>={{oh|RܺNnO]&Xk9Zm}Nڄ[}X5z6gZeqJݞL7Gs14ZِV8)3N*G sfus-_ݴ#)eZ+?SN32k8]$A 1W{xw6r6'ß~O&.@rn}#yhu;X&iWQiVfo7oȟu/^o yZ|[̓ (Z XmatAhhPŭ_}v "X2, -iK ዠVN$ AnIPQ8ڢr3 0em̧gߜ^~zAL; SDp*ZkhB*|e?x " ô)0MW%8)t8T ڃX`;6n䐷ueW@nQ؊qM:4x;>/M9,x]⑲X5( Ua58_%@'Na w 㠓a!{<8 ՟%C&=xPmYc+Aiy/8 )kGa'(< j6nw_Vga/c=*R\N;GQvB38)<jʡ(1ج'ӞYJ$S ,H+2""^CAE&w~+ڻ[dû,:kAgeкS? 
燻T5tXO]b8>8=@y]Ȉ eͦМ:-z^qvDڰ|5XyHX$ښek;m"}%l>d7䫭/hwWGtF"XE(<@M<`&5T$X/u14ndoB[p$GMBYr޻+ l^VrM[m*l G>P]R>>%Rgfp诤3&y=08Vwx LPS&vK6'38m8BSWQDMOyE ,dsGsI)݈ACcVBya~/,F/r:!#w7,ԼJ*ڨm$*u,s; IB־ƴD\4dy]M(&J`Յc@9E{m[JPȌLܬ0'!Ȼ$sRʔFEE0[k\g^ޓ9RxPe<; 3Z@%m?eopטGy"N ir~R` BׯRO^GRSLYyOedz ).=OAȆ$R-"2؂nX4*4*R倽qS w71<9kjAό*l 1*,M *UXb ˰sYHs}dkv HGuaWX,~lF^6oOw3PZbޔH/lWؑ೘S%(!i4Dy0b\w n C)אr@逈|F5eTIr}>GgX\c-=RRWqQHz%p#Տs[xS)`U&R%E Q% D0B%07~ő9 c;a-w9Y I)$3^7'z~WcE'ȤM\j0a/Z !OzPώe8ڋžZ~hygp|67/S/_.fSSfp\B:YtlQIl]YJ?b/^jc]kPx!lH]xw_Ae1S0O(y SO%'hlޯtY`RfiN\Cz[niMʪ|eO˶0yL)@C :DM g} cNq0eh0W8ud8uXK= J6ppAaG#Y,2\p޷ TS8Ni}GVgzeEO6ekH5'*]]Hڀ.yqWؐ-|?]1,e]sa.RRM# &s]+RBxQPG8_F/ٗ˦C֨\2Ë2(~z3t@+9gsz)l 6̧@NjpCJ w !rA8TRYQtնaW`$-pL=d(bZ]㨅(.[p}=gV Mw铽/C,.p YBϩCw{>Ri%"%NXY| 6g=_38Y'8d!^Ic됰H$7@aLA&.r2CzwݕzZ;hځ^}Չ8l"ؕ.؂|0R0hvV3D)m䶢!JnN#2쯐a>csSi8W+dQ[x L}wD˼ bVj}^yBo~^{gY~^Wccx/YO[#`ShKur6ZΝJ9M^㡫nj></ܷ<~TArT/puqv}GDxnֵ7]a_46{XzT<޾ypWvy!VJ%;Iv9,cN >u;O=/au`i |y DYv %hi:u쭿Vz!#=]AR, W>\Td.P]d{x,Ǟ,Q)HԖӇF5F^5Rzt. Q"̜WsKy#y:FU#_7FoA{TTa]#]-TӗR 0 kS94(tT(`oe#vTD[9ðlљWbx"@v,lvrznyk} ΒO{];^:.aL"v: s|ȽpJv"];3d~|uatȷC/ 6 51O!V, ]Ð[6R7FFɴ!¡/r>d0dRfe.yйjy쒊|6Y/כ~ϡ2[r7R`RZBLrt2 {%%P;H )XSUǿ5H1H\1K1Dz3 w#qUQzd_ 7M')uBޛM=09\"6ߜ+_aendstream endobj 209 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 b C+qP(/ СY:ߝ|pFcpƲ- DeQV-Ɠ剋BwOϪ)> stream x\Ko$y&t~A`$N8qҒVZ9F֬֏_",G(a nXW5X X߳_wGx99^ QDy|rq .c`;>9zދΘw|]J^tDz!ꦒ+?I9FO΁7VAZoXcAӰܴkF֔u!F]U1uЭL}.G)ׯj]p,#Ϻ `,5↨m<(;XеE'*XCPZDX-`$]uI'ɜxOml j0X1IG"s¹xC6@v{w9_'RfZ"=;B2FC>r1MG7^.cxBIeVD4¬&6=5|#HG }1{ > }2"Eu@ aG1px2c%Yٱ|lT1HA(}FrWy E?(ˁ7SSf"irϮR_}ttۺa_@ΦW9Ny*bb2;(/BcUEl.]Uɡ⅚`ildhs{ha\1&&5Iyʍ^<|)G{*ZR9,2-3W\1D3x=Ol58Y]RpP| qeݐun憈.3cIlThRNa"1Wx?$ Ⱥ"" *]kk(x V ,&.@ }CmAЍmR5e xeIhT16 fMZFg̠mu>J2|XB'\\đ T}<Ϙ籃x+|mDnh& 羜 Z]]#M :+Zh%!zKgs5+ DŽ3ĀMX3phczhͲ4D^l${fI_E]#EH0䶸YXEtut1eϡ% ꥍSdԚ3=!/d b:Xҿx.{2+/aRVD e|zO M/wg9m*m,TCG#qpWg//Y 4f%q`wV{ 3R:- /.̫ʘ&-DLЭAXѶ@S8Sp!8.|LBn*ؐ#ME)God8AZLL3IDs UpMoVٸO𩓖*.ƏelW*ZcE,Ȫ{I^(dmېbC,ch?vly2Bc@&܊ ^8쾅v%=y9M8SY#msR+-s,$4=YA<{3QRHS^ūZi࢑'"~}[ZrhP 
8lIV4yT(ȀKC٤fd7w:6&'!vB׳$h>{ )gy~1 Y\c< 5n,3W-Acz#A!&ӂ\Jifشs'(F?p>4tg 'a筩JW MC${NBjFHqud`Zq4N$vnJøkr="zt\ `0Yh܋AkCwQ$^xf{fQܳ4)=uA>Ռ.螵"t{˺L?uf/$3Jn;kLm\=w{=7`4Oyр#.(Nlj^@a+ҕ(O=- z PPVJ e iW/_N)U/+endstream endobj 211 0 obj << /Filter /FlateDecode /Length 1437 >> stream xXKo7W,- }ZhTNl,)$pei=lFp7}[q&*z􅭮v3^]DJ9.5s/j6xL9S׳Vr˺=(7Ǣ| lVY%$nHܓx[\{Ӽ4r<q^SrL;gB>yܔʒOůSy mbZ~% 3aZ;RgA5}\*##e:GA6ٞ(͝2@ K4f(D!w>QcF~xon7E1c 9C9fܚ&ɖ3cM1&n`4=p̘h(AŘgz8o a }J4WP/fi9Koqj9l >Kɤw6Sl_8ñ+-Bfk!{G%,XW/‘Y鸬W{AW R0/dtF Y\(a+%shcyA`laxǭ4'hB8ςmǹBר6r=e8O3 3@[]&8WC.3|sL7tun3koU'" ~ o0XJHp4XtW)[t~2I%ㅤqU1{kXK2$Ni4Z䖳xgaOqC$:m̀V%U^݄\|$9)+͔0 -Ԡx1[ti 9P]࠸#:)@L)h/2{pFC1}L>ba93 ;Րl_7cij#WmIq}l˼*j3[-_#ůI|O=$ Iư!v)y%"=V!ײwey>)Wd[(Iq3YɊݑ!qIk×̜?d\`~1_e|V7$fkqc!3]$naf jft|Mendstream endobj 212 0 obj << /Filter /FlateDecode /Length 1854 >> stream xXKo7 9ݩ$=( k4~v^qv>RERGλFt.g嚳hff2%*H+]A6,픍 X],/g-FA'kG_g:j͖9 Vо/UWE/E|m_q OE|=/ЮE-E/E k*_'_fU|07 iX CSFGNvG^mJ#.{)$!_3`y,ciPQ^xj\,߬:*ٮnZcto^&>Fnl\\X[rm +0&Uj)/6N^QACҏWj;VwE4#lX=) kwUܻ.\ 4e 0iXΟDվJZiY 1)F=%`oF p}v:abnܰP* vv|ZFZ-۞[ 1'CXQ^S^(яQ%-HHէOrc4@ǎgE$ƇX$`MK,>B€;=k^iŀ/7QlGh4_uih1W-O[6#CQ`/ ĞE7PH~tLݥs5}J E.zZY&ʻ&*SOĬ0퓝}* Nb*)I0)"%5 0.OټtR66PT כFži:{@Q n.1 ԇ0_q g/:}]G=SQ\ě}=>{W>Xu14s_U`d{/jvQ^䰝GE(4Mc}n6v'%fzYUc(ÐDQ1F=d hui"Vϱcyƴ?KHbRI9D$|@4apnbDj#EcMgo!sbX> stream x[ko\_q/ ߏ) A:2B $6$bu{%wH|0c83gțo'1IOHLWGT9~w2g"Q)'f䂟MpO0.:cyך5_ {酈7yٚ|s#)g6Rք0^@lYw" 2C!?m5߷ܵ&Ie>r/Nuv<QdHk` zmY6gutΨd--I 1GZ'}݀G>+CHt : Jm[,CuҤ68mL{j8Cϵ~V*.ʺH~eCX|05wyךik>oɠ(P.ooo2!S53;:W˪{˦u@m.ۥX'ɨv$D gihI@5ϻYv,'gK@]Y`UD\qԑ#’Mg嶗+a/shuRFtt&|RX{4L юQIa [_`df;&nr./Zo7yۚl,o)tsF\>4JR:m9R5Rۂ:!{52k_yLT)A<$5=.,KCD'jʠNYr/B*R)uRfW5.3tLgcԒϚݚ XRtʷ}HIAgjdjf'zb k2$n#4 \YD:r!niX 'qŬqwk?4-3lQ=,NB0{P⾦ėH"(t=qΫыyxm Pl :Ys3k9 Y  ٱBf `VҦ ~!,fho6s6msk 1)j^'wCeEʼcWw#KAue%<8E.`ѣPd/RX=IhUtK,Y0ՈC,_J`]\y#BzL4(4=kqqei;:-z5Uk^>kG#qn2끏̍6p/|c)mZe|6l! 
xњ_X&BDxFyu0;XZNr1Dd 㫰 ^ X6v}\aJ(7a`"+6aOswᰱ>AG/i|v@ݺjMVbrdag-'W!Dbi>FPf:r l-c]/ɬ6*< e;"REt*D"M]clm%Zp(sn_%oڶIpPܻo4`JLZ|<3hJH*T>e~[JNW )(D#^?gJɮEX\!U)դ3ɓ%<\SxXb$&m}/ |(CE:rաe5Ejm.C A\m$Y^:Ksr]VS Sک; Q1*4Te!I GEY9*y 9;–^6A6/; sqa:X8$hVu.k[8Vj:ooΡԲa]N@Ľ2s)fDݮk#ѾSp;@B\L=1mvyyY>J 香T=cTW;T$ *[EC#fԇEV]]_a6JL34P@3yB3ȉJ!xBn:(~`Mrwr^-[y-"%I8f,p^^Š8\34۞r?AZѐcpVޭ^h̥0S.h=]nU\޲[JXE}0$Y@'RH#0}}JkNpe-GzR pAriLʱxcq޾lsEN5;uNXY9֥뵋aB E|b&2fiCŢ_>\Ixrf4;+]rv=zR=٥+Ym>?s^M{*f4uk2v)[ӵ/FmQ.8=ѷG%`q92Vsz]YߧtjN{}%lz?xBV/ݿGJ3h|s&;^;zJҋZ:kG;ď ,\v- ?$绛I(-I3G2:^FHKuLuf}un2-TНCDOVtk^L0mj"[;H>ʶh`w@@uIb TZ@\QZ_JпA _i(n]~ijNЉy u.x~2Z=m4=)7*+?HVNX,AD˃-+atwCynHպhaAsh!Rzz)a:嫃S KT|-Z;_TDf:H:/*)$fh |v/7{S¾Bh-m Sˆ j?'9Cs 6dp{mr8smU[͑Юݠm v$Bs  tӠmtC8Z:#CB<'"wR$Pɟf4' K`BגnBsH.F8cdmh W {7 dG/*AuQ=NDžJD > jꖋ xł8[ˌ ;8 ʙ W0X/<~z9U \ȁB0Bwa`zZ;*+ﴤ%fw5=&Gו7>V#YjMӄ JnNP":'JtN#OozPj>í&e1J@)m i(Sa2k({Lk&ggKn {Mo(tlBP6v`Oh pt,%lp*J_$tN, ,J4tR+"&icFLWZ8ʹų9mAZZ,/.tҰc> stream x}Yɕ;QU}x`5֌K~Lwϣpm5=>Os-oH-E\\sF>ܟ,[^#'֭6./oudCVV x vG-w%08 s#ֵDRX\Kt\8Dxj5ϕ3ybn|zG8/|>-Lcsw< osbXma^}i/ OwOҺ+ߋ= ) %b  `T_x8UbOs^- 8p6y?y䴍路Tx}|7ǿ{loz$4qc2#&  < |z~!שKJwǿ1)\S7?_f//@&XjJͳT`4K)w d8uP{?/o? \K@= \C`Cx+y"ק߿K*Mq% ؔBźS^cQB}6\}n1T ,a. /πnq9A9b@[H;A) An##p $g3 ^^B d3m|咼,wjdpNjn)O;ow 7X 盃rxYQE!z,$0 1w۷M5dmxxFMfYn¨pҰܣ 8J0x2ʤ[+7ݚ7 `lȮ_QfЍubi=C٬Wpp2aCvw m հg3-qdxHO٬2w bE2vFH \ atNKۺ3 vz)%ŸT~Ɨo aѾn/'z-N&i<`F@ )-+h֙"\[[Lo`A;eκ`ح 2-SMP Ωd-#y[Rְ +W5bhKi^q4eW`Qt!4&\c Mp$TW+ .,_ 8@vQqJ~!0>A<XHn vC{ʗ@!+@! RN8* ;Í%dU9(t0M,DqDGb7ot/UA %S=E/2-窪h(a\^8neba0sV36Yv䘝R0FXOc䀌 `QgvkX=XXWSSK5F1Д# =x/Ɩ@ AvKt/kDEUUy4Sc~\(، T,l!-nF3cvB:~ٮؿ><7Xr+׀-ɅXwY`[+0t~a,6vOt. 
KcbR/b:W,+h>zaN HϘy~U h-`e}&du0u"X1TI]" qk:-52g3@sԌp2^OVŭň3%=qHW{CqMS X^z IynZPRS@: AUma4 Kq `lK60AG ShnE=,6J>ztr,b4a3*ouRJ<G%MWœPӖzW"XqX& #G j`j:f*Ӯbgh.559&!1"N5t f3+QNq}.{22 Qb b3W }ݞc՛g&WQDu~!ƊP,iaD6ƊX+);ﺧ| a70)1jW*(VJ&ALNsJ5-mZJJDDG,5hSBa?<7iСKQ42V O =^q _Ab<)CG Q[Q04!$%tMaIy)b9CV$΀A-^a$/ H\L~ŀIhX6f,0I"&(3YyA o91|C;C1jH uf;E-0CABnk:#29EȪf67dBH&Z|{ Y1(cGM  Q0W4P2F|UuR!- 9'\"Sgӯ5/h_{l/9J;K,z}{( um; b0WU#W Ѽ9 %2' Afl%2љh#PQH[O+fLQa-d&E WDA2>7{0٤{ uڼ Okh8wSfjT^3%[qiL75=@KYHƿݥrogltxt(JФ3`~Y`N5"X ('wٛ,mctFT.kxOA6V@!Ԡ(9xδ*k9"a㊇}N5ƆEZ^&=3H`xZ5d ,Jz/OXWטKXhF0%Tcs}sv#pN#wg bVHN10^{9rZ50N4A*#d.U-ҭ1e5iWVh<)Lgpτ.hepR6( ,%4Q(CLc!Nax. 0i%*6LܲmQe``L|w He V/0pQȒ@x4Xd]0PwOLun!J4]N Ϫi#r&:7eD=휪K,W 8QP y3Fng+zHě`Hw3DjW%uWAsl 3xJGd8 l}kzHمQwU1v) afn| !Ƹ2(+(gF˷0X_,r:69 IE)s4Yjـ`H8u2@Hh[BK^n*HhS,P&l ChYFk״R큱hX Z׆VyJ QC`ry'.l6~MiK[?a5b\EU$Yt, )\bɻtq{L 3p9Fa?xEkR R7BHD50' (Z(Ч졌݆# 8,v尧Lh@[.(b =Ȫ"kٟ\S!7yǵSJ Pȶ\I4Q=tnIe4GE UF7vPeI{לͧу`LFO Q1qmXIf)M,ns,1[\#$8ULT,SJ;w#2ʹ!=Y1BfO9[}Zvk_t/\Jկ('c2clцVϏWgIk@}\=|L"Q##\GQ( &Y2-7klhmnaЅ&+ތދqx x} 0P!SQxN1K'F?5m&em*I$yN4P ƕŚʤ{%XjĦ|(t4m kI2Th̫ ;':tk( dm3w5} qI2kkSa^oNڝc|rbJєl8[p%[&Պ33D1d=FIъWao!'SHHj'Ll,\.QUf.}To3F]7o\XD#WN +}baS9! 
-BpX`*cܘVŧ&"0NXڦĈUax}[m.ǟYK ;B w+8V8pҝ;—0?9XDN7S~*9z%)O 1n׳ascm,_O50ʦX2*a*.϶@E5bs6sЁ-4[Bg{,)It,ާo+%Z:R!o/cdjN#ɨ;p Fԛ;4a9mG[G3RxWq~Y^2u)ap e $ Ou^H6nKSbvƺ9f-c}$YB }1z,kN}8vQ {mYaJӈ0o|V@[3VϓTŖyzc^)őv\f5t7"4dp@%VmXy Uf+L^*',H,ReԸ&` Ukd*~eȀ+f=t,(:w1rj_:J>v,vռ})|(A/bH5xBoUay(uW'R ]g|0e>P![`z!^ {*]]*W6܃ϬcX XaZ8N٠~K`x~>|;Yxw &f:-\} 2c~l5(u5YrƝgůUU2Vڼ$;xш:2mL'4l9wLMx!R:Nz~X}9Ur@NT 98Cdǹ_W;1bӖYպpKau1IU{ 4?^HU!U\3-3-F%4UpDkwD˳4tE76qdǼY%y864Wfa d kmJCߛʳa<ϓ_%6һO-y-zX;—uS3xPhN /YVQ.|0ZrJ)#n*AuN.t:V0('sOi-{bK0Y ̨)[QyXۼDMQ8$Yg7߼LMT8q+XEZTxbN&LaŔ1ģ瓥[d.{YaLօ/y>oS_K7 ܾq_$^{Św^>Ms^:ޫfӋ P H6(UP{Oi힑tny[[@nOrrwلn[ɻ$X$탸-8=iN&K7r [Lzԅ/21mpb73Q  ܚ6pkfG2QlGLm)JE?%}ҕԡuUkf+TM>FOYIX6z4iti}zha?^sHƧ?f:0Y/ݥI$_]f-%z5Zc^X`:湴8OK߹OlnorqڔfuXuޯt'ףq"o.\vgv+;#incO_mwp|\|||@C{{|z+iwm;~ 4ͱ>>CnR;%U2o:?QtMh+C_2ܩHoGϿ;iZٰ: bg[ Nc ([*b _=+IפF~ݩ <RHΠib-vsKO x7v5sent7> A%<ɊgjZ1 enFVc8Rxd:zp< 9~XF>4.a;ob>*7? CN|pgc+)LSej xDcU+:R^%/)MceٲdSLǦ#|GQgCgD!6*" 77v6GT6Hc07p9,QT_]9vśGE1!n)GΏc|2.A}=jIb09(VKycipmх.~[΅YȎ"k]^QCnl9+y` /ZiPp{1 |U l 9Aő dqSl|j\҂j (ME0!JeM7% P*(S×7&1)gL ogR!gEH(lt)Jc)(:U%Jdu ;S`F{6!.0l=Gy B/;M%iAsݙw48BിIB=DJqU c9zWa&%b[5N`&SHJ"mϩZWxNֺk5pBìи[ߐX!^KEJ+@ |;`<'ookwR\ZjC+Yd +EOX6 rFԞb&jWmqy㤘l/WMjrդN3:^ Ĕ8JѢy0W(A!G f㏴~S?hT0 Qa U%-\QZ9a U!:ma1J!i1mjOJJb iSQ]kʠJHqX19^¸09:V?^2^f"Z Ԓc2ğC!R"i~|.pA3t JJ.y0AK\B9IΡ]8zD} C]lŗ#Hq`RR/ 5x t]}N&"ήC^9avR\/\hGdJgO + 'xe'X?)~j9ETpxzrsT ]r$,&u,_S6˜{g [#@GBQ4;o87qlAc iK""TBlyŮSԁ2c'r<|E2<\B%J\we\~QGLGQ 7<#M@M r+vNp󔧣|GSG*7D\љLP$GJ?) Ya"| '3=9L" ":2"n2gf6 \ۇW96P3~[#yF1W1Lzդ(qLfA=8 G-Я`Վ#rF9P>;Yq9[հϮA3@X2 O6q+U˷T?w:Q"?V>Thkw#5|lߪAmZ;*NG[JPRCօ:ǽ{Xvetþy>ddoCڊ9ɗoY+I󨪷{16Νo-^ձ/Ju ʬqyg- o>;>x;>~s|׺qv4+K> stream x nYYR$bhs?{rnm7r)PCS uP2^P3hiQ)AAQ(L˼{Y]9}Lo{u{۳n?9Ɂpk?| _'n=yutAȄc|3zVG:^OSOpv#ӳ|Gʧ*Y=Bgpixz]0z?J;׎Xs;}.}e\ބ8O_tRꑞs;׏?Bcol칟\ }SO?EfcD|b>2}fc G/RS5tZo/+=vh{,qzE<~%^KO7CK>kN}6}6K-79HQG;Kn5[\w㋡Oӛo=E{Y=_%q~&MF\w=O9z 9ptT[f+ ۞sÊ.f7Hd_aoi$:GWy^77sͧ_VȩF퍀S~ްF޸=ॐJQPy4`Gx~j~K?ʫh7'ܼގ)2iXQG_Tbi,5g:yN[cu KVy(KHi;z!㈣S8Xbkl g%$nwݗ],4 }vxU^>޶>]ذP n. 
Yr.蓧1ynݤ?5CB]rּztWZy}D~emTr{|ޜWTQ.vkӛ?xtZx)т򶪽43e!:ױ#Ǭ~ϯrbK ZzoYyH+u>mX6ֺxQ>|NiS-yRxa%vsӱr]y'.Cw^hqH׭ޥeZ/GWD[7nP܅q<7~E#`|\ѻͲ{(1}vn_e7-nWW]o_ϻW/̂mGqמ>>g}|WH;?߸{U/~P! [ s՛y&K;{MZ[?o}GjF9}xO“ɬ!O[ -ߴA=h㎛N9wkՈ)EKj-g#%Ds,&Ѝ7z7ݱ= 㬝JF & <}[.qo'ʴL$yLKA7擔J8M pF^Y_F 4c{fz?B?@K~.w1yO{ʵp >nkO;}ܵ|>鑏IwtnxmwB_o}Ν };Oң}3{oyo.|?y7g}чߧ7}wQ/K_]O_|}wŇȏ3'-?S?S/گ}[xٷ~뷾/w׿M_pY>m;?7_k_?~_>+?]zk_<~oS5?ʿˏww{oOx_{/|{S_g?gԋ_ƛn}Oҥ7~7}5gg?\s?c}9\{G|~[;_G_>/wg?oo~__w>Ko_O~o{Ooxx{W_YuO?כŋ/x>{ߋ^w_xOc}O|Ї?s_g{N_W^ܷȷ}W={wE_!<ݿO͇W-|݃?o{3奧'~';k/=_G~ٳ}?Wo~?˾˾/~_?vo˿կ~qooQx?6}髾.|η?~?[M>KOO_=~ngr\! O~]]Os᯸ᆟ};;oyŋ_w<>c~7{ۻ{>.a_oil<|~þ?OЇW' >=9'oП{_?§ɓ/_`nFIę&ry"u֕Sqk566#k9W_[ӒH$=Orz&R:: -*Ez' /WZQʐHO*ErRdJro60c{ QM(gG:#X<;8J`J=I0S$x8ɡ x@KlNoMV*u"FopKC//Rz=TɌo%z̎K̎TܯЋ `$W];hS&۔J%lԄh_"B˘4ioԖDG#~nX$k){e8# w;?h8pp?t)urSqK^GtG]t%Fߡn .8Bo!)▨]a$7n`q\ZhsMӅL =bnF'&KEIFaEs XަZA :(`t7m =@n0i<h$&g6^V=ɞF,رP ?=+-e^j~.NG9\Oӓ*M:@:vr ?] 0D0EÏg^ig+Xhjwo@ĺ7Z WIӚW2@hZox]{22jƽ;6u=I,?s"٘S.M7VnN DztkN73^bZiAo{г5z[dԥ[Cc_0SВ'Y7ð!D ֶNїhBjh5qx|hDplGT&lAyA}4 襁`['#jK ~m-2mӢёh[{%GK-mk!ZҠinޒ^#zmc-Um6a-lS4(Z4~% E聁:`k#P+Đ` ZxYDr?rڠ-dлA@:1촏7 ?@iűI=1Mc9b&RDOx=p@[V/cۺ}6 '̓V¹H'$ދmVabDhu_Xˠߡ'iKopj(cgAW޺&zw֊ְѐ>6>VM:dm ޭ*"P԰T݆Cj:v^-` ZںfG6ЈKD݃yHXh/ ic{+tmegJliM..TMsmŻsmݗIcF][As)a spm}\[kCeBMDr}$|Jd}ښO@܋{L6<~\ѿ]EfUlW zΜUѐ*2rώRdx,m*E*&k J7{`C޹rgymIXL,+V"j8rVoDպѣ 2NVGi2˥!*>,X[nJ<&L8`1jbRHwnPcy;#$BGL F?k W;MƼ9ŐXh(:Y#Ή7[B HoQD$x)]rxCF33Y8g>@7xv,cs !Y( )@g#!r^FPL^yKv%j(Y+ްQaҫݻ)6yMᣳW|n݈BJDsHc7|ȹ~̽Oe^~#KovY,n8@ilk +!?loHF855mKhȽj97 &=-i[;+OIofl KI|ZkzP[|^uq9d+0 dcHjc`ER@-N C|%it F }xZ.KbQ09GgC8Lӳғl+'ܑꞎn,9Sx{FHH~ ".^19pȧ7Qƶt͹oμNAVj5,f'uAj1KW~x;COoKO~xbjx!{K4^#x `&tA a ^5uL(F+ VvH{h< Y-ykdcއOi3[ͥ/ XXgF}p{AY9RFA] mE "ȶYկN}`Y~PmnU|27_}^QN쑤~a U8UżgO İj jY9Km2MJxQHؚay:vdm1D+cl8uL" h'-Y0>t' G9mG*ݝGwG AB?Gxx 9?}K,}J푚O;}MijWEHͫGj^2<ic"7nF{@#-hsޓzƉےN(^DBZ.\MՇPh+2&ijv\]]sKɺ_H7z;8GmdvQd-l~Kq<ꏃ%sb0 tGi"nOOmpk+g X~3 ?!AlfNAB=sE =2eA ed+^N>" 6xU aKB Vcdb`ȋvGVE RY5ep?2ӊDf1/ okI9ot.ﰈɷ-^H>p9GLaϮg~uYxoxq|Nˤ? 
*'F~uJ c_*/8eI!YMXma.%W%qV Ӷ6 ~mRKĂSLaZ*%ߟ@_x B{H)];bz^U#41A2ߕj䑹-(㞫ϗE $/U辂Yi`=78](gu/4n!^Jr=V$U/_ +ZCnw|m|o}?'k{Wh}]sz!RHɊ˛l˟dU ؤL]˕dfUI\WzxG*yVwK*e.m.a'߅P^hD,#p*=J㟔G51p2X]D5]8#w3pEbs zƪ[q^1lR]m$ ^ Z9]WIU/p7ͮ6&Z|v{PlSzN~\㼈yt՞p&KB×^w!nz7wZ_x՟{w\@LLNqt '\w =f}|x]{|W(׿/j:upEdP./ XU˖{Y !ťƑI`v Wp@]evUD[W)]*EYUa39Z%1%UF9.^"8nΜ&cN[d;F!4A,c6 ď׫Irn2ܛ]8j5WM;ay+O 8bw>iywPFLn@:ԃK{߽CN~ԍmoGfdّΓ}T(0UDj-t9,FRe;B.#Hoe= ]S>{~3vkxunzBޑȯpA2Bɂ ?S<#}]Cq@Ôt#X <I,AZ' !Е}9e;RS%}!2q)E`fngo܆ >|Ce&2?8D%$elc/A{@+lC@$6H8Dgo!C>"ߢQnFwla!F 8mx=v7J bHN&O% pI&pO#Ǎ@8D r'%(ؖ1ӽV)GB~U  8·B1IR(7 #&XQtD(CB ,1dd2HH%*AxDDˍ1Hs+0F # RkʰA$(yznYS'Xi&XFB?D\?8CNP(,C?@OՔ)pHGouc 1РGT!RBvh#D?fS+l M  IgAƔ(!$ ampV/lojVNXP+$;v!:Vtvyde3&:Ž#Bt`?Z1(ώD><;XLg!t3 ω⳦41$s`<NsABX3sIc_JO񈰑cz CU8'捍8DوK##o"(ǬYqՍ8DEb\$dR@rSoELh ?Dhm!AS37螏lmy۾"wlE۶oC3#5C܈08ܷ5Qm }/;DZ,)m;ż`:@T@lu췉DI>ɓ-}#O-ͱ=Ѐ}=$\}?lD׳Y H la":Yr~PG=u]CO01[>4ռ 3 4Kζ0嗑9!B <>^båo!r&?4 xpHmj@J$H{ԦQ ~zDa jpaE^Ru75#0+@bvˋn8$Aݳ1y]nЧ Ne` E8 YչO?_ }|mb##;z0b]{pK PԢt#QVp\}rF !gf0@)"B`kAx'p6 MPI{KA A Ë2#7쎟6`f@Tt-+#HہhA %`9je0GYZADŽ 2 mH> ߃Bcd&g7Ԙ8qچ!qoʻ rZ& 2 "'otEitWDI?k釮0>#&KIHem@&H;#~S$0Reih?$m6Ha?%ƬspshTD:hqp:@$zi& K@L֭J~463/;2a/b6#;CLm^fkӍ[Bc<5h |y/ٗ9m]C.Ʉ!7wIl#K,(D ]pM$I C1.Lt². 
d~HϘq#2M29glvi#uT.Qs}؂sl-Ui OAbh#!r̜Zحǡأp70nsBQ1Oa}7=G=b G˂jL'K4\ {YMɲ'& cQH& 8%H?<Qu G/b#w(]@1iD0QBXVԏ 2 e{,9pa"Il6A7E#y-z Dej&M}"p2Aq/@4 'C #<}ڗHUEQ_D Rr?=SF2Қ&%5'k|P$K U?GuǓLCF@8AhzA(3DwA{ߒvĬ 12gԀp*dH8= xvMARB^${GfX4)oWhp*RįSm  "jnoU뗡0#raTq!!Ȑ6)o8 tLHF(וĖ23H8E9nASsޒ82@ sII=ZvjIr7_,>n8, ޅdIh U@zF(fjt%JǡCNl6$,{ZlJJ ɳ"AdV(0)$] RAB$2W ے<*@N‚#S7-I5iwN7K –$Ќ iZN-)lIC hL!Ģdc)U?{f eH׫CI,BLViAZP,Tv&l9Hg hԠ Lg{ #Jv"B3д!MhQW2V2=u;rET8Fb!n<:xr53){xZR6-EQ!M-A̳ m>RǙv7IӡQ4}J!.ɖ8.%M8.[,8n&;4ykTY- ĕ"_ҭ JamȚR| c[R-ɒe[>4'eH "!Ϸ^2t PjHҹ e7Lk^Pi{,҆:SԺV=KZTfXʍHH҆qB-ǦdwwypQI~Q#EvH?`%րDq4>ٽ[~6#CAZ ⇌Fs?B۱~E:MǦ"ic $ WI"UqS D7I',/BH";D`&a<^>(@]NBUGR HY+MCrJhB1_E8fz/!UZny*i "di#1,BoI]40*(8Q"1aqqЃgwe/v"w6ܛW lXMCvCz7T7wVj+B'sNeK\7DT81iU\ ܯȋ^ N[Sv=Μ =4vGsr*†p";iȆt96 $!H,3?x~-;"(>?rDH䡇x Z wF?a?dL# !)J P$\7)XtI%HX0gK_EVޑvV$ؐy9-k,I{d}CԆz 2Eqy#PŸTo1U.'w0~DP=ڻ!E(cb__2~>3~(@cmp!Skog(%XN%E,/@i -i-O7C7uJ,CK;D"|?Xy*V}db|?g;Dl /-enC[E$P_4Qp~(﷼"6jGZR_~Iw~C VHCQa;DY?KbÈCW U.4zq(\!Gl"Lݯ i'N WVDdL1g>H2!j"i,LV 3"Lɇp]u* 0Ts 1` %Yf,@ p!4V !l^f8D $*Cbd VC#b䔔\!5B솀Cx f8DSXf@?r13`!f ]fCQ7(Ƃ"T4er H,BpE֖!3q3V5,C!bø2 di #b !3 !l&ϧC &f xMjMpՄJ)1@6KbTbvD1M>S"riېVRGT0Ńe*8DLlMS!f*dqBV,+5+9?zv?H_)t0sB>55&({#.s 8D֧1 7:1cbfLe+|z.|>H{ 41 $Y\`!+|Yr 5$yM?ͶHlH r-Em qB'?xf qvBZgw, ZƎpZa=PB4)N:d!f/Й&qir\aέ3M&2Me,DM#bɐhBY Tbh YZ!I_4:9CEb!D #ڙV(OաuXȲ:kXYZ<¾R8:+$[d`Z!B! 
Y!Z_鬐L+.EyG pVHPq(i8=% Q$e9XAigsTڜͱ9 fsLl!X&l1cfsZ9җ=1iO]B̞8d:{b"fOCm+'`u!0, =!慙  YvxQig:hi3&C2g,d 3 TBΙL3 fA3fB,h*ŴYP蓶Gn YPZf9X/<FE-r=qc(ISDD"hvCxUٴ/kg SNАMAÈSs4Hc9= #8 #Ia'dD.!f4icI0--B( Ј^s) n Ftrt 5" 3 BEٍǀ]#]6fAXrg9QaDphHpS<("yx#WfR IF@ WD9<[=߃϶WO4c@lAXcF /~X8yLU~LS.` \-,iKBja䐳" 呋AjW//JUSq$W$bDwKc9,++"fYWPW$j}dqC09J%z׫~ic|$Yu 56qI7/Q< >KYٳP<5m2W!Ξf"\A,Ԇ@#J5&h#TU ўPFD#T#\1|t5W],h4jFC8UD8*jB DȀt!vbO7 7^IUk#]Y;^yA+Jg"wC,QT3u!J!f8%4,aa']QV,@!5SAuh5%@݂03Q, np ~FRݘbF7EҴaZ!mr,bî8'd@PdhCJZVJ~0G=QM5=ЏhłEPz̨m̠J7wÌ>aMf"G2/}ٽ/*IHŒ8´FJpoIa(8D^#W=bW3"Sz ÆtY[Y=mfHiƊK*Bw,w, ϬO08c o'*$N !1Y nlŽŔ"]Ռ3v47FC F4-6{chi6D$"LO֑x͡ݐT"CCC.6Jm53eӎ#D2BE*#ؑ.YO5wWDfDl.,&UL#TeՓ$D2ywih|.ʬÚE,m+-K W%k{*Ta5lUk٪,|:fVf53BW4l˚+^ֺ՛ȬI,5I5p2/ Zj>viJqMf}$2ҥC,1^>qNj꺆UWM![q:؇.5u;؍ $[tj>Q\#*+Jn IJXr( asuz2ele@tZ(-sfZRY׬QlCGHf6  ӊ5@IOtX44>;.Z}giIچh/ Z`sC9q b%qRĂ)*ġ:`e~"uQ҄0eʬ)d܄: zfѶNMhe" 8Qs Q a iU8^IL+sf&@Y,V6 kK;ΰ PuZŢ`&& %AC'S@yσP̕ f_UijVWvIub,U0,zV`F]`Vf>?0&3r GC4gV<+.RVYvRǕΪͬgTre琴3+%J6xH?Z\AAIXnpt 6`YqJEBTӬC;PUtՙ6ML\u䷬δkɒ[iӰ+<քYy Yq'9FdjYY,$=gffd8'z>~!3)HlI鳲xVgq=\ hi Ŏ^Nb*`޹F+5 8f`MT)?VԚZV8 #iŭիb2Ur)sбCY-*a:V%,hˆ4)u&K]gkYIKw0bc̻`(w:Ho\umͽW7ʃ&Oypp*oA% '<(k\G!<(Bw֮lG9}EģKw/;|Ek&cä,:x/`Ab?WJK`/Bt0At>zٽ e7+2+An).$~OU/B "EE:"EEZށo\*!@;]^uKo"VE lhh@oRdJ]~H xs$9imiEw5 Y!W % 1+4ﯤ`Wo'xJ5>;G,^oȐ|ΪǴ&#|N:B8)cGdPxWgR$2ayG*3)#F̉DaUg/dMifCxjNP2W$9d}4<"#2jDPyN`K!sD2ȹ ru32<7Hb䰳36$н{PΖwD|9mQZ 4 8Ɔ Tg0:#}xʑ\Qۇ#*ߊ&2|X8׎W 7!aq!w.Uco#<p#ֲKbg3V+ J1]aC`HÅya/ 3.pv}P "K$)Ug|uvmYDžUՂFEشYK d!,">.H3rEC|9`еػxm8 ݱ/|cueάw$d3lوč?A,F'I)͞RŽ!ɔΤ]L,~gYuv$]@Nv LaH Rׇ2jvbec/.qY@Sۑ!_2]Gi+*U|NFݑ(wc~2e|NHHqJmG8=p:#t+J1mBY!;u9"i;H'[`z}9rwq`=2!q[ijY<g*^dZH89KIOvY!\68)[j6 }QRiDF!jI4* rҨCdQ #$QQB"HCDi#R 2մj3r) -:eiN9D锥N:SZBN%n\|!J&-B%TvN"T 1BU?CP,E Qϧ&`tNU^tj"N}_N-GSt!J8Kó(Bp#SY֋L9"SMdJťf.PIT*Z8|R)BXHϨGJEoT#JO۹CLYLz)(C\EddKR0*5yR*5{RdRJyDz=rRI`T\Tj!FdETHu1(U"I!Y/&aRr1)&rH16ӔwN&.J9DT9rrYKMdr)hN 1.RL2eS==ٔCMPgdSQ6 RWʦl e1b)Dda26eS8UPS89UڇũfS9D98CS =rr*RT]©< JMXrc1*в, )[!ʺ!% 
#𰘙CYfR#Bpfn6Q36~(;1Mĺ'%oyYR!o(B/,jnIbMy[7vm!F4ByWzi#oM9y[^f!Jކ1e.;R):*sRh#,%vR2>;D`Q>$M]Tb\Fe!,pq>(Kع8Cej]!,oq>(竚8.|2;'ᓴˎ-dR>U'͠KhRZ K3hǒ/%yrI#QXT.j aB% 18TERT#I ' t@K6&OH`I=$plzv:@i?XځݓEzD~DʍzDH!e/0!||?O,ߔrB%yisyD(pBqN툐,m0Iv!m03;"6Q9U;J8F>Evpqq_jԍ$ls䃈դH&$sy]-#QKHz1I88np|qF ø;q鸉%s!":ōi"kF 34xqDxµK(v75C, H~8G~3fp;EDSYTǔR\8"L‚&61yȫT>B&A$ I}DHMRkHCe`8Α?pёH8H>H$$M(,At.QUq7N\\EհdH8H>xݢpE3/vPx2.M "4b'4He,R8H-0yí#Wi ,0eYҙtr<3$y4ap|DĿg8Q̙޸pBC2gu3f*ރ85Ly([IiSgztP#n| K| EԁD1zbG8>D\FÛ~v"-)2D"C"톬HR@GY ?'V *- j7Z9{P;ʆ*X H]^@˥.U˻]u"._ :3 { E}UOtOV>h )N мn|淞nYvoT>+CȅVk/G6ɮHT'zPmgnSqn3VDHO]$DxQ\"ޣX 7aC>sB80f?VJ0t܅h7o0vsW~XE2`'vd;''{5{^U!C̈́s*9 bR5 g xhI7S˨e~г25U|&fE{A+/\g||)C!ţ/Q1Ok[/FPP$iCեt(7C.:E;$CYμLQ8Ht =ZO(ֳ(YZq;}Rr|a EsRQNR$5rVF,G,롧 &-a=qqj)81yĩZ,!m0̙#hCU5iZ>=suJG\e5B!],󱠕>cA6rZ\(&YVx248 &ƓQ~3I#0/,dc.dYHv&ТJ3cSzLYFR\%AasPDx-:édjdI[ԏ=i0DX1t*5eN\.m %=4"y-g󑼢rK+W5eQvZ:>0pBh @~Zo<Z;M<5QuͪHZ?ME䖋OAY0IKsTOgE(MOgF(n'bkIC!3qlC2+_$9>;`vK|ls5]UJ0,B>ÌY;)gҬ2RA\5j5hU|Z &FVQ] n {dqҡN|##Q ,|,hU=(qBVi@&4eE҆\Dq%#0J}_;,B>3lu]"uR]Rf_KCxUA:,PF5h*h_p*e_1퓏|/34`oiK> stream xcd`ab`dddw 641H3a!O/nn߷}O=J19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU8OB9)槤1000103012)ٽgf|;?̿ܥ;~3>,}} |g|Waq6;7<_s/wt\7Xp9|zaendstream endobj 217 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 39 /Predictor 15 >> /Filter /FlateDecode /Height 53 /Subtype /Image /Width 39 /Length 1223 >> stream xV0@7ԥ|+5ҖߓA0>G!y{{{yyL&F5Hԓ- J[ >==e[k/x ]ZWpwwwyy^ROOOM%0.\]]M:Vm0K;!@L)G^\\HEx*W; SCY[[LNX%h fʃl%8Ys DP>!؃ A6'S.Xq p񯠢pN]7aR9wJhVPZs}`Lձd]x^*O oREҵ`Pbw'E,=Sc0Ү`BE *O)gjPP/::0UgSAPL!_THWk S;onDlT07`*A}M %3V 9:0ʬc3X=bP=d=T {jih jTo±)[a,F&w\ Uܤ7<wA,7lUgLGK4zuS(K SU›0;o+y^47nd޸=;ʕZMV6/:n?FD< [ ->;U70Jk|bGRNTk:DlSdg0_B{%YhM$T6u[^O<TmeWy'aendstream endobj 218 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 53 /Predictor 15 >> /Filter /FlateDecode /Height 39 /Subtype /Image /Width 53 /Length 1350 >> stream xŘV#AE%<۔ #$"&!က/--TAWuUooov.?<<ӾNLLЎ 5 k4vh? 
{+ 8C{{{K A`vjb-L AZP+,M0;>>M%!S>"eKo,L/F 3|lc5DUD [XX ) L?\ZVT%*|www?䛚N("d|t S?J0)XA_da8/Y~d28 bkCv_+%cnnV,n~ /kd@iQ>S1eusYS?ĺ|Z0\oA.+A<<<kqqZ֍ .QAࣃ7!@r|Vamav{{{u@nB6;&*/e $Ƌ];'qсST/M:~p۬!@D<8I_&5|H+/nGo;WWWGkgc~r (*CM^vFߊGVu`> /Filter /FlateDecode /Height 39 /Subtype /Image /Width 53 /Length 1332 >> stream x͘9R+IEU3,ER܇:Y% ;T)S Rv;U?booo|sssKKK?S'ߣ+d%\m?nggg|+++?d=klxbemEfZ\__nыeBmbbSSS̽2_!>v:==mG,466l ӈwqq!@+NNN%>AFL@DoC>~uUnϏ0G\^^Ex=ܯbMtq'%oDsgaL;[7i]@M}jёB<2V,f;y!y"< |]v>j^!@H3e)6 fdђ=ٛ(,#MdCN$Ѷ-C/hyrD3ry`YA-SctA[_" q4_j"eNJP52>f,gv_y722Lmk1t e#6 2V:q+p.#OP77ޞ /#ycWRjS?qAB9a~øJŵEL^&ɷA{:BƷ-|lLpޔee>Ҿ t꣙&j Ԯd^@r}{?HHr!vehq̛N|o˨4-N~qCO>Vg{4pNSB6`B6'O2LRLR[[`2l?? c㡡~gJV^>,^Uy4=*䣥dʇ9 >kSy&؃ogg̑ژ? v0hA4"ʞOq{.[僦~V2CT? ~ VG5?sag_CиD >,xm(?I>666ϴ|nu10ʙa_٪/C67B_ɔ֪I_?>/Gg-TQV_2F2g>ƴeBe Yc9Ҷ330k.?2G~~endstream endobj 220 0 obj << /Filter /FlateDecode /Length 3705 >> stream xn]GX#` H޼Ж6ҌV#߇nkFj`.۱?׻#q>HgǯNak黣R[׻] uV'R*/ѫݰAy;BhaD{G!'&mbWVw;JvlaceMf=>[ћ] ,%Fi^a V CEuu /tiwlBԢra%nwvoqBFnwBF'BDX) :]x .mYJi&p%D3XfPC"`&67Z_h<c4h0`PDDkmh-!κE궷F7sb"tt toLjvTH`fL.!l O89A40\J]`>4$UR>87nr"qk_>>QNO pw@˖ 8G=Q;wўw6^; {%/5B(=@A感W3eͨwI{&I w qB+Fժ%zDdIt$ h:jl6Gusae/E3` qfk6,@Έ~R M 2 ߍ<{3MnHη7) !:|_p 3ޞxA`O9Cs\k+ $)+S3: BE_4r?m;;NQ&'tFfɅN U!IĀ)*zEZ]fM7$* =Yr@# ){!z2 w|3~>.-r ^_| ya~#ٳ#Zev[b_!FT :fxŤ ;M >0Xg a=W_0^TQ<ب ;7ɭ+&K]|ޘVg &QLy'&79${br:#,dM zu(cQ0Wd;u;n90ֶVUfLXPLK,lt[ȃ}&Ϣ'z,N=%P><aK&RŒ ¯`ex狕j~T4y%A6s̩܀;7gԃXFZLiI\5Q$Q٨3WO0"\:ocSܯȥ9U{%p |`*V=o|h.eOQyw g*L8= 9 ʆX i({X3$&Tm⬩(CnkNf/4ڪܻ,)j%|MXQ8 Er87ߪF궏h.yUMu7MtpMb>_u?戅>2g~4OiZvФF;CSIC(?VYcjb$FX̟!Ij.f۝6(pV7nLyuzޝ"u2#"**仳"/if]DdvE K][Y}& çByc ,X~pAI%PeY/ UIu'l++ 7gf!%8"f?8"PFa7TprB?͙C#D9$K =!LrWPd5| QH(߹\' y㉍ٚݩƏg%Oɟ|l=DM;eEzz[>RCd*(7 M]&N?H6bx*z -쌭NgVlLgYƺa놷ثV BrSXun Yٽjasz^OyV]eu@SlA,Zs+[0y7MriU՝e56p<{0WM{@uhU^ba1~3CUTԛ8.i.\V 6x+[e_{jas%!:ԔJֶ:5my 7\͚d4Tջ QndSybr\$;we2#5RmM}mi]H!Ytc3z{ӗ!e1_iR'DX$8 d.^X&ڏe~IC`}={z|$tM%BZ(7٪ڜE+>! 
_tطIQ-U!m<0):#> stream x[.Ǒ_>{*f1m`{ZjF~bEȌ#!^deFyӅ/響zO哤}&'dAj-?}OSWz2=k.>\ۻ_+Jt~x.|-2ǻ?<;1֪^rCyTK[-˕}KZ^:p7N]y]+፳9|?~ٿw\Ck}~#zz3wՖ!?թw*}gh>Nj]3ϫx7_{$vƧ]y^e7I$Wsky99|~~m0ïo>}s=CG*r}t]XsH3v\zu(lYZG|Ƭ%M"|<[ϮK>C=^?}/_<*^_v ^H[v9"?<*_U&S9 Z15[΢WfznV.US^!>.h*W\HOY2pLsOȭT[]:,_^^bRW[vƭJF|= W7¿Iu6"jehEk]M{J|䑆bZho]|g*kM~|_\O _-g^cײŶGS*垞kN6O?/=W>Ew;Joa_I,_E_UfwfϑбoHh)V{6S4z9s?ߝR,cc"m#|_IQp ,vXEQ=|giƧ4-gi^~w9ss9;s{I`yxpnHhz}|g&ӄpa6lKB:/c<,簞v9p6.}Q0֛=8_*=zO{̓禷P;y/X%>ƋK+qe5:N5FHg#]6c6If ܆}|x'4?8BmpC~c1+>\V_Jjq$#wi;͗R+w}!TYEWG@y~oO/ Yϑ| ?<^dϚx[$p|:HM2"ˌg%6K'wH8+KG؟kZB) g Aㄳ6zƱ Yo||4kyS^ p/(cI:gt!2~dw)n4\ހfI"%IDngUikų="r;O q퍶~F?jLbNgB-$XAC00󒯊yđp+%a;r 1VF 埫؇ =Y62o2gaLi'<3dGd-UҞHg_g&5 .]d֒ ^# V42uAfeN2q=f20a7%nLjratiYW RāG(ʿkeY+yfߋHOyneqo@E.t Ck~pq]e&LM,ȥӺ|qG..I.LJ+oD~._J,Ck!zGߎ]fD>ySlR#[J}vkALyŻ8C/1d`ZHSd i#LD!'d!bJ|*2^zicjT% "t26m4f5" ]"%y]cL#OY ׄ1Se'&?@˦C 6rD/lQgA$ $h~#q!&!ڇ"YrF!MB AHDw! n҇v=<Ȝʍ==7̔Fb!N"b!ρ9#Ї}X;}ۦ!}{v8 F 7 nn" $=sn#xFqEF"(5R"FјS8 Z"Qu!sBB&٫E b!JQ05RB)ABTeBJ{ BD^n! $l2w8tVn! Fm!"FpGpN"b;C !"JϩCDHlpKfORҘ`ġK[!t^ qo#oцW"miCliCD6}9mi X@B 6mpAf ̷2a@ICGi {Cx "RnUcd9x8rG.s;Ҟ5'_"F_; YOAť*"MUQqB#}Zb d \"' 8cVl8;8בOb@lgAƃ=> 6vsi] &0􆈐TDo' 8,6wKC7$S H6:)ҎȘSHJ_. '"1"׺trc!! ٪HAj67 9ޱ8F6w9!- !8H[D ,o$ ;" 9HY? 
6Wx>8|>G8 AH͌&g!dA %tgl9hUSM5NP@pG݀( q!kt}KМ+> )"2#J̝*6ͼ{0 ِBR =|K#4CVb7G(E#A463ZKywW"bRZ/:0!ve1Erڤb{k+ِؕj.LCt&ۧRGHfCʉ u*& !)e)2Rw^ ΂),;GI*ަiH(4=/}͔>(}"!Ð#r:0j7>4d 8*l4DzWUjW*xLȩ̹M'ieP%CP i NĮ T!0Dn92Nfq.F5|q:nzqieIP~Kd)~g[&mQBDUQbXD[P nA-V@C_j D ,e>q:Dd=(j C(W~8Q.c.";Q.h,|df4 I3+Y:LkPʡ?$ )=E#@K{en 5͒%PZMi!BXJ ^-Pu aYq"VcBʢ4Vi~43֛:ʬƌnULM=5Q0Wg&B?HT&Ҳ WT58%OtzMc4yV\^+c0Q;xڣI ezĞo9 UPB:ҚJҗ"LhyA72,ҬkK.]4Ӱ?|ˠԋKeRͬjɰ ?xm$E `,͊=Ki a|zN7ugEEXmtIY^֭U*{yn d~HrT<&KrsUF)SK0"H$@FCdjk2.{ ZPTq2K+[&+tjpn9hBӡ!|KBqR,n9$@k3XYLgP k,6`kK㲔Rk薚Rjk>y^a9ҺaKz+fw 0!g3"e/ 5c#(Z 0Vͽy2f,5as-a.̿" YKgͮ~;fG.!fiĤ'@kڵxx2(UϰH{?מU} lL"uh<]eY|[_qk`둨spMQsz)z/,_++e(#WV>%Y6<{},iABl?hJ;ew=ߋMλ3nujǤv7bwJT ih9z`Wݠ^)>?WO-i24= v6ͦEQ~iG^L_>+ <]imMKk䅚FF&H %vZה=&/ͯRh0 `4eZȔ -^DBH̩#OjfscyN}N#W "'cHXD@m&@~Wt%{{/xQ8G<;jjxiM=IJ% `dQ,|+1a@ !o_47^d%9Y0M/^@+U0@|y r07IY^ W0uiMdzmcUT"右0}8CtiHAGYX{^{;xFUH )M?Y69$"g1 /@j^1bƤ=A*?FRdR\ֲc g񧑦d$43yI9^Q27 @:ց$kV k]r6LB4hV 2gKH`te9y(dvVRqX ^`U; `L*f! RY ,lR\|VlV.8q8CgA7)qO p E٠G oYzuB%| e28ZLwv_+^O-1 "pxA"8ۗ!_+MxG6'#q1JW aӦA !,JF}"2a%4RLV~H3 $:DjX*?9Gy8̥O"N8ǂF-tYw[DL 24غ|vx(W8tUa}k`2aCq1Wk6r廂_(ww^?+~yռۯ:U%+  ?|\x}AGY g5x3{Q6W= _n_J͔* ʊɒp,.ICtI.$M$kZekT70=xpע9p;9c휪٠pNez(ǵxY#kqW8,K8+O.AŅeݥK2CJ aӨ]'tnkzK>{dgL;rJ^ \d ~L&{ﮅd7)HeQFVji72-Y!] #ȵk (Wqp9 JN=`ƚ-M7I3Q³,zS)#-޾ Z^EGY{M2EݑnWfmT[& T綊z@jFD3V;PBbaEFDzjr=]V!lE2}FQdNOC,e2f+d%+g9@a5e Ch,Lg oȲjOd#.Ki=Z֡4f3iى {VdnE&^!"3{b iV 2 XAXʌHwD+0A!y$D$kPj/X ɉ˯NI!$r'O};2P"3 ״J0XnʨA%{vAi&>%&9.\Y6&Usx o"fd[3oМ{9 @Ty4SfFbgܑlK]FMVݐne]p4;Ҵau,Ui5)"<2`ՒX6eYղO}аvXF3^E#!ɒUE*$0 ,j谨[WXN kI;l~kb7i)U,ӆ?Oׅ7*,9!smt,Bf9l-6%X\@K54dZ: SyL+U +Yn23Y֛zG4/{eew@U8(ִARG3M ݒUxJ|Dl':_9_9fRq^N>@@_v6S8y{;o\ A8gVgH?,Bߦ?D:? {̒H?Ec(3Bc0x@%߀s!/k{!ߋ]?s/?? =&@67@9׌f1qt#o[is]9KKr~_jKRU qȦOOf^GĨ? ̥vͅ77GE0߼\|SW.=:? H?E#L!!GxG d򁲹!BiĘ?*j21IB^D5RBBXv@H;s#^B{@H'7ȢLesB#ZBjȿVED#^Han1Uc0aD?"8? CBaD8_? 
F?eJMB?kbMw̦0erq"ǚe_ Dс*\%= @\Y3\ZB[#>pb=Q:CPI`[FIe82~,FB2zLpd!HtO" JpYxp%BWbLw.ܕ o qW";v_L| 3ڻS J8I%n%C!J(w_'P0H%]beOJ$w[Ns\P48q碙_x|  [ DMN?Y A `kw@miDĝ' dP$vnJ$rܔ2MvS7e# (qddښ'9}Ԍҝp ?PdBHu]wAB  tqa R_ kP lJ?+9{%Ľgwx@b'ɞX@G:7K {)˪ȃrRJ8!q'Ђ3 $r9E .W d#Iַ=N"_o _xq/K"TB6DZeq1=͢CӽVCgDDOX 4ٱ;LA@prB߮mW t *V2U\{\0;UX\a]e1P$s H#B_v H7)~MkFIBU؇Ǻ쏮Z~M)t>>@O[1C|iL4BI!3rS,bV)z ([u3堶N@v&:NIT恏rrmj#d"6ܹb 6&"Y&zR(h 'jA 0T!{VU3r\j;UI_bV7&֫ii=f{"'ih(+DY8D4(`TN b-똡VpP7.W+R؈xRŀibg7^'.9:YK^MAktBj]1BOM gH8bvz*>h]U>sF( 3a2W/5dވ @ċKM>x>x\ i;#{<h+nbˬs\p s51!Ej&qIЉ@ˬ2T)H+mQ$<Ѹ䰲"VVH?svtzbī2ؠ^-Ȱhk/&^\A4$ks59{PҁP nU4k:0Tv3A,49ԼupZG,]iG׷X䎮<ݢ6 {V$[1I<PЬݞֆi90`W4I#-u ;/VM{G)YB_$?2 M>~$#NrhΨ{SfE(9,<P)PWy(ٕ3TT 6h^c q_P'L4R;3 f&UOfuPa*p/|KI 2AD*7Ђ'={?/ez`ƣ)H!wcO;aCHyIy˫רQLT[V|_s19`Ҵq6VWULʸ6sΛ N,Lʬޜ;WLJ{I,UJʞLvn{`]Ğ9Lj BȿlYL1 RN)'27wЬ 랮Qj$vq;I,>s};\f'vfS[DTV*AWdI+fz "VHW +WB7n 2Y j#dƨh_T\XmV)i`U'B ˹ -Ȏl-68\/2\;ɀbR&٥jlNqƈɊɊVM'-֩yv!PaeZ.m }8EL:O1ۏi|,BA `Nۆk nrt;w Qw,:ѳH'õydZ&MM M"&_^݃j0Y~dC\~Dqm5i\Lkq4he"̢S_"ն|N5EQW62dSN6r bS"l)bv0HbL"W:*bKIWoќ^xi{.Rcw{{t/ٹ\iBƆfz k-:Ln*i+"0Os,QM t[짡Jzny <ު\f2ViY&s;8\<\== SleK{͒UelJx]iEN`vݽ"8aE{PJk4)=JiĹױMulг-y^d,d+m^~VX*>guiKXu[3w:~U-*Bw:K Wκ aFbK@zQe^zjm#ʮpnF.\xM tRhq=5\N/w³\z@ڳoAz3q*N8]`ߙgޡr&;ިVQsqZA2^{Q O=BZ̊ĽXHr{Y+[ȝjp ( m-')5:P,DZ@(Z 畗)<^8l*p2*Yulr\^yu_XM W X3V $&$&Iȗ&75 S*Ltr{B_,)[({Ue : hM޷iÇ{$u'ʰm҉ 6k) ץRJ ih2Y,E qVEѽP,ɔ% Wd ̂XC@2p!^+9'n9+9 rdyu|V{. x\Ɋ x)'.݇i}Zѝ[eWb}Fr>=22K. 
huZ^;e[W'w+JLŨbTŨbԲΉS|je:Wm׋uB eɅ{j[rU|zi^ڧ/e;\23h,i_azuV.X{$]: vPt7) `n\:ё]^BZ,9 c*>+c0ΘFUt/~F@'U4WXG1i%3GQ3"~2]̖@ܗx }-/$ϖ@D8rE|t!4P3 uߒ@~xZr[X@M _Wbp eEλ EUdDͷ rWo{}A N3pu*9DEwp) FsFY/;_bD_$G*P$""$ CU[J"Q%Q踓bNl/AFCzn8,'g,$dg' ggZew DW*WiGCi5^xz@|xCz+VE@7) t:kmOMM3eqץXdnk̗ȼ'DKJdZAe6+v9šF=<& ~*65*^{zٮ<]y/*دl xhbbwRM *s"b0ہT< |tN=U8x+LYiVl5q3ċʪ.{]7vjX+ n5\QVRd|Շdk#,,V;CE-6VKUQ4%,u#?UɖQUyGL~*\4" #bezM{,sjWͬh6H'j^ɚ&8j˩8@ (NM%8bfa(*NX"F6VHAHSMbR8 )TˡP!B6ŦP!,y= P^Qs(T@BaB6*"N}(~8ADU Mt8o5ITȢNvHԺ7 IP#Qե"6IT撊CBe6D$jODH-,* Q"ШFyQ"Fd1QΑFE26iTNqbqQ=;c# i`ԡQ4ʷ<4 NyT@H(”:$R8(1KĹ!I.ut.:IR!]`sECB639l dS e{-nl TW !B9 MEt.2T@ȦPj8⾱LHKmds)T@ȦENAȦ+4Lԕl*"Ʀq6cS؟3);ũLML9fz'QlXQa6E u[.7{D-tQ@┌a?w$jj/FMEsB#d,ݪwŽ턍 4dlvqn; m xm;O{ж8m;iMMXAqAE l!a+aۈmXӵ]+gx;< ɶG-)7; Q6еC6SMܠ# ܦ^28 h* [ޙ08kƪ6c%p`y1W7-,/"dym$xr}s!6 qNp~9^@0W̱!/"x^{8^ -; 9lbBWa}7a}17:$}or9=o# mr>̢ aE8_o^Da{ a)AQ=bG_RN"bn 6yQn1pv!.QIU*MlLU~{" sbw'v\]D9ub'vXz )[V]Dءl%y\ȱ<JN~kR-PrL4RxfFw4p(̳aVQ!YΟ9Q xR6ңW`njQ2ՔjbJ۟ T,[We.NjD4\$GbE\v)Q,רy_ozR:Y&S2lU %G_Ip % "(+^S+͍IC)).Ѥ$bվ}Q:)V'*%Y}-1"e$^I*]x Ik ea_ *'u"xXP,4"՜ꒁtϱ ttgV=Ы]Z)]Iˤ*&O*ʞ {O妥ː9̠Ia 5vtoTA ojQ'%uCD)b[`@@`@_D$.t.|W.Zm7IV~dsUUbq_ l j]NA@_a)5;3s3zG UFQٟ43ݵE'ϓYp'qZU} BGR% fs,fs c4k]OƵt'm$4eJ+\&dA<+Gj/I|#䩑/ ns43~ç9'̱ m.*dd$H7!W2R>G;R5@6 T3~{Y݌L3~Ӑٵ{le;v|Wu-3~˗,ayj֮r#\$HS/ST`[}#'s= Ȭg\Llqӹ>JN\*\i#ق;&Gnjґ2< !nB"ETF)\giU铪 EF,; MeQEfvfdU.Y&$I? 
lQqoT$ظ""^F]H=>5il#y5l2"mZ[#-!Z|Q[TZrD(Ҿ62Q2e"Vø Ҫ.?T+Ye3ɅE"5DQDl B^\KCrrȟY^\Mk3'fL6g>p[khNj5tCk(&b; FMyÉ C#Ƚx@mۮ#;)pb͝[( RRC>W.-T{c&Doc=:S;:=sx5k4MQmǴjڟsG=ta/7']f V_)ʶq-dxϗRFUP\")H\"^p#EX<=-YTǔ$'4u}kABxbV]jzܳ-@P2mZ[֨dTL6[kY'P\ …P]L6 ۂן=%[?Tnz4[-ɽ+@m]3wӦ"AUue#v] TFhjIϗ ɗaɗ䇐/l%zA[u_DL~t \ӽ` *kt i?|o7n<{jƤߩ:uUM3A--Խ9!sb]Hbb>=,&Q||YI,ƜOfh!coMVD7/Hجl&|?L-νRL լ*< WԞ žYex2\\pcڵ ;Z3 šoUFUd)uuunQ܇PbOw9 ُ坊tsJrf42L~L;϶Utw 7.>f< gKT(ݙ(kdfO®nvj=BM[h|MʘUgi=Lwf |t illS;G"A Vv *Vm$Pd20ɹ5!` [:>Kt027A?L7YSpl WVŖ,MÍ?,%/}ܚ6[ڗx}-/ĖҾ[6H-Oȳob bKPbKm0K&`*FT 3ueyalO!o>}%tjw~SC_ _%֚[̛2^ljȹɈK XyeiV5WWZeų%N»/VYHwͷA?ӬSI|\ۺة:BA|J=|]zMuy_jɸx53 #ʖT&8վQU13W,},@7%|Ax %/gVhp'…mXsO)9wA4뒯ps=}~~zxS??zxPXN=ߝ9On|Wgts99d?;Znps/fw'FH_XC/sO/ՇY:!NK>y+st9 's39>muW|GIn-ۼmAU? s'~?tgNLBfᣇvp›)C~nAΒ;cz^aW?7Y:tau$nz_[oN%Iz'/em?"gw+i_C_ ],ǽxpKSnƏ>c)'ǑEkZd?6{h E㏶``W뭷[0|ǽu7Ou>Dϗ[pN}袡80Yw8*#vv֙X`tERG2Pendstream endobj 222 0 obj << /Filter /FlateDecode /Length 2763 >> stream xZKs3[fS1+J)d\r+d(tAc%i|Pi?|O ˅ʿgo؞|:鏋n&#}Q.'KwQ/}~ޝreݷOlj&z/]tNVr%񽐝}2)i8nʕ(1`i!F))I0h~WE<_y,<`"5 sBWSUL쨫I2^C8kx^ QbgR:(9XqHɄhܯfz6%f>zW/{܈W͂2QA}H <9^C5 G7l^Rρd&1W ?@J`%:Pڌ ?W2A73^j 5?4G+aL?f>G)L Wlf?Ryܑ8_]3') "w%߯bGyqhJVXHGkUdéGH(4fI{=SoqPe@%7x6LGhܹkC~ SVJ qG{,G/p|pپ:N%V%7z810|P03y) T>/즥Mܤ!BA3DlMj.&RΚĔ)*Tt-L$I* J䇒ПڟU*S6x:k5^MVh9,8_sAP{EIv ]K#plJ4gED,.oDi!8ȒKMB8m W Ov7IvFrg;6-@RƙĊe3"=>hM}oA i3 hm8FZ&!i,@TLwgUj:/ ǡpVƤQ$Wryi+AeӂM FEk5 XQ·7]UsWj@(75=8C{(3P/79L] *f3RX\Ef1mEu^GJΊaۊ4(#1%9pԻܭ!A?덆m@X.`1UEYºZVn rHn8@AxC3tfnH^ ḡ^ۃ¸ƒUI_ kq„}m;rM,^:D)|JƻjVsq%;誎>-*LB>>'BGw,g{vv 2+_!-$-sEn%H>=U|C:01ӓ 3% "_j~G^wg7U1d$?}SM 耉#&bUGI6I`GI;MX۰sΝ\*-șMjIt3y%0:M*D 1Y_%,k^ēo=tl^sA{D re!-[ҁ],|Η&޲~IЖƙf4xI?3_"_79mo@%69W>Q*ڻW'rߓOYendstream endobj 223 0 obj << /Filter /FlateDecode /Length 7976 >> stream x\kqΏ2h|0 D1v$KD$眺%S}z:&pWS=yU.ePJS ؓB_mwzKSW+za^s%|R۫M-Cm*ċW/|勫?OT|~msޑפr>ฮUV& qno!O[S%Ox?czU|RSzc;wk"ϳu~g{?927"U>mrÙ\8 IZJ_N1Lч|8S--%런/Nx9;*KSb]}'ƩR6VSQ}ԧzЧJS.4b4d/AIN0o@!B5aZxPf)E7BJ9jbl4yeJEve* Vv|z;Naԑoդs*Vi". 
ܴ\<$@p?+ )&$˝ l\7wk"ϣjj&ӣ-`AbCXA/WqAgW9Z=cjܹR!rb|0+t.OţMS,HqK[>J*'AQ{e!<tJ&pbgi\6-C9@p'a l\vJ2Pgb0p DMj03bO&7ڛGDRP$Jta?Ulؚ xrڧEo .z1Q q=!Ik!FQX@#a4s@sg`b5pU3zp\By0 $)J/! aP1uR1hXI@r/8CW@Ĩ<C\rBiN6* y0ܧC&GÏ<6S6 A[c ,{㱑^\OX L#zlZ;$LbZ xOZA.%*l>wmi}_j\/̦4LK29*7`9u@j@`)GX[xՇn>EYC2Ep*<%vb{Ӫ"HDsŞ8;]Sjm6X**Reu$-kBӼ&o=v#g!xx(Y'Oj Tt$ThDCSt|e#mL'D>ORa1byQD0qtWXg2 FN0 N!n .h9fJM, =xBt9cOSgb\ 2wl#4b(> 3&Ӑmb#x$d:) : WGԳ`rƟP=2 _.deP^108p&|dI[mHTjD\{T_q $ 01Y!ڕbJk b*55$ LIjc,3@JKBIWTAA73'AV4 z^\r;]6gF'3)C563yg$Oƫe`1qq,1&4)Tz5"`pz"qD{AY0f3 :@V!"Tez6xARܸEoK'"Ō]pVKT3|߫ĎnDj$tpbr&<6M{Zg^S&]A+(3$@0R p[g$|+kq#l f32bap#/4 jȢ;-5EGS6ɺ|Xvr?2P($ci҇aMjR(avXH9j7e la:irFx_!È}\A ߑT..:Q4(?BY&,e=C06N4(*lS ˝ sQϞj7օ'\hF鬧;TCjz Ϙj =$~)J! F/C&ܴA`˱C^Ɍ"[p_:ˬYFѾ]=sO(o  <ЫlvXlD6+K&7 'J̤%#ق%d*5Apl0LbtJ_uIb³̓0N vvڎ(v9 kta 윲$HIFXL3.IM ѭ,k1tKE*˪Ȫ=ۈHqn PYۋgY5"/{0bu-]"-H)Mڐ4t}f4*V_h ,ϱH FgVlJȮ7)%6b,0?N#}dHbcr֣=Kxv벎Mgc"/tgs1Kx^O|ϗ ϗ ;z/o%<_xCz! RNs`F $9 fx!ZW5ӝirlcrl6&iW4SdZd/Kf#˷#=0F2jgxe9ci+ @|ck sIm"Ң)bPA2WXݐZdKGFnrYy qJFUvwX\F"}܅b^#ZK J1$'vc dѐ?֤28B]014&戉#2= Ϊhp\~ġ>;"vY&]3(PaDar$ ,+FpXDʯNR@s2`,EJ[e;'8U/å<Δ,e68]=To-wI FDԽpJ nHHÕTwC,(̰*#WN,FGt2dLY\FK+[J$y 7gҎ$HԲcHmL۹:r&lpL?p)tVf?ιx&w3ر^ 86prnMlm&l@kXZՖ9#!y4Aitby@)ھ<oN̞'M,pnA|Kf{ {e,5p2b&d&G'`!ތDF-vJY44YvPQ:Cwd\|ǝFL:TS# ӆ,":%9iQTFW8Hj%de]꿜'6/!Da5 E\4! 
NJF]h+$yLkmq2 VIJw3#gzΦ]Xf̗۟0O2x̰Fs+Ы͍҇ #$^l32.撍]POkfƛIz6F-u栜7ux;t 29a-09')6@LW;[+2n̻A7fs.} ,6b7K3̼l%bD.|5=c0~nh €@GQFhf}a?hÀU(Tl疝a`+h&u;q=xkR#t72OvZg!&+,8ɲ5#xہE9aR2FEY^uqwؑ~}]~|HǛqF..pFx9͞dJ2J9^62@[e'"z5|xB~ x09cw 7q(Oj'.al0q4/~v# ^[ Jv O`bL h<0Yq-wu@榒e?Ť-#kpsO{xs}ZֻAXnbU*o7ZV/_/|0N`}Ǿ5v,/8⇱tscf%(z/r}ʬz ktЩ~\3Η˯Wjowmtw?27.6ח/?Y~r,qY)(%3 vV_:>̓#QDS)mHyU#$6PfC\O 1 w]Fe {s"π~vPR`h1_GfE/nf9S\9iz}7Qա$9_01W۱ߌ=4m MSAH}W**Z~N<3.*DN W| !>~fʋ7 >X5V~bûb&_=“r㷼F׫&${t` 3Wa=JztHF/ #b9`Oq |L$y2ÊZЯO 01 4!~$8w4R)W9OooR ̺H0LV^$֮_K*҄!qشO,E g8 tGܾX$ܐ<(^*>}y8 Q;ݦK-i\toJ8Mzwm%^<5bs$o T/l^} q-K'yS(unuM-W.u+[Qe 1ܺ*g݆УJc^/ d!^rq ޘ3PH Fכ-"Fp""eVdfғK6 eG&>1oTs"" gy?> фA܈s y'NJ)C{s|MY7g8-@Ygc`"젎* g`v].jdgdoÛ{][ 3zʂ|>ލ5R_!/RM"{O5׏q|ԯ)Xg9z,  3!\,<4gSe~h$kfkK4cY#x%v`H 4_j˳H:ɰsn.}ᇍVMvlRD}-0-}!jFN#-+ihzٳc X> 5<L1l/?!.Pշs(Zlab^o;eyYqԴ\:q`#8t}!mu-uݶ£nY}!CvCr3ur0,dYyHרs6T W|N So7$K\Ws̀_ ~RRih$ky,#M{(79aK}2~oF`tZ]q8ca/ ܾEkE4LRA3,Lەcv\Z{IӜ UGPZZ+|O1Iƻyd+K&mh+Wwf0[);۪X h<}wϻ QZ FtX o=b۱s#_%#P\Ҏאpdf{gzlXx%@AFK{*B*iZFșC;NZ zcjp#] 2hגq^ |Blo->(}k=2h» *q]|HGqd_=Yŋ7M;کjn 2όm^Ɩm@}0J@i n{ݸ3Qg.6W.j%Gm/T T|T* SO܎Le,tS->YY%EIga ߛERv~D2| "|6|Dq7"Oj7 Jn^Gtv,G<|zqa771dw4(qKj~K}&8qI|$B/r,|G-L6Q v:פNƇx݇/uugkbgVjeÜʳai>ew;]ιe "*1O(֜Iy$[H ]?WLxqQ5{6(EFun큪 PC7L̓ě$Zj/T}îDw{;qsv]>mor.=sy[OO*IfI-ٯϢ ACe_[ۻ~MMY3&_HA|h2c5{;.Ya yKcPtjcH]f-v9wZU#~|]lj/K5 F;.(_Ԛ%.>EVaYrS v0ʥq* [siOϡ6.y6-.utendstream endobj 224 0 obj << /Filter /FlateDecode /Length 5055 >> stream x]}UŕWTL\)P|ev̝}-*DK0jPa`aFEMBeݪ$&~]E--)-]$Ň btE}}ߛ(W5͙_w>}~;>Y3?2? 
S[2sO,A mPɐGd,O$R0 :ł N*k~T7dTtd12[9%ꁳ d+2[2WVf>f01a㱠>hfi B >&?jf2s8; =sdGŸiΫ2>X29&3#L1lq2"SqŸ^5$SqCg*2抬T V:fР13k&U6b:5آ@ifK14; V9c̽5rY+\d??tҚszhxown|Ν;߲eޣ>zÓ>>xOǎ^?}[f^_+~=jՏd۳vگu\pСU+W>}lٸqmٲnӏθx#ѽW=3Ͼp/8|tw?|wkg'.9Gz2{'.*++[ァq*&F\Qq5qO`}~;|W}/lǏ?~ds>u\xx ~Y_|2aMˁS&OeKw;iyAӮ[nen92O_Cp>g':g'_?t:~|֚e&ӯb kowo,_[h8}͌==v=׭[wt+-}<~P3ϛ7?F~Cyt߈3{~/1c~8ѣfO7mxܹsz;hw9n> .VTT ٳg_nZw9k„ l_p]vױ%/@/r';_m޶rݻ-ٱWk/cǎ:uK/T[]}֭[_/>QFx]kؼꗛ\U^oZ0}6%"WN|;o{{~_.Z4c>гgϗg~4m6K{l7i)iի?}~_/y%>nχmhĕ/3n9uڴiO̟+~5Zse6ٲBgƋ+3<>zK?%.喬Y37ܳi}U]6ҥKǂul7vd{so睷G?Ȟu?W%Z9L> {࠾E]++[ԣWW\{ケln{׆˾rDvl&`~0}ၫ{J^lL9gD.pFq+`@b.P{0PF=(#qQx.H ]ﰅ$.JPFeBYWؙ{GrΊEc< K8 =ߔc(_ <)rB%9 P92KVV6m#Ix;"nC=M:"t!yK]"qN9pB%H,6 % 4+} I0(80F=8 )mؠHd %{x!rM~ȟ]i4}ز!C|<[I,,` TBi@i%>HD%-Oi|/=d簑$˨Who|3YG˅I8(AP(#qPqPe$6(,Ï\z9eII|RH1Mp* HH1$XH"R:T.P"UO%F,1bKx1_j 0$g:N>^ f1T`Xp/m8(c(#QYw5)r@yȚXZqUF$ŠiLbP3`͗R"PcP.HD%=$r0g!2#EWo%V3He1#qP7 c2%|f/B Fhn dlcEU1}/TE}QMdMW"ȯD~o5Q`ZHT1N ݲkH^'ٷ<#p0xv8.ى$h`,CCo%U:3U:)B :SB4~D%B, ұєXIzJN>!^τScAŠH^ɀQL]Fo"2ZX(KP0|\X.(Hhv?еQp̧T2h'jwRt);Q 4ȲD%,剪I2(IX/u*tL [KU26*$HvO$qP 6HBYz}x/@z{eā_i [;LjQ]K u.HD%R, űX~XIzJN>/ *z1_Lv9 $J}^쪗Hh <3Fb:4F`}l9H<m9,dՐr!\SdWy)幪y598W\U+R2ɗ3=j"OIaW6Cȗ,\K8 AnRݖW=2D8&u?)@ԦhjHa1  + RntS!@v192q,OfiUrH9uz<[w`f!/vaJ9.̶Dsj$lBr5<1R&(R 8㪜 4#0KY0" 䬯A#k9o.V1@ ggb.$,Z`Q>Dr {0()$\qk똉pOq>% 6֎(Ӧ*56o5! ƈa?˱9kvGpUWM-|p|F#VX kQ70nZҪٚ7c;~I=bI Br\ ՀTTK\X}K6U}MQ%~nzhj68S{8O([:5:໕-Sk}^RaY/`j]kr-aO^mNΡȅ 5=3+pK"$ m7Nڭ)5sʆ~ʩOA o}&K)mڡQUX}ҝ\}8اL&qV|l|bu2= t!MU+Rg&(o,pZx. ޥ]QڕEh \N,'`IXV(SH tZ(X:ɍnbZYK(UAC1DL\ rҞD$W4O1#!ۄcy-:Z~HzfpEz6*9.qg`K—V5 ʫHA;.8np\,WЇXI;X.c>랑)QɈrjJ ..Y.n:_Zm8E-9Z1B#Ӧ`/\Nqe})cCav}hB; <.!]ZmTզIm7XֺچКp38L:kiMv1Q:<}HW 5`Q6tظ#6SSéVI(^HŊNCu|8^TwKnFjJeV&;Ev2thKPq:jZ4#<}`˛Ԥj%,HːJsA6q1#={דּv32! hmh SGYd*8cY $0˅j)^ViQG%ID*{uS. 8UAj؝PnO_1)p&B:qPwp@Ul* 6a(QIULmK`nڋ`b  TʤXn Ktly2? ܖ)jV*lmD#3 %s.!&4syBoS97-K#{E``;gvM}s(^GK>0w2j.]`5}Bc86G'c'_]Դ>IssOC[Twr\g~)'hi9&7[ꠖKMWnwѬ HvB.BhtSuU"뵴1_Z&n_èb{V5P;ھ :?u.!V_9DT:ax; $)SsKxy%lL%7! 
!|XlJ- vP>~-XZ4[fm̴''ِ͚4qgҜQt롹,2;7=uI-I3VMh~6҆JLM5>y=歂ch}cnMf]j>pE :J`a""A:Uendstream endobj 225 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5054 >> stream xdUϩ_( & " h (h 2 _8"@鞺_w{N[՞8UuksotןfNqQ5h:^ Fe Tr;A1{°_?#.ڹF ^EP?aFNl=oԧw`k`z ʠ ʀ6dʀMO[ >WZPӛn1Afhyva%to/-nJ-+"Eiu"n_) =ߌ/2 v> ԍ szOPDH2"aE@B8ABY 0ؼO?uwBl{l8V)t>j -{}tM5*@@u"ݷfyy'|"D 2/[U Mx1V} @cpL=h9:̲B?tN+<.26(bp >9 ީ݄k{v@>"Q ɱcǢo H}>=8 j_D5DeGD00 ,+vVA耨Zf:v :۟~S^gEPT} wC#uW ; 7Ym@t.Xu1 .>Q`p.a4 0 "'|@V0؇GP[@7x PwAWR88D>Hxך^NY! zȴ݄- >@ &c9W_}jݱ6tm akL/cbOid& @N1$T،֗^ziTI-! iv +4 8^JE>@O:t)BpQ@1X7}Rh/?=LcP{2$r&p}Y[N}A"O N#;B0 !gyզ%=DX;\Du' ܗU:S9Bb_/;j7bϒ Af185jn+w,Ha(`5VWQZ ϋYyOF&͛ bPD(̯@Xqw&ݨb`!3yD hWpg|``tO;ERe- e ooDVMÞSկlW@1vu,{v}'ePr$>Qpp4 Th][ou@(@Aw?Z$@:ЛoZde@MC7`X檏|7H V)Q8N8&0d~3zJ1l0:n2PĮxsb ID&n qKH`uGUO.a]-%\{N}GBc$` 0 &ꫯV#R\pb߲ P} !W^y%}A:,P?ߐjbpG / CQP{ R9K/ͫ/ ^'m#(݂YzhOK.Pw7❫8pNBwqZ .+}Db ýW,EYȍ=s[It!\x%QNzT@3ԧVSel7< .J?I3@J?Lg :Zz@uC쎰>矟Q.)w2"hgo!}@ɕ?&D46$;<{r_Ƿ"ʟ*wʊ!qps=_S13A_wQ sN@ZF/݃ڏp' F ɬ`g}-j_3NT2+G5HnDjbI>izw3GӚN'_H y2A1# '= 8yݻC_LdU$.H.D /8]=.#Th;bVo>G+#x{|j(TwՀEaɍzeUu]6Al~VtUG4zWh)+;4gTb# j?RgH$ ^uUs?c+~ w'\sM$}G#v^X;NH}G~sŁ/R vDj~+A"p 7EwýŰZIQȤ&"3nRo&T_d Rdup#Y][nEk-ǧ&4Etu@hţ_JC2pCGPc*M]p>CZz{5 aqy\9uȟ\| ¨"4\6{aN6Gp-2\y裏h;}\ ]eJ `=af'4ǏGv{fvAD ?\>ʍY! %یJ@u] h@Y믱`ɓGptHtG+ #u@$N8EW6YdMm]`K?,sMr/YDH@;WlH}eB} @0L1믿v;" ``MU4mG̟\v> $id8~`u$ ?u@a'z"`=SN6 \;9'ĬG3& 0?C#4bu$ty?ivHdPY#\'?p_$D %P aӁLnlG7twV COf?2u}'=oC<'="4(Cbҟj%;S u%"@?0N0hp4@F8 sç+zXUo)|&pW݋a'e9k 0ԕ"Gaw[TDPcRdIY4}&,r`|nDt`"h Ī/ԓ[=-endstream endobj 226 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5710 >> stream x=$UuFFFFfFFFFfF&F"((((; "h" """((tWٽsνUz|{gfRu|o;a_qkǩS6G>].a;ciƂcߎ!Ͷc{fXK;v̴d? 
2`vXXYI,eЛ fkU[`8tWHTv:j'՗#`@dyzްDҨJPP̻C;df92ɀ1}/'ꪫq6<Y^ Yq g͇CW_}y#Ʉtge'ņnA6 ,Q.9Gco5\$}<%KT p0 }?NFkY`gM?5/]; !k lGdHbozrZ3H7tC~s{2m'VKQF`IHgK=o*#8S2' e_[7[l+t!NI=$X_>3:R^أ 3 N젾g>rp6؟+zUaW?eg.|E}YP6Q޶1Qυ֓'moKx%w #f0 oz$8Gw 5&0 :3w"-DL< ق31X/XTk~x,z4U-lnfuܹ t P>,@8Wwf2[LfY I^4ueWsAU;Р/,O}K<`*3`IϞem#Ÿu3%+Bq&\ux*eyni{dHK0uʫ`-mgyFf@p* 32~Y ;X}f =e09\@_`Nșb\5=A..i~dQ9sbX.G d!!6%!O#iNg6 L!1Oz`YY%$ YA`5`a`kW iᅿq <]نC{aX986`{ښQ,[d,XM_fT<6@qy z7 `#J gY;gPv[Hf.Zzۢ~ UM60T3 %l>PRhE} x^,T^XHK$AgLd< /N`a`ARƛwkA-:6 0H#^d79lNe g?WCe>  M,6?qߙ0g<h,^N=Sʝ$AV_NU? ;,,Spxxh'Ѱoe d6x,T0s:ѹ:$ a ` :s-D0t9kw/ LRRT^t:,O"^}A CRi$% e vDR}\EΟ?ojw``0x Q? L&A_}Zp (6hz뭥sեWg=0@}"HDo;Q<zӕl;,fzw؛[sy b vy 1* #%/= {/''V̠% I 3r s 6`~'9}K `ߘy&Axؗ1HPr :yؾ;=^yG!9w d _|_k$$ktg-<iSǵLVW_L % $켅Ȓ N +֝=9^x07A+s"~sqȹ&,Խ#B,x8,*,rj\e_JiGy d/vB0Ls]K~?^̀8R-IВV,]<ٝc@S36vrRz/8 SYvg XzC|Fe p!HߗW8)!md  DBd/ּX|Ȥgc0  ςz߅$VWZԗ!0! CzŋY,1߰(o,` |i;'TWce#"<6cN @bPK_g>^OtJ`?Ht A>ǾWH4zI.o^>p0[cA'*1'_m=8`g-u;ˉ yUoYzO}CTd yT\ƾ< hI~@_) d2i1CR]a^N z,CNL|6{ @gF|Hkfy$dP6߫%=+V^z5޶ϣSݧ\(p!NCW<v@'Tb/yF,h@'oQcQϽ3~׿,J r-+_ +P6O? jyS!x)"E㡡TkԝƢy W$-:O~M63I*L<@t DiL֢ Iî". һ;5!`՗cR p]Po6/! KƺIU}`0]Ь<z57|+^6H@@FlA. k:PP!ST3:םӟ53 ~ j3=j{8O#2l&6,yֈ_ 7c3TvA-.Hwz64^yTVD4$ > wWj@| >v)+hz饗f &hiA6,|zIPv2҂AIR}/4 %'#1X-yy @,}B7I0=2ɀ|pNH_ <?jJg ^1s<]tN1!`ccAgϞB,\w>̓K,? 
4&Aޙ$dChz.Μ93o%N@}l1``>;HU͋oAp}e/9!5?o/Bӕv+n WEdps~ð>ُvƑO֫ `/{.$x`GT`Iykw$H8 b}TmI~ p;A`P:Sl>2M'HoNI<65;{l/$FEhg=*ePpmYO:F$t{2/2xORCkWoV~ܑ$<I@SUb@T &o3IOκg-L–vv\x᥹tdTc$xmendstream endobj 227 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5456 >> stream xˋ$E32kf| *Ȁ 9E("o .\.ADDoz*{V|_,܈Wϟ?c98q,{{{ˇ{p8?y^q _=y}zPѾE 3,sƙѝtq}N* pý#c-,y&Z J'M8#-.{j;;8mď2 LPկ_XȨo/)K9ļ/*Q1JMˠ_Z} a=cOU/^pdy3{4 AA#u@dO~ ~h_E?Tixi݅ _c/EԟD j/tnQ9ԝ~wZ@'I)v㤯, gBnT}Z[woۯ[t#6plT?% }+8R?rVM>G׊r| i|nIcGXnEh~Hq-FߍADw 9g}A*rd|\n} ̠|Ǿ̚娺EջhAB6l9e#zY4)|M$}W8HHhGW'NP+` V Rg/[/w@Ew7^2f++fe`M`ۀl:M+>(~(aգQk lǖ p壏>r Iocc@SɍUO)B`R(3X*T}A8@էG?~Om{O(W? ԝP_>(Z !Yʠ؜Xo 覍V\xf;6+FW}joN@s r·_?ѿ'O [BE/gxרBgyOk2[7XT}+@[qv6Ur,*L Tz<.گ#G.v bv7x饗PYq@G?GXG5^ !lubGyiiGgbXۆO} D ٞTHb@U?/J;S@&.tcuw 0Q݊2AygQU%pE4#2A=qRH y @^IM@BQ]w27 τ ZqePxNDymg}Xt`Z([hmc|cN`fATz){:Gk$L§֣(@VI!'E2Њe?7\q޻:*7St DJFtCɾ}0>/jˀRh{ T}$Y(dG+d>tS^x J/C]"ͨ?7 ԩS60ŋmM H9s2Y ^laxK /nh!9.FW=^67 L{@wS D;v Zs/9wDP@ VDBzWuS$<]Qa@2l>,R(H~78Nc ~|^]f`=&ȷbPz qvMbn[KA1wAE 9'UP bN wމD Iah%ZM0g"@Ű fK L `e(@J4j؊16FG.LPO+0ۇADa ڇv7P8jE6􈁶J42 z`9 D+vuù:ҝ2&(X]l 54>dtI.t'p+ <:hЭ;>忭'QBPo,_|EW} `+QH+:`Mͯ(QX o aAJqN=믿^Ϸ],mAHSa;vӢڡl A˨QN2L XhR>@çu;Pէ@A~v.m]qC/AT(1k0Fubp[6pL*AC$=1( {J[v P ܬ?&J<#hTw:A^܅E vj7hQj0ͬұT}:]ZVt}Aa`Q䀝MPkn[A} K^HD0zrہ{9v@DٳTE>E7OuQ_lkh^HqL0{jm݆SݩXb8H8Q;Ů:u>vH]12(7|}hCION kC]}dd/A. 
,r̙!z+_N|V 5Wgq4{4{i1nodnᆨuዪsVo4\w>2&@SVa E\; ʕW^aV!X8؁w~I7)T/G3/L,%#6 FBԏ`[1:@{[ /dqŪ?&u6>eb/sզ<2Hۆ:kSt؆ATݻ׷>;"P}˒m`S>Dzb <ꪫ.U5aa; N>}+5%q@>* wGP MpW#(:dمt?z0â#wh kֽ btxu D 'ڈE HCx; D[?o@{[>lu@]B+,ߔF>e`AQnuϫ9و w wP _h-얡 "A"3goLV,0h)1p =Ac8͓7t/1eBD@1*;(`G-ܒ4` t M!~tDM۩2`jW}k~l)(wq}DTl"*BvuO}m^ (!&8g"ĶfAbPV |spܧ\@$~;sSD+"݇Z1ك؂Ϗ@GP/wu}3jCk-o w=ᒇ?gn{GT= Jvc퀩da[bDn8cnTg38{7Z ?ecK!׍# 4y݅#ꌮ]t: }M":`1pKN@ ?R6a|KlY<1bQ }1al>.x22D&ʃ>h+F`lItSU "lؾrn~~==$̣~衇TQ;6?n`~a;5  Н+"j U}W<Jt(毥pLlkw&Xvƀ.G;GZc0ݘxGEo FˡA#4yU=A8c&}纴& P-:,;xtΡ]D]^`cL_ѩD2Hb!eLq2'x"2/ΏeN7Эٮ3YP%ZN.s;㶝X?+ܵ 5Ovf@ϝ;_9 "8( DĦI?~cΎL 89^Av;Vc?VH6Ht,V> VCrZIendstream endobj 228 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5374 >> stream xɏ#E]M;H8pā@3bX/:,#@\]x/^F/t]|2mw?vUzxܵG輭VvǾmsy;=oO];۷k[5kV6uǶ8eЇc3}hOdP9`Ui؜7y@1 L?d (<uO=\3X  0 evG{fԧ$\, N@t񪧟~ ky`! h"P@pgyK=T}^5^ B^A% >ֳ>3gw1 :2`@Y){P}Z V$UyiLSWAO0)93T^xtG#N=`w47l)pϖY,:'(z{M_?23Va! TE XX SEKѬ"`Fu!4 ~`AS|7 ͂O8 A 4 . 򜓓Z/ r\a1l!萭NIo^pҫO@= [ h˪~1MHhoW9;i2}ɋ#L4d!e,@OM ݫOlfBSEk MH?ό=H͟f BAf awcV@ū WXꫯI,l.@(5,$7|cz.Q($ ìViѳhw}<OkеHmm-<ĮW,P#~ 5h!Pߺ; Dq5U("~G*zfL7h!D[1ذOmf%Tdoϙu$p>eD?2/Ou GQP%=Pa!O7<,ݯgĚ914կ_.4;oe~6|+!Jڡ1h ̄(}S}3 Y|1Gxw<@C/OsnTKTg v`~Yo*H;!ԋL_a00oYOKJ/*M6KY 4"q?iq ʹn'.~%V/2НLV*8 [{YC Tpk|@h_믿lox3'{ 컅IΒ~.AAi%4i3p~V& &U s^Wpuk Ƞ"2?GV^"7/H\MkEoǠ+^ \,(T Y/:_pEduv3=0 +eO=͖J*AYZ`، W1n2Oww= >[WoЗbAiAH{gtgg1 foa:W0,넷Vt Q; hͪ?E$dǪ)P 4%kHC>@_~J/FRuOopE?\$#'~' W hG"S/{HuN61I2!w6Id?-|&w֚+d~0|41~~]gg$fAg?<@@:eg=\ܸǀ 1?oX@d2USC !,O@lg9ywdP,Ph=MC{٢Mb@(ɽMt 4 p"װ'gVSLwACƠ`xIFEWԯP== #)ʆ slyx܍1Mҟɽ@_ȝٚv`> pl%7n:`;] o/2 GK/YPBt5 `*(7dT? @D D1z(4K}B]^yзnc-_P}݄cyZ}t&~/lBq^ԧ.hb_W~б$|QC{Lğ P&]@'@ @J? Г»X' D@A6BD)>_YSh% ?__Z #HTH. " AET-(@I *m˓c0ȰS20]i@S&iC,?~@ ` *BMj^_8+;laI/bk>J? >b&믇? 
Lf+A\*toFk!l8@$C +4`ː,Pn޼OK>2y <hA}m4n?k'<@͊ўŢ *}vQO ?۬6ӛ!"՝ ?hejEw}׿lEVOX 73A"zrMYlV߱%}|q`of AE:7dgS~AîO@P"t-猄PUf/t_@~[ؖȜ`ֶD1ez0cO>t- q&8~AXM3AQLC?̚Xd/7(~kM~0+!R0TD_ (7u>2r8$(~Me1O?RgO7J3}+Ym_SQu_ 4?# P5QSLTqq5q0NW>tÄ.|Ub"AdV!lal;KCye^ h5ͯj f,2hg1!=:7R'zV %@s 0e] JSP$3S!4Uw4L:0` ӛ3хL[JO;xɠ CB@@ >H"Boy1m:i |Ua?t&~a C0"F]!MƀB |o~JAςO5BfDaH cZṄ jP;<@9 {@)BB}z93PQwtssn0 {eϤ‚ @IݷK= ZBPf+b'ͩ~@~sCP{"d%t?J{~AAܽ{2&!!L)ô ƥ~OKBp4~#;5Ð615(tE?>cA~S"Et^824L!B !Z?'\lx"b5fY $惺, O#O'9,sv0aVu .j;wcw*@>Ϸ"h 8foM~VAo##ܵBX}yMyJ Ԍ?aV& < VBЂ%7uoOAez@ L,<O*I‹/Ioە+WsXIX!(+ѣ2@_VI 7?uz 9ʶ)?7̽Ch!/̿sm M]ի_[IC!M00?YO1 Vlk+3d0Eş[__џ&vLIS}o$NHM۬(?B}gQp+WU6QwS0~UƟJ4'HRTxB˂O0|ؼ׮]_֑EKMkG:AVVtQC oh ;B{'k6|]MLJ ,B >` b61մB c A\~@>FÐ|BPE!7=f ? <<Կڎ2q:" ],`5h@ڎN r @7PAly_YF=YfL^t x@Vޡ@A@xP+XbAۿo>2 ,p ^0e= Of&|? ؾMFdXWA> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 4970 >> stream x$Uտ"" ` >Qedķ 3##3#33#3####An[=w: S}YS} pl6LzctF~|<=غ[1mZoVطm|8vFPݘk:q vr:2 Fꯠ;6~zN2p껑[Y5?T'z";m*jLWKomg쿝]=\ *AA:@U%gᤷv'S   nn&WCXJe7~i 9jӳW0u2q oՏJ[eI;M鈛 XC Zd1#mg)D Fd ]t~efE ;n.:S1#שoO{Nq'`w*B4 ݺO[O' /߼Q!u#[酐ЛŀvP Pso+wIډai!p UgiJvO@oCPs8cىtqBӻwT_؎6L@f% \D2p%pӤu*}5 aUW>5Y,"tP"QMO;[G;.H8M ()2 Gv/^l!+ n"LoD?k'_@Y!XfWe$Py@c%a@ϵRD pS̥B D?̩dJ@n[t!:q d`T$n̿{&} mwŨ;]p/F'LC'a )*>)o?M"EnbQa`5R T86*d_uާ6 "`7b7( Z$h_~%~&ni ™[_D9(J? ?dV1}U3+E7;?|-DX )fY2ph#cm ?cFL%?;2" ' ᇼB"ﯧ'ZLm;D<ܾ{i-rFi/ۣ5Pl}I0h=#b5 ^ (,K!9^Pm_'Goɨߌ` w\-R?!pp?R?_է(M5<)֟d O$YEY#I7sN pOvK-@3 }T.7y R?/GI @/0jaQ_A3ky=q !HJwY~IτVY ,CH_OYL!%\;|@zCuJ@կ >*Ѩ.\@%[5$A @hM Pv$Ο?/_M#I$ sErIQt}wͧw1tD5TtqG1K4e Pz:]W_Z]0yX|%(c%(Ϡ@ɜ|_iWh.[} kb~>;+=U~i-Z} VҗE eu=ӧOiP+DT:qˆI&IFx#^Y ~^Msr^<"M MD^DUay'd0bt%н6?fH ~Z3P>^- {&v7R(voVY?:'@(ݩZuvp mB}Rk;nDfṇK AI"_]e074:~38 ':pSc=Ŏs*@9:YgENDЕGK͎NOth_MSI`)/5?j]ECF=4D ! }6Ԟ@%(oˆ!ʁ;oj2IBI?)ANr0pZœ 0QX5<l{(nJ}Og0PLٖ?X_dE4 QI\ZK j-O>`P\Ԝf%`GkW? 
*A%R,XB( @W!m p:,^W["E;]p1XTnzd kBD:*E.F fL \s''Y X#xeP1p~A755ZSgaGK Bv"uo߈93p1@B,Az8JK|p"4]<nе H4I{EGo4# p d+?>e D?zEmVz$fVPo6?@D")qWk``-LIPSFX mzWAΚ#={VKowMQs`$ 72;QTU+Cy2wO&Q|/x^{ 1䷂ e0J1t4DB^Ee_+ƀWIASPt^xYGM 0N0pHscz!ېۙ3gR[P @Z4CM>Gb4s[ ɑo[O4AXoO:Gph2f@;N}wHe#t v|ɦ PDHZKŹ4i͂Nj"4/fqA0En=pv7塇ʏ\ 8!=9CwLOdtS|Țb0)5'FG.)=2yuvt'0T=!@!(&)z<7lw'\s;ڧ!Š?6<{+n էާ4R dTZBCO-mCu鏬aHԷ; A@+sM8C1n!>0֖2Z. K@@͆#B?N}ib#gUuH4b8//R `k[]w(.;GkPx5tE7; AT耯02p>K.! kvkU_%3Jw#SoC90`і(r5XGG߀.D M|N>e@n`vj,U}tXǒ"ەW^he @wvEaફBqPw0GFt`Z|\Uut$#B}H}Gbkendstream endobj 230 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 4990 >> stream x$Eū'~""""_躮뮋 ~w}Xl˼oD +͏~f{6sndFiDŽrUP)Q oAơ!vk,J 믿N8bBQ}jWvDOz_K?M~W6XR@SgDZ _l)81–!z+}u/i5 Z @ߩEw}>nF4iPt}~/"me NWa43 @f:?i7VpMa[/'G4g}+ujGD7ce0RTʹ :ذɫoO? D2 0DV@IQl||pS!'|R Pw ̀JՏ쯋Sd(Ld>cZ8hK1?Zaޙ?5hG}DVH!*[O>Q]Vz~{= JH}wNrOԭ#D7m=|PkEup(:p"#j"3o*\QR;O^aSNދȻ]'vsY):Ʉ-Һ;?-8:|1l؞'{-;+WP})D9BHk ܳID$ޱ>v^|9Sew#Eq;4v@F}'ԩ.]Z BK,T?Bh?BrsoVw.T⩅nLV_c"`i铹v<… 8tPWq3r# WV^<o*.,ʔD4"# 5SDPq;DP[1믿qV JYȝ|hJOuuk&&PBh=hl STr/R?x}W"FPrwC,;M:ߤǓ}Wi`0x"ݣ-\w8`*49}G$# @qБ廳fK/`+!]t[e 9x/'8U/R 8ʈAC`CuBc,;wn_Xvhu 8i,2GO|B&џ"Wc~͕';#8h^ wad3e@gDfB,OfGP}:=d&ag';]бS"ݓ #ޏAbк W(4 TDL`5>P5e.}tG39 2<"Kum%VT@@@SwgxAξT ЗĈ X~yԄbTi&[ms"vVA4OTQ͌ѐķU5P 蒠hEav?W7K/j +;&Of.P׊i]/tͮ]"Rlkỡun 4;Q`Ȉ#eI1g(~tCU<-0l?Y ؕ{%dkS]*`l#S D?EAҕ=[Fzw[* 8eP^bEU~}1I_ Φ%@:k$h:&@ED?bpvNe}s]`[Q/w]fTY 2 FЛoDxQ(NoFm" (+H,U;0C]?bd|ꮺ.^U+"AţyDHB@Al݀23ٙꞁAId q@IkGv>SBf ᡋëC{.w``@}Li7'=QE3 U:SLPEP觍%mՙ*%$`t=իv@Tz =C<Jh2q1vʕ|88Vj{OGEp ܝ$Ph*9} +b&its =:mnwY*}9wÀYD]K1`5PϥKV ž>ADM4 ̀@c&ŋQ#^}$Q@Uv3"Cs… $=o ?AnR7Q #ر{sEGΞ=/G6BRE:F~9w2;{)¸;Q^LkC0p |}} Zk[]-:,*ozmy`Pg+3" >؉"( 눂@c\ ]7!VV W@w9zvi['ˬFVq>0$Sgc~c{WU=[ 7 ;(;X=P<2i;;jp hBЖ$Q}Y}ԏ.##2ڱ>3]Q}YaCS[71b0}Q؛駟֔`Q ذO1"uSO=-Vʑ n-S_@>`@䋀2O>8ܯuQdž)`@ypEelMQ'G/MSro °㏻i؛yѿ+>7lAQaf"c2GmS?M|"XB裏Rѻ ?`~墟n~ܬ2% xGyfJ/n{Ϟ又gO#P_R;=PWz~~uWO2#@ooq,0>`EDS&0 蒨<սu̇vө+f`T?>md 
?ߩIܣNɱP+=M`Oxg2ƏBcu0#AMO?(}7{/M(v7ynʍٿt۵7~tDG(a`{'~&vp ]ɫy?i| `i1{QPg}voDϨ3lu]fF&q75WXJ]yEd:[wy v`]o;m6㎌b"kOɝ~2th Qz Ŏb" Z{A;c*I *n[<ֿNoyrءcEO}]zT3lܤf|0yV4?Fާ~'[neO]k( Anuzjׯ_:T-7ndر݃Nͽ UɍGɣ.s\ .e`+E[Ozj$VH.@Yob t9 grU@=ث6PGGnڐеWc4endstream endobj 231 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 4958 >> stream xŝmEgG| * "" 1/(   "DG3lsS=ԇf9U= 1i8Ѷb`jѶsMǮfwjڦS85;ivI;::t6L.[1:D4O7LCӥ+}LG1:met:ixU:ͱүMhpc,.4ý^v,}wʢ6iӁ=fu6SwlOG-̀X: H '@T!*q M3r$vU 8E;MT(AMB^&[ \Zf@ԟfD[eM*Igd 瓠:1-؛㯿Z$Lh(& *բ#Q$r@]tt+nXEEu&A>#9cZgTaY ?#+4wz*@,[ *T:MH84KO?QN\#5cûX r` ~G,):: p$s:@vX&`{4_]G<4P*$ځ4#`GB=G,MrlQ`?jGNWc'0h !}&% `^,hAN3k7ovz@j}f$`j6(x@Q Ro\8 Gln"Aa+KhA&dmG_G5QTJ\DcxN9 HdhW_y5 ᾊI`45PM'?I`gd lpk@u|n]|aD2Ep<6ႌ^[P_|$};(<&4`w΁ao:_3>sTh,Ṗq Lh/#" BD&Qg}f hѡrwD4w"n " D!B.(,Aߧ݅9pG?S>ؾt?E {nD36ޡI(fr0$ D-ʄC۷ED  !NFPGnM^&z!`p1z&,&} }D/4藔x'Yvn#GbV9k׮-S JB) MAmP𷻠k towiV$^M!u5 &"r~P>^-5+"PG؀@vAR4jQ?jɕfTX`YA u@#L'.wb>s+*% :Ujqb h ^grS75\Jofq L" vtO젓[ L6{5(wky*ݻ h STFsx;{ʝњJqm> @!NYJѣBnܸNM }:O-CCqh"}]?/P{:›7o6w%vP1eM% W@ItήLMmxEt 9Ѷo;}QDYsȀ X~Tҏ*tLGkȀB>AL ~ kcF;\D]14$2h5>-MCuwjVd@ćCGcP|޹s#b"Q& XtlaNq̀f5}U4U}c6⃠<{q6@%>үtOG U^pwӉ1'8zpwP k=ȇ.7w'o3({P-H?)#:>6 ] 3@N5zI,O(}^2|\Tx1+ۧ7pwP c(階zQ*`ˡ pcj (k#N6%%暢?2 _XG?{E{MtFV+A攉@ "x'!U$џȒ3)`V*VߠeO&#ӄ44r'^ Q׆3B .an S"#'HY}MI4@͓pDE'n"| wyؤCw E9 |~@dFpի"o:A::DO[;[Ag+W$Q!Q旜5ԟKћ?A3ֱ0 hf@oYs1[og:#ԟu\#z^ʼZڹ9|Ș+>Հ"H?#ѽa+?I_8σoѷ~{zUEԏT7Ft_GRGQҏGGo6jя58xf7W`QJ\#e0 NMTPD֟"K'}<)W^yE~H{ "{3KM>wZ2_~مkVrL9GvƸ`Cf =i/_v؏g 7~}/qq (]^@@ hi3l~$@BOL\t OFLҁ!lj$jn.z٬w؟y#ʝn~~aEY{rP-J5A ]x/ѣi#+A}g-dDI`&@g!s!C>RK< ߕh4v?ԍHMH9~˖u@kHŞŨc'"4W*Edqzf JP30]h\&Oыu&Y6|I,ӑ. 8_,(:F𔳨9I;Oh$Bd,ۚ-X#q=51M#8(.3w@tzp9GNS X&"v@ycO Ȅ?&2ܩ~݃6Dy Qtuu3 z ` ǰ+Ά|c x衇0  p* \G&XDߕ{1)n[(@'AބHs@kŇ"Ъ Vc P/UP$Ǣa̹n(HEO@8%&A;di {ݾ$ܰpL~]2HQ# r os7BeAUUSRpA:Lj8U]vF 1GT&0a'D  L"X]lUX ~olD . 
BĖ!(>͐wpaF L (^%{9uf b]9Q.l?Ưa*d8ֶ";z1G F3Dwm|-r>ivH hΟ?__BFM$z@ -@I ¿5Ina bm #*`ӉJӦCS~$ ٶuIФܹfه*w2uJ ;> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 4849 >> stream xŝ}EŧgE%"" BDMLz}O|76Nf[Sz&ww知ain9Ɲ;wlݨ vOQ/8hh9bȍppӰkVDڟkDmk_^,QoDN;/mxwT_ ;>ގbuQRZu0ؙԩbuusN L( w+aH:w`8ibz 0A}~LiB㒻 J=` E^t ۤD …3;)?D[-]/mkd+ݞ:rnݺ?."߁#c*; H$V|1>?"Z@Wԡo8jq(6?_g "G P믿ֻO @$zfEiz1@~/d$3$X =D} ZД>R_C/4KEmT}!}CD(UN 1]hէd2G0dgB_TE$lO o9(t QتSZnnd vP'^0Hh"Gd @NpӦAST= ;Mћ0ТИXtaXwΫGl Z`btHiAZ'I8e)z:I0Dܼy3Z{ߦx ux`yDy7npl`$& xԧX~p]Paͨ?$DN7|`o8v]uc_).zׄ @fyqQ+mhL=0"#;Ǘ_~GlV!ݽgѾMY/ơ'JnJp7t7^jo 4οޭKN w]K%z=q@>&؆$Rk(Hgo|g8@; DsH=z_A`Cҟ?SN ʀ锄bG9;C@ ̀i|pzE7hc`G}CYx}J|jE2 fR1ׯS\I)շ ^V5A2!P}gNQ.c|TnTna|މetl"1p(hѿ!:|3GU(@o;H(HzJGr5`i` e@2hb_{RA!FB?gpQ`a\G hؼ %vSׯ{}*@S\lF|ʕ+ H;=hI{D˗˗ $ Q80 Cwt@һ1PWwuI ͇$pЃ%d?m>;3@MaDrQImue@iϊ/ $~m~A3"2I R$O4m.iץUyqf4z?&nh"bPIK.a `%Sߞw1(PO@M??ŋe]U'f&/ɏi(@1 8@3`ˀfM} SATȀVe3A.E KGS?[o.= A4'k뜰9A} em@xf0Q(}I†(DNw4$Xy"Q]Pztc%( J #/zԗ.D6Ъ׸TPOh"~yStxـ0t1dIFR Z̿<6Bؠ 2 v$:#hCTza$$a ܆UATt1w @1 1jaq7~ E*hTz{>KP}+Bg@d)w#cPEϹs2ahffpҥT&|0՝E_3JW\ DId 9v$:RB;fT! 
"ˣXڿ / 'FWwxgpzji-2M8 Sҋe0_ QP`z @FޕN4RYGeA΃sA K}Dd5"%x;JP(䫰 dT1Sӆstd087R_}61j[EUHԢ$T Qg\"nhmf 8@p<,ߐ.BwHCy|W1к7aؽ]P !tEnh}Tݜ]Mj@x ZD.WCgȿ؜>|'8N״Uw @灨?ak #3ާrkg?Ug #Iw-Q軾c%4нYkc\50!u-rNi]OONN [oΟ@݁coHF2>VZ!>=n1M^͡Óuשop 31E8@oS I T@ًWM܏ԏUbH2A!vSq݈'YM&P}@nŠ [ 6䁖&imlY[1 P}a]LJ: 萐AmD=_o1g6MWT k ^Џܡ82SJ\ l9='vn ,5(Et B8SO1NĨQvx<i$AeȧՏ@5''fö(!'bSL)!a?*D%B6); I0   Oo 2&36}8fЄX퀪B8& 'd xԷ"cȶp@3`4:.)}S]_jC5Pj&3ޚ:-Ka7%mS7j_Y-H/ ?Cks M`>M+ '''Ty3کV vmz>K؀a?OIT8B.*f;~Sޘ ?Tk.>t|C5{G4!оT={eX`X #jrsF]пeI;ecДqM㯞 ҔT2p4t]؟jx m\gCSR;G-AykQz>mDls'X00Rخ<-OwC=ޏtHТdb`gQ֦]GT& Y8+}!}@q7ن*̓EeghKvbvwXM6x1p`H~I/翛^U.̓&=7|'T^ o>V^wli?lS v goG Qןz]d:Ɍf d &Xr=Nw`X`)D1PN$Cb˯IeP^nfwgߜD"^{5aT>V~\NTLq 4~=ZZ^}1D_{k?'CR0p*ʆO+OK4S_ 3^ 4/,6R 83$A̓K/9U>k2$2T"@M3_|Q{?^+pdjNhܝoS?^x$_~]iw~\OncёZf,¨Ry4>1 0Юa87ͣhz_d Œy@}i PuGף;թ~G3~ڟb(= @lx5aF#ѵQ}>,VMQףP &F3)[a]Ayg*"%#qG/Ӷ#0 N3(O?4Uym+ ?up+R(O=ԆO6\Y&wZ/Q4J c+6ʓO>A}Kh+/ߩA  >F;-O<ā/cTpvB߾}`P'u2"{?xW G7ppGK]i`W}={@P-A.~gw5ѭ4"s N` B{>h~ep,z uXh݅KXSz+w=E>%_GuJF ͯ/ǿ^OE- ;j71s?0.V=j)Xߙ}iDUH 3ࡇ:ZGVmyKԶô`((C>-Vnow,Qrk~c Omu~>endstream endobj 233 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 5192 >> stream x;EӅfA@@BD?oG5sTQ:OꞋ[nZ<ϭ?/t-evz>NFi 7qgsiΝks[أk$mvlNuvZOҶc8 VHiX@ ls$@o(BS0<;wv hX63lt= A~y]}`'믾 ]M ªߎ_oV}̀ȅ('zhAHqڱkgm9 ]zV}.m 0"ˏ?p1 Y p Tp > ~wշX h6??2'BvD5H}߄(p̝QTz?4\%`7ԁdgRCޮqW~סQڈhBD:vfV}QZ: @7O,_~'!H`p*$H2p껆֎ 2`, @ͪFqAE;Q &ҩ/O?$,:pOp3-(#o2IOUG ?؇&0]PI3#[2d2/ &A>gxXLh\ o&cYV$tNtBbA< _R(|-.s oIZP&1]Ln4?Z4 Ơ ΂\c*?䌮0}>#4#@Ka`+5f"OZ?]8Y~>C5!H}rTM 3X<}8y4Ww>8@5Sf1 $1T-6 |dD /@0(k\(a^_3Oyi 00ת 6sE3_ ڃA >,p1ȅl^O憶 .&u':mg_O.|HeTiAżQ@h.|oo_dSO=.>j5 Sq'tSm9v!:av8Q7駟Q"8"p2:W(`o⛝+WTV20Q8eY~÷v Khm>xefs=G_H%2.F ˀ!%]L/W?J?ȅigpzq6y#;''D Ђ."/rd~Wb+f@]n@###B|n@?t`lZQ_$ LF p h: ?ɀ Sm W< ,hBC}7{dʯ2L8'h BjhN?`Ië_X2Ꮭ| b] LͨRW;C3X: 4gfrXt8X)l8nwu_hHe8)܌VD2E*AdDn;H{O|M]gi3Z"~ԩvC~7jKS:\c 3p-6bݟ~zd;gfy)&F-X Jh޸q# |J5Q? 
a юܛhC8W@/6B W`2)D<͛7]hXQD5`@Mg#$M])"9lE P! 'wAG7 ,3M֩| = ;KjQrQ8 Nڝ/y[OghQ6h+s&F4Wsfp7M-jS>C: sf4 p/m4"@@dF;J@gVjN? Z  vڃ&"Yu7'QG/yA #z^/mO?VثРI,68`mUG0#{E2kuy-̣k1G wkh@L9FD44 #/:|$Dnxa I-rG4,Q@a`v( M@d0/ke_$aFԝOp5/^8A *wޔϸb0CA숚& hD.,i>rF4mH5{ԗhdF9(2k@+vtgN4]F4("ݏ?mC[jfFI:t ۭ4> 36+ %Έ&WJ GJWwе*A{ꚯڂ2I Ζm=ԥa5D0I@AJ"[Og`0jPgN &(.:G}D` `8 rV%A^ +8}?޼y HV w>? ( \Ċ\ Q6LqvEдs<0_>!,XgaG$%a[x1#ީ0 €^4TuX'j׮]C&T][PM܇pLA$H2(Ŭm @K'ئ2pһ~Eix|NF_EZДTS~@@wիWݩ'y!g…(\$3 TlP} ?n0jg- ".4w~S>:3܀&'a:|PA eQ#VJtuhM8X `?rOad:՗^zו\EIP{}2NG'̣GN??3D<퐶2w:hsC5 *+WS0-Ha7ӫg@y4˗Tݩ:ֵE593syKi"FQ:$G3#ѝ0In؝?CȩȂPMzx=Jw- n=ݪ?KOC1:Fwe"ŒQFf@4z m&AdA]/_0V(7V@2kuTv@DO{~r놊c{ bfTh p+'o,jPeAD{gf  @Ho; 5PofN#k;guendstream endobj 234 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 4905 >> stream xgcB "" XX`aa!X&wu6<ߗ썧=s>gvmL؉xLx4-0=LmVyqxN tN<&`?D+(. w@"4C,GAJT?Ӌ +A񇒾 ݸTiR *9ONZ$Sy5T EIedk~b@,A=~/.Y14Ix $ F!0'M T "4I$*H )O_~KG:X+VGA!i(%Z$zHOT A3ZI~:b S8~E(b@͓}- iI(?.6oE=S $}:mbAbO:?C]w #*=MR?=L9H C`*( R)$vTE=(űD6!Ccnh #}(7 J蹔`Wu/b7n])}UO7 "FUݴ7|Z}ŀ2L* Խb;%at$a<_@1XZ" )[0b.'`૯:JlEUK0qYf/z*Դ,X<[AJ@ AeM?*`/H V$AĩW(>Tw^"+Y 6ybiJnԯX `}dž * i iTw *+* 0/iq ;= zQ. M3駟y_9M=$@ tb?#,cZdaDPqro4 T!:E~]P4r74օ.a> կXCPы!`u)iJ"iZ wpDէ!h( * >m0m@3=!0 #f`HĉI$;VSlSI79+ U;>gPMwQ8e:ՒT!z, 6~wWx8Ze~^}ĠR~=]oGYS(TzO LQ @=zQt?SBĴ(e? e|(U`$FA?0=o4J)JІ̑䀶8HOzhЙ:U_ghOL7b04 Ē CRczݕ(ૼ5kQ$9&FD2qꫯ=z^:u7+Ea"}YÀ#4xEs/l{qj6uՓ @D:߉CIxtKTl OUʭv]Tuخph:V ii$AWʍ^nӅ/b";NNI( #}ޯHOOkWb`BP7꫟h(qS3N*0PT>Nch6/^ۖ /@/@fO/(m: n_*=WBnniFPId]2;A݀t\[mF?dU*K {E7^#F4N0mLʦ>18?Reß&XcP ~<>tJzNOc-\@S&ZlJw|r|` "|׮P:WG]k :/1H~)`UH zjlhd/:oGE7FO>`Ohzo$? 265v'xb@Ӥ_$=ڜ# jnQqߴ?N?`<^bmsJQ#R~:(tTHh߉;*_vYҠEL K# U о8S u-~J;?@KDzjI#=`+<eTf8+WI ދQ+l˗qKEVMiE~i 07`0^ҥKh~R2HYl6E[1ι7ŋJC_8Nk{`h݌J/\@_i^-UxU(sܹ2 a:K1 xppٳEnwF}ʠhv}]C=9sL8`2"SvJhwI̽O>FPSi sԩSWw *6WPQߗɤ+uyV7GCl$xiI\;&m R_Ia++P7%hP! 
vKN+}Wu˷_TQnQ0ާ#{Wgp?cZ:V 蠨LɓُW}[{O[b%h'ש8s= cfL:{g$?*D1qJI-No&(ԉ3&n/2Xm%n0/R*~6cTKǾxE'_Ϥ#7951[V0P0t*Ղ惇*U.N"s2XPQaO yv*A ا[՟:JA\:ݑh6IàKVxzf ȫO]H *}KzI"s!厴Tӂ.}]OgC<C ~DDA:f+t3``% ]=4 +}ܩTO  aEi8Ԛ_*A+)5}G81:gOREۺ 踇c!0RP7ߞSqmJB} MS)x'et*# `87^C`(}/7PNJٜX`&LQ_1He.6=Y1>Jr ċ"Yu ^hJ @Քғ( r=}^W_it0Vp8E:Q3W54x֓`<A͛7=KէYԉ  K Q 'u+/Nl5T uIӡd}78ݼ'pPx衇\ z(A_Z K@° ۵dJhE0 51&&) wwJ)MO”25쬝={Ve8ei;[hItO@S&m ΝCĽ7 : xn`1*T@աE %?߾0*_L͇ \E}܍14G) o_G{MO# }j/J (JpyZ=M:R:dO1'f0 ٵy#5(8ojP$oܸAw!|> CRjM1ėk]pKS\O'm߽p y*=ŀ<(/^<s*5~Up,A {` ' _ Dh.]tk4 @bЇ⤣p`w`ܺWiP}iX0j%tDOURO$ 8䤔 iH8/_Gi%o0 R )LG:f^բ~솽r .aR-;0iw` Q\CpU#\IHJ ?5SV! AdD7(ܸd4/@O'߼ 0~?ބ`?k׮?PVQxT.}߃sgܺP,+Dׯ_WqN;A[b0ه}0h AۇP!JpOkQ`N-mQqUN?60 *5Qendstream endobj 235 0 obj << /Filter /FlateDecode /Length 31874 >> stream x}M-9rݾ~E--^)MnZd5%xo8'vb U4of 2~} J#g?8떌?sv~>1ث1_W{ywoC!Z>|ҏ>ӿo?o~TCmOt'^j}~᭕S}OR+Bs~K~+n\Uo3ޭnjY{ZݒU:}גlb%ﭖdkjGdk}$Vfԛ:MtO)6 &߯{S J_ȣ??[D#45ye_67iz7Gڦ~nMޤ\MU۸hChlyk$˦SgԖ?͜x}{ G^ߥp]2mf#ok%5)_HD#|./+gz;\͆B},M욳W#5לK gmވԃ:7|T>>^=õϟ_sC>{e=|֩~e(iSKR\M\UMwnKm~I:[is/{VM=c 4d.L2U̗4_IЛ>0\y)-ΟdXS3;:I ۫Ig-M!:c Tf 7V[IMy9\cDye>BY܋0K0њYRgY|ln:UMؤɜfGb|1Y9S2oXctQ"nL)[s6 Ϲli5lSra$8Gث?wIũZ5d~WB9P^Uu-A+Lf\ ^>37ԔmnP T852QW}͝t-L5!(s(dp@NA^?vZˍckMZS/6U2U-_W)Ե0?7K_KaJZ7e sh+۳h5jӊO%4Iņ1w6tA:9{kܐ*w͘iX`.]S!=]m\s'>i {`v94q _wH$v́^]SB7\8/ >e|TlDeEQ4颤R"y9ܵz`/de^B٦c­ϱMA{ԡUy mmг;ls˚IMy*)њ[yq^o~Gt}hxN&tؐ&->-(-1?5+~6ܩmSrG>nߓa٭ncmd>AM} N_z7gL̜Sc){y5SepS3໠pظW9mv%0Ur9Wel|לZh\/zx3}jyw͝R*J\NwMsm* =ڻ`+>W@siMX(L{b4Ƶ9^1M3Ljex@T:Ww}S:}n+}Z`չEv]sK =yfMqbr0;ڟV~~3`/_P |TM65f@=6>"%v`ӛ'CR+S'ϱc aQC{1畚zZ4mJ[]Ҵ";:QSdsTB*ۜ˾e**mJ֤̏lpP^ $)U '׶K6,ܩnNu~dfSGi}j-ְ{9kW'K3KSk `S{Ta &RSI0OyqtX*-A0Tؐ"PATLIW`뻸N9 a|SjJ0%Su׻>wNSgI`$U/sMF&JLd*g~q=0Q>[R0$xJl1 Zgg=M:*xk$vV+(@cJ`hyK2Ij.ٔJ^W5jYy:,7j®`V:1mz6Is6C<| הOC sLI48NAF/88Ye9% Z)츨`iȀh-n֧s "4M3ߜ`3 ݎ/SL)5)H*)m;kl.b*qL]cžPk=6S ڠja(͢zS}MlӉSԵz?Sצ^y_U nI?p7;=O:2{=uз3ßq@.ש^2M:v8/~*PiΒgѮ)&(HΤ݅& FQԋڔdFkϹ E U6TuPצDh*Fq٭iLJWFUs'Jh0t(%_UmN&jf6-ӨSՒ/AH< UOTtDDF nH Oڨq$%\I1}$ԷqgTW`J:Ga6p,$AFj@@f 'FI_L S­<"S\p3"F 
.O㆘Vl#>+}8mzF/~lHvަt6۝)vLjq4Q&1y>$nO\(:pG#5`KĐL#7SO͔[/r`*<5~Dj193YX}n69Y)\wsCPAPn4ix͇5]ݔ4nfDf gfk,9|h*ŀds )UrK,]AVELW)P5%f Q JM*._פiˮ)dSP5{)4q/ @niq/n"&IԯdW$tHN+w%>nfJ~PŗS pͽ7{+%s,&_ԯ}s~E;#rՖ2S]iʴNIhZӈMC0;g*NLVEԸΔtjR{{H E:T/佨4O/ve%`8C,'0-Ix 5N0'08( ΣAAJp$()q:)Ľ)s7 R` RBp P0n5OWu@4\GS<] P@pB-l!*#"(ކ aoC {rz MyaOyɑQ F()ϝU&, 52r\^_fbiGsXYFB4(L8<-'lhrؠX96#q -8A2,q 0-TNCQpDhrHJ7Q؂$"afwf E6"ن [ @9 \@Kpwt0&N6-q: ӁR:cCa׫ ""?@ 54q]"!4 6A}&,.$E%Q"/b-:89rӜr!8Jɐ`.3$m$PiHLF-5$`e[xmH;yi70W~?رnAǧv냍!!bR5R"=xXx|H/>$ ן̋=Rh 0 yan3x4i,g<@BJ"L#]NX:>(4M"*+{lQBw9mrPKh$ܣ4Hس.Pt{*Ęl56c',XW߁Fڮ8>J+|"X9%!&.%A s qY۳K*!@TPlm"JuX-HD ]nJ2#YF PqM$@-l".K㚏s캌r&'Kۚ{3 )[p~iIE4IH4 M7!7;(e%8? r{&!SlJr%琕iݼ+oq;94F÷QDmMלE&}ɨثDX%$ʮkN59vhNcˡ&f$N&؈͛M 9YoRpF*pNEO8#Q@IrMIQΈ,=5KLlqy{e6YP:#fM-1;#l{3Bywgm9g,*g=#~'C;;>#.bvix>r*~Nt{ŗvJ|iUt+dTbPW$U!6T0Oq~A{fWrZ @`Z^DA[K ,Ej9~X,d ǰE-ʿ -8僂8+ T3Al.";._xuBsuǒX>zE㱟_GapQQXD jX ؾoG{yd~/5:,:TSy&2בzu,G:-:- :dC#籎g(<u(ֱj}mGU8\~^9u^iuGG7?#xב騪d(kCxhO<|L'{!RZa1@Sm(@R볟=p' Ӧ`  XzA$ dGH#wDi%GV F<*r+rt9K+I+yLQ2$Q:dAV(X.ߐgGi˼x7(d!yxVDDM(6Y/o8_y~J{<վ} B=Zϭ[޽GC0ab %CVS ySچSQ]qX%ru Eء{~pWZ*G¶gidԫ'PWPf:Be9t,8ʆreQReaV +f8@Q})<ë(0WNdqnQbiR+Pʑ^2`k$d$,6* (x2WJT#A4O ܧ҇ z COCV얩^L /M%XYn%NV樭Pw%L`otTB9{+#QDCEpB)l#j D!Ԗ„ U8A%KD'.@l$Bza_(І@GFc;]Ċb%ad y2BS0Qj, W U2BTwn['2ZGGdGAUĀ@( fG^ EWw/M/֘<FAx K,j/v%; *@.a¶nlfa ig躣͈Q8"86Ht #*-ja^^pa%Py]> 96aT8cܱ Oji95IB%V_7lG/ >J7s$VPb7X8K|`1G^zӅ鴝Iudwӱ86 )]!8l ?*жꛅfEy&]}Įjp 7[ꩰ/-Bz "*/OUuA/80© C": ΂, *: =ЛXi47DSא I<[1OpHT@ mp$:ճ ~Wr:&uV|gA-ҭ 2YáD.=$RxAB .HI(źM wF~x ($<.ˀk>EwDdQ"nuTt%Q+Xlu/03Y ګŔ"yP]D'QUĤߤ%ekD8@~APtd,D._NA:B qqVruۜӃ ]@f>J. eN?Ԃ͝c$(c!YCT(,fLmQqVfD 9u j W!b+vX| IHBϠ$r!T(RY}ݤ1ď5i ig8ӕsf#騶В Ģ!=j TUhzu͵FlU u1ZrrFJ>dTTA. 
@$BXQm𜯨u9L|EԧRA q^N{Df@_1#ٓHR*=HbaZ9 ISU8-@rZ9otZj?eSH8S .'BՄrO gA?<VxOH4.Z9- %3_'c'l{en ?o^=o`odV:dke%[+8;Z-`oSYU3%/jzmU_篅[${+C\B8ݒVŭ8aNզ1_%^*7obp{֒lvIߒ޹7rcޒ l^zO`̊ "gw}+5@?e,:A"%#_i ꒣3$Gf$${bKVV,koTڠq {#IV>4acpȫ8/!%<*GĿH.D>¿Q|c/pײ$STï}"" з\ sIE0&}tJqjTy#!#[3 qdRf#:*&[;t֐hs jU4,W`reG =#tϙbEv BΤ9Ys5TI2[gJFqD=:76kU 6aк69@`> 3S"/HWzk35V}j5Ai!z%?sgv]Y  I@}7s=LG)++u薤8|Ƈ&4I.>f]2Dh| pV]Ɗ0J>wox9d*,[c`K1ؿDD*űxSby筱M"8D fw;j7:NI!Ƿ4oWu~%`,-3WWeJ\{TS҉tlMƸ"͏H a(T fYmC9@WB˪$z_UN_&"FK"8w!"7ƚdΞًAWS/x U<5%|))@@7h^M3S;]kN8y~D7uєdA* 0|: 6,$X=l FDx1MM |ΰ1"¶\Su#q8Q\)j*J}HDp$6Eq4 9xbS7A1yZz ;ZLr#h5ƲBuOIEޣŨM#91ЙrUtSJ74SY[8O V ֠8`ehe@Ed72lt l=5- )D=r7*=p jq2l1ښOM ';' 4Cbw6x4!\lͿ^5?Ia}.AxM6"EEh0y"S`;u̦h*SAD̚—| 67tqjܠĔjrcËWfFhzWbmQr#%(SS'/@Ǭ4^,heXY9"2ċo|̥*G5o7_PHV$QZH|xW7g.*81:3c,”4nR8[G5B16o/{PF?]}J:$QGټ'ɷi7v%Q'ygƪۓHWSHRѪ4N.*9o뜹sX1f+KGEA<bLFSE GIRIg؅3\w$HdQ$A)i["1cLE)QƔd-p~0Ƙ[=&q^kwAV_i_z3 !e.0{6$MKer.]l}HMj 55&{5P DȀ^UuF4-ךf/g(\e7uŗY"AT!"2Rd+/xKC4.k#1>/|pP-$}~Q-:8G$un]P]% "B0Ʀ1>Bв*BЂ^-4E(/rW#RtTicξWhg@m5v!Tޠ;*of$h׎k~rSDpDVV}-m\mG@'s@Mz R  2%12Q@%6mH :b ek~AM<H4VV1# ;zdwg}1@RwK+@zn'˻C}DHGB"V(-6Yё[{{6|cH#qG^6${, 8oMq_Q`.~T Xu, WR?@*V-a꟏iU'l'HSΒRXQeV8JU+:}/>`9 *};$ Dãh9 5*ϰGۣcV@44 [y1a`c@DAl*0av<uSW ǣUNCYܝ#ImUj{u+Ƿ)4GbiX) 5ޞÔЎzbud(!>S1!Q"TiF(RJEBt%$HiBS R=]+aSj!,(}R{Zx M9J/cS ڎ)UiDlm֓gG([ih=9U^)wi*-z"z'3iMgAbI `N98U[!"I]Yy3 ̨G,|1 _&)_)xHLq9`!1zա# KbҩğHO!FEJuXPS_#K\08àj P@$!{.\2MQ89I8҄(P\IAGA1I!Bd IAQbaDp& J -W@.V{Ea_4k sCb,1"88~mS%aܠnٮ#rP[tV/Oޅ#,{{X4M" CcvXPXܣ#y.5ľ-\Xj `_716 Pb_aG5 ̡kŠ }+DrgUPl : 8ХrSTV@V_HŸ 6fao=w!00_XK_Bo_·sۺ u|{"LM#}gҤ_EJPv=L Waƅ&$6Gm3?YqH>t[HsHt4:@ OT;B{ 8Q@h}DM`Q +!:~_܂MPθuF^a_ĩ g,ߋUTNX'*fX䃽fj;  HlD,uQI&Cs@Hk^Z1}#Xhr3D~5cgA M^\Y6f#~cmS"36۲!d٦-Ad|#/Jf Nf2!*NTPm*:|;Wf8dh /m(eƴ /"Fgϋ33?2ܳkd/YvȔ3IPObVfnD<wg44lu"$>8_EF7i6;I;bŝ%2ǛJfFh~qE%R$FճZ2R"RrS-IBGQ'6) pT zN:mIf+]7QCE͠8FJzo| 9~&~ʆM\tf4+\UN#- l"O[1P]r=ibEN÷S1QXT[X ۖ4Ld7d'+#g QQմ^JyB b@'lLiGPu^`AWs sP7+^bmwƓsp32EsG3 2JMoTb3D2 ϓgRϗB+`p '$dJqn9PF*@C5Jf=]hxΛArw oë.edzk\}$QuK u'm(#&1\grCz&01 '$.J&)gWT(&J W(TZ9Y 
ƑM-yIvS[/&f\/@]"3,םgWQ:Jh Si YibJ[qĄP 2a%_bRK,rhhJp!nHAчFHF.HqdomD^%OD tK;&pD0:\ $ ! v]1HD._ `܀S̠P-`$`B(+|'M;>0x U@!(l(=酌!I[`D*!*BABKA"j ўԺ`D`a&$ ]w$B `±݃X7xWg"q⊓Ӆ[s;̭A"P{Eu!&H|׍$@; @$BBėgxkc!^ C I;X> I>8ʓr'$H'6A/Ęb 9K*ߛY7ۯ͗5{oݾW[HN8QGCr8x`!!Q!]Zf9{pK) S׷uPvW{y;[ I<XCgAμku_WLj  x¼MNSDGRt!1BC"n{sS c0"^G\3 0Mljv8&_#K+rĭ|N&-Ž䝈QEd _m#[XpUD yvv |Q!@1:X*JԄDX<.P1'SӜ4ӃE:h~EЁf)OO×h>PtPqիDIFAUl@#H:Iq$]l& L^3PEv$ `ݧk'iA"\{f1|wAp׷nu]6HAs[L83:Gu͞8A-s.{P Ʇn] ( ,;I*n!_ Fr㶃W:XvN'΍'3,8,۳QˉW 'NrgOЙy4ra  kzu+Nzذp>N,\-j爘Pљbp> 8SN&( p.蝔1U,?brtu ^dϩ)x)&e;g) uE?"9<]͹"':^ty5x%!WYA=L@q~8m}@Mbz3N$J` ao,2ߍ\bO|>LUAXNwomqs9V+o?ub* *\㿼}'sz_S f+Ms-87뚫B~oߑ3\kO?l?>ݟ;[I2NC;Zw "Swz*WY뜜gQGMk4W t5rSsۘ[w@Sf޷y(1Wմg;_x?j T+%_KRXI?? ynL( =ϛf+4aՒlZ-*;rj$[e"\rn"-[EsV.[e\Ҫ6/m!F.9ZsnުZ6yoV^,r !vr \Bjc$G+xG# 6{#쭒KVu|ln-*r*Zoo咽wFmF.[3{+$򋧔_>krG+IV{+,qdoŚ{+{do${+jU;ZIr/kuIV)\/_$/NM^UCUM/OrKVKT%9ZYh%jrdo,r*udo].[+QrުZL`o咽U$%{nܽKVlz6ٛdkXjIV2w{+Uų^w%[eԗh^aIVF>ZIr2JU/VsIVe-ުr-G+IV0 {+=Œ {h_蒣UAdh%jjKVyr*{# 6u\:ǣdo_%9Z}^V߹$G^'*$Gb;do {+JV&${+`}FmƖh^,k]:m!xG.ު[}o咣FmF~dkjIVا.K^[8[AZ׋=nILmOo*Kdk Y>kIV1Kzǻ՟𝱕C]$Ŋ+~ {B,Տ=s?,؇5KU"V3%]_iU" pFqV`_}o% VJƩ[h{*Jn/Lw̉J{gs6ʷs;A#̙(9ؿ;>p$Dsl6]W񜡻:nT ̋B~dw c[)k"Z3 =Y]~pP\y˪8%;,[@i\n.,5%V`د=Ywi!۪l|3`:EK.],?SpidDb7MW,S@زT㔘YωωVϾ#,\RTGI7/ ~aʼ/x͕@PPIjcȡ $+*E@HTBHůa8%9WO\T7CW!JTLisLDܮ+^?X#ʂSv3_ __ґVNI˛#M^X}DmDfYByWX^c=fJ_O>7`PWezgMRPlVG?ElF,WQl%9Pu3ݾhwgp9O| E3{P 6S@Sm ~mR t`J%U@JUݓS;lS2) pMw3`tLUw)7"Mb^*AP}D^(! oz_L]ܭFZ/-4 MIn ^CדDm=eMIAh_ʤ4i:;7r0"_S>%0$藀7%|_A*jo*p`W5'`R…\u$ tîA0Q ]P U1VcJTX$]^IVL.HV,_jWBCAm"8bPYΌp l1HeFy1N}x-1R䋊eӤr%s*q*Mh_C[q32;G.x}BKu܌6LɐkGXQZjvB;9 z s\XQ-? 
P&paS4\$T^0K*lӹz?`1ݠQ hbHmtknե ˱ppL%dMcȤ %Ze׺IыWz%:èk&7ٙ $ЯS)U2q8 XsNKcy&1%'/<6;hGPT$T0#B xD4x4p*\3Kh o;k ),1%?ٵ068f8xCDž)2z)DPlU@ &m3Kf]n@~fTB{WaM;pLtoKیM~Ib@N#6㗈 185fOHI7<&P!؀|VBU[4MD$sfO@j'nM HLPUdxjm% )K*qgˈ%2 =o¤lZ`T# P$^u zAUjFҮF>enƹ4ixsSp)y+ûF2X䎂A cTpBM:"=/)+\JTH;`7C=bʭ$V^ō+Q!>9 ej_m"MŭŠ)kb*vՂx0)1fWE9qƣߢHf^C$&ܟ u4F Aaε q^UJj)Ԙ)CmɢJTa "7=!RBn,PfO1j,d>'e{B>8z7QSzr:'qY#档C.]ݶ_e`EGP/-W1l#m-v6Xdk2^)!͖۠mFcjr뗿w g4`zIſZ$*"k-#]Ʀju6uZ{CN8SO\VQ#)YFQÌZn׾V\nzR_x׾Q,{Gdm_H 6ܧ9^NJi҃k'ZMðmc nf5O&|32i۟ 5LD|n5(qܤysNnʵ2_7_f btd 1,ƴ 3bnDp}q/>P.̝GZ抎D(weTNKyO1G1=(\r˲ȏ6G^w_6yx1J{/,$rʇ'^NIq'.hZƈr\͕$8vWt LzN %$?:q#Sqo3$/t/$PPnN t≢X;tu0)VCJ!bj@n礂?Gb! N:D3!jh]¥ßIt&$N1fkv#쬃BD9^ H6kM5{PTA9 <^5ɜ栂6ʏ늌d<׆dEmj*ma, Y^ nFX^`菱ǍG"R|5S̨תS:*ЙuIҤ NM'fD**x˨TAlՌQevj-nFfNS)<]Ť0s%d3 ~VnA馊 7ڬtuۈsyao9O!O0o8ѷUbaJTzݓX~"×Ք1a5eÞ 7̗dVN%ÝPz.nD2tgo2-b: !S.]]1VVXX B2jmO)díSgFp*vDDFm!^ 4U>k*,, 6gd* ;#L`,Bw YkEMD!jڼ0Ū%$n 9ۡKCџAkf 8&*]Q=uU-k.?dN1q6-IL&&ȓI1TA؁S&Z*$90./CyDҎʪ`Q!ltCfz;_ yf*3duU[(}$ pj9jL;젻Fr0OEt{Ѿ*|;f`QL,aF* s.Uޣ0%$n1S揊,[Ǭa_ iU}4P~,|d37uezPZG& !$ Ukk^ut¾].*!f+AK T{+ P:~nAb.R_^iUakzʎ΀Ga9Blcgӽq)tVp9#BB6gVcG?gdP"3j2gΖ6&8T۱S=rv2ByD)#`>UƆ3VQ}"nxlQO,~\G' D'|zgdoZm[x6|v~QaϵsCBpqKLԄ$u6*}Ax8 <pAx8 <@Ax8 <p% <m_x8x8#pAx8 <pAx8<<pAx8q~ AnA.|;}DƵLJNׂ>o5wc+koQfEGFY e Z&skHt ' Vte;7rfl.mM)vFׁ!ƶTzNjꈖ}ŭ3ZH*xPh]K^Zcn[mdLj2P}kn)}/BB+KmO+z= W` //bO[`d70xv; ek.\q 7lҢ\|״/A|اe~̆isμ8`ZrJ(jXF$/X;76^`Pbs{fYɗ5b7 qI es[SBrm ȟ %Ћ, zZ4O]0H+(6t,>{4a1+٣Xpݧk֩3}gsӟR\]zuҕS($5vbVی3\tqp^R?88hH:7'B'ۈtr֡ywAُ9$G"H861"PB `{&D4q6T H9.rZYE'ZQkN9+5|t6rP(IH9^EicW ^>1r14_7& !)TF]B$`a 1d8pOt$ lr^rI"r u)En(ljsIVHD`{8pu/"`h`q`c.# C(SE9x ` +<%ySp ԅr#rS4SUbXj#RNFiSh;ml `D; "^8~݄32 pH:fsp1--XxR!\wG\}7@7HPӈ:Ym斜o\AJ7Cމu31S@מ(8"*/(9-3.6v veao~o-$إ|K*"֐&Vd6Q L4SI TPN5+%!=Uy}i5 ^+yP SVWT<=N5ȗ2y\VnTdaN50ةr@{]ˎ% Y | IURIYdgg#utLCR*zzȮ}ޢ(*/5>+fT F%=Ҝ "՜;_]`F,|A13_ U#FMP82r 5{FF4@B%hljZ $ؿ%r_GR5^`-DZAzs4 CdKK|t\$~`)U5y PM4yq`>(%Z4@q| <'bP9Q1U:?# S4"4HTH<yp2_8pP:) G86`WUg˿tD(? 
S6 pe)KvcڥD!e n9D&xtfgJW:8mZ2YmA~ϣ+$Fj΅;\ UMF"ʴR&ȄE%+%tzJ"%ڨKi?&e y@/i^ې`hZPiyʤSzƵ+nT>gRҾP&#(@fD OZO+a)չH$TV=ATPSF1?UjdQP'㶎]qu ̒O2$Ygrv=H;S "(SYRLs;)b:zD88(UX[]3rEPdNlFHVY¨%IE+6CEhkitȚiY, 8Cj>0*&P(qLۧEgBTg.bjRtƁY8yƹXͣTktұ3PJf6t{c4@ dCJd0W$$?$؋dM2b4Ѭ|w2-k>þj/Y%*Y3t,I$,e0ÞefX MCl\-,1Epm."!/ˏN%ZJnMs6P]#2|f:FzDW}CX X_|/AӸqi@'^Qb4ݝࡋt .'|z~Qjt^]纺S`+V3)Q>:d,GBVUw$6HM^Қ\[vq}ȤSޗ_w(&4H kiIq 5.,*Rtb`Y)>*=k-˨Hpi&BXÎLA[[$˂Ѳtk ܊")pb2NkKS%7Lhp;hh=I<$LE3L$PD5(<ӇIJy"B]HqүV25QWN%I $[Fׇ[ڨ0N::yt^iGƪ%^8Ӎn0N/Kf-9b'W[?t=Ro !hq'2_}@Lk) oD I ִS~KfN \"urϦ0Xjf6{nnIև<FV:(W4!d@GSyEAEwf"KōV<;Oj\k lCNFL)Q7`n0tZ_N^;h2 )!2Di(3=t#̓F<]>50cɲjwG[t}Nk^-/IJ}../fU v{l_#~n5?Z{#=lwu{G*!=~tS{kG'2̓|J'O1_m:p\{JPXXQRm2svHY_]i5-i:.ԂFkCWnJ1]T y]KT=Ѱ"dU}zύ 8["G)^!3; <4Cbpqۯi[kp+"M`R v :zWM(kf뉗 9Dh9$e=Wx>6||~>6^_J[@{ʚ@:q|Ƴ{6'kԱL5TxWA@E/ڠ>_.m)vo6oT&)-Sxj?7`ME ۣpJ{PG.7FnSm'dZj[ªm+?+p\ͼw`9N7~spa ؁m;H/wLϰ;S&3@CY af==L>3rc?nS?*c~{ay8|<6uAʦ\B+9B#Z(O L&Fvk;Z l֢?=oΕ &x5dTJҾ{%]ҮJ(''3:; _r׈U RWWqY0?c=ISi~~Uʰrϛ޵žyz&cL_ަ֍\7?[㵮&.<~?uvSncHΠŭz)Ɛ,g/0 ϸZ`)ܝ/3 nL*͐ Txendstream endobj 236 0 obj << /Filter /FlateDecode /Length 3385 >> stream x[Kse6~C*nv"H6$(Y=3;ӳ%H٩R*ZΣ׳iz`'}y{obpۋ.xq]q SO%$2(_83c *ԜKϋħ!+5JN=ݟq6o jy4Fo4E< ]%TcŅBU@R+iPB\$ЊTjR3]b<0M+ӌ|{ftN0þiPfJnm{Qs,:U(SE:D/aeBcnΩOp@! x'=ZT.!Os q õrƌ I5d񤯪>! 
4|7;B F  Ґ0%H<5VW@e 'c].Pn T­6Bљ h 5Pc 飹$UY`F@ag}jcU6]4hR{ҨتqF$~frP:6n Ц΅vHv.$F5-`=܄7#K76XpmPz87ɩo泻rƍkJtK* vMx"ؕEvxOP&J؄7u)hY8>Ҵ7jUH+L`j">OvG.,8;'&8sr `3LMR+a^7&y5eѱ,ߝ5Oh)a)Fsix/TTy T# zSs/PKCTA2(=v3+Lze>V䕟xycjV.: nb@پjgDgKCS a ]q)c݁;Xբv΅ks"#b3Lz?o^a(=Y!kEJsg4~]tK(Ӆ>=|淐WY5/$)u ws `?w?lendstream endobj 237 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10  ]ZUm?eB:gׁm_Xց$i, x`K+Sendstream endobj 238 0 obj << /Filter /FlateDecode /Length 3605 >> stream x[oEoV]}b EHöX¶wo ɏRdy9dB8fl37g]3~.nf8 ˳ϴ3제ߜ}3zQϷ|SB/Zȁ1;ߕѻ@ZF(䦐{J&ff~r..Bw 84A;Q^FYѹ|SBw)]~?q9h/gfbU!@&fɋ9KtPJ9(Aa4( J) 8 2W~ >B5XBI8Wr̸2_:L@1$3a@ ۋ0goZXBȃz`8޿ܖ1 9+;c* .UYɓ~ pkܿ, A HX_--|{cwhћp?eM-Ŧ kYN̕<-c҇BX]ݢ1N7Wk$A=kvMfsQ_@G;A񒁇* q5(g(˒o IBn!d9v!9g 7M`K4rUO7s(U\EgTBՑ˝Uw7[ L6+W!%>)y r_uݝ@)|Y5usxu!,ˁ i];Ý^VA3^yb~ Fa. kT?wq fH'BR1P>n j"%]|!mw*SPɒk˧MYVPI||7306'c[`R#dh|K{Xdȷ i*-0ڱA4k*^i_fȹGZ],L{ݢj26_і h56. amiWmG{mblo$2 (Wa.T8ƸM9)KD7c_V'7R/H[ X8_L&Y)6Q!6[;zkb<*AilXP_T,DNu.R@tr=$.q QƳ2`>_8 'M00N|'7%Ey}+";!q{S vsCKREau,Pħ0[ -h2"g@քEМ4X ulJ3z0gBp>!ij%O쒹z*8`04 :K 2%dYzި8WU,_d+hE1X@EBK]L8Ąr66nϒ-3Ҳ$Q©ybA弊Fj|pM5 JZX eT,}HERBUM#F0'4 y(f;\j38238)!7ׄ1D"M>(,}^]B$pUoc50ES/NuXa-MaI~,Jo1+0ɻXۣ4[} S(8w'$B?K0#W5z ' J3_}lw2֊W,{D\!ۘWeoӞHIt=msb(gQAkoI@e P0Ӂ{bΚxxSWwS>"^41_T7sc'^-nޘ^vI[] ѐsǍ0bGCNܻ2T\SPS)SOFxMESOUMD\kT?; ϸ_t:հ?Vk=OhNpT@ p_t45B5Д QZ*^``P_Og?L7endstream endobj 239 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 742 >> stream xE]HSaͱp6s-4 &b¥-̜&g9T!JѲ`bf<^` o8szPtr>-UHE2tQ'+NL#Qל =CUf\X8xí^Ϝl./kPIH p6?Gأf,+wM@tuvu`$L}@R\% Xv.Oo~5^.̄{`EB DlUmYJ(&ם ag霗w`GfO=,M22CϾv6&5Sendstream endobj 240 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 426 >> stream xcd`ab`dddu 21T~H3a!]cO]nn?G ~(Ș[_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*\"s JKR|SR @bI?V3+_}ߝݿ:9w^{g倂'D߫>d3.yz}ƺgK qV 7qZѽM3~_+ۻsO/{Vw\B ۡ݇f<ʛUg+[yNƾ|Փxxe`Vendstream endobj 241 0 obj << /Filter /FlateDecode /Length 4575 >> stream x\[sd7~7/2DU\R y8kKlY$n4:;3Tkt׭ЋK;Wl8>?]ޝ&HDJ[y~,~S۠έwoX  õFtZ~YuO0 {c`=8{pƥu+i`]!a|b*'N?.> G ]o5 2Q^jPLHKC^)5`P b)`t^t`$W'mR^2e 
6*}B@yoiÔxe Qn!\(HT`WOJ&OIk @iUx9&~g)"fZv($Ayo,PVrZqO 3%\w Un>^ *@)7y:<5x kǝV<\u`:BCR+$e> _ bKK<6km7ge"zz( zyyxyxc : L1+$8Z?q| H!^T҉!@4OS\u~4ǒvNx5ƄעLCp<|Wy<>lݗ-!9oU95 Q|>CƠn!V*8nh ɖtfA 3)"0yZG@V~W+Æi *fG2|[w`7/OMSBFvi-'rc?[!&2ز'kpd0Xm`j H+eFNKfSGfIϡxFߧ( ȸdzwR&V]'-Zۃ6Lc$AvIKH%u0Dy'dQ )-7;2qH{pWʧ'2/]8KO.A%DZ% !|!t$skE3N Z.^k4 r7&!Kt:\3ʲa*5YR4ıh 4 6a@B P#aGT J@_6x92yfbg3"7JWkw2*ŨOImO烪(ezSFiewHYpBHI#/ ,5 ?Cџ4U&'Ma-@~3& ,'E{IV3KTKF%_>gN<FF=HHc0崷[ю ݯh!|=l^pdo/k[39lĄKUy 0E>Lb*L#>ǼKg*Izq8/I&=Lzϛs ؇\Eڂ@HBoe+<f3|b G*C3YTcyȟ\rC D݄@ȇ^o{i/iИ4 ۤɑ&deM#1"-&1A%esl1urFf{M}ybg93M^#ȩŰ·\ H;XAc;AOrQfo E@ %gqaTLpD 4e++Psb< +6M)9N/;xQL f$C  1کXz.l?ť-lKײַ&he&ЬtٌI*\/B.^7}*?=FvdmTVHvCJ'ݗ⇑ݶל"Yl;1{U!eW<$v&rgQ8̠*Ӯ`EчWa-׏3Ͼ~̬vOlʴUU"`/=w`[ڭ he%9Lmb޺0DǑαh62~h48ܙmR63|'(е!?1J4t<,Ibx&[ ҁLq`GinL+ʓҒ*)_](Ȋ8m2Y u,סGHJl8;&5&[kEäELRل;L3pkR$H)eKL1~CA^'}Mp :'bևwVb’ֿ4Pد}a$ÙWy/;7DCf޾l~ʬ]sRAWV۱u=fC>|t&f`?|^O+. Wi W@Qb./QT_p qJEXWefcÛ9E?p}-\Fl'G8a:VAK^{E2CD3Y'_g8 _)޵endstream endobj 242 0 obj << /Filter /FlateDecode /Length 2109 >> stream xZMoFBGm)rHݚrXcAso3dvlAlyo I}V)ovǯ0ǙL9/mvWXirrn]뢞[|XZ[!||DgLldiW].z!b 2Oɼ$s[]]zxӐ} Xӽ6~o2,~IZ|b~EdJ~I'wd's_eo2%mxD_ 3){2YNI[^v# D_j{I|U6U6xN2V c<~sѪUAUdAu[9Xy^5$ 5_'` VƟoպ'պYM .:=:VṬWY-O8hêLa#UX5O+^ƦMWl[56#iq݌Pu30X }#DUX1%STStIʷ";]WoXpAFf;{Q{SOA^Rz W{f I5zfLn^WC{Ejq)6dUG%SUWY9RhW5V!@&PluCф@_-,gOj#@>Tȓnxʺ3yY51w_,{hG`2YD3co-ˀg|^/r_;ӃX]eMa=X&*={H}jz}]J7jFm|'K:i xQKc p.ku]2o0쭓z}|rfxF % N`k\@$aMpf+-r;RK)#[^ t6~{ *͖ߘ8/i;hT^(z&KʉB% VHAGuR(R|i 1#DcrawỨ脔Jh}Mi OiJ{0RI `U5g-028jRi#}pX`LpQŃ:-ux/:'$0:T qȨa?/,t $s`PͧtRSI%Uqp. 
XL-;\&Ac^t@O kSsIJ7-r],Pן؝J.s@rAAFWdӱ@s5 'M{=Ѷkp(W)*4eu2糞4\18\5[NA~UON7Wh'X3VE7“;Y~P!L-;t!=J*1p\E4Z6<-% s)a*H)E0dSP20T :5+yC/Rt -r^}QRN ascJX Ri.` =n!.{I{wd_'V},^>]:M>f?$_&Z j7CrRa"SxdFӻaɭq2vβaEO{Fi3&?Drc-w;2/\W`" 4>xI dHb}Ι RO懔_9VÂ#0u2+~]0U݌A`aiG.la%޸[`>wZ:?MIO s{RFQks O8gbaj;܏endstream endobj 243 0 obj << /Filter /FlateDecode /Length 4348 >> stream x\K$ }s!Xv j l_uOWk\&bvC`cr`0`D?S>SS~_|g7g4Yuad*g>v &峐R8;7-̭vi_w۝|1'Jr"c`/b&2*7FjFnm#s'_4Zِ6Z;4:'3>VS@:VRv=KJdӿ_  /8{R5^h>w|hU#_5rȋkbރH!L^8w:߾) [296dŤ<ʃh4.:gʶw4I S`y*{w7mS V_7B8ǻޜRT"|vg~yNHjb=!ukG"2Zrĵw@H'1]@$jFy1|M{ :tس ,#-vOQ0pa |dIB[``ltTSb`uNNǤ6OIYRuЕnT OIDzPjggL9sB\y~VvYߴ'T8{F^ zt?<5t^6F0)~hBD"|*J W|0^,eo59m:UY-rmDL)qHJ6݇s1dlL1 ˏCNtMG= }ٔ~HDu"lH)KOZ~7L#7ľVf$bdGP׌h]'aJ,vH kC +\ Ô/3\gm 0$uէ-];^wɓX]%y~o&,@ბ $HP~wG#0I F0$u*|e%lGڸKRjb!0= ɫ!C(]l; _BG/OLpf1H{#9}XXƊ9Փ΢ kFN4KL =a33#Oh;;3-q!scK>~&lc Dp B|_o$SQV<YP`71S TAP0d j(^78$A0xpmnrHL4MU layWӳqOe´sQZ=iJ.Ԏe]w$%),15ԋlHzRU;$iNs^ Y-enGՕK݄lG¹dX$ y/ȲS-id{V@aRNݟ[)35wVxܶTl@ e2źDҢu%E wJ)>*J2El[OPP[ox wL112fbdL_Vs'2\1]Yl ˚^mC/=鿾R{vVe[g^"*/K"6O.֠hԪbԕPMuk:w'r 5͊;Krij&i inoeWgeOI,'頃6gO C%x(I)l| S2g0wذ͌˿ƶ¿KlO,DE^tFE=* Gf]ޅrdtdg-~qx)0vEgFNçRYSGUqꣴTF ?!^EFNXb6md_Sv̓= !-j㒪O*'f5> XCQ.>| |p(%4Kf\׀ڬ燧i!~xH_v9zlkv@?&!ԼmKIcyqG jqk $_ti"ǻV4/vӜeHA =,>垙n>밐 VBXwZ{1@\n/=rA띯ed].E2ڤ"?W/),jhȄox}[(J2Tz%_c$D>t({z_2/2jM by| =cvBLSZ+-EE0|yӿ7a8F&qls,ҝe2_CDR `r!393zBQ@D>guKDr~L>DE:vkWL\O k,.yqs:"Wy ##.)l>9v'OFE"hZz3buG|Efc'MB5lLt [:_I9STXADYN!BSλTjJgevT㡫Oc$rʆA9 sq3_([:>D W͔gVG:Bfhe̫C,jD}| O޼c$QEm;;& _dWE]0#Ujz G`)'a?NnB?^?"Vn2!{yn&IzB{"TqN/qgendstream endobj 244 0 obj << /Filter /FlateDecode /Length 151 >> stream x31ӳP0P0T06P0P05WH1230!U`aS027i`9'O.}O_T.p .}*.}gC.}h\nn@n.P9?47XΎEAmGTa?ߠ pS! 
B)endstream endobj 245 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O1 y@BRĒ.ZUm?@o Ct;:8?%0H_#4YZbLU`@fw5˩=^RTn"5쌑j`4)βB`JM5Fr4-MrL!`ASlendstream endobj 246 0 obj << /Filter /FlateDecode /Length 2000 >> stream xZKo7 /=݉ -"6[fh]G^K=fDl u9)Qy?=EW~nv'/j&g3H*Ǒ>,τan뵷n-P/XCwDuW埫O"tox^*^-F^}"ToofWxF I⺊7Ub}E{vԑy*R=5sǚ~>⇪lPNguL)荑H1,vpbJ^Z?_>/tIϫxUFO|*Y0=F+ ɲ kSE͎zVְt~*}>Fi5WHO˺*'$H3AEc51iw D_-AB8 M`*L|/LÁ_"Zȿ0^J\X` XXrzR{q3<%>(zKK!dpo $bqԠNwe VH2E0A'XpѸ[ԅQE>R"ojvLLFEIz st+e>jY * ǵJ:Jt/.:rN p8٧W-[0P[O&hP8BR ddy^'mԒn&0GgCޟ'S]Gی:R;k~#1RyoLbb@#O9n}@^2 Z  A;|X)PB;rHzB:ᕲc破2]L%e Ys9yiV2I E ->Ǣ@{v/Yގ4CAAHo1cyq,@KDT0~txomcPm@!6eM`0AMp̼cF' ꭳNX z֊!AB yECOx-!9`"e; &=o o M3w KQ[,bRa4yf5KD&A%"<Đ|m!ِu!E SPMÂ(4L)ï9K\R6gT0ۈdi_x[9F7 '~S/[TF<7.Q6Ad9%‚Ɔo+kcw@7ʩTi*Zon>YO*TOHSg{iDo_K7/ Bn`0ױM=}f(Uޤr-%d&πcB5w=()/ez04yλ€vcZ^ b@lۦ # Pw w0 R^zrs;xhڞ;J'{OIgdj江޷i]jsCb'6miY !Hmcc)^4E8=o.{'bnj feendstream endobj 247 0 obj << /Filter /FlateDecode /Length 7568 >> stream x]͓\qϙrۖ.5TJR̪Te$WKJ{x@][qA[<<WbW~'HU۫x 'KQ^=}$)^ ஞ}A,JXt0QJڽcD*}"F"vb4BIevi>S08XRE>1:{FjwiI;maϰ9DܝJ^Wlޫ4e7i_m]yѹu4@$;Q fޙbyRdc۸x +";ݧݓY.cɈ>f\X^^#kFE@E~?Yrw$)Vx.oHށamo!Ƀzw|('=:*R{k؇y(#%f.=5F?cOk7¢}R xFkQKk2g iLS9|Z9O|f+%KPa BS?!%a礥mh/"~.1wt0 5FԨpo@܌4TnwlW׵y[jF%kXwþ~D[WxpѪQK*Ywn׵ގ'aCXASm|_ڼeO'B4+ҭwd}Ӂh~BһG|%6J9 {=N(%gw)lLE04)M. 
"I-@ʮ 45I4%5b@)  Q>Y95ү9~hxR`.Wk0}V~NLf6[a-mv$MPDǁhm@'䡄rys0 x ;gE->h%a9mڜ0.<*Hl8ɭ'JD|@\ ֈDq_$3w ^М~Yofӥ%ߧ!߸h3KFL] -8 niM`}]Oq?Rɰ͋8" gmHP|?T:,& ^= Bf8YH@CEli\15L)-xI/2E :gSu1ZBW$Pi sl'[g [C(^yŏjXP(wGhJ= 5YzC|jv\G.a$/FL&LG/*Pki&'k wpi|xl \bP&OMBEEXgL:,8cU Aon?UeGW|QIۢf2Ls%I\)Gh3fXd y< H2~Em*i!\!.rNf@p (͗tдɞWCV*kQ>@[eA ${7@Ҟ)昊9HFh6J+{ ^dkd(RK!6g+$Ge}h\cD\K&`^^ h\kh ̑%w65$XβU*\l y&XszaT䕓jdPy9aЊ_]o.q/.ûyF:K)"iAK~CXrX߷þ0#0Jt)yV;cV_Yخ.Z`)⊑Qmgsg"vh4Ah|}UsẎBf[<,r6ߍ}k@WR#$Kt,9tﹾ"Dɱ^07B6딞_:GpӿvϞ-f7 :8 [2-()?#щ`g`8T%M_(\|@'SW%diJis1E%0Eٸ -\I|f@kb*)ENò([`xj4B{0դ间-1ꦞ(uFhseAH&V-9Y]z.SZNQ3FyCmaګ+Cru'adƜٰAĐyUE,TI5g!g`o\X[Axνx0atՠ.lX#QiDH_eKsݲ6\ɤ~az2bL%SAL>"3rGvDP0ehB}( l~W n'ˤSFCeQyoK[/ <ǶrIn[4ה,ϐYQ ;T+KHp(6f( 2E=Hp^/;F# B"My:0 H y8@mõxȇX⩘"wDQ-X+#Ct\mN'az|6v\;]]@*@9/yJOJs/}MǐzfXSsTl`4=b8D|]ӻm:,vtJ-\Od:`)&mhpO|PX b 76r'i7 usatbL{bNGŜkP,3h[fSblfTYj.M RMt)= RQPp] q[7Lx:GQ\O .$]zQȼƯ(#ܦS>I2¾5;ɍ%IIpm:$XJS2 XqU;kCP3t˦+25F wICݰ )ƴuF  Ri7W<ˮ>dwMv um~͋}]dMvF)&F63w8=ʏ/+K}Y\ g%FGgV>>cnk4r ۵]{÷uxi]<Oᙢ_zV &C gGƮ?0ɖ95Q||Tۆ@Z='嶸>+Q:`[53:UmK%|[b7CTi|1ym>6Ip\+!thrt|.uT4d=Q*DN,"%'UV8Ubx=5}޳qsq=&?hc uڅU'> 9})rx&R"1_'e- ҢxTcC[?^d6,>vF20<7TIЪY+Km;}K&J) ;`]V4zz`Ȕg",nT*D*>iF_DZ_cmaupχ;FOH_tjdal=tb=OUXveX<3 ܯϪ"-*l,C>7TOendstream endobj 248 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 437 >> stream xcd`ab`dddwt1H3a!SG$kc7s7B``fd/mq/,L(QHT04Q020TpM-LNSM,HM,rr3SK*4l2JJ s4u3K2RSRSJsS ӃP%E )Ey  L,~tk:s2n.wIȋ&Uevp]i> stream xcd`ab`ddpt24H3a!Ïnn?ԅ' ~O+XP_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*@ݧs JKR|SR+"Y޼zƍ_cU"9kmX@::9~ Ϫ=e+]^cڐt߲5~鹙ݒm%O{ ªYiyQa2Nٰm %+ts|:̚v<ܤnʹ80e^N=U't/_W5G~Mf[2s}?Oendstream endobj 250 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 864 >> stream x]lSeϻ։9\X]YH qYKDABLjwa?NiO[֐eӏն7Z$fVM$$dF jzaDO}kaGzyyd}||^ٍ=]^|Nl>i<lhp*W P\ ns#6lzr''G]x7ZlE:_9Eݢ{? 
~{9Ϣ!|ˇB{ym@PBQM9(e)5zzخU%^&A~3/@:1L& ]y[I|MqPUpClEFNB|ut0c& 0.??Lg$ .å"T2U< 82fxV 0r 1˾^O~.C vvrSA6cBrwMC&6PaH+Po~Րasث3x+-kTϠ~f~k&&8̍t{jK'E98ΨRMl)KUM#&y4 WTyS iF2wX2NHSW/+kdT\۩؉jD"i#[Ҷ؛s}S>wG+x6> stream xYKO@WXD}ڻHh('%*LIz^lę&EBr@Q4g#<WI8<O ˯~~Isas s܉0O,v*m´4 nNb]4x5R1Γh g+ 0,QFZHI8vK T0k)#9X+Pp;ܥֆED_y%"2zKN Ԧ$D  j2H2(@3.*_{1$AHz Lom (,Ǥ zRz}Rnp^tҒ9S96ѾW*mYRYQZڜq3q3"k MT <,8•WKh3rI8$Ý1ZbѠ9!DҎz%Ǐ%yd8M 4:4"Q%3O9KW_eI J6:JC@ЁI4)m+L"\8yml! b\#sz=`<&XS11H£Gf.]PcfjbQrƵjl.$1ɨ#Te¸;IF<꡷q̄&5}1dwBX^&F؎m~x|t^7 ^_<x:)̧` OPKg =x?Vm+ܪhTєn?,_+b/W4U_I-/ADlU4vqF]:Mu#wIRtǁv[ ѯt^Yku=f+ '}5ͥ:ypZM^SdZM^gܙ]6g.2a)kHM4r踗Ʊ?2p1@t.O2NPvľ79}_bDK-aڅ]řRH&wp?_Oendstream endobj 252 0 obj << /Filter /FlateDecode /Length 3325 >> stream x[o$ wqND}+m"A.(6@>;׾_fFԬvKS{8BHEH'7G|#8ΐş"I%X/a|Y`d6u|(Y! .T )rQUO>LlҋMS46ﺲ}B q/]u!o.qa&m!S!}!c![>ZyXWi'&r]{kO+գ:j>KZ^,W& ?KN)Mwyݫv Bt4-S6pͫϧ"m=A@Gb64h6Y <2Xڀ}V KwMrXY6Kgt[ hYDѹ:4?qh1*=c4椸dٖ1w1!<">K⓮gD%{%j 8p7("(&r86*9иzY\=-@̲kIp4k/H?d>5e* 9d0H74MIH1F6|=3x&}9JԜqZqa0E#8RH[&pc6c~dK.zƺp\8Ju^WX0rʯÔaèaކh[(=mEYm;< 5ݳurLHI^85| U}<Ht Ye ()-ۼ/g\.j)N m- Tֶ݄h:5btmd;@rTff[ V?xkV4/ K1Ų"!5ĻBL;?!N 3yL'Q?ago y;ln`OE/M|u\M4MZ4BQam7ͭ]5OTA_Iɟۦ)FDJB!}Aڣۣͅ/4&| cD3gxpW"Vn) )^]SBn.%s\9kxUm!Ż@iMm= /+\ѕܶWzPՄo樒 %LsԔ6?sr GN~V8%|T$DhM>hbѤ`K49%|DnM^MvяPZ'TpQ+kRQQdT[x?SyQ"*E҈1/X\*ͬD+'>q"S5+?Oyв,U Q5Qah>T~>̀qR5s3k-{[ʕPpehWL|QDZOvCע%s'ihGܾ­ j5j6 i>@^GQPE\VaIE&Y=FVVs<>*}0/A[Y:J}IQ:?fQƩ^3!]5;-o Ǽe*T u6sȡy54ȞVٗ%dzYωKx)%2Yh'ua@ةꣴ `0Æ4Y*n-AD0sgr:vi0.7)6{.4.^_..rO!Rm,LS2+ԩDXVEX/:@k%e S[g8#Z[N6 y)Nc> stream x[Ks3e6p99$eJU)Wb|KTt70@cъrpE[ Jr%ӛϟX])0l,5]0Wg`xu{ؓi9pjpRC8EG7y%ﺨn+y9 Cn`R`J9Xβ 4oZR7UV Jq,jI$F+]wk8ib3..}YpN0JK̯ Tps t>B堧˙ji*Hq=p#v6bb x0_LM*L},֯jM" #/1ߏ^DȌшلVصX 8-pc K6t̕J~YM%o趒7]?EfDU-{ڥ^(' Z!m9o59/t}Z5N*g=^)[,u7zm{qv`slԹq'5-,Qͻ) pvXk/{ݲfZ}jޜI^҄˖nI? gMDPoAϑQ2}eޫ@Q! 
`1ڒY`g.uV7@.xXc ,+2kCMݴv:LyۜM/ꟛNly؃;VON${roLZZu{ ?-M}z74ňֿwk =Lfl&.`FɷMI[B\hwm[_S:Y;M}hkIZb\PLMACsf"+|yAH80g}ń^ϫ q:>w cȉ0{l wȱ3qQ-MbOf?em+?ߘԕP ƥJALMmB?֒>$njv+ K!@&gl 6 LnBBӥ@/ZFqjU_ao&>)ڤ|!c1^ȼɶpwIGc\=ZPqt;8kJB SiHSz3Ju(m QMGV;7= slTy QҼ<˺tbR)cS6͞ «)-JC>({A""0S(> stream x[{UřW!a!pͮwsO-*:Ȩ!:830\f."` eCĭJb(jf FJJA:̖ussRz_68O Kθls%|U9`@qS66D.WRTr.p_qazH-%SpBQyPء5d8. Y %!&>Gu8l4C& 42CZau8 4p7N69s6v5a|8 fGeCJ Kqscpc aU8+am,;:2lMLߏx$ֹSN 3yD9eUe)Ş0D$UX"))%zi`2Q F8BȏGCRAj֧R0$ڢZAObQ㟀1P~%he6KflvW8$'1$i@ (Y'W2>]@**1d˗$4HAxyx"}[pS V)ogn}7a{ [6_+fXcRa!USL6XQnIN(M)J+`@ I0 {[vā6A%}naҜE43)}F]& e|z( Șrm-)0`N$mx)0F8Sǃ@ڛFEH"*wԓ;pШ(\lf}nD _pQ\*!\n=Ŋ AXI pσ h@(ڑgVBWZbcj&0q3a|aI0Hn+#q'X`R)r3,O,*+ز$=;H~@s I0I2 ph)Xd2qoo\Eb;#: С?S̙b-r @,绝@`Fv*XCa0jPJ'F?TEӘ$\+[ʿy$(\@tL"0`` gs&\Ffy f9MET*+cA,d;` {,\%v49ᙫϔ4yyWBhg]b r )' V)&|k_Dž=,U8N)Da;3̆C7N:N>@v\;HڭG92*jK9JV*J_h%S S O wm8̄0f|IxeP62q.N]9pSjC Q  5;=N]b[q+dcul/%!V$ d9Jm\ַ08X"\ k (/^#H*XxB:D _cĜXцdT3gtW_"rK|N2cublF "fpk;G→"A/Lw:2TH(']X]5rrA ejgf/1x3:&g[aWo]S:/Z\U,bwx}֯_?۷?=;wy[Ϝniyvčv=X;~'?~w]߿bŏwvj[|믿S~>sS[[/ްaֹnOsM7tԨխ_|:$j~%ޢEKJJ6͞}[n9=Ƕt}h~=2#?zȘfҫGzН3NO:~ڙe3}6ٿf]ԏϺ-Y!C|뭇:M1yӦϭ5~aFtt k~e-]?gnҡ^ɞ7nG:73>ڻ{?ݡStϤL>޶qc;?|[sm'|oMnW6^^~gVeݻoEe7Sަq\]vrwzСCU뺦JJ&m_rϪÃYnw{NܽMܵpj|n~G;vxϰaO555ϴo~UCv޽aC4aۢ&țc0a¢-[v~p`K,iK=ݰ2{tX4w%c>lޗx?4mٲENpu?BݖL.\ϖ?ti xnA;zB\4ŋCtle4JÎÓ:w,}7o?:xƆ=oVKoU7E=tpC{,)߮CѦk۵__ǽٳg'~%v' KT 4G($ bm~M[J~шԞj/J@SK~ё%fU+S"GpYY=""YBdN 5ɒש E7R Xk܎]"o0=@o+u: ]` ,m߻\I1X# Ә0T]KZê*쭡$`HML*JbHT',鍡2I`50`!oJ_P.2!`_(*f}PяG֩)h "7ǫ#Gm-gEbmPc"|eDuLK~̰iɭ]R`Ui"02r%<&AT}Uf]n[2B 3F]P_)mVHNN#/#HP_ , N[P*T կB+ _ DΣ!5m)wJ 2i(?TK,q"Df)8g{ET֬%2+še:!1= : ǂ+}^.\ "~au0@- 0Z@PK|"H 6 ,cT@lZsKNQS qRφG^]y^-,S<3'Y&$S\#Zn#˛G|k(-m h͍V ƣRuKd&m +0k ߣ9P)"50XS ȉN?s1سb2DN-GB;%!/j?Wf1o +Ħ@bClr 5 x@lq1]ľ Ħ8*ľ BB y܊; eTR#H)ZFOj ($-،r{R5ɒ;j*"׸^)Wzj„!j@WpX1a=#y4"WbaqPݽh6=ۓWLQWab3ko%V5Tm#> PH(@|kr*2m0":~*q)N(8yĴ4Řp&W$,M"hex9V?:aT]lUcO"L kŸDXTWu_\ɯe̵uL_X/GbkfqTL#ڙe !CMwO uYz33`0d7]IxV?0$ :endstream endobj 255 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << 
/Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 2019 >> stream xۖ* D'7AR_b=¶@P;~۶ͼ di v@%_~k?"IN)Cz&@~rW,tdwx@cK踣4?_WaR|Jj.d:=Ht@#`,I` Gzk?M VG aV+;i[sK93?aa}Oͳ'_߿5,@c8V}I 5}f 0<uiey?կ\Uwh-5(twgѳПշ "mH3P4oJb)6nun{ [@>`@Bg`x}ҟ62i%\@;L@n1rK3l `?ns_@ӰSh_G#@6]lW6q  Rb˺<ezT.w\ MBO/ [Y.;w:J Y/uWP{^o'4On}gTjkY[_WjRs 3Q[j`R>!s<2nK^^HE7͹3 @/AIդϮqs kA͆Ph;;4֏bpy %ȵ m*m@2C37ͤw`嵍؁&48'~3f茞 4PmGn*?n}):G940(d^/(M6X)L>R3Hfv@YWpRfz w!ɝt{{sj|3 jM` qW@R'C%`yzr2u+m и@V~s~2n@..HƝR }[A̔FH,Ǵ^S-\ UҡiM$љYf3(cw3 y2 [ (ލǍ7>k "  Lqu,zP\?M#y,x~/3fA}!Cd@MY?^׸7&q&/oz&`=`} <퀦'Pj3@J֎ŀ7smҨohZ \f7 (p8p{Y; ^@Mf;0f4W0aܼt}-|endstream endobj 256 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 1752 >> stream x흍r k߹in萴,ؙv|f%>i??8D+{V8)sɀ9j:ϯ @;_tdR lأ@cvH2t껑(rg6x4Swj hП B*IAQAe,?@L-ul6=c@?5)ىn' :ЩAcd4h FR;]9t+ fu vpK![}QV?Y3mYwǓRO@<my\`.h?{z7s1NjX"% S٢{܉>x)F4 \Z@dTo`n ߾1Myf=sy h@1{$"l34rl3Q, Ȟg$00 v)EI' "`M[`\mm.69wAQ6 f'8 fX[6&ǁn@n*/bv;0OC葓d MzQ/{cmժ7>@vH?`@(]?J+7Ab/ qASи^$3 xd"Ot~=`e:!Eǭ\Rۺj@ދ;(BSww^=8[[ugn~k EzT @<S#7C] yP Ɨ/7"`4zM=:Q xڵu @FEs 6g2uʮ$ #7dưDI6nl]6j`rmٞnvFc`@ ` *Z J2 J@l;Fp`.Jc I1.j7EG@t>(fpu n ^Wx{3f(5E׳&P}=\ 3@;A ѻ A4G8mdN>gIٟ#@2 )ig t/=F8d(2 i+(1nUhbր||endstream endobj 257 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 1809 >> stream xb* kvnZ ha1^#Cvtf~>zUbZ_ZGCsQxw;8|b PfpnG Ed'P&Y`#5h9vg]*}v_<2~]h#pk\ӣٱp ]H<bh)n94v+M/s+3$FRX e@:#W@vq }3vV:UږK4 <VA.+?7tyUT&nȅ `Ӹ'^uv#q*KQh=':+p{:'Ԓ>@L'Z@ + jn (Mp/b`gٿUvM`] f0 IOz{ <P{g@u6؆F-XF CT@;:x GPY r{^GliB?Wzmh H8/cp$% L x/TXt D;Y d*?8<. 
; AHbR2U*@J7_N@ǖk>ޛ 3@&#c˺dj/ǫPw j TMLdMg*7]BlWO@ubߣIy> stream x[ pTVjh<v6{׽83 5HR MBdl";(A,eZ;ckbA*Ev 22Z<;<A h {sWv:L?'ꢦ>K#ɻhE6Rae*Lj sҊC1iϨ ЈSQ::Y8,ô-q@Q̘a!-IJՙxȭőH-A\&Ɔ39rMTAƀcDpA!0ZfA{W&6m̰x nƘFd,:La6G\Mɣ1`2ôlJ *tcGI5lp$yoEA`FnHrhrh:R4`@!SE eҍ5YyL}6J dɦ⮑~8cZ~߽c~ϒ>>gb~K~4lw]{ ; f}z`ǎ8w׮O=TS?q--Lٴ{=骪?Cym5=_r7zuzϋZ?:~3GB'O\`ʔ)g>1cڵkW'={_}\?zsw92^ܭߠC=u-7cWm}ׄ 3^s5?~ayyy,L${{axqs\m[M?m(|fPW=:A7pcǎm=}]~:nO<1% &M1㦃gX;ۮZtF|||rǿbɺ{_߼yQiߋ8gduuu=z`ח--X`#t]_{o~M_cW6͝{;C^}wv̴opĢyu:uBeϟ߾m[Yfٻt?NPkA3{̚9}zws]L&lƌ^8M:AU%%%ۛr=p,zr?8nϊ'N4FG>h*krUwT}k雷O9sĢ~;y.[^uxtt0̟/FIƹ0r8Ρv ufL`Bhh#85(fpg{`}3 LOl "ʼne`D.qR~ ]#H2v /%_|/0~E s`\pQiwδL3Na Ǘ)\' `+)44LKy.M$@kR' eC0ץ#3mi0>2-!}k0хl庐х t! yhBOAS{q"trdPi? QdzP)p25MPm aD(Dbc g(֨R/XlINҜԻ|DN r1 2le'ǚlhD፯9=<μi|=Wӊ4BC6T_a18Z%+BBJMfΞѶV_2An1o.PNi9ڼ Yor ` EZcx8{"7`ԳŃX}՟ ;i^FMVS){&5.\nA6v ?,P5绢m1:kݮ%+?GRj?B6.'}6nZrM!b}4Z#\D;4NZiqX1pc5MCIju| '?nN> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 6306 >> stream x]$[?(((>uED\POEe5 @܇;]ޚ9f~7hjz{^߭GWavիۍ-knܸmnqaofl8G݈+NnlO>==N-^08>p8lۙ8l_@fj[X&|O r4FዧAah =RCۧ6} ÆLšDL@=O:}]+WpD@P(FE٢C}pMχɐy^ɟEw_t\< ?!CS(W蕃ZpID@`/QA_hۧA ' l C,? QVYk*B& 'SuPāE?>ʁ% !|@ q^"p`^@)}DaL'dЗGP 8^ N\o p{PiAV]bO3ͪ:AѬ "pEJ\a'jҮgo eh=YO~nBb)&} $3Xbeh?BQ<(5YS`%zOZxEA@~HAS Φȸ]Ed@gcZh""c:@C6Ъ.)iOK &܈xJ][i;Aш1&@ZKTv`w}>fQsHg>;0B)\p x(: ed#"N44x#p؋? ~<Ts`qй_~Q 2y)1)kwڜйE"kqb\ 8Rn'7M$7ld( 9XfݱA+Mо+:I P&2b(sp,,t RL1A" Ϝ9C6!<+%Am " ztGP#;@B'Zl5A>k=]|>& ާzA@+:-Y+\ N E1"+-JVA|#|dOy,t6| @Pr;K??q`in@3m@B36|/h% / p)'Me%Ѡ[/"TߤX(nCBS|UX_+0GBG}`9*uX/K)Y'tBL&VkQB='>"W_]/%!lC=DOPҴ7xctѷL,%V wf$W(A{뭷 O-"R,FeÄTбaXA{-8w}WOSt%^(W3-BZ/?}֐z.?Y!d:n?>z`bn C_ k);>#M^Q;T9Pе+- ;VED9;>{^!PP:v4YgM`ߗǗzY&t/h%Ѕ (JALp#=У,a?kċ? ׷>,@+%N:8UGgwcrע_P@@_|_lIr E+fHBz]ГveD@`/,|,?ݦ_&UY 3JngLLm5cI p #a d'kk#3W 6=.v1P: ~9y`LV; KH~!1sJ')LSMdkq^4Y/_ZA0EM3W7* ǯ(ۯodw? 3&ZdnKE#BnoFyX,뺚 "b? ?$)T@?`5F$$:U6p$ p].]H+w_Faٟo~+ ] ݱn_~& hŏ-@Ŏ9O?E]2,Z(.ucr""}4éu` , 6{gM0px%98Le@MP)TlXƤ/ixǢ OmJ[?1aM@)dKbKOM@q{wY \,F{ |[n! 
D+b!4+վ2ov 0pD4J0}v!EB86o~fPn^ v1Qؗ@ր!d$"HHc#Nn/^/V6wD,dKҠaFhH(qg`AB$c$VDK/4 zF@՝D jɑ ay#NRMPFАE(p?/e_AL{#?#ΰůS$'-F1NAM N&-,i@3'og |!m P jGZΞl>v CSkԂ=&cDH@;<,lհXjULJ_g|쾦ڹ% @`gb7j(c$Ad4菒 ܹs1r#`-DNP#JWW#⊾rBet3g$q!9-^h Nd1 2n$ G~}. A0:P??7yj~\@pTeW>?;u"X~#*jCa6(lf/K@GSrg* z82'qjBD5A3^s.& bMY!!#hߢ z?N=ZwmY*.8Pz"Izm~(Bjb:7wuW=%`@im*[NtϏ?gfn^sj.yf;k! ,G?@)QrDlbymDAy*$:싺S$ j1o]'bQdXi!:vk]C%F?mF&\aE'fk::LkAwKd4L.h@* UBӉThG-^p-}뭷 f]YYѧvS=%[Ѭ1 _;pͧs& FCX_$gHkzUzޕIi(x"?/|GWAأ/EHt0:UPqM9of-vٞv̦}lqf`Ş0.d9g?LH (uF"%aG@`OxY-EP'ANGpM@5^^a#XMi Pu(IVdeW寢[$ #&%s=kZbo>$w&=8| @?RF璡Og}gśA祸~E6E܋`k/qv@<@S&*J2sS{M.=CQeE\ qpa'u:BO"m%b`wYoPpu[ɿh>Ҁ-ր3'mPG61,>^o޶]h]&`z 9SPǥeB(ƝVВ%LC$`."w0  rR~0:`MSkяУ&endstream endobj 260 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 128 /Subtype /Image /Width 128 /Length 1754 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?SJ@<ѵT6dwIceq[M8P!]]W`H4łk?j1Y~j[ +Wba]N6:&RWvjheJ7P>> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?WpKW%~rMojW۶MgV>aTeN?P[mOi+W[bQ@sRUr m'Xځ4jqrk]vsXPx Q5fO'<1b⣖.*^* d̘r Ѹj9bk:E,kVhMV0sҀ*EMkX8A=+Z1@Zt{q[EdڨP*h Z͹}٢Yޡ@.bߚ˸'tk9̎hALMwm`ƛn_ "$sQ2֋[J>7TmW\Ք"*hdGQDԪ jP[ȭ1YEZ6.(hik):MqQ<)͸ҾXwsP|~8w {PA%;WW.:Uy~o&q\״nB7UU;WVl1@ 0 izT ֜G,LOMy@]6sY2jwT-Z@H{X-[v8 K#E-#^RMjZbԎn(h\`u;E2I z=&[;v9@=hs޹ =j@K?iL٪qEK,5YK&&f}uyn9@$pj ҙޠLκk5օg5!&<ܱn,i&G/QO7h]{kTnhD7-Y!&[PpVl- un_-endstream endobj 262 0 obj << /Filter /FlateDecode /Length 2121 >> stream xXKo$E }FLSH<@v$ I2j&ݙp󹪫=,B{Xq\m [A/ͧؼm5Jvo`MtBWͯm`CtFo~HaLkC,4"L]yٖәUT 6>hh\CID|cA♃Ş^zr4.G#y_Z]M=WN=j^"~u.2 %TOg Me ga ,/o܀U RMݤq1PUdt `pꠒA/&tvPa_jYŹh C*i' ;fU43+oQVpC5QU}>kXvEet-A 
Xݮn2Jג~? :`B~5A(Oܿ=Ph(f2uC5(V\ÈKJuNZ>2wM9gf?Ӳ`~])1\ $D k2(b +,LA1cx攌ct2JsMnk*ϧ{j|QYvBJ5KԠiqXg>ӮgVoDM4}4[ G0:q 3Udc w1̏B^#L17`c~' 3t.G_`b(Ӝ s籏O4dg >bLjuTn.L|JlZnG./6L}o ;U5 t~Δ\U^ܮO|y^*AҪJ*jV҇}%;ߏ]:ٔgX$?7SiN7r)̙>@컳?MroGn5:~ i;N?|!v?kx{F"X^vlv"WxfHi~Ġ3`0jyծ֍l(t.2ߺ*T]]EY >WU*[*e߬I'eU||\e>oLOh +VOKEfʏUVM?endstream endobj 263 0 obj << /Filter /FlateDecode /Length 43172 >> stream x̽ɮeKr8_3ĕʹB%A &ؤ(}2[fn~#؈(BwWs޾ݭY$Oo_S˷N#G~^؇?/S_7f/?QF_~?}/eW1o?~۟?ÖvaI|ȯ{Xv󣗽?wGs9ϝ5e?8{?OQ|Rח{y`v?%׏k9G|mYx]g-O?L_J/3X@*këk?ʥz{㷔|.xe~g|t/VuwU|~n욳v-叴W珞ytGT9x̊{u㣟w~́9w~z{מ͞gqkʶkmge٤4ŕv~yEZSb|ٓgMl⼋]|-㣶ֿeYG[)gQύ/"'F?j?G[kVc3<%w+^n/U$ۏ%CSqHmadȐ5ڮ_EN|Α"_~]tsXYuE1~ &*HXr3"r' ss]#"r G'O1PH֛9oc(Gʹs"G ~Z?Js2'X 䏊=J4;D"j|n.e-mM^rqLl.ʃL :ȶ7s4(Vק.T,ٻT}#B$BzTpX*@Cm#5׾d<\t9s%D=v &vCƕgGt۱'X9zb!e[#XQ&qL"\YeSSZuN1ʂqv7hD8|G*X]\s|oZLD`2S15c-{d̳ATy;K;Sp[i/rvmB]hRƃҎnijS,&^<⢝˞WѠP׊dAclKP]ڎ+ܨ nj}|N6m0lQ J?)66G,]yΙE؎5s`K6=ЎFL7X]#A6)3U=Mn&-[#̸p=&l!S A|۱` i׳HTL<((Q PU H!LAAE?tE*&~$ֻR@(0D,?&_Vob ҉%o~|j%H3N\Rww}>g:"{~B~=2xeZY5Rk{=)wqV &_>H]~~z.՛WڮnUۺ_i{iaFN2gi;p#Zs?&瞥=9 F_v>@WS;=v-;z~2c5pԏהcup5\plΝZ7Y$XKvJ4G iu blSd8" V/Źϔ%PS)8%JPۈMI*JnCČn8 :ٹVyaJ;rG~MäST0i2 >ʺvԞ88H1H @SJX3g?%{{gӌM 1;/@IA$ae?H)&r[PSOk}xtvA&Q C"0NۈUAĜ^zh @S E^[d4ÞBߘzD"_o5_u|*,|8_>'VCU1 # Ȁ}rr~;KNl.ԳGdAu m{J<Y90YaҌR(Ȁ=֤0Avi˳ R<=9dvdiFXr~xkb@EϙQQeު\>j|Z&c娉2᣾ThBIdbN/EP82_k{l{Qϛ ը"494`R%|i Dunvh jdPB<6u94JzciW$i/9O;$au [NC/ǀiD9HC@?Pڧ(֧سiZFX9]9g{l*X+9lF toq:,j,"{YlxAJ|t5)D:E[qtոnId c("Xu8:yR"XIStV 0Ĝ*G wЀnPԕsfuǎ!RPvcHGauCñv/7>+΅XtoB!x$S["dcŅV.ވXa GFf,do_w{X8JX G5|T߀LG.?lMɏ9:|G <>*?_3h/Qd\e:$:a1!jB;89ѻ3KFV{ p%Ԯ.hxq0U -H~A W r^~J $g~\&O%MWg e'eZؚA4vZId}Cn_W' U&|HfgT>Y5CBůR憷 dcyx() x7՗f&pY}O+ z{0=7TDE LQI`j9R:+xD$^ C KXl: @2#wиhS-yY9TM7AMjhwD.ܽA؆$"4jTd2Xa|gZDΠ9g{RQf/O{wAs!ìPU#K"Di[K cBm!a.`.v-);X$%!opb8g8x"4%Z)֋ jv=@x(;?(eAs F aۢndOHR{GaH-cIȩA!2a^vK1l{rc竿iCznȒр^T _.u8h̢(#⡄׻gJ\\;ғ/"QKzV8 '_zB)@zr $]%ZIOPғY,-=&Kי<5Eʊ t),t)N0tdJA)T+2s9HdgjЙdv% s2;W.;K'a(@tK]^f?% AtJۙ?$TSL'PtFiϹþ ߖY3JY GII}qPd$78'F1.%9}$؂R-FֻNY[2-wF"kzu?@G " SKKG@^B%xx;p H~}C+ ^r Ȏ\`\GȔ>X)L 
sD>bQ>ŠDL+20VZ|Ps"qR‚G _YM.D `V5 oD@'5́݁p7]n-XٙQ!rʰ).3P+wFݮ}89돒7 4$~Z0İJL-Zֳcj +0,nY'D"SMͲ:2D/A*ԣʹLiuRG3ZQ˛!8$hyVMPV7 ae`\H7n~[U5yViңiq$flA&/d N7cSG#!zeTp3JYέ t_ɳ?ʈZ9mpn4vl<Gց[0Ǝی6 | ;5QAQӆC2m[+ u;f[<@Ӌsw]b# 5p9*Eq!*RMg ;g{A}+yzMGxA;y9#[հ9Gʉ{w"v& atwp!Wsx ki7gK9v\αz~^b uF9{FV)EHo R XZ _] nb"kQx8. E .[MK|LvOIٔO9R_p\$eCcЀl Ura:UILNH!ɣe.JDӌ$I\t}L5 :|69ƫ I[ݒ4БdY*4IEċ9xHz$`$4CyikZ7^3K9`$ꋈQ"%P]*<{Q/"m- 1E.> 0Ņ|'ip3"Dh`)ܣaY-hIuwZ1Mru &yIH>: p$ oU(ܴE ̋#^7k}h$Gf2˭uϞieNy/"jY0*pɛ@>".5AAV!O.}}_aO/%2>Hڑ?VRYppUYUdALJ(_aH-&oMiB+m)As{Y+UAn LDGKE:Zo\ Y-| nD3?Ȗ$egh^CP& lUX8u*1ٿY&1G .Kݠ'&p1g".%^RUÉCd֩6fMjU[ g#S`FC',}jdh#qb/R+s$W^d*}])JzJ$փ8_P\ "BrPHl!7H{)"S}a!&A)#R i` dkȐ\eH2JwT64"/AmH1Y) u7bZBfn5IuHq!׋(!sԆJ5!ׁ#c&Iy y#0 i=OHI=@y+}SWϥoVR<S5{8' } [H|-ҭ@s̮ƪ/͆$u$! =b"5^L "M#Ys`D$iQ+z7\ F KTH(,< bр֫z柦ҭk SHiDM2?+zzM/걎2+DL;Dw 쿥]Zb/Z]9r듪Ugh!sxbE5X`C MK;؞6Sa]k[|iȖxʊ\r7٧Tb deU˚377ڑ 4ۋ Ƃ5ߎx4җ[Ȑb%~@D}+k<VDMo,ܑҒDVdZn]67"/Tm7jf`iaKOҽ͍u/ҽJ`"VKEQv}2Y)hS tk\7-PU_2FP̡3dn+AH߲,/YE( f !~Nҍ D ƙE2;f:~l]~e):9"ЫgHA{0enѽ &И u YFC`Zh: c!Z+n H4(trkTk^,hY!mZ "YmMRV%y]ZVsi5~$q2em +֒f}D] rh俈I~ u?w%?|/Q_Cҍ{%gs%?%l%?W_$?6(6ؕ[l q(g../`v(/Ņ#.?h_ :@ CGsWi 4i"w[\?xd.QJKJENhd {C\,Ņ#.YD#.t}D#.˧M\leEVe#.Yd#.˥eFYrڊ@S 'lV3{.QId<"+='HrG\CbE Wc"lV*Y~{"\ճWyūm͂:-G)Nt7{)F BTV>Q83x-gLbVcL΅y{g۱)wü;L,w?w>L~!CuviA `y:zo,ykڐNervq)npE[c^cs;* ޙ0 *Nӱ@?i=׌=Aawn HR{l m?'9MUeK HC*9qYŻŤ>*zJƥ`Ҽ UTM=ւbH5\Pڦa(WtC&ڱ%dEy1$.T$k_䢝.BCmPXB"z)M?;'86*MIFti BܶWJ]yno[^'MUX,a2i&UV LJJKDSVZWYI3+vm%Kz.XՁ7 "ivo8Kt@>Y.߼XFd[-VAݡ < 2^KDTܰHRB }BX -ݭI(0NmUDnQQND;ܚLTt]? FzڨBJv6oZ-Tp/ȹEDTB{Y c*y5֠P`wCz,/H5<Ġ"Mm=x5$D-kwWM=*-"%hy.m= HyUڧ< [qF㪆*u]QagGaBH [W 2\qM44*f!L,GaKՁ hm&fRIm5Ǚ]E# f2-! 
_4"0zZZ^!LZ ${k/3!2;_e͗dGE=0n:Ժ& 5 15u`]ʦX劣.hg`X$f/lΞh,"K7uv{NdFlAp6 m:3-\ع| MMW5Z3y>߲M/=/-P kuԉ7v$*P-*[[AcvnIҎ$5L#;@A"1Zf)L[8E2m-Xb eN sl-(cdXtk>ZUnzWD|/tYU6Vay/K;xEs BV+*'\cKhlզDc{[Ȧպz4nOk~55Պ62BփW5ï9Im9N3U=V3TY1FsFk[ "PE35:׼Ҽ;K]kpMUf~FD~kVyEKT{'N\kN4,QlAd\( x]নhp_yFQ:X1:5Yѧ@[4hNu X?N1Sf" WIkʬS#z+ z *3Ow3B cOL \t꺛OWwr{U+DhM<3[Kx۴`!dLłĦ[``1K_u01*fmXۯު!2ن &kmXگ>N0\ˮ0' m=eĠyútƮl_vfڤ`he֭5׹-FWJ%kNc.m_bB"ڶL,2fCsZ'_ĕj+FlXLϜ9u^"j7)Ig UJfa7fRI=>Iq2~lwwJ=8m=h!>d'txU^ǝٚS|̳T3l|9^gb5Dvf*͆ʤP.z>ꟜRmf=k@ Y~n 6mP*$iJ&`AM#4(q>gpNcœn/Rs}CBm؏CZ suNwifk3ڲ}lìdmNNMkoX6[8nwM4 f> @6tix=ѥh$kѴC)Hӝ(A4aM;u[_K=E]70k !M~hr ֡=q I!۩]*pޥ-@paws=hm7$/)x#ӰDP2KX,@s,+;R!@SNOb_2cq|3>ppB@m2*i tQpQI+2[^>t C* .2%&3r]<ӽ AQiWԅh3c!9Hk䤀>@S$AļJF7K4e- y2jCKmqSl=PdكiV,Ҽט}*ߜ5a@7#{ݙi,O í̘g2Mb!!\cE{? /D?ӰM pDf"Xz>d-J;%rɏ˰s&XuuF_uEe[NI`uYrP\ġcYwo wl$߂${1y YvW1`0<9LxUTc,F%^t/yy!K;Z(AA42[gTczg WO,CoA@^OsJʇcMò#X}r/+>UAi.=Av,'3Ez7[/|2ޙ|[3%} "#c#ӎCWh9yևc/^QG #e/^Sz ¨+ D&^F\ea?oM c3sB$hD(s[Je=SǴ00à(U'>NـBg@:KKYpd1!{YXY{8_ne C/G:gFs݄1JYLv&_4_ fQ$Iqm;fDb!_'MFD3q, V5q$#AT"4a=,#!yi x{rZG#FC%/0vGgF r"5c;vɏCމtZ ғxc *K "oJؑ^2/RJ"Me:n2ּg;v?s4o|UXY"rm5m*[˳9z)X^X"y{Iݚn6nC.#No߀K7-CnHB;r:"4 7ӴM*PNj;pǐP뜫Q# /\&x1,xUob@iqE1Rִ[K5m:"n$b7B6K%ԡK1 6dH7첷jkc0" 81B$VP2" w"72 >c#̌gK%=ma})`כ1jn6 U+goborJ;w~s`Dq,_rL:24*4Z 1o=FV+tyu4&wF+iuɁT'vҥ!^VrH`?l|5I;_7jRoE!!^n eH9u-ԫ~:'4ԍ:Ս11 ,G :G6?1't!CI2;`$"sR3ƜFBa8ݜ$u74t2I9+01%AX<$Cp{ 01*٤VvCNH!Yu\01&*)5&BNf-)íjܘ!„Pe,f0S,)7%^byQ]: 'D<?bŒl DX}Î{xH4! p 1zpaΜ,Aq X 02||y[԰3̣z>RZ֤b4]2BcL˒Rlȣ[x4= bb'x\]n0OqwKIwH)kkay<HM ɗxv8@ M#`,wW֞,4H41bԪZ XWJA朰FMrzx\\d|(lHe f096u( ɱ x^a'S[f\-ڠΠh1<|XˀςlO4?V1~_:54 >ƒ7\hsÅv789!8opk*V=\4…h % 51 `m"rȠvzAm}XY Oٰ?FCp~l!YB䁽©_j_ *[A]zZ0ㅃ7^8 B| hxY/g FnnQvbeLQFebqLP."vЍ;2ZJMxBlrHOM2dn_b®A+F4G}R<"՟<@Ӄ F4GaDhJ~(+!\JpzJ0{Õ^p`H><N7\m•7\$+IR WDSg㄀%tyV_197`9"܀%vyyŅ! 
k?Kmg{ b#He !S4x{Kg`Z /wf:$D0O`D4d"+zPoF|SB#>!LQY\*) wGgmQ˪EDAϊCAH]Y OA\lўbG+̈́@\8ЭBuwMfqPb3/$ /!}+-Yik&'YXlԳɚ5 =MI\{+r)#9_.)9 >N.{3,}Q9-no5V$mk8oNGV,\5`:Hփ,",jd?`m̧RiѺ ƭ Nh%=ªՐ꭭YxLp `#y;[%m9l09k˓Q"~ 7ti!+cGЬD$TGJ&OS&HLn8Q۷’ZTAEFc9yi{%wsEGM^dVk(kJK-,nu24yYJao8HxF#"Eik5,qn|EVO tB͐c9ǁ]v.s_[mˉjțXȶ\F6 +T:xܪ#M[6J \5,5q@pklGۏE Du^>\]{GRerg\r(OC|’!3, R{?=Va}yO߸r1vbЧC Fgԇ0 ԇHN%!"ccԨL ȒSY㚻$ G~"_K7:V9^57b8>N 125u!+ueAB?<2<V 1RBFRcᆇ-~Aģ̌ȇ@H5@giG?i'-޸9 7$`BϻINOM9n6@b1I] (I! iMHj}5t]/-`e\g!ÂaºQ1pQ~ӿծD #1:fY!b}(vZLWm,jyo萐1mk(k(Æn/ K;šT]+BvFccޡ! SSBv {>B)Xq@xS9wfEp=6^H:>JKȘ#n4?޴]ii:7fBVrm?}fվ.G8l_5J=ȱOFcMҿcPMIzD>BY;ic5{)#4匵6r+bɱ"V#a*/grܘ H1tCK-dE^SȋE8Ld:<1)s&TWSr$㤑sRzYdRxE^{YbE&XU4"([XWf3+rYde/,N#," )?1+p\l4ok<EFFy#fۄ0k<E6$JB\YdV.ȤN%9TțM/L25mӬD~$#etjr3d\FTMDT(͟;j i ;ҫ%q79DȧDKz&tውe۸L#ZiIyƽj ] oU?aEկg$E˴}dlMr7?-x>mJH'Z҉y=[MH'Τ֥'K'+҉2NԞNԞAZ/=ϕ1 3 rQʹ.$[,HM}1ȹl <sqvr\ i~sAs !LAV1+PWBCxpUml9hȮl h_# 簐8xM}+,fqXL 66.p j\L~/izUOTd_l2L5j6+s |UH0vO8oIDSpi ^ udD`!,$k1 7\#Ҟ.ҒZҁSǦSN<%K[dlL7?y%6Ȼ|N`MZ ̘ |!TSi$g"tRaCH.Z%V365a]"`ӑN.[Cf,OC8C*xS9>o,NQgRȏ"jS:)cmAp7EUˍ]pSTJE+dW -請n[5>6QX)oqM7 ^^ \6{B~n 2)fO01:<1lÂ$Vf[wcnZRCO}Dبe"tޠDWIa~[نwMFX"}bD&h($5A8~i i;fV1Vސ4Vb0ye+ =* y`A[+Jc/֪H+x.s: Y1(Ɔ\ilri\&{8%ż+~fɱ}= s/$EWEa˗Ȃ܂(~uuW!|Wȯ2L\S#S绑Q,"-QWWrLّسf5mᢿV](_ʔƜ4jrqC4V`Ll߂| ߊwy0$RشG7"y5 ֭֡9!4gUs@$( H۶9Mu]uL6J\r#nċ$2'h=sɃܐݤX>#֯%v F8m]R}n*mz/]V9LQ}d۪;HHzz4IH4:,#. 68&,3@diwz?drvmG>;G 9RޠO.,`j9b약 ,4.P^7‡ ՅpdtCzRfb0"Υy >4^+Cϖd稉Ji}\FJH~ iL,?;HLqd7l% R?*U:^? ,~#p uRRsQÑ dzW [9߲ojD8b!sF*ifJʵ %H? 
AwHR_v^D#ɗϢӆkn}N}p-"K'Il|M-/ FEhx]gf܋ vTF:Zoq6Z *Y k6-6rdZقƁ5'-~yZT݃2:8C"IgphA@5i1/5s""ÏPBM9MiD [LR@Z|ҴgOV6 ☍YZ,vi$Fj ׎l82a7 2pCoS'rE9mY!#[ˡ*!Cтݢ)XNHkZL ɞob BqD)tDQ4G4 SS%Yx}ΏXfLt"J| VH6"J<ש>z뎦wDt\:k5[ ڱN?›%Q1!Z]D VGf^ھIݡ&PYңu%Oq۶5s0q # `{(nir&h#Vc[VنŬ -#G9 %5GLteѥN ZAx#r曣 Z!$fؚ~|lDta3j)l)fzD1Bk(fC.lHS!lK1+.;0l`|وܴS˙N;lDT BrAӣýR%IARAALPTAT4=SS FR>bDd~0M]]2 IOJZTPFDeE3Q<emDT 4`NK[bG~??7_2 Za!_ tC?/O~J_K|SVʑ"@)p/r^ʠ<9r/EC"GECE܋>=L|<(}/XZ |o⬚\5( gcOs< ̟[͟4 ˆwZAM@;5dtɎ./_p`<rǗ?~wsǶNp&xb\"X)REhzs^nzjAƔ[_3vl|?7ROşo0&~JϿ}_|ߜx,~/{|0_՜ot]:{w) Z+,682 ̠H7h\_5(ȶL}_:K>s=# \u K5(tUdxDMΪ+xr ]@@.[xP:T2":$is[ p,d+)mAg 3fYj9b4N RX"_nF@67c3dBRg-xP_,VDh:LHX>^Z=d;Ĵ0'w9"w$0'"zQ [(,F_HDF~ Ys@dyAL4KbdGdy,rc}%Cf#u>Λ4 6,⩰:A(6+#y8&aV"==\ Ag)qiۍeFD'es[EDRLz7J@l*6;W: :S+/8k&vd7"`^gRZyJ:gR[k稞gd"2 "tʹͺhۃՎ(sg?iQEOdzK>ik${-Ų Aڜ鵤[w$` ALis9\lmPۀVUGr[,U$&5 Mj搟 #D#/bbA7K5шIK2[!ȹes385l͛mjǶn=Q ɦ"HOD67޵AHDŨZuqm $:6""Wp ř+`u 0UƁj|aih(*b25AՈ#;e0ȍݑb(rE {=S L(JeHg9`8H1,"?Q%5`ULB`@ ؈S EZ&H H\[QG\'iGZ"ޓf/ N<M MX_ט<pnx A&$#Ӭa_oX D= o$!`At]ϵ߅`M[C+2|C=y%&ӱ&} kСi.0|V]ypD pnE zS^7 Z4ac z c W y0+Y(w26G{31{ ^`er+,>K6.Ba\rYY5QH>6Kf gis"&++/b¹&/b¹Jm^|Y✘, *,vD("&ePn)A_z#DqQ|łD4Qj"&|A_DuQ|ב xqzŻ!HɕUW_,\մSsU2l<\E o{"\"i8~*>Q*<㿀JL_}y\<" K?#7=_u5m|oW}@Ozr$\^߫DAIzfBb~K4+>?d1Y1cHGQ!9f˿<^ XO̵7R8FO?ӎn5y"|d?㿠Uz !i.Ml{ V#Xbt; '_scBhǍ7e_|qTc#oB EUr ~;ɰ` w2,/_!0{z載&׎~8>VϏi+.#k?_/W<JGj</~fnhson&ς|pq Uy](F~,yUUe^w7k~#6>rWԦH~!e=go˩>KP$#8 X[[S|A U { 3qBPCI1o/HӨ yܮhlʖ ^qA g i7bz҈$&y+-&vqP54<TA8u99̗ q ;#=H lyVgҡ[)Amyv^5!/'[ Ib !||n?||[y^´ $Ր'D">@V`B8vwQǝC1G؅r ɸ)6b@)M|^A2>8B3?]pF;ǜ[cBH"R(kBA V9!M  *\C%CШP/jcK =J!dh5 ?;Z&#,h(Жrwm;RuC8Hcϭ$tC6$Zl<Zˉ qh:]VTׇ|=mNnR(Cimv|YNn Ck ,Ao-%-jJ†bOAvrYUK:6DZ7lh8 "BvCIJ"҉ԑI[OPl"͆0Z+=f0hk\ҰH6͈d j@ ʊݐHf75MPnH˕'ovCJ0Ĭȶ)^n%De)Mc^qܯvs~#:z)G2cGeq[Cg_C_y!Sš }TlHC(2{!U2R:Gc(O]k Q lb9ύt¤CT0}ofcJ8CiLnԊ7D'^# uqX5Z݈KEGȈ+=V8 * ]QRMUCXj)QRSZ hU7)cg=cX ~{ /ڋ|**>LuV,}ªQ6ߙި;..u}Q׿,irSWwL]8R\'V0VEg1쳶(g_T_m~b:րg{e3Jo6IY\h N+I2`JmB^lV^%$/&0RU2Q}#T/ Uu9$OʨbpClZ9-J'jZTu.T,m׈Lq 
d"&uQYݼ,JPI-٬-cZ+s?▶yB48Bc<+OrKe4.hl߁2d{#@J\V\"M*alBf/<ޟ,|C[vၾp g6mމ[u(, ŭp*DbаF- , Ǧab=~ae @#5?3*z/uw dD^4]QgyI.iH?hgHF+Aj+CXؼ {L dFTh?-mUX2vadF[dSՒb1,쮪m#U 3tf!bhoѶѳ(0\!FkgZfLI݌4뢆˴q g :$z| cьH@Cd_+5U *F4‹W˜?7!o\ S[B2T1]&@F 3@7!$v۠'}Xa3~Ic<10m%W4uu.->];G%1g^䰦N>?aԞd Qcw9J.z悫Yz6%^5 l 5KVC ij bճʀoʜmnD R!Pq!vʺj$ABI8C'.uSeOPvKM 5!k7Ҏd%}ʹ%}RW%{.R;Ljmu⠗7ajw 1 &%̮GGJNǦwƸzEx 6mn] MM-Ѕ >gEXVc~LO~;F2vF Q?15"dmp: 3¾U* k' Sǭ&P2I:r0/ݳ\s5Y(qXJ. 4d@'_*Z g Dmax 7eN,D?'5WVu|Ra:3ҁy##,CI)ܙߑ!&3rֆ P%ѰGpejb \ov+j?dKLt0#ca9 91s0iяya )::>)';4pvb,|YF1t1~4a9zNt[f(fp+T 珈ҔbCn= RO!uM;KǏXyTK(/s?5L¥k8 i7"Bl[үO &I{U!R T32/͸@}2S_ԭ8Q_HZ i@¥՞]*o ڥ3`@ehypVZs6as:,M)(9$C H'"-n;-I:Ҕ*~Yd ʤLiJI۸OKe%ce2 ڗqu`h$|fuLFR>:8,SVsnUGVcݩt]թx!ZB3u q6́"2:H;Vs"%KoepϝR7#i 4`$ŀ"G aB6)5+Sd!U@2 4;'23ЁԀgx<1ňo u)6y {V*N1 N\>~[FWK*4 +|9ý@IߪA)" >=I'KT%l9uݩQ` pS*G{.%ttOuwOD~&_)/ߡyZk5Ȕ*~3ug +VY%o.͚Xo.ƀK:u]Dki?Zٟ;8i烸X:YPKQ ՊF Uԇynjܐ! U/ z&:|1TIHG^rg^HT@hE9j=xf 2_7rtZ:^DBr" _B}*TCAt"d̕G[#dDO0#%22bY _:.Nw/ VxV'.d5<;u˒>T-\Z-I&oTTYbF*@0&8|AeɩgU `>+5D<2G5@ԏ! 8KRJkYMǥ 䋌 l( u4= ruIKAh#Cv:%ňe,%S6,%Rܚ_d #iz6Oe a&B: tazPB8pH%Fpp`b!~*}/}ʔy~>{Vr(5,ܯÞ BU3U; B5,BҔ.'J']uq/=Y'NCs^$08T<ǻyȩ@+cj2Ęrz2ƘZ~Fz82ʘ>H#A 4]uLsjڴz~tWJm@]wȳ@J Cl#+Ay ~T\(> tn)לV} dCt[|&zK(6ړC' d70rhFqH#ToƘ J_}&@p*\*IReHE!y !( =\U)~AdVDO>:چ/Ut& _q]^߅NZ4؏ءRaZ1،z@֬(a% D_:t e2=җٸ:ˍZ| W#ٟkFoCnv-ɨOB1ͪ^<f*`vцeFY ym]`X,Nw .웺+_aͭ'u(Pq$)9Ca-~?J+8|׫>N OEbqxaB'L:R8J= 4Qqj"IKsԚDÃP!ͷ_t YrZZ$_ΐ~9^s4,: 7A1 9; `"#gzK%Pd%}*dLD! 
v:!DڥYxy/.dCLMג~dt,nZh6md X>)۞d6ZM۵+-m[KsUZ_ckkJ FP] ZgFy5:DžtO,`۪oQVA%",.Jش<*;Ӥ,[B@nA 1ץ\BQ1i !&ZޅTIKqI=&ՈZ 34QӐcTš L> 1FaJ5]zyԪqYQJwD.X ̬W9X,Qry.g !Iƻqe( \퍰#x \4zQ I'Le0M øP0tFED,[St:`E/QKK: p=['{r'%u\ i-ʖ&-ԣ=&*j,sw"lZ<.LWzU@{,D4䪼ED\L +Yy {+ kfBO-\ lbzШO$QtZmW:KJ-U &]5#!m|<zU)\ci0&&$U I<2YK*=A$d;ưdukAb%9 O(1RLd-*AKBwb8!nJf Jd6x"SNמj"1XB#et[䂨J6)G߾!UFXrHbLEO#H2VerBDh;@ ^d\Hr 6 ݏxb֎}~c\&vP+r>\@{/t ^V7Zf@cdvTc+]=2v$B%<̿3Taq WUYk)7'xd^$/v@&2/љ1ƁPdiۃ蚤8)Jn9 n&L>1gO {ta8b7әVӝoJcUD^6 iڎ$`/HRyzIo4+:c q#BsB q'*d Xҹ%+VXz1\ԑb` ԰^@4RR 6 -d2Ȕbd DhQpYl@/Y'`{6AaEҚɩWۯ&:Y2ʈQ* [Es>NԔQL,1 -<^Rz .}לHh"EE&\uMJ 0"ԄȐ4;DFR(UPڮ&4"bBYCFz Kz 94 ew% ,)OK.4"lKː߬X:[<1 RR XLiDĔ֙jJIM`f)hRn^іFDF*R0t# vaK v Sz lJU\h\Έd) gDpx10f;%G!f2b&YThya3UO1)x&ԯ:w_rU,h!!:s8Ɨy koF"=zfeERP~#@Ak<?g1x!~.\?;*2zA7/}So$AݿF|xɂ )A]>o^?J?=B% ?牶 a[#%'N9£*糯߽{/~xe{gނ1_(3F4ԣb hGQ/xm( Q8pT8<уZLҤDض%%:CX])x?x/ (żd:BT=63Rf;$oSSPG1(}zC-.TyHoz3C!qÁ],4: Q~XP?UwN2-Tv3.L 7Gu"Pv5HpWN+*Ԃ;nC3Ι'49qT#jjk $d)L:R-DƖP`&r;͈ GJ N H-)nN5&$grp\U"$Q@3&P`LeͿf^P.Py!U<$C:jv"R4-Φunm4,P HTq0#d[++'R91K:ê~5Y4#PɦQ6yUjіՖjȆxe#-t:$F: '5nwBV!AP !tXV3`Jq[(nk[ A9HeMiBGF)fX1" ^nKIGӛ$etQK#"EJYCKa1uвB :/IFsPyԎ}~p5uPmL -s9'iYm[* D+&iHZBraٸĚh#Ck||6Vmf*2qAcuKBY?Z?B֤BVŲT8EP[VkՅvԄr*&JED*p ~S@@&2hQ1Kީ4F1y&e:r"jU ׅtx= o$!E3)W썭 ;d4ձtBF) m]HS Bh6<^-}T96+a!Z*i̍fB4--QQSEU{ITFt7$k⒫Pm!yN@CD,!MPf't@u#DM%1)/TЩŞvĩJb3Ukܻr$m$:0Pl+o&¶Lf6 !W(- );1 Y [{cbu +΢N>ĐG )c"~ڟ(CQD 9STӧ;G}-+:TH.Y)~|FM~p x#LR>Q}yk^=>ZV}Omw?u#U^_χt4mky@}$k{ÙP t}ᑜz>\~U;S~]sw+?!<5g.cܯt SJr5x)_^\(WT{/oP_y^r ?=ֈnXw'ܷΞM|t7K4|/CH*H޶p=^B1ߚeHfio?ȡ+},n՛,Yq_/ƛ܎ÿ>>~!j|!Hx], Iilc] ѩ$<|Z>>u<|fs#ٞ& )r;;o/EϞu:e+Z~}\oϬk>Ζ2ȍi_oĞf\Zo9n L)Xd~o#{ ?K2ڷo>O⥽ g&w=#ׄNvi ]u59\8#7b֓7~ߘ{5+,u¥[/&~׏X!NthBD+j[ľM{Ya 2u)x)CRFx]LMuk/l?jzi۳MlN\i}ÓK+?~s}{>D[9/|_~t.H ?Ưџa"?wW?&?r7.?{>-\G􏿹辿6>ܿ' ~{>ܽߢܞ*)gs_?wWíq?q?^08}δ];?~{?ay>|yw?U|BJ.n|Vٓ|:aog_2a[}u p_-`?Gka|/qUIĩ_>v^gYOQ"gž׍˗/PR,Gu\ǀ}l<1E]tEݯǧD z\ǀֻh?ǖǻ9 'dIup?k? 
ERIqຳHcyǚLlydžQ|M_Jw~8vįs|ﯵw|by (noLv-ް 1*p!PIawot|;MF@v5y/H \2H`疿璯gJEޯ?(+Ҡ7Ҭj":^(+@7ri[L{ E}WxxzSY8⇧T 0?%'S$e'IdD\-{ҷa{GHգGUzDq˰ƟyHɅ’MHz rzL2Ћރn(en7ڷA~?RoI43꛽`*>oc>+\f[B'Kyt|[e@ўK >ᑮ})pO6XkD S7q>Ѧy}矸gw'Ku{@*a[:~f^i}D]S^͢"|O.i1I=͑si&l)g6R ?hJ<սO+Yߝq?=?~<^:tIK9;T09endstream endobj 264 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 282 >> stream x10ѭ ŹYendstream endobj 265 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /DecodeParms << /Colors 3 /Columns 128 /Predictor 15 >> /Filter /FlateDecode /Height 128 /Subtype /Image /Width 128 /Length 2961 >> stream x+-9wTuQ8p8ḣp87Cp! tUwHSSKvRITD'zߥzG@‘pzt{.B٫'B?A?`@]/ .a& B\+$MQ`xDJY8##X+]T &S,?ujD$Ncx"M[Lvwʞ5VMG',K+BxIdda͑jwZ9`}gXR=xju5]>4_%Yg}O^C:wf>qc?[>ڮd(<9nYsֳ{j/ߏ">$RUOiql45))QG }R4QadR?$YדX.Tbz5`:ݹ$JcsؙXR"D{n?qwӏ t%Τ?cӰso_yi3>;W8zϗ`of!V~HrqD_ nZLI1.Կ v>2)P'2\o>-S6ɳot%Oxm*xM6^$VpM;R>YaOiV0(gwگJ60/Nk>RM +dr% b* {,|Ap@O4T=՝bR}F1$+o-b`j6KrfK(ܿ)>뿃9Rxߪ )x7O998u3O 'wo |[ R&) DX&NN0'&-ŲLLSə`j$iy "|@䝃D. 
Ff2I:u}[9T1¬0 ^ģ: Vq\!@RĊ%EY'iv1y JFZc'vy~ҋ#ڃM3 Cπm|+OL7rrK'[ߚ"l?0g0\- B GV -GM]1&:4* gn#| MPb@Ӱ#yb@ u]oRU{k( r |e!Rh '1;ײeyU%I듿"noQVNP0d߮zGm.&xj֞A\HodH##GM$H,r*W=)w{pRO:7I gG/'fl.o37p3noY\ @t3Pas&o@B@Lתkr zqÍG ѿ.0Vf XN(A 6v2f<J:Z?-BO}%}Z8Ok&;X na/-STY+ En> q7u}jweO C+$[x3_zTV# JmNtZ7cYP T]z'I-PkCP!{:w'tue(8J}T>4Lǖx %_Y0_ĹO Jpmh;\1OܧcXJ t?9jIqߏ evM`OUʾ>< xEZeދ9gAPg }@0!0d]}mh-Q|n42xkiE&H@D0ڳ3_|srߐ `jᘔH'>% aqLwG "ε1H ĢIyw&u7O Cߊ2Go202A&:d0P!NV3 %fNendstream endobj 266 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 128 /Subtype /Image /Width 128 /Length 2488 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?ZUx-ӥ~RGVEhյTkҼnYFj82Z4zdTҥ~Al]J]#){l,%饈53'>PՒhGB֍(헥~VG畤h'J۰̑TwE[Zcsc:<6Hm=^qpo_E'q2sRݿ&pZ4iڿ)UKgV%yںpNa\$ⷵL &_*%~ ZEڢT&))nծDCv&=j#T!VYۿ1Ya :MF/jUQAy+g.y7cʼ7^i{֝TaZҶ+J4GJ߰;m?aZbl`|OP_jzyg9WNn5i('q:x'W>)݌}}pcTpF85Y{,\1UX+\Е/=T҄ .bĕ~#LUWvԑՕݫwNQ/kչE_!/~f3K0"k*ck^}X'_ GT~0zOσJ]NGctnEqW"ްa5~ 3LE1˞f&fpԍ:R(j'*ΕNqT%jC#iWXQVKkJNVSNS_C*XSq)rP3?ֱZc09KiQSo2c" y$?x95w [jZ0't/l+Zvҹj洁pppk.պVj^UUk1T5nj1EE(,5weiZjĭTj= hԠuuNrFl*6N\8*i5)U#5Ѧj_^^&[ *2}zf~ǂf j]mM{0zx[Ҷ~`gYՎUZ?J؈<~ٿJ޲=xՏ h\5pk+s>6H3U)ZNBgVONN Py*=j-[yI$f3Djc_[ X7͌Wp=IMumepw]:CWf^%TVfVtgе*-Uhmu|I=s۔-yR֍(&|k]Q:fMuWdֿ_H[Օ'zϙy߭QԧإWZRAڙJx=ExM?< SiE %_||ɦAn8U֎-s}M|{O AFErljʵUhŕe\kR^{'Z|40ۧ{5=j׵N6Gҍ4\kZs˷",#V5ݑψŒ*֩&ݱ?SNӣ Ͽۇ꽠~iSTWc:Nrjԝk&`|iީJ,U$j֧endstream endobj 267 0 obj << /BitsPerComponent 8 /ColorSpace /DeviceRGB /Filter /DCTDecode /Height 128 /Subtype /Image /Width 128 /Length 2479 >> stream AdobedC    %,'..+'+*17F;14B4*+=S>BHJNON/;V\UL[FMNKC $$K2+2KKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz ?5&̠c?2kCQ?X .n :Ԟ+J|{^jKq!U(/zW(?ù1X56?FhִVZث͂+z鬮Nu8"A{]5 
,\LT[l?O2p[WETe-ndzV\bSb!Z pwL.T5fVE9cRz0Nw3ZAW%VA]gu6zwxdU0=+ieMҘD۹b:dY|F y_݌c{Kh9fU+Wn+65FR> F4y=ɬ׭zYXwG椉cfkb|blk|ѵj78+árU,nsZq <>eWnI]P$]8)RS^9yk:u˸Nm)\0 Йj^S[қA_6>r=]~RQ[Celֵ^:WJGFFD˃TngL9FRw$V}K VhpM-k<\m+Ӥ^>*ci+15<OnqZ5ĒaE_߭a!6Z?U{DZ>.GXš:,^շ,9U$CZuOG+R(B;Jp1<+3pXxseWtRUYj`z): ׭h*]I7 Y&fNBh^9h!L"sucV+"YLPEhr 9DGj]dxJw]OŽhnV{l# d֘_0Uo^O~Tsn ݇-؏S-'] )[NJWn u:Sڲoz9_C^~+q45 syl= 7ŻW֤oLj3Gp08osE_5'(>Y+3ibcV%ͿK/8ԑu~j& (kЧkV:6{ބxkcҲ-W8|*<ɯΫFe̘,ǩ> stream xZKoG rؙE8ÈMΆI(JZLJN|oOuLWͨhJFXZ5U*{3|sp&fgod|3.(gYRμ qLpfEP7Wt0QM1ڹvQY&Rv1rΕ5򝐲Y%6FUM*24XhisY`!l4<A2[ҹ7D* E'b!2z2fM ˹/#1Bxp]05I O#|bx$$ne'DȻE#zm -e-B۶-#1x}B0DVxלq1n&ٳf78"nAoJ#Tޤ BмklTڹM>t yNK43vlbӪphK6X*. wQ\ΚJ`~}(텧_.Y .Ukթ`JjA5J*+({;D~*6Z0hp^s`Ҵh]8[q>L) EiUaF WZv4W9ׅ,Ws a$]4disYռmV4KbZNe[*E[wXMRl-=[PJjmF+ Ø2b8ʧ`T/V $H%VO ? _Xiw9dpk$WH^DOm6*# CL5+=?"vwK\- z|nP<ʧ;ʰJV[<]x{GLOўxI9-9:-K#%/PDD] |)4wSɧi!|$+AGx&OѼgڜ$ڳYKdV_q| P+K- ۰ |۽#&/6em. sy5̸'] (Y>MfH4}ӑ~rN)y$̮H.\!gjؾ+wK?diosrh,qFIpǞhR=f O_LlQ/IHH:$㩮}&iU #}KGDR C`IER!ijol`}OMs-{Ց'R2ơM=}_F>V٧eU$?+XO9īQ'TEw*}C4wiZ`OCz=&:Pk;<% <$Vė͑l¨&ʧqZކGyody$Hz(7;c/A_|C7QI_\$+wRJTW+466Ml^ e:`z! 'n*% Z[GZp̃4GQN7eV1d+L=OkCsA@* -Э,ecjG  &GFu]w? {s`/14-&,+\ʛ11qGHɻ%3@h-'Ad*In[>)(Es OKxaU9fY0+fd|VB_Of\rutD{WwEAy˜QYɱ F)ꍣ [7ᠶCk`g}:>1aTH~=$G2<۽ŧ)+s1@KI,9MD\kx?$̵ ipڌ~I !M*3RRYMԀyWLgY3LF]nN:.BSylI;;fGendstream endobj 269 0 obj << /Filter /FlateDecode /Length 160 >> stream x]O1 y? aI%]2~1 BH.q^ud"~|k%7Ree¢M' h~ te!נY4V1$'dSvBBJM[H4-MrG{&S|CSoendstream endobj 270 0 obj << /Filter /FlateDecode /Length 3448 >> stream xZKo {L=VG !`89%YVVhv~A~v"d[y#6YW_Fr#_՛:D?n緛ߜate"˓x=:?6g'χA2r{jD5ʍAH? [CQhC+ X%o7Ig[M1z_fRw$0H}n0TRhG$k+fߞWOPK9eAC,wT+ѹŞ]@*liƏы4;7rtF?i !d4n8ʀmo*ǧJ;_2 F*ZfNu0=z&̜B 79[`!g>,Bh`bZu}(N Zw5jyo&3"Nr H-AΠ=~1i͠Zr!+zڶ`~#E4l>ʜc)s:ΰZmOa+=0ղc޾# Ha yRuUR)vE@ lmj%Eꂩ&v"K'33=cEay8m3K(OHOX"2fۯypԳM/a ]QoNEGMDl~wXHhܜ|CtUL &ΝC<"R0O %P#(cjD&W(t+DrFI{IN57BUl@Xk!o5-{^H^ElvT=t wҗR}Jm\,;9\3XJMK6f`9P! 
chzl1kEu9x<"8e-Vjg2f[J$1rNJG)wuK~࢒cu)琥gDL+J|ȩaI&C(%;B}]t%~g$MAI k/lxErL"7O!d?SYSOYt}V Ph"}O1(5׈3C kW0$ٜ܌(y:z;XLpNNAJ8\g=ŔN6|`ɤ4m!.޷E^M>R wm wy pnjAuȬkOmXspLj:% GA,I я8Vx&̓ zw5b_Ũyfu%nMS{9m-lHRV޽URvt%7yj-X+_>jz$-E5֊r#92+> stream x]O10  ]ZUm?eB:gׁm_Xց$i, x`FS}endstream endobj 272 0 obj << /Filter /FlateDecode /Length 3275 >> stream xZKs 7ٔ879q(٥(L.k2)ʦu_t4zvW]Itr@Jz_OD.O~:w~ Ѻ`Vg/Oʛz]Gl}Fs6/Q'?cR{=!pG_Ĝ至o`0p*k22IIA>u9I>5C>R YnxA=lf`꜇k)lE?<&GS DZ1 A[ z'&@6͖M*4oܓ/=դ܏䆧k&Q'u^cff%נM;Q aؽ{Pu-W-mNyWrs;2,Ni8b&^6aSpwEank(ϒ} ֹ͚:8 (rQj"C@Wk G#ip|vKs+'ȗPθpQ̑DN& 0"->^#s"i頴CPd oSR\p-6 SGRrթG*;Z&:XAqx%\% |[*#>XRT&[ c ecVG^Lyy(M݈{a*l=f:np-kWfd)+JJE1gŋ5z>>!xi"`"+fpU/)\,Q1L|\`$j(ZgX~4HScF)2VV[a@676٤r dj-ckqzѲf\l Ҏ39;5?<ۮ\53e+[M&pCE2hzҵ@sJ$jĩ_ 6?m]!9ZO-XmvuQ岩,#ю}We9m[3r)4v, q=lgi#kdZV4EQHwWna2 ޭ7aU۔Dþ)w ²sxB7 &bs(*YGHg>u’t[p(7AȨ<&(h") si3(0-uoOX!|0fuVНnl mʹ<1:]f;5dQ[:3 axSD^s "BiYg49-5EBLw t2i&N+bSX@SQ:8 ;> "={^ݾn N4Qͧn[2̆z$n=-%T.vIϦ a`~5_5JnۢFhɷŬHpieFw-gK{ղ){Qt-mO T*7[FL];E4߭hD(F-唢K^SDӥvD$Z5`wѱ!^թ1X $鋳ځendstream endobj 273 0 obj << /Type /XRef /Length 335 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 274 /ID [<3147155e764c01897cc69afb438d3c20>] >> stream x;/awXfvck(4 @h\dB'B$4.D!HDIDD)hmE}4ssΞm-c\2?1˃J[նᶄ-OmKG{__)Y+=dQZ"Fezw7+ZU#eaF;'A{Ys/:tb{jeu̜RL|ϡ o=vŹ[2:n H+_71륞]tJg.|d.dLolq>D}>73 ʜ} ; endstream endobj startxref 398007 %%EOF spatstat/inst/doc/replicated.Rnw0000644000176000001440000012616112252324024016476 0ustar ripleyusers\documentclass[11pt]{article} % \VignetteIndexEntry{Analysing Replicated Point Patterns in Spatstat} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\indicate}[1]{\boldmaths{1}\{ {#1} \}} \newcommand{\dee}[1]{\, {\rm d}{#1}} 
\newcommand{\boldmaths}[1]{{\ensuremath\boldsymbol{#1}}} \newcommand{\xx}{\boldmaths{x}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Analysing replicated point patterns in \texttt{spatstat}} \author{Adrian Baddeley} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \begin{abstract} This document describes \spst's capabilities for fitting models to replicated point patterns. More generally it applies to data from a designed experiment in which the response from each unit is a spatial point pattern. \end{abstract} \tableofcontents \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Introduction} `Replicated point patterns' are datasets consisting of several point patterns which can be regarded as independent repetitions of the same experiment. For example, three point patterns taken from micrographs of three pipette samples of the same jug of milk, could be assumed to be replicated observations. More generally we could have several experimental groups, with replicated point pattern data in each group. For example there may be two jugs of milk that were treated differently, and we take three pipette samples from each jug. Even more generally our point patterns could be the result of a designed experiment involving control and treatment groups, covariates such as temperature, and even spatial covariates (such as image data). This document describes the capabilities available in the \spst\ package for analysing such data. 
{\bf These capabilities are still under development and will be extended soon.} Our aim is to fit a model to the data which explains the influence of experimental conditions on the point patterns. The paper \cite{statpaper} outlines a method for fitting such models using maximum product pseudolikelihood. This has been implemented in \spst. This document is an explanation with examples on how to use the code. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Overview of software} The main components needed are: \begin{itemize} \item the model-fitting function \texttt{mppm}, an extension of the \texttt{spatstat} function \texttt{ppm}, that will fit Gibbs point process models to multiple point pattern datasets; \item support for the class \texttt{"mppm"} of point process models fitted by \texttt{mppm} (e.g. functions to print and plot the fitted model, analysis of deviance for Poisson models) \item some tools for exploratory data analysis; \item basic support for the data from such experiments by storing the data in a \emph{``hyperframe''}. A hyperframe is like a data frame, except that each entry in a column can be a point pattern or a pixel image, as well as a single number or categorical value. \item four example datasets. \end{itemize} \section{Formulating the problem} We view the experiment as involving a series of {\em `units'\/}. Each unit is subjected to a known set of experimental conditions (described by the values of the {\em covariates\/}), and each unit yields a {\em response\/} which is a spatial point pattern. The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a pixel image. Three important cases are: \begin{description} \item[independent replicates:] We observe $n$ different point patterns that can be regarded as independent replicates, i.e.\ independent realisations of the same point process. The `responses' are the point patterns; there are no covariates. 
\item[replication in groups:] there are $K$ different experimental groups (e.g. control, aspirin, nurofen). In group $k$ ($k=1,\ldots,K$) we observe $n_k$ point patterns which can be regarded as independent replicates within this group. We regard this as an experiment with $n = \sum_k n_k$ units. The responses are the point patterns; there is one covariate which is a factor (categorical variable) identifying which group each point pattern belongs to. \item[general case:] there are covariates other than factors that influence the response. The point patterns are assumed to be independent, but no two patterns have the same distribution. \end{description} Examples of these three cases are given in the datasets \texttt{waterstriders}, \texttt{pyramidal} and \texttt{demohyper} respectively, which are installed in \spst. \section{Installed datasets} The following datasets are currently installed in \spst. \begin{itemize} \item \texttt{waterstriders}: Penttinen's \cite{pent84} waterstriders data recording the locations of insect larvae on a pond in 3 independent experiments. \item \texttt{pyramidal}: data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain, 31 human subjects grouped into 3 groups (controls, schizoaffective and schizophrenic). \item \texttt{flu}: data from Chen et al \cite{chenetal08} giving the locations of two different virus proteins on the membranes of cells infected with influenza virus; 41 multitype point patterns divided into two virus types (wild and mutant) and two stain types. \item \texttt{simba}: simulated data from an experiment with two groups and 5 replicate point patterns per group. \item \texttt{demohyper}: simulated data from an experiment with two groups in which each experimental unit has a point pattern response and a pixel image covariate. \end{itemize} \section{Lists of point patterns} First we need a convenient way to store the \emph{responses} from all the units in an experiment. 
An individual point pattern is stored as an object of class \verb!"ppp"!. The easiest way to store all the responses is to form a list of \verb!"ppp"! objects. \subsection{Waterstriders data} The \texttt{waterstriders} data are an example of this type. The data consist of 3 independent point patterns representing the locations of insect larvae on a pond. See \texttt{help(waterstriders)}. <<>>= waterstriders @ The \texttt{waterstriders} dataset is a list of point patterns. It is a list, each of whose entries is a point pattern (object of class \verb!"ppp"!). Note that the observation windows of the three point patterns are {\tt not\/} identical. \subsection{The class \texttt{listof}} For convenience, the \texttt{waterstriders} dataset also belongs to the class \verb!"listof"!. This is a simple mechanism to allow us to handle the list neatly --- for example, we can provide special methods for printing, plotting and summarising the list. \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(waterstriders, main="") @ Notice that the plot method displays each entry of the list in a separate panel. There's also the summary method: <<>>= summary(waterstriders) @ \subsection{Creating a \texttt{listof} object} For example, here is a simulated dataset containing three independent realisations of the Poisson process with intensity 100. <<>>= X <- listof(rpoispp(100), rpoispp(100), rpoispp(100)) @ Then it can be printed and plotted. <>= plot(X) X @ To convert an existing list to the class \code{listof}, use \code{as.listof}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Hyperframes} A \emph{hyperframe} is like a data frame, except that its entries can be objects of any kind. A hyperframe is effectively a two-dimensional array in which each column consists of values of one type (as in a data frame) or consists of objects of one class. The entries in a hyperframe can be point patterns, pixel images, windows, or any other objects. 
To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). \subsection{Creating hyperframes} The function \texttt{hyperframe} will create a hyperframe. <>= hyperframe(...) @ The arguments \verb!...! are any number of arguments of the form \texttt{tag=value}. Each \texttt{value} will become a column of the array. The \texttt{tag} determines the name of the column. Each \texttt{value} can be either \begin{itemize} \item an atomic vector or factor (i.e. numeric vector, integer vector, character vector, logical vector, complex vector or factor) \item a list of objects which are all of the same class \item one atomic value, which will be replicated to make an atomic vector or factor \item one object, which will be replicated to make a list of identical objects. \end{itemize} All columns (vectors, factors and lists) must be of the same length, if their length is greater than 1. For example, here is a hyperframe containing a column of numbers and a column of \emph{functions}: <<>>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) H @ Note that a column of character strings will be converted to a factor, unless you set \texttt{stringsAsFactors=FALSE} in the call to \code{hyperframe}. This is the same behaviour as for the function \code{data.frame}. <<>>= G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]), W=list(rpoispp(100),rpoispp(100), rpoispp(100)), U=42, V=rpoispp(100), stringsAsFactors=FALSE) G @ This hyperframe has 3 rows. The columns named \texttt{U} and \texttt{V} are constant (all entries in a column are the same). The column named \texttt{Y} is a character vector. \subsection{Hyperframes of data} To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. 
The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). Several examples of hyperframes are provided with the package, including \texttt{demohyper}, \texttt{flu}, \texttt{simba} and \texttt{pyramidal}, described above. The \texttt{simba} dataset contains simulated data from an experiment with a `control' group and a `treatment' group, each group containing 5 experimental units. The responses in the control group are independent Poisson point patterns with intensity 80. The responses in the treatment group are independent realisations of a Strauss process (see \texttt{help(simba)} for details). The \texttt{simba} dataset is a hyperframe with 10 rows and 2 columns: \texttt{Points} (the point patterns) and \texttt{group} (a factor with levels \texttt{control} and \texttt{treatment}). <<>>= simba @ The \texttt{pyramidal} dataset contains data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain. One point pattern was observed in each of 31 human subjects. The subjects were classified into 3 groups (controls, schizoaffective and schizophrenic). The \texttt{pyramidal} dataset is a hyperframe with 31 rows and 2 columns: \code{Neurons} (the point patterns) and \code{group} (a factor with levels \texttt{control}, \texttt{schizoaffective} and \texttt{schizophrenic}). <<>>= pyramidal @ The \texttt{waterstriders} dataset is not a hyperframe; it's just a list of point patterns. It can easily be converted into a hyperframe: <<>>= ws <- hyperframe(Striders=waterstriders) @ \subsection{Columns of a hyperframe} Individual columns of a hyperframe can be extracted using \verb!$!: <<>>= H$X H$Y @ The result of \verb!$! is a vector or factor if the column contains atomic values; otherwise it is a list of objects (with class \texttt{"listof"} to make it easier to print and plot). 
Individual columns can also be assigned (overwritten or created) using \verb!$<-!: <<>>= H$U <- letters[1:3] H @ This can be used to build up a hyperframe column-by-column: <<>>= G <- hyperframe() G$X <- waterstriders G$Y <- 1:3 G @ \subsection{Subsets of a hyperframe} Other subsets of a hyperframe can be extracted with \verb![!: <<>>= H[,1] H[2,] H[2:3, ] H[1,1] @ The result of \verb![! is a hyperframe, unless you set \verb!drop=TRUE! and the subset consists of only one element or one column: <<>>= H[,1,drop=TRUE] H[1,1,drop=TRUE] H[1,2,drop=TRUE] @ Currently there is no method for \verb![<-! that would allow you to assign values to a subset of a hyperframe. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Plotting} \subsection{Plotting a \code{listof} object} The plot method for \code{listof} objects has formal arguments <>= plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL) @ where \code{main} is a title for the entire page. If \code{arrange=TRUE} then the entries of the list are displayed in separate panels on the same page (with \code{nrows} rows and \code{ncols} columns of panels), while if \code{arrange=FALSE} then the entries are just plotted as a series of plot frames. The extra arguments \verb!...! control the individual plot panels. These arguments will be passed to the plot method that displays each entry of the list. Suitable arguments depend on the type of entries. <>= plot(waterstriders, pch=16, nrows=1) @ \subsection{Plotting a hyperframe} \subsubsection{Plotting one column} If \code{h} is a hyperframe, then the default action of \code{plot(h)} is to extract the first column of \code{h} and plot each of the entries in a separate panel on one page (actually using the plot method for class \verb!"listof"!). \SweaveOpts{width=7,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(simba) @ This only works if the entries in the first column are objects for which a plot method is defined (for example, point patterns, images, windows). 
To select a different column, use \verb!$! or \verb![!: \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) plot(H$Y) @ The plot can be controlled using the arguments for \code{plot.listof} (and, in this case, \code{plot.function}, since \verb!H$Y! consists of functions). \subsubsection{Complex plots} More generally, we can display any kind of higher-order plot involving one or more columns of a hyperframe: <>= plot(h, e) @ where \code{h} is a hyperframe and \code{e} is an \R\ language call or expression that must be evaluated in each row to generate each plot panel. \SweaveOpts{width=9,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) })) @ Note the use of \code{quote}, which prevents the code inside the braces from being evaluated immediately. To plot the $K$-functions of each of the patterns in the \code{waterstriders} dataset, \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(Bugs=waterstriders) plot(H, quote(plot(Kest(Bugs))), marsize=1) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Data analysis} \subsection{Computing with hyperframes} Often we want to perform some computation on each row of a hyperframe. In a data frame, this can be done using the command \code{with}: <<>>= df <- data.frame(A=1:10, B=10:1) with(df, A-B) @ In this example, the expression \code{A-B} is evaluated in each row of the data frame, and the result is a vector containing the computed values for each row. The function \code{with} is generic, and has a method for data frames, \code{with.data.frame}. The computation above was executed by \code{with.data.frame}. The same syntax is available for hyperframes using the method \code{with.hyperframe}: <>= with(h,e) @ Here \code{h} is a hyperframe, and \code{e} is an {\sf R} language construct involving the names of columns in \code{h}. 
For each row of \code{h}, the expression \code{e} will be evaluated in such a way that each entry in the row is identified by its column name. <<>>= H <- hyperframe(Bugs=waterstriders) with(H, npoints(Bugs)) with(H, distmap(Bugs)) @ The result of \code{with.hyperframe} is a list of objects (of class \verb!"listof"!), or a vector or factor if appropriate. Notice that (unlike the situation for data frames) the operations in the expression \code{e} do not have to be vectorised. For example, \code{distmap} expects a single point pattern, and is not vectorised to deal with a list of point patterns. Instead, the expression \code{distmap(Bugs)} is evaluated separately in each row of the hyperframe. \subsection{Summary statistics} One application of \code{with.hyperframe} is to calculate summary statistics for each row of a hyperframe. For example, the number of points in a point pattern \code{X} is returned by \code{npoints(X)}. To calculate this for each of the responses in the \code{simba} dataset, <<>>= with(simba, npoints(Points)) @ The summary statistic can be any kind of object. For example, to compute the empirical $K$-functions for each of the patterns in the \code{waterstriders} dataset, <<>>= H <- hyperframe(Bugs=waterstriders) K <- with(H, Kest(Bugs)) @ To plot these $K$-functions you can then just type \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(K) @ The summary statistic for each row could be a numeric vector: <<>>= H <- hyperframe(Bugs=waterstriders) with(H, nndist(Bugs)) @ The result is a list, each entry being a vector of nearest neighbour distances. To find the minimum interpoint distance in each pattern: <<>>= with(H, min(nndist(Bugs))) @ \subsection{Generating new columns} New columns of a hyperframe can be created by computation from the existing columns. For example, I can add a new column to the \code{simba} dataset that contains pixel images of the distance maps for each of the point pattern responses. 
<>= simba$Dist <- with(simba, distmap(Points)) @ \subsection{Simulation} This can be useful for simulation. For example, to generate Poisson point patterns with different intensities, where the intensities are given by a numeric vector \code{lambda}: \SweaveOpts{width=6,height=6} \setkeys{Gin}{width=0.7\textwidth} <>= lambda <- rexp(6, rate=1/50) H <- hyperframe(lambda=lambda) H$Points <- with(H, rpoispp(lambda)) plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4))))) @ It's even simpler to generate 10 independent Poisson point patterns with the \emph{same} intensity 50, say: <>= H$X <- with(H, rpoispp(50)) @ (the expression \code{rpoispp(50)} is evaluated once in each row, yielding a different point pattern in each row because of the randomness). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Exploratory data analysis} Before fitting models to the data, it is prudent to explore the data to detect unusual features and to suggest appropriate models. \subsection{Exploring spatial trend and covariate effects} Points may be distributed non-uniformly either because they are intrinsically non-uniform (``spatial trend'') or because their abundance depends on a spatial covariate (``covariate effects''). Non-uniformity of a point pattern can be investigated using the kernel smoothed intensity. This is the convolution of the point pattern with a smooth density called the kernel. Effectively each point in the pattern is replaced by a copy of the kernel, and the sum of all copies of the kernel is the kernel-smoothed intensity function. It is computed by \texttt{density.ppp} separately for each point pattern. <>= plot(simba, quote(plot(density(Points), main="")), nrows=2) @ Covariate effects due to a real-valued spatial covariate (a real-valued pixel image) can be investigated using the command \code{rhohat}. 
This uses a kernel smoothing technique to fit a model of the form \[ \lambda(u) = \rho(Z(u)) \] where $\lambda(u)$ is the point process intensity at a location $u$, and $Z(u)$ is the value of the spatial covariate at that location. Here $\rho$ is an unknown, smooth function which is to be estimated. The function $\rho$ expresses the effect of the spatial covariate on the point process intensity. If $\rho$ turns out to be constant, then the covariate has no effect on point process intensity (and the constant value of $\rho$ is the constant intensity of the point process). <>= rhos <- with(demohyper, rhohat(Points, Image)) plot(rhos) @ \SweaveOpts{width=6,height=4} \setkeys{Gin}{width=0.9\textwidth} \subsection{Exploring interpoint interaction} Still to be written. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Fitting models of spatial trend} The command \code{mppm} fits models to multiple point patterns. Its syntax is very similar to that of \code{lm} and \code{glm}: <>= mppm(formula, data, interaction, ...) @ where \code{formula} is a formula describing the systematic trend part of the model, \code{data} is a hyperframe containing all the data (responses and covariates), and \code{interaction} determines the stochastic interpoint interaction part of the model. For example: <>= mppm(Points ~ group, simba, Poisson()) @ Note that the formula has a left hand side, which identifies the response. This should be the name of a column of \code{data}. \subsection{Trend formula} The right side of \code{formula} is an expression for the linear predictor (effectively the {\bf logarithm} of the spatial trend). 
The variables appearing in the right hand side of \code{formula} should be either \begin{itemize} \item names of columns in \code{data} \item objects in the {\sf R} global environment (such as \code{pi} and \code{log}) \item the reserved names \code{x}, \code{y} (representing Cartesian coordinates), \code{marks} (representing mark values attached to points) or \code{id} (a factor representing the row number in the hyperframe). \end{itemize} \subsubsection{Design covariates} The variables in the trend could be `design covariates'. For example, to fit a model to the \code{simba} dataset in which all patterns are independent replicates of the same uniform Poisson process, with the same constant intensity: <<>>= mppm(Points ~ 1, simba) @ To fit a model in which the two groups of patterns (control and treatment groups) each consist of independent replicates of a uniform Poisson process, but with possibly different intensity in each group: <<>>= mppm(Points ~ group, simba) @ To fit a uniform Poisson process to each pattern, with different intensity for each pattern: <<>>= mppm(Points ~ id, simba) @ \subsubsection{Spatial covariates} The variables in the trend could be `spatial covariates'. For example, the \code{demohyper} dataset has a column \code{Image} containing pixel images. <<>>= mppm(Points ~ Image, data=demohyper) @ This model postulates that each pattern is a Poisson process with intensity of the form \[ \lambda(u) = \exp(\beta_0 + \beta_1 Z(u)) \] at location $u$, where $\beta_0, \beta_1$ are coefficients to be estimated, and $Z(u)$ is the value of the pixel image \code{Image} at location $u$. It may or may not be appropriate to assume that the intensity of the points is an exponential function of the image pixel value $Z$. 
If instead we wanted the intensity $\lambda(u)$ to be \emph{proportional} to $Z(u)$, the appropriate model is <>= mppm(Points ~ offset(log(Image)), data=demohyper) @ which corresponds to an intensity proportional to \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \log Z(u)) = e^{\beta_0} \; Z(u). \] The \code{offset} indicates that there is no coefficient in front of $\log Z(u)$. Alternatively we could allow a coefficient: <>= mppm(Points ~ log(Image), data=demop) @ which corresponds to a gamma transformation of \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \beta_1 \log Z(u)) = e^{\beta_0} \; Z(u)^{\beta_1}. \] %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Interpoint interaction} The stochastic interpoint interaction in a point process model is specified by the arguments \code{interaction} and (optionally) \code{iformula} in <>= mppm(formula, data, interaction, ..., iformula=NULL) @ \subsection{Same interaction for all patterns} In the simplest case, the argument \texttt{interaction} is one of the familiar objects that describe the point process interaction structure. 
It is an object of class \texttt{"interact"} created by calling one of the functions \begin{center} \begin{tabular}{rl} \texttt{Poisson()} & the Poisson point process\\ \texttt{Hardcore()} & the hard core process \\ \texttt{Strauss()} & the Strauss process \\ \texttt{StraussHard()} & the Strauss/hard core point process\\ \texttt{Softcore()} & pairwise interaction, soft core potential\\ \texttt{PairPiece()} & pairwise interaction, piecewise constant \\ \texttt{DiggleGatesStibbard() } & Diggle-Gates-Stibbard pair potential \\ \texttt{DiggleGratton() } & Diggle-Gratton pair potential \\ \texttt{Fiksel() } & Fiksel pair potential \\ \texttt{LennardJones() } & Lennard-Jones pair potential \\ \texttt{Pairwise()} & pairwise interaction, user-supplied potential\\ \texttt{AreaInter()} & area-interaction potential\\ \texttt{Geyer()} & Geyer's saturation process\\ \texttt{BadGey()} & multiscale Geyer saturation process\\ \texttt{Saturated()} & Saturated pair model, user-supplied potential\\ \texttt{OrdThresh()} & Ord process, threshold potential\\ \texttt{Ord()} & Ord model, user-supplied potential \\ \texttt{MultiStrauss()} & multitype Strauss process \\ \texttt{MultiStraussHard()} & multitype Strauss/hard core process \\ \texttt{Concom()} & connected component interaction \\ \texttt{Hybrid()} & hybrid of several interactions \\ \end{tabular} \end{center} In this `simple' usage of \texttt{mppm}, the point process model assumes that all point patterns have exactly the same interpoint interaction, (with the same interaction parameters), and only differ in their spatial trend. \subsection{Hyperframe of interactions} More generally the argument \code{interaction} can be a hyperframe containing objects of class \texttt{"interact"}. For example, we might want to fit a Strauss process to each point pattern, but with a different Strauss interaction radius for each pattern. 
<>= radii <- with(simba, mean(nndist(Points))) @ Then \code{radii} is a vector of numbers which we could use as the values of the interaction radius for each case. First we need to make the interaction objects: <<>>= Rad <- hyperframe(R=radii) Str <- with(Rad, Strauss(R)) @ Then we put them into a hyperframe and fit the model: <<>>= Int <- hyperframe(str=Str) mppm(Points ~ 1, simba, interaction=Int) @ An important constraint is that all of the interaction objects in one column must be \emph{instances of the same process} (e.g. Strauss) albeit possibly having different parameter values. For example, you cannot put Poisson and Strauss processes in the same column. \subsection{Interaction formula} If \code{interaction} is a hyperframe, then the additional argument \code{iformula} may be used to fully specify the interaction. (An \code{iformula} is also required if \code{interaction} has more than one column.) The \code{iformula} should be a formula without a left hand side. Variables on the right hand side are typically the names of columns in \code{interaction}. \subsubsection{Selecting one column} If the right hand side of \code{iformula} is a single name, then this identifies the column in \code{interaction} to be used as the interpoint interaction structure. <<>>= h <- hyperframe(Y=waterstriders) g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7)) mppm(Y ~ 1, data=h, interaction=g, iformula=~str4) @ \subsubsection{Interaction depending on design} The \code{iformula} can also involve columns of \code{data}, but only those columns that are vectors or factors. This allows us to specify an interaction that depends on the experimental design. [This feature is {\bf experimental}.] For example <<>>= fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group) @ Since \code{Strauss(0.1)} is not a hyperframe, it is first converted to a hyperframe with a single column named \code{Interaction}. 
The \code{iformula = ~Interaction*group} specifies (since \code{group} is a factor) that the interpoint interaction shall have a different coefficient in each experimental group. That is, we fit a model which has two different values for the Strauss interaction parameter $\gamma$, one for the control group and one for the treatment group. When you print the result of such a fit, the package tries to do `automatic interpretation' of the fitted model (translating the fitted interaction coefficients into meaningful numbers like $\gamma$). This will be successful in \emph{most} cases: <<>>= fit @ <>= co <- coef(fit) si <- function(x) { signif(x, 4) } @ Thus we see that the estimate of the Strauss parameter $\gamma$ for the control group is \Sexpr{si(exp(co[2]))}, and for the treatment group \Sexpr{si(exp(sum(co[c(2,4)])))} (the correct values in this simulated dataset were $1$ and $0.5$). The fitted model can also be interpreted directly from the fitted canonical coefficients: <<>>= coef(fit) @ The last output shows all the coefficients $\beta_j$ in the linear predictor for the (log) conditional intensity. The interpretation of the model coefficients, for any fitted model in \R, depends on the \emph{contrasts} which were applicable when the model was fitted. This is part of the core {\sf R} system: see \code{help(contrasts)} or \code{options(contrasts)}. If you did not specify otherwise, the default is to use \emph{treatment contrasts}. This means that, for an explanatory variable which is a \texttt{factor} with $N$ levels, the first level of the factor is used as a baseline, and the fitted model coefficients represent the factor levels $2, 3, \ldots, N$ relative to this baseline. In the output above, there is a coefficient for \code{(Intercept)} and one for \code{grouptreatment}. These are coefficients related to the \code{group} factor. 
According to the ``treatment contrasts'' rule, the \code{(Intercept)} coefficient is the estimated effect for the control group, and the \code{grouptreatment} coefficient is the estimated difference between the treatment and control groups. Thus the fitted first order trend is $\exp(\Sexpr{si(co[1])}) = \Sexpr{si(exp(co[1]))}$ for the control group and $\exp(\Sexpr{si(co[1])} + \Sexpr{si(co[3])}) = \Sexpr{si(exp(sum(co[c(1,3)])))}$ for the treatment group. The correct values in this simulated dataset were $80$ and $100$. The remaining coefficients in the output are \code{Interaction} and \code{Interaction:grouptreatment}. Recall that the Strauss process interaction term is $\gamma^{t(u,\xx)} = \exp(t(u,\xx) \log\gamma)$ at a spatial location $u$, for a point pattern $\xx$. Since we're using treatment contrasts, the coefficient \code{Interaction} is the estimate of $\log\gamma$ for the control group. The coefficient \code{Interaction:grouptreatment} is the estimate of the difference in $\log\gamma$ between the treatment and control groups. Thus the estimated Strauss interaction parameter $\gamma$ is $\exp(\Sexpr{si(co[2])}) = \Sexpr{si(exp(co[2]))}$ for the control group and $\exp(\Sexpr{si(co[2])} + (\Sexpr{si(co[4])})) = \Sexpr{si(exp(co[2]+co[4]))}$ for the treatment group. The correct values were $1$ and $0.5$. \subsubsection{Completely different interactions for different cases} In the previous example, when we fitted a Strauss model to all point patterns in the \code{simba} dataset, the fitted model for the patterns in the control group was close to Poisson ($\gamma \approx 1$). Suppose we now want to fit a model which {\it is} Poisson in the control group, and Strauss in the treatment group. The Poisson and Strauss interactions must be given as separate columns in a hyperframe of interactions: <>= interaction=hyperframe(po=Poisson(), str=Strauss(0.07)) @ What do we write for the \code{iformula}? 
The following \emph{will not} work: <>= iformula=~ifelse(group=="control", po, str) @ This does not work because the Poisson and Strauss models are `incompatible' inside such expressions. The canonical sufficient statistics for the Poisson and Strauss processes do not have the same dimension. Internally in \code{mppm} we translate the symbols \code{po} and \code{str} into matrices; the dimensions of these matrices are different, so the \code{ifelse} expression cannot be evaluated. Instead we need something like the following: <>= iformula=~I((group=="control")*po) + I((group=="treatment") * str) @ The letter \code{I} here is a standard R function that prevents its argument from being interpreted as a formula (thus the \code{*} is interpreted as multiplication instead of a model interaction). The expression \code{(group=="control")} is logical, and when multiplied by the matrix \code{po}, yields a matrix. So the following does work: <<>>= g <- hyperframe(po=Poisson(), str=Strauss(0.07)) fit2 <- mppm(Points ~ 1, simba, g, iformula=~I((group=="control")*po) + I((group=="treatment") * str)) fit2 @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Studying the fitted model} Fitted models produced by \code{mppm} can be examined and validated in many ways. \subsection{Fits for each pattern} \subsubsection{Subfits} The command \code{subfits} takes an \code{mppm} object and extracts, for each individual point pattern, the fitted point process model for that pattern \emph{that is implied by the overall fit}. It returns a list of objects of class \code{ppm}. <<>>= H <- hyperframe(W=waterstriders) fit <- mppm(W ~ 1, H) subfits(fit) @ In this example the result is a list of three \code{ppm} objects representing the implied fits for each of the three point patterns in the \code{waterstriders} dataset. Notice that {\bf the fitted coefficients are the same} in all three models. 
Note that there are some unresolved difficulties with the implementation of \code{subfits}. Two completely different implementations are supplied in the package; they are called \code{subfits.old} %(used in versions 0.1--1 and earlier) and \code{subfits.new}.% (introduced in 0.1--2). The old version would occasionally crash. Unfortunately the newer version \code{subfits.new} is quite memory-hungry and sometimes causes R to hang. We're still working on this problem. So for the time being, \code{subfits} is the same as \code{subfits.old}. You can change this simply by reassigning, e.g. <>= subfits <- subfits.new @ \subsubsection{Fitting separately to each pattern} For comparison, we could fit a point process model separately to each point pattern dataset using \code{ppm}. The easy way to do this is with \code{with.hyperframe}. To fit a \emph{separate} uniform Poisson point process to each of the three waterstriders patterns, <<>>= H <- hyperframe(W=waterstriders) with(H, ppm(W)) @ The result is again a list of three fitted point process models (objects of class \code{ppm}), but now the fitted coefficients are different. \subsection{Residuals} One standard way to check a fitted model is to examine the residuals. \subsubsection{Point process residuals} Some recent papers \cite{baddetal05,baddmollpake08} have defined residuals for a fitted point process model (fitted to a \emph{single} point pattern). These residuals are implemented in \code{spatstat} as \code{residuals.ppm} and apply to an object of class \code{ppm}, that is, a model fitted to a \emph{single} point pattern. The command \code{residuals.mppm} computes the point process residuals for an \code{mppm} object. <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) @ The result is a list, with one entry for each of the point pattern datasets. Each list entry contains the point process residuals for the corresponding point pattern dataset. 
Each entry in the list is a signed measure (object of class \code{"msr"}) as explained in the help for \code{residuals.ppm}). It can be plotted: <>= plot(res) @ You probably want the smoothed residual field: <>= smor <- with(hyperframe(res=res), Smooth(res, sigma=4)) plot(smor) @ \subsubsection{Sums of residuals} It would be useful to have a residual that is a single value for each point pattern (representing how much that point pattern departs from the model fitted to all the point patterns). That can be computed by \emph{integrating} the residual measures using the function \code{integral.msr}: <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) totres <- sapply(res, integral.msr) @ In designed experiments we can plot these total residuals against the design covariates: <>= fit <- mppm(Points~Image, data=demohyper) resids <- residuals(fit, type="Pearson") totres <- sapply(resids, integral.msr) areas <- with(demohyper, area.owin(as.owin(Points))) df <- as.data.frame(demohyper[, "Group"]) df$resids <- totres/areas plot(resids~Group, df) @ \subsubsection{Four-panel diagnostic plots} Sometimes a more useful tool is the function \code{diagnose.ppm} which produces a four-panel diagnostic plot based on the point process residuals. However, it is only available for \code{ppm} objects. To obtain a four-panel diagnostic plot for each of the point patterns, do the following: \begin{enumerate} \item fit a model to multiple point patterns using \code{mppm}. \item extract the individual fits using \code{subfits}. \item plot the residuals of the individual fits. \end{enumerate} For example: <>= fit <- mppm(P ~ 1, hyperframe(P=waterstriders)) sub <- hyperframe(Model=subfits(fit)) plot(sub, quote(diagnose.ppm(Model))) @ (One could also do this for models fitted separately to the individual point patterns.) 
\subsubsection{Residuals of the parameter estimates} We can also compare the parameter estimates obtained by fitting the model simultaneously to all patterns (using \code{mppm}) with those obtained by fitting the model separately to each pattern (using \code{ppm}). <<>>= H <- hyperframe(P = waterstriders) fitall <- mppm(P ~ 1, H) together <- subfits(fitall) separate <- with(H, ppm(P)) Fits <- hyperframe(Together=together, Separate=separate) dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together))) dr exp(dr) @ One could also try deletion residuals, etc. \subsection{Goodness-of-fit tests} \subsubsection{Quadrat count test} The $\chi^2$ goodness-of-fit test based on quadrat counts is implemented for objects of class \code{ppm} (in \code{quadrat.test.ppm}) and also for objects of class \code{mppm} (in \code{quadrat.test.mppm}). This is a goodness-of-fit test for a fitted {\bf Poisson} point process model only. The model could be uniform or non-uniform and the intensity might depend on covariates. <<>>= H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) @ See the help for \code{quadrat.test.ppm} and \code{quadrat.test.mppm} for further details. \subsubsection{Kolmogorov-Smirnov test} The Kolmogorov-Smirnov test of goodness-of-fit of a Poisson point process model compares the observed and predicted distributions of the values of a spatial covariate. We want to test the null hypothesis $H_0$ that the observed point pattern ${\mathbf x}$ is a realisation from the Poisson process with intensity function $\lambda(u)$ (for locations $u$ in the window $W$). Let $Z(u)$ be a given, real-valued covariate defined at each spatial location $u$. 
Under $H_0$, the \emph{observed} values of $Z$ at the data points, $Z(x_i)$ for each $x_i \in {\mathbf x}$, are independent random variables with common probability distribution function \[ F_0(z) = \frac{\int_W \lambda(u) \indicate{Z(u) \le z} \dee u} {\int_W \lambda(u) \dee u}. \] We can therefore apply the Kolmogorov-Smirnov test of goodness-of-fit. This compares the empirical cumulative distribution of the observed values $Z(x_i)$ to the predicted c.d.f. $F_0$. The test is implemented as \code{kstest.ppm}. The syntax is <>= kstest.mppm(model, covariate) @ where \code{model} is a fitted model (of class \texttt{"mppm"}) and \code{covariate} is either \begin{itemize} \item a \code{function(x,y)} making it possible to compute the value of the covariate at any location \code{(x,y)} \item a pixel image containing the covariate values \item a list of functions, one for each row of the hyperframe of original data \item a list of pixel images, one for each row of the hyperframe of original data \item a hyperframe with one column containing either functions or pixel images. \end{itemize} \newpage \addcontentsline{toc}{section}{Bibliography} %\bibliography{% %extra,% %extra2,% %biblio/badd,% %biblio/bioscience,% %biblio/censoring,% %biblio/mcmc,% %biblio/spatstat,% %biblio/stat,% %biblio/stochgeom% %} \begin{thebibliography}{1} \bibitem{baddmollpake08} A. Baddeley, J. M{\o}ller, and A.G. Pakes. \newblock Properties of residuals for spatial point processes. \newblock {\em Annals of the Institute of Statistical Mathematics}, 60:627--649, 2008. \bibitem{statpaper} A. Baddeley, I. Sintorn, L. Bischof, R. Turner, and S. Heggarty. \newblock Analysing designed experiments where the response is a spatial point pattern. \newblock In preparation. \bibitem{baddetal05} A. Baddeley, R. Turner, J. M{\o}ller, and M. Hazelton. \newblock Residual analysis for spatial point processes (with discussion). 
\newblock {\em Journal of the Royal Statistical Society, series B}, 67(5):617--666, 2005. \bibitem{chenetal08} B.J. Chen, G.P. Leser, D. Jackson, and R.A. Lamb. \newblock The influenza virus {M2} protein cytoplasmic tail interacts with the {M1} protein and influences virus assembly at the site of virus budding. \newblock {\em Journal of Virology}, 82:10059--10070, 2008. \bibitem{digglangbene91} P.J. Diggle, N. Lange, and F. M. Benes. \newblock Analysis of variance for replicated spatial point patterns in clinical neuroanatomy. \newblock {\em Journal of the {A}merican {S}tatistical {A}ssociation}, 86:618--625, 1991. \bibitem{pent84} A. Penttinen. \newblock {\em Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method}. \newblock Number 7 in {Jyv\"askyl\"a} Studies in Computer Science, Economics and Statistics. University of {Jyv\"askyl\"a}, 1984. \end{thebibliography} %\addcontentsline{toc}{section}{Index} %\printindex \end{document} spatstat/inst/doc/shapefiles.pdf0000644000176000001440000022243212252324034016507 0ustar ripleyusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3335 /Filter /FlateDecode /N 76 /First 615 >> stream x[[SH~_o3SSwik*U\H\AeX-~-ٺ-">}wn݊`!Xi&g2˴X cfd6 X4H,!Zm1XȬ)l "`I$R,, &3, RYPE,4NbM [܀(4*11MLF4i9- \A &tP`dxc,PRb: eG$}.$ qvS( B@$W0#A<oi@P^7y2J;eM>'*W\,Aiv^vC$d$Oϻ!XE_k?RNFhfOC/̳0'l2[|rxjt8Sst/KҲzd`@ho~|W]Y߳bk6r[N6 LMh|]zѳdinC N#~Ub/<9 Nv@[zcׯd!1dY1a2K.:|ɨx"P$`f:%|Q Bgu:LIc3xk5F?5q h*/#-*}TĒqla] *e_\˚Κg 7b@du/x:HR2˧ND)bƔ7]%yMdğIFUeћ++dHa*j M!D7Jif+ݦ9S\ g󫶄LY_l8M vi'$>D$IDQUD"*P5F o)n;Edo\%Zd)ZL/Z'N3'G>j&fIԁSX7Du~Ol,LU͙S oiGj̉{w"uaF?[lXaO`h-® ʼn6RQ_9pY](ӕ 7YRwi! 
bJǴ[ @4CT,ͷ^EKS'G '8MZv_|%YN G;glD>ؖ\e2'5QĈlӊ`$>MF%E* L,rHQzݷQM(Z%|EJ %C %I6f9 &I0 FA'EpL4"'a^P/RA3C9pt!B\ ޓj rpkv% eNvȅrWa.+­"\,K!+i!'u~NZBmU |t[7;ΥKKU ;lݖ@*D D s?/sdi`$ryVC%Oij+UCM`%waF-f> '['2Yɨ 3{L G׮yb$22] +w'#LսeX%ֶm V3WpsBYO?x[Y1)0 JVyO3Cqg̰9o9yV+xUhU1#}СjKO:uDr.[/q4EB@[mP4s{ZiZ,  ,|r])KHlwKE`WeLQr/hzEcaVUUWn{&8 ipOH20f3^]yq(Y\ҫ`8C>c.Xj_.0͐۬,<t܍]o<}eqUo$^WO"ԋ}Z@ HkM O^?cǺnMݮ[_|nyK {: ښ[-ji˖Rjz:R1S5z͙{Au+Az{ap-$L:C͓4wP/ÖNu4],һB uj*QE:x]epUKKt:{eS˧spwLQͨ5J5홝wێ辵`Bm|y~<}C>-I]U$; tkUSضe>=^T"U^mYt,Brovq_:э$zz^UdYqV0Zήղvj[{S*-n|9xs L[ۻ^Ixq` zl>(t3DtfJ|)H+]&Vܚvzm^gZbwXf7NU^!6δ[* sb~sc&6Zkuc;NPLj%Nw_0#T?^;9hX)SQY52(_U޴:d,G iGcf:}hȺHg̬6'v6 '8%YӤe<,> stream 2013-12-12T20:12:11+08:00 2013-12-12T20:12:11+08:00 David M. Jones CMR17 endstream endobj 79 0 obj << /Filter /FlateDecode /Length 3179 >> stream xZIO#x3ڗc q_C P3"-R{^4G:LX[tR^)W<]&NWjuw_WkZW:F{*[J;$VAA9~V &lzQ>~V*޷#-9v*9*='b}!ocX.KA8oYW2d`V YMToW!I*wJv1BZ{n? wʳ>iWMNO9j=~Bλ@d>C6?mxEr⧅gU Ce[ +d6lLOtmV)[M% 2ۢe{sP0hk:fgS5D㤔מh0cWd,.6V %nE VgԴ6m +bmQw)5˱9-uGmMgObv&yЫہT>6&B(ihe-;4PT­6C)4y1MwF G~21daCVY8ָֹ\5~] O)@DrI xH\p8jxg>3iQ whؚuget0>7_gIެ?Iu0z(DD0ng0 }{ANkcN@bOKZ (Ti4!c܆n풹4$2 '-= ݍzʼnB`#;><l0t08`dhc*H9NY #$J[) ,̇B$T෇d.1Ny#Fc%^=;l%U a}(³?Ք" `+2bP#&CnP>I8_ų86 +ܖ1ds_ J$?(4JV#4ڸjT)Js8j},g=)#Y9+ {Ȼi6 r 3۔303q":LU!iZ^D8G)unoƍ c0BD6-@bІ }nP-b s9;9Gnxg7uZ_/J3N%+l #8ϒt5e307c̆L98 dJ8Mmm v} I#Jf() QʸPbgq5H#S[ a ~='Y5lV#;]T.*&Y4o OŲ@bH~]]u{T4pџa4]cߔP&W#]2!dJ>}XC75t*9J).GI=gڗEgju#Tfc97߶/MD1TIQ0sdk r Ml "G)kÐx3ƻ}H؎,qAlHLrlZ<֔ a/IpT>AIny.vNX<*Ö:0&31iM?wzӯPErqJߏD/?]z\% WQY*:vw-"+0?n?2}|h$bۥ2HJȆ1/  3%SOJE.U% f!CYEo#[S˩JlaK%t}$C7 1GչNK 1}5STսi$+avb%J5κ>SW%D}Jʤ"c S)%ieUH}:Wk];`Nb2T';#cL+~LNfE]bf-21Xyp߆6{!û/ۋ=Ցrx<[2&_O^}sqd1%/d0˚elE%.}ƺM(sq녶Z}.C$f?k3ip@f2S* =1(NJs*J7}'⏻M}?[Ϋ3}P m @)AiP]jX&)$K6C75?Cj1/8w v V@TleGb>DtCGjԪ;z?__PBhTZ>c1VUZi`RyƢan:23?Fh2H4xp!KSF}GcL}(:xW RHöa4w֟ey H_  ^=]Տ*1tՄd Q& 7/^ܠ=؃֔"%i~_WZ |@桴SyR{*Gڱ=Y( \<݅x \qެL&j]8ۭ]*HI9ę 4wP"G9cƬ+A‚j|lHvʅ,<*No'qӆmІ٦<~zvav 41(]S9T#ԓGau;磝o gz#c2$I4H lbDo)*q4y%׶bRPw`h RaG0(31{yCJiSW 
Vߵ,хl} *kC_;U݆ E<5CV :]-H[jendstream endobj 80 0 obj << /Filter /FlateDecode /Length 3525 >> stream x[KoD. ~? 1G%prX)"eI>虮]ɂV??3Lߜz_e*gU됆`yz0>_ܟXo`29njeV8C3$ {qCǜRy)Smͻ|\M<Ĭ8#t0f\0dꗵV؁V@h`p avyI֌!+g=as$ }U"%wu򻺁<_Rz+V79jM8AĕF't#ox< ;u" Qj-,v4:F8{ڎr6M@`*ƣޤ`q+d@6~s>ddkabHIGݚ0Ga6'L~@CTG~hwkdԃ ۂ>h:J:C̀fC{5և0։fmhOcBϗw/ @IOpOrpT_] 5 X+MDl_eW;r)N;qs>!,*UڶV~4U,}@:jdRpjB.S/‚d'Au?uʤ H@{Ҙ 헢rlr5 xB$02gɟ}ɡyIhr e]¶n3[AQYh I)i)A %e{筈Ѕb 5}P ;ŜK[N;-]Sq2❖g^߾M,lvR9Tڑ$ >#5؇\#*AJl:0Ibz3츌~5/2@A (-")Bm2 8D<1 ,(p5#߱pE()FtPb6Y̦ {^wxkU6cmnjSs sq&5YʮnTNN3gF`":x^jʰ)-(qb9!ηdɂ2Nvxi0Ft2jE=7]O_?V=αZͦxVb@;*&NH]HTiuvڽ@ly _Wpҳ>ljyg9탳\Rق({]ڼyە}yW@t{W]"3$zb4U% 5$[kr)niPd/kgB76ZMf3~M[ rUk\wT p.~讽?:tv1iY 8MxLsSt/:AVvL@$ğ55(NK5%nˉwa_tz:q~orH=1 %(θPy3K2JƄ嬜KF}Sk˕ %6GiǺ]jA9%{a:P앯҃~fxG`Xȸ\7Avf&6A`#hݳNOd !ٜtð$NeKc :] W]3y359f뷵+p_RֶΓ#NommHrEլ}wg ! u~1#t7|ܠCj7\eG3vgkb\`w*;U K_&>5);+ܵ455@9E6Q 7cL*|]QolY6:L%5mj:WwLZR߾ c?^d`&)%&2닟qq/?dendstream endobj 81 0 obj << /Filter /FlateDecode /Length 2659 >> stream xZ[o\ ~#~:[xO~)mӢ(4iE⇣K\[>35Wz5pH/ߜ ?ۓӄD-O~:忋O0ZzqzuBKB'8=rzϮ{TIΘFɽӺh?5 '[?L)>9wEc>})AFZ0Lևfܛ>ؽN3g,\as)Xx3_T󉁚Wn_h2Jʧlc#ыLDWLPSmmjO2&T{A}0[$T^N(LR\>?;:ζҡJ޻lf)JUr#7M;7nroem1moK${XA qQ<*B6:7­&V*DBaƂW]jy[ɋJq8z'lW/E+,DTrW*䥸1{:*ey%_~/v{L޻JnD؁~X64z|e<9uga`e悘}=ĩUZso>̜߈Q/ L<:yfnōGYX{#޾3q®m|ވ;'0~R˲B,+HA>ZWױ俏mqֲk!+y#}'<~p ܖbDRI],O=d$زȗcFZvəs?YJ/jO݁M`Z/$ _n/rƴqu7Ř)ƴ0`lV+e ڸwf25 HMя(5""u @BGvB=D g ;F;y=^DeS(rxM l,>8;m^l32F 29ԝt2$XkheB0Y@̈́WNӷkYv=*(.ߦTO "3"5{ͧnhMOiNBw7UPzRY)g5Q7/% #d>1d mah2 Vczx  ziU%7ⲵWFcW\\9TL$BQ z); pѽeQ oH\bl2FK;+( !<'|J!4q[ce)sTK)ckȟ,H*2 ZA$%B gᆖxf۴/sDe2"i6 fe^R&A8 r\1C^{;Uȍ.U llnm?Zπo|;u'G"=awlXCimmY.BiMq$3TK<*6TrWX͙<6LTwm#k79fN[> B|\W*M5sr i~8NCM[`&T\AധzcAL%\xuJ%ڨD4JX{gsfu[gZJPZe-$>+i*: *g0b-bMKƱE9x/u l3HsdgvM~x)/$۸p[vY| |ߛ'>``Qɪ))\x lak ѷ{8?z(%_4ETgE^E$e˓3O**f :QѠ>w/__N_6nuMȎ!7c 7}^1ܰx{ uFQ!(<2OhN 1 5}2Aj3Z ܷmѢ44S+M .un.ڻA8X|5\eζ<)wC[ 7C:BeQk)8czgM-R!H)Bg0˱ZL//D$e܃ʷ1.%ƲKU T3_AJ76nI)oWX-T<*ݤ?J¾GE@;;3_H79 Lr#Ψ}΅{P=Z?W${`yq\}VZ,rxp -`oyϙ\PV0.6Fk}>f5FVRF A!9N#ӭi_`ICxg>syVG#w|.HGp·J/Yt| ??<"Aendstream 
endobj 82 0 obj << /Filter /FlateDecode /Length 4577 >> stream x\Yo~Wq߇ap` M?ˢHb~}{{{vI =5风N$Ox/;}&NL^>ްLQDyrz,}*Oʆk9 ZM던tە %$2Z_Ih/=A3-p-\VLec/ {'2;=ށܬ!Fx& -(+8ean03'mhVYFLF̷$I/txX$!Fg}ڻ*h‰RtAɚ]~J$TL%W6W߮7FD 9GX8yE!NϿK jwT u{Ƀne\nJ8noV낿o# Wv4|#UgV߬䤆}^y Jf;iw;x BB`H̖鮉F - W70MeL>' C5Ba{Z*IhpRꠉyj;`y? ]U9KҢu Al_qQˌ\!wKB*وRP;<(jUia[yzAhrKADNJEP, &tlsZf !Qَ9mfI߼8r~wW$- 5ʦ`q8]j_q1YXhD s(sHV3#\< \6NC@EeZ CC2MY!UcCٻ`h ͬ p=AMij\0($ Aa0{ߍRRd,lN0cWOnZ׋/ua[0doԍr{nOf uxW//\?p7f8muie2F(Sɘws1ƣuN?bz$crΆd|Cqg Z=ܵ>mQWgp|?g0`_2`(× xH+sܔ9uX=@9;<]0S xA%܀n#Pe4nq)FΐQN8|.:+2SFt(Yˎ B(üCma)W|؍bƸ@hds) T?+/q29V1$]3@c~i;@P Gea"_fɭpm#+&H)M$y*:xGԗZw "e8 1-%͌A}c>}Qm^˼[=f/ fsc40 6J$->AT3jhx[yWa"t>MH-v5gEk{i5n#9r^zO?C6Rvwh ;̤T,:eUÏM}<q (U?G<=@ 49$*9藵_(%mQZ/sO;mo}Jw>8 { ^K,>KBJd ۓaT ]-Y^qʫ5Q5G85nKu|͒%W[)ܤc FHkpTH8yCΕݱ9bl߮kyl NX6a>-!+n>΃:7MCzxF] X~ HY"hqi.([{1bpVSR-<a^`0t'Cz(< o踽y,\sbM]R) <ъfvYp6OE .lۼ*bi9|4Kl8YBС(<7Bjiq;{W)KfTL8)(UΫ@!BE{o5ګC;&u9jR$m TDٖZ)ǏֵFhhӰ+R-aR|-ODb̵jR'GcN/+3X0Fb;)mNsdFP}Iu񷳳 [ȂG%"wzR/ZfB ;Pțђ"`)>¥~c!~@I-n8rdNE+H *AFlTZ 5Jub.ڶf'PqU.!R_1~} ?00GE^4ٲaoa3Y pg֦d#@2@l>(UAbqv׬RVO%MD #gzlcg1jQ+Z,.1u[k/&70C<{: pCV@&&Hz/U3N¼tfr"T;K{35Ps)keǰlTD1H\ᖱ-Qe:d +^v,LQ0UҔ[cfrV ?"v=`YKaӖ_ դB;bi\>I scQf^QR6x̑N=Z'cvy~2'ʱ[-HeEP1^NQJ\HO ^Y!,@.(̊l2d&eA]r2c0Uei$f pr &2مՍ:dW7=Ç.8}x'p'\Zçz8x}V;G<'Д:O0Gg+{Z{D/re Z. lF\RǏ8L3pʲ}6r&G{P[9*;/ו yfR^iBMApKA,û12R02E;Gs[PhTD4z9_oz>\[ deu4ĠBbՠA6 [UL=1L Ԟ_b<`!$"(DdH 7+uSz즦"P+b4G*@8qa#(V[:{H&bSϔIѱ15B T D.+^GJoX]o߰HYɰ; =9W٭$HfXxF* {+tm21O랊{As Y)>VX! 
tlzmo4/m]B-6}g߽K#E}ae["9|Q#эmM&˾G]Fۃ?N N nendstream endobj 83 0 obj << /Filter /FlateDecode /Length 4296 >> stream x\[o\~W#y{h zI''[I[ZE8E{gHró+Ynh~3oYяGrZy:>Js >+)E} $Mh9fyIcQԔhqntԷ("N 1Y ; 3J[}NGuybIQQw˧UK!H.:yBEYR:3h~=L oPqò12$baҕaѫ㯏 ~->ߕ:}X[4;rX r#.56ӜF(CN!z#<-Hz+I- v-ݠh ém%ZwkӽD@Ӡtm8vnBhے?&';%cA{|t!Ҧ@)e###q6hV7T ]68$=c,wvfF֩Jxnֹ 38&BW{/{ HV䐴& ]>NAzI랞B0bB.u0n"}mi)ͳ<7mƏ @']H|[1<zd;oml?^_n?$NۮRkɽD电` X[ӿXP?M@+q!~&asrdzv퇐J|4=I^&2/&{ mZh\s|72I%0{ l?ro( 9րtmeFq ޛNक़ڒ' "K;s=6V'bub&bPb1Ȟ+O{JİaRydт'Oz<Ȱunxq]I2^o 8^ *P+[)[ȨŻ|AJʂ `Xmh-d. Eu"l,FjF5>5i#BF ~QqBo֥[gt^-H'B$N^0l{~+"bůV&7pha.'j)Kp5 gU [-_xC*nW9NQ#U&<{\0Зk1antr_ҿEvEŸ<V cj/4d.J+pR1 |6lY_CoeBPCC@%_v$]I&_!Fiay*%Y؃L r<֎6}Io'ƜB"ɵ@/]`cg4^y.kyW1#fTBq% hբ]'CQF/`iwʩvmp7cATA]6o_o#~l*EeL4#ytf?LXq '?#;j)sN v* ӎVWF"/5!JΆL.5wVeOx O7BA $ dag"HP$Ao9LU'<y*54AG VQ߾:>;fendstream endobj 84 0 obj << /Filter /FlateDecode /Length 4620 >> stream x[[q~_q茦AA,#A hCKTzgg$am_եW~iŸ_w?W@XBa`G_ʐM_J)wlZ\nO֌.35 P-qp8d'c|8vDʴ`v!_*KڟGcrLO՟ӈPJJV}>_~CGy1r{d)mYCwOtb%ߟ v8ݿm^ 5@ mDNǒ}ZtZJ0˜OҤhIMO ōewG.aSy7D>!G ru1 ޟ_Fř_ [Q)1L2\hgDt3$+#yAS%Y9siyVW0c8p2Gٛ1..̼-b!˜RS2!vkɀV+RJ66C#Gԡ$>ZĴ;Z7@YF]2TJ9m ؂f6PEf2!雬c f̞$Rv:Ή2}{o*cj&4Ly&F`p cj6RJ]Fv fVȹzYJ&лA,G9.g6D>‹uKhսjyIy}6n:|л &ʶЃw >r11w-4g=75 KvzV D^6\L88]1u/T:T]M5?lSFU[ᤖ`U?!m\4چO>=MbW=rdH^dK;Y-!Lبwz:Enƾ{ohe|j*iԷ+\χΫ4?uiɼ>!Ӯ}޵w?ߵ}>[MgY͚0<:z/IoںבNr=*nx\7US}Z,B/.榱9htF;S*a}5esj?e?{Ia?&@jv~׫JWyOuʻR bv߬zU9Bozu*(uͤKmy*7߯"WGLbS5 NJ\q^LGb_" ؕhAV(+r},sn'M7SjJ5StY{4-$S1POq9ԲA"L)A.տҗ sy3ϜF+UOIJw:UʩG|VDQ^c(xgrHj"D/\SPԓ':;:3TGx|;|w9$9P8بk.4gޣͨokawHFm{{X5T UQǒ AiX>[#g*RRaUY]Ubbs:H hP-z'U$cUU?o&1wqté1kC-'7gM9Dh m}ާoٖKqrj "clI+*շ.Yz hprvYժC6Z&@=-P/c ;-@W{@2qmnѭ@ >{eJ:qHpeD_/ϱz'S!1\;&M U"# %I*Ve: )pfkЂ+0W)zebuJ?_޲?߷~1zcqsۖ#D8~6ۚg﵋S&Coŷτrw?YsGq*'n&[]Jsa+&גyW0K43tavX.A$#G;[]-c=At&n[f=*gPP`ͺoq+[/X6Eg3w-Qn5.Wo^ aĩV~,|e4~%FV;`tqej|(J8 ßID& r̶aU &(BgB`oO,Rq5N%ܕ%ᛥ//KPڝï_#]R[jQ=mH>;UhD: Ѐ`t7Bzbƈ 2]<bf_mb{c{m$Rͫ7r;?>lrQןM;dd4$kMpH$'>}=Mb v//|ѓfL:sVFкIUEnd;Fu^@U -KʎMsbwA Myu@JZ^ʘ;P$@V Ve͑n f%p@I6Ó㬀jЪp})}1Fsةq`ʉ.2mvfO4fl\ੋg7͜M5?"QY 
ݙ꺖(gˬL?y0&i]*s\{ֲw}$迮2Ξw37SrGPUِY ˋ\paɆ$gLqYPYw/|hz݁I<}0iߑ! #H$?bM;Q{(e` >C*zdCУ  #",?3%gw\%[ n#;P{蘂a'@ߐg$Gs] @OKeYg@qa0Bq^SeLt|ƑV6fjVK̲!Myi/JPi#WfaElч32)49Phl2eD@cai(Lۖ\m Pg,2f$ %Dy‚"#nFh=S\CR<QT Icb(@e5!G!ʼn':xD,b鉴,stP0kGz yUx'G" uS0к0@!Y;_!( I;nY1( U j)#MEX;,&{61Y! ƔRĞg_RB70]4C4x<2V"AcdŤB̲Idya0\ଉuu`dYߖrH PoTYbA(h4UJ@ I,KE`ac5 #l׉,l0"7ڣ1a( A$Tɉ=II(Tcbt (i_ Fx4fug*dW `9YE?EB!N "`~v/%?!(/QS(iY63&vF8QUW.0(0Yr;e"u@Qc,B̯iL4T'SUXA.F$V QP C@<<NJPPyD^Gᓢf*Q*^b*E(Y6ќ> stream x\[o$u~?;-^Lbl6 d qj$F;zƻ ="٬Vkvm̃8%yx.߹𔾽r/n1\]|{跗mzsK5mwj]z߫7N0މ0mЅWe+2ϋտt_>tX_eCw5ǡ ?F-~^o/:.?q{^r o9\ޔ]wn []WP.^<^W̕#I c„ZWt XnrNsnWj.8z3l08&8 nk #E'8C,<iY xBZP,{c^hV:P(.X_W !zm$D./Q[M|\g(Ju'6*3[ZQ~^dXWz%caPq> q* I#`&?÷pݡxnfMA8P ΨLDlCIŒzKK$'RaGF/ 0Dچ(ޣGـ΁h9}݊Hxp@2CV8›ٝx.@hnlNGoOwdͰ!X(BT IC@&mΉ*c-aq*!Ałď78˼Vi4LgC;Fw;q=l!:\XrR Jy[h\Ytvx;OvRE6Ql&͠U`f 4#vܐѷ*@ *"؞֒R.w$@v F\P`6 bݦi&gƒ[rLe{ՠ##ՐY//KɷZ T2 ОL M>J=joEgkxآ={rdM+NެO!/3}ɃvW͋O2al Ȑ Q\a3/k! )j#F2 p㇯K 2Q}wC~Yvgtz=ĭrcT(3xM e:N!*ӦAFP s.%\}C2ʁqK?]M^{zc?ikL.hr_\4aU6g+\M՘\yFTi %O@x4dOU $ .Z1֤ЃutFKG[u΍+įvRNL= !28ā 8v4 Ti-/ >T Fr)u(~_&%~;(/cbbPsP D*wx&xa>IB 3T0@ƒ􂻟F#p:e=w86[<\yeN1U i/xSbsr\!uX1c^? rO:buҺA+gIƕBF[T.dKIE3m? ͘g2b%"ck~(x@5& rhxww?cXX)qt}bw$c[?N Y,\3r֝psh (=h`t[}" ` uu6 Go?2!־c"3+#3GG8,HN?l<Eh]!PVM4/ޭPCGlX[M1 nt{T #gÿun ֮MXCmBc z&Ň7-\mMMWDΓNAB9@:$\I<M-ѝC ͂1%h'^Rnz}%ƅuX*4+&W+xn p_@d{8ɇtmhj Tlp@-ń8{:< xݗ]aDsw \"ڎ o%ϼPJ/$3m{pmv$.PxuXc \d)E~\j GQ+6hџrG3>Q#ZBS4".k jڣFRdj\0s! t=! _+#e#6OCg ܚ8jiZ6OJ|vj(oRBgAbcl]-)tkswolfɛtKML/*9=*iSRt "إVyHErARG'jt@[CӏwJMM+cPDт7:b2Y߬#FN)QZ-]s;,83xԊpr 饈6|LDN?&Gbd¼BE{W"dRNpT>"<΅t"__{ZА!WK ʂJ87R(C.7S-ʙS=rRvB48nq`> pHV&(y:KJ4jC(Jљsrh2q;,7i!ʙe20v!Z_(uٲ>n.?U8XG`eL.%tu8Rж|9jw,W$wnsHݏO /àlwZJ*KTF; qzfn=OSryoW|{w+\BNۤHT: UuX֒zJ (6 zvYZRؠmߜz69bCZB5)3`#yZI )-7R%[/0%9ԹAQ+K͡ZƠڭ _BG('>u*L5Zzj>OTA\Y]~ω n~$Z ݩuV}[=l aѴJ[O)gw>v(n**\A`0c6x몥fzn9e|{Z59PNgluu҅ =0g]g_DW9h0xiب7n'F!L._28\fuduh;TBE_fGh0 *<\ Ʀu|Li6{G|U 4pBۥ>AT|hR||}{^, ;R706yhu,sKM6OČŪehjj|!clU%h iȇ-e('NB80B*z1B2LD0.TuSe(LOVղ&řOZwGNKoӕfmIa;U֓-i9dhu! 
?K|74zKlބO0Y~ U#Uxq  wB9-|G1I (uQoF5CHqYcJ4^Ӡr 6Je&@@ߴIY9?:GQ6*,$ⵆxA>Ԏ?-`6+X`U |"g(OEԝ(ǓE R&9lH`_ca BGeuyUf4K_`7_l[-AT,)U7\Z~ä3;-/߼l 7 Bb\lT,QQ^bz%{0~C0 N"U}'Q1FJ MF/]:nuκ5-I xu?pnFOqt#Ğu I~Џhy7Y TCU>Kko)r͝.`i&_#.%n^Z$@]amҖxx2epST.E* g`ȱtPendstream endobj 86 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1757 >> stream xT{PTW%ltBvb^ɴ΄93goyH"4 IR=%5vy*DX-5̥ACWѺ'y%]W,,ڮrU:Yb,6.nFً11qWq3Z{AG/7uKJ,cЀJijy-{ٿ̢}:]ל:hk> q6aH-컸j/zfp!gD.ͽ|}Y.VO%?&mzV9p¿)FN3.\ӂ!y(k*+ã=)WJgK<^Klx[-ݙw K(qx0h p9َCE Պ|ڊ47NtdʰI.a~p4Μ} Y-| UM-.wS[y{Be(Ұ\w8)/u*3e+p~?IQȃߠU8t`7ueg`?/g6"E7= E2}>#)D%Å=q*Y! SC_6VLΊ-3>c`ԕz 6o'}ΌS ]6t>J R*B0n @L،ZavB 8~~BN)qg4ȵj&8N0&TPJ[&oPhJ/ ؟Éu~H|?{YR`1VO yDDX#$3%+fARDz.||Px*۽^w#C)8$pxODhuU(Gֈ4fg5PeP&|l   endstream endobj 87 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1366 >> stream x}LSW便:v&`tCŏ~[JA0R|Z+)5na:9?ŨٜsD&o<}1~P&̌_pv~ٶx B07x8nM6PEK"$qII %IzY0H Z+hI>7橕MTEӦ%Y8h.$jĪUbD/Yi4В ^) |dFB+1_i6`j4 Ø8[ebLe`"lcoaoc"c+1\Ey^4/=m" `fC bHG?~ PJ$eΝ|hyL 3x`st"P!΅tijM8+{)XM'!o)>+Mv@c}}~/h66#0]KnTVV?y0tխpXduv4(-Ue@:4g/V5"^ ^ؑ?/q ᱛ/U L2r`*6@r]j-w4ЉaS'5)dp-W goH}!04 f`nGICp(4)4b 4 vri: Q0׮]@Q(2N'N2Osa!;@QUJj6nܵe{Ǐ9wS/K89|pWIӓ)VK#rNzqCfB/.V>L"~7+QQ`)Qe-t[@=50zɹ ԖJPAz@L(w咚ÎYݮna0t646S__.Yƃ_ MMЮ*mu~m ASlq f EYY ]lMBf j&f BB1_ߨendstream endobj 88 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2090 >> stream xuUkTSW@EƢbڊ:*:>h]:Պ@GԪ<S2"K$IGxŨ'UGtuUki}s\N̟Y|}{C.IPow"ccn.ws瞨[tBD"S,^,Yl/IITqҤ(HlGKUkT*K/JJY"WƮ^,IWIB)Red\D%I%L~ITT) *eAxʔ?,Y ~OO#Ll#; "xH I f3+'Zr%Q\qnnOń/SE: 65Ԡ.?R'o12 gQ>AEºL^4 籏0A7;i(YUo+li[P(2A}VA4~a=,ⷢfUޢxX5tqQKqN@pIi@ rz8Bxc]!,>%ZhB#n)`Ttzl=#Qjbf*F `e1_>|zS<{bqxbF|A&T"O+wl޹/nO=_?o΃Wk6e {Lcs'"$FS=G,NL W= p RkO82 8N+߳/C ?Es]zgOJL`(d@AsmEs*Ҍ<րDbu<@|9t__)yUcWY#Bw 5ڌ#lbCbAs +k?қ{>BאnɮH9ܳboS  F}a΁Lϡ2!l4ְb0UUw<;tw -۪(\T_ /Y$p20[ZuiU"~1z9}H;(^ ٰ$Q$ȁ\}~ JyvcXX$zyQ2hˬ6C1k1P᳋6&?f~ؑ`+lŬiؿsÉ7RC?czC4ķh@dEgpB͵jHe]Y kasB2KPFD.&9Bo1c:N@|#=nbuW2qSh$HЕOn#7%Ah-*eik $ADKWh1 ohD-DHZzӂhmcU[sU^ p“}kzͷiQk B2 ֘|"> stream xYt׶!mL -@!$z{-Iwْllc0ZH'c_޿l[/篷_s9gϾP]P@4gzbky[ 
BϮ틊{}(@<'$4&|_ϑNN2i¸qSfyotvZI~ tZ;2i4Џ?**j{PؐpOFv;;|HANÍ*wZLQYC6 >?"rt(he^˽W޶m S}8ÝG9טn'L4y=(j0B>VQRaj8ZGS Fj6CmRc-<}j>5Z@-Q$j 5ZJ}@-SRTʑO R CmQݩ LRYT?ޡ({AwjՕ w9aZn]l.#Z/´3 ct}vzxӻ^K{ݰmk? ;nA n7_$;︽cd.Jk?:@40 :o{%c%GRym[2&LP6Kn[ʦ CFuj&6 Gz1ћ F&WD6DB+cF ࠺Ee^C,Y=|.l-,۴Hve}^|uAOY\N`!yQ樶 d]ֽ-zⅼu9oᠮe)1ؿlgeKWS7Kl6\ңfEg~n/Q8߅Ej^ʡ> _y J%OK/_k̃w.ZU>""z0:K,+rVeK]8wԡ[^)3껨@,7^?ά D{`?݌n I].TM`{o=ܧu@:a6LCH-r "t(5XnH?h_lwتqך_@( #~Q*9[3\F,'f ZG&4Z;^]FEwOL'{CŹ%wi.1ਪmDLs茸u7A=~ H<*?QRF YA=\sM6׼)Uf.*"s HR{iuDž`f(tbH}FY<iDbB@30?D4q[&'pF~քw hgh-3kg. c#g`}O? @b@%ɲQx*+Bcjm0Qڴl [~@gǿ꾽&}ˢ.4miD?YӪ= {,NR5JX7odiA$cB%7N|s6.Lp#u"aAkk hAnƶ|ӯoڠEu՟  OqQfG=Ehals2=A]!1D?D^h=W܉9F?S|O) ;I vc!5/C)-ZЖw?.'p~ڤ|W{65y o0drқLdJ~!KDNؑPzW{@<5~E!URa]XD /C!&a˨?,~(4\4RH54$jՋ4 |£bΪ(Mhɒc| ۨͻUD~Ya&\R8C_mrfq0zrJY9$($tͩ(wIC;Rتv:/!YP+XhPB(;HF P6}h(F ڴ(\5>mK?C䈩l+`pF[mTcrP%9#P"_mf(#7" ;wm"UnbpN{61ꙛ*.x$`dlTW~#Dl^IoMIPx|X[ps UQC#߻d$fX9.;=z9hگ:IefTtQ:5u O/h1Dm9yg R _dxAKiB4N$ue4DŽ,RtWTV#wTwQ';Z16}Nh4?6q f\g~F 3/]:w4JEfƙ1bw^i2 4i(7? orɋ%ykXO lrZM6$FTeBuRh/*QLgir~tS$: U*U OYYg pϠ *Gv?$_ +`3{]$8y;Dz?[ `Qd>=)ڴd}~iͿ#I=*3*TUf_o8z^ $x^Q+4H+G%)I! PQ\WY ̓˗o65WHw5f]2E ؂Ҭ:(W;^ﮓ3vO\ކcmxV'3ZdzVCэUPjҷ w(MuOEQWnt:Q+bֳmlx\_Gjd3ŏF{k^( r5P;?#<>V$Oq4J,#~RZZSKHiΔsN(/չ)eqꏛ]}H?.W|ѝ)"3Dt#,Y||Kb2ڜ9fw]'/I00l'T1Ƙ BN_?]k"*#"+#jj*+k!YZf5  (!6ƶMeۑ(KpD {PB`*W "m0 ɿׂ Oz0e,%yХ@^PO6XZ. _izB'5P {{T4 ɛZ@ct":y^ f幼"ak ^K[:Bz-9/FF 9KI8@zUM 4qgOsG$56ޟc~)*P+N4Ѿ~ie̗<-FmEiA;SSI̢~WqL >~<ㅽShzީ:sn3w&zo،352}gL/4t|jU$h6e:EJuv0v, ;M F]f'iH3a7Z:"$@‘[CKQNzMe'],ny=FL h;lR̸d%$pl62 R|7v -#vGm1f[ˡ t>H~+ 2jiQ f9T$dHt 2ӹ10l͎)z&F+"#|¶jOud!VPYkOK:(U" 1B߈CYYw4?TBHw1+?z(;r@߰4q}VȷxOp 4u=!`e /N> stream xW Xڞ2ESzghKRjR nVA@$B;E#P)nUW\BiEomVZ{O@C83gs#(H$]ni /1V?۱F?s 1n=jq$%Ŋp20@rsߜףFNU(:9:P%w^鯌Lr^lTbEhZtPlWQk%e*uTOJ_Vm |s._]SNƬ2KKGRuzjIO-6R)Cj)zN-(wj&ZIͦF5M_& v{N:39Ⱦy(q8% GpޠH&%a)-;9U;oyޔɾ\cb޵QOa $tϏ(4K0^RS[.T` WJt\$Y\&! Q|˼ʌ@4Κ$ 5:aُ ~XWktݧMXʓXi׍y0*N}zܢ/}[~[$1 z#vO/} ed  L }3/ fYvⱴ 1x{At)&X(a0ˎn@LmMCs_Y}Wڔ#Vtգ 70ıtA Ƙİ ,}Q\^T[ݓV@9 t RmA˲`? 
:T7qg0B1 { 'UE*)E]M?~Se6!ƼoVڴ,[bHػZ^v= m imu%Q$1rͿ&CR=T|F7xc fegApP 4 sܤ-HDEssgZ8y@tW!/1}A3I.Nsj0fIf ba9Hs "| H+ AzTPGjQT!$HC(Mk"{Ţo7}WăO3K(.I懹ьvvPLz_y-26.˭e|V<$/4 V[WPܚ͡욜읨1<ᥙv>uɢZQga[DM)iQ[5^(1{ @1% 4Lv?<Ԣݼ%{'O0O#,UbV nl+Q<Ur ?KɭAү8W*(~M'k9HA?D^oESD&H'ELhc;yZ{F Qaq3-%L#5-RF'&/Ɓ 5i!N:::EV=M4%,sBYʹ ʰuY:_v2 *ex&g_#X.Dvm b a(=.9+:KO䊘ً/RxQjm=b{"/p5a izE^5=:(55,]#ŗp{T.:֢jNk P *gI5Jy? n=3hY ߊ-/bѤXGyW‚thaKk_J˗._l_1`Qt\.WVW_&M mnc۝3̘4^Zɾu?sDWhaJӃP`V<jom0\x Ž5LnÄ~;J#dE^A"40]wsU#8bgf8xdz߃Na4kqB/ʲwul Q/ӲBA]P E`5d'/A%K5f2[uQ|ۏٽJW 3رu5sBq`}1PN@xU*RtZ>%Up8Tηe}q>.u&8<UY&ӯc St?ᅞ)4EpPbN|TTQ*ul؂A1\dK9#hansE,؂T2fN$4'K੅ޣ`M?A}PuT-9"zIf %-񹮑A<=7$zHc^軻`;)8.|g<{yBD$H/ ~;_[Hf5F# b-<D1Qޑڻ+uViA,3B}* nPmLT=3 DЉyvd퍿M]Sv~q|+HSQ2w P:<X=5z䜊IQ[xuk P!_[{L FaYd [1ZRYYˇVkrrPc^U DާVU:ai Lfʡ]qdDZs"GYxyeޚd?/Zѯ{ϻ9q0}ݳ+W-s[;ǬpDP o(%\dsOKމs WeW2$n^u»B[RU3u1q)-obTv߱,%WG|oEui]5Z&!EW#WziJ[ 5zx}EIa E5ax vx-S%6hW~M?cRNO<`;vD"F_,*dz; iɩ(I-n[-V;`iXxEа9~}a]m\/!KbrN|=JDɼ> stream xVyTT "vU*RIP\1 bU`f 6Ȣ 0,"6IKi^fv*󽲬|0ywf~Y~2e#XBMwI ȁ7?<>kg2Y|/~sZbTDV9aDٳgMVNQ Pk#cZz MSNx%Rԩ)))SԱIS#^8YT Si+Ա29Y  OcfL\$d]zcX7cb=L>a^gqJ&YŬf&2o2LY3f,gL,3<ƅgN˖Ȏ 9{$iI.\Px+~3T9~I#cMrzɥ=9FD΋L8DA乪%&Ul"('&HRo]z&kA@Ԅ<>=2̚9$np֗?pJtvޭu>7`^^?Z\jECv.yNLy~AG7/GdD2,z̜jsdO0:Ld[I@teG 0^[ ՃA 78&P-twLq1`߁1w?G|M I(g{NU $xe*UݑS08;w~FNaJp [!m:B')W~;+_3| cIzMRnjri4|#f C|&Kpy #Ǔ[ Ħ[6H5/BW݊kME ˦ UCA5x'*h-ZԴFYV-+r+- BQv'V([_By:pn' .9FHy 0H0M0iai/^>sY8fż Cvpƫ8Zޟ?s(JK qf TX`w-ol vBxCtLrݏ} sׯ3߱Ѭr>p>sE/+p]\nX%!$Or394Muz rDF' q!-0Pv Kk9a0dWq)g8cC{C1Ȣ sgbE16f-{'k[nq~YѠ7撕 5zqP}P/$m)a>RUbKI5)9mi™6Y+Xt3ovxY}iM{m5rV5 -ndt2c"rc]kMQVXC5ˋ 9"_vdZG Tm\D7b+dL>uoɇzp*5L&I42!S7ZvFXj⿐ZKv*>Z+ Ww8sY&lh}Ad/#5v1EGX"Wɢ2&l]kN8f@-˷cP3:hڹ7R&hܫ ڱ>d8Q싓pҡ]a2d KBix>#6;ڭ}Tģh Q9s`!;_; -֩h Yq*[d96K]g<ѵ 'Lup+0Ur ,8Gzzp򓏺ebsUö]fC8ݻwXU]N4^]SS"TIZx <߻ !àDE6JE;8jg…Ç—_u?S!##WRv T$s>o-uZ_ʩ;ft<`Cc8؆ޣFdJ4w$@:脊fC'<8C/.ao$b4Q#]yY&}I,Xe0&7"32alT5zxn~Hg;dor, a'mu˵dU(ցAa D+Ji/ΝFdkwnɿ\ajN{ ^LPK.bQQiP^^ԺkYC}} Ad4n $@(+/^6 @;h BQ$KbO 'Mp!5b**wU0;$zI-m0@Ka't [o C=8}e `zq'^Ɔou5kqq꩷Wc8onKLmjiwevk3v79\ӺD<u1!*n6 
C!nZ3B , e ul2íeÊ<<Z"endstream endobj 92 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 324 >> stream xcd`ab`dddu 1H3a!#.kc7s7넾G ~"Ș[_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*"s JKR|SR YtX޽53~OAe=rIa:Y˺v]g*{ZG\Fۖ;Q> stream xcd`ab`dddu 21H3a!.kc7s7넾G ~"Ș[_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*"s JKR|SR Yt޽M3~_+ۻsO/{Vw\B ۡ݇f<ʛUg+[yNƾ|Փxxe`qendstream endobj 94 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2650 >> stream xVyPSw1D v$ H(ں**ޭ"B !N ȑA@!xE֮e5Zkc^wf_y30Iڼ-e$u6Љc㱾? BHb^,\ReS$R T&x+2rq`r:E$T6 5R\a~1JQX-}GѤ-}MV;_(OTKE)`8]' *\,6ehjfeX0lBRk2gj$qD=U&aac[mX v`X [b:l=- یm^W^`>X=Kz4)W]lGeN3Uĥs'SMY2E=PWMAg %sO-]n{<:z ?in6SPg5@ F79NqN+ #{!NŻtt^Cʪ,]wMe|B9z=I( DǏQ qN:4TQ#p5r?%Mh!$V^e|VS-Í"+Πc~7G_=?2 o͉^4Aћ?fh&}:T=/IC(([/vgF=|C=u^$:=KbV'.Y)ֶj)͚GBA>sj<>OB6奩 Swr=䋈k߸9d;u@?(Asg H'ZsZl]'8=p5יF8/z5GPa:AF!lR EE%2̒U\j-#;Z6eZA5n IiZ yBL7 yv|+{|*_ Đ4BD_&]29KʧwLDcE7kuu"ЃTH:#$Օؘ<-MR@ʄ3ND?T5:LAJxDʡE믽ec;ud%qvNNN5PROBݶ@,^#(kp6t 4]@/ǹXaE(i ""]11>g;O{| /ڣD}_¨2m>(U5G 4$ !~7zVzrJx18 6F~Bт'/ԝ1==AW(y q=LMGeD Eՠc膓ю~( L6EBZFef?PFo):1وy3Y}6Lvܻ!W=M+oLe:( ?m*ˌ y 0@j4~C' B=@,{Wl4U)zĠj( Tlw#ߟˣwd~EV:щ6]DNʻshґWo +{Z(,lBWV`mmw :fOgZDT;q]^U7UsSGɮvLV(.#ZoGV7{|x$3|;9(ŅtTf*dHp %8}hW>o$|zSuK&`\(H3ӅȚ Ŷ~hvϵ1~C:!PO/ȼe1YvkngؐG_ Zd;!6cm1#YF<#F^FMޛy٦5/bόeG2E2Af> stream xu}LwZNdtgn#30P6'h plRk+RZƋOE2(ZD fэBt&11L碋3%s_r|?2 qen%"\X,^X# ,HCŊ;^a6*P?^Cߞ*(ɉ3&:kN޾+g)c~K< *e J; jslIKO}:t1<]FBj5w@g`x=GBGot'y e;lDW7yt\tYd>X!{/5j. _HZK 8Up8( Hg5US$pP~@l;rchqIɞ]/@4t 6F |3I &y1* e']ֺ`uQZ& l6~^si_զ"~JDU(CL梔_޸wfe]Dœ8Cb{ꠡ! [j^h3 q++T;Lx{ܝU  5\XFnU9e8XUdnQ䃓ȸ__G)`vrgX4:66>>VcGiN-摿#j6.I95695<% gYcY? 
!J,Gh4KO||_3ft+F4I u> )AI t\-EJ3O͡v6i6Kzbendstream endobj 96 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 329 >> stream xcd`ab`dddw 641H3a!O/nn߷}O=J19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU8OB9)槤1000103012)ٽgf|;?̿ܥ;~3>,}} |g|Waq6;7<_s/wt\7Xp9|zaendstream endobj 97 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4644 >> stream xW xSն>!y@+s*}""A@ :ҖINii46mڤC:2ȠseTPQ "9^}6NRHw}7M59ֿ!H$ ]vÚ~*L 18"hs % e䔜i GYpGg^TF<5!N6.'%)#.|H KHM)8%''sѬYQqQ2y҇gDDON%%F<#D<q7,ed$#R>%"Z)&;'7/?N 01i )S7lL{)=cKN{xٶGf̌*RQjzzZOM6PQMfj 4ZNͤ^VPۨ3*jM=KͥVS1j-<8%BqORèj$ES;ye**ID3C^rV[|&ds/d/Z zQ0n;lⰺa'9yQF}=ߎ1sle?}|vOŬ=~1aȄcQB x~#ESСw$ !w]w:²ieuA= ,AHĪ4i'ԓە <_sdLi4Cp8󈄨b!& Kl*>ދvTbڼ3O li[;Vz4xv4rG=VHˍF-EPXʅcl?LҚnBS%$7t|p&SWIf˟ˡGcЦ&I|0̟RQэ X ;B^x 1Ih4H6zv64r7 ,Y~Tw-^i`H4PȲ˳d)vO=vste{P$JgAѤnPwB'xy嘚cAQ|]&&ʴ)l4jg9WJ=|CIzO~w!g#%'ՈڗO\4-DkU|s/lO\ & }T0rVd~2א`v}SZA[ t)AzZ[:&ckr$t Ǵ;tEwT&q*%0(z7 XP6<@z]Y-7>6u5t4;N-GרTzxH`+E(uhhiT].Z_ DdK?){وbXcZ6}Aܒ- o}[)?[ Sc^Ts:`(l2<LR{-4 |Z̶ıվ{֮ܔ4q MrXƜ\;{-#d@hq x.]iK2 EJ=W4K2;t"V %ޕBW8ղb`^g%- -E&GD{HaV9C[LޞwCUkeu9.|^':Hea"m02 FFHI}M %< |+7%{o>oV4A+7C3z{ Ă˻x %pZ91!H45H]iXF1A-MH' 拣ܼ9)ǼVzbRȯyj 8} ܴ*OWQW-\I}6=NMѷa;aɗMxzc}Ea)Mڥض N];x< {'4':UqAJ[ݦkse+`+0Ǿ;J<1YLdC%k\jrvƿtUd5Uq+%z`Pn'7tƼdy|Wf#řc%-ɐ*?nK9 xg@:y&:DkAxL`vb#g{| 怌_zp_?05dQO>;$tKU\ +6'5\+irCjif3;k.ޫy~Ϸ'>y/,⦧pX^ /jSqU?`M&2"X1Ҧʟ-7[?1}Lr [ 8A4C!ɐdL2&B!ѰNFfpq6HY1PdhJ6xAT6\ .8še49]sJ"6; YÚ,f;R2buqj#_6$m\V?[,"BmwpB]U8il:t'9K.PXZUn=hO/R`F?}ߟ%tQ z ɕtLSɕJ R6VB=4TX Q1EGYUj*^SUBAxncqCb&[6ALJᲗX/,?xUxm%J G+7'+qr] u^\,5S/~ypG\_:4OKXOV,:M92[~~DKam}݃zэo$UfKjNf>N9'V]|gsɊF&&9x~ oӾ@ǡMYuQSu1WQz MDhٺrR,w}FӢoHrnm$fIOWsad61)L$SAZ,.lAm,zvxx'mhF9A՘Q!-5pr?DxIMd`//pغk^0FXڅ^+{~P5*ϔ_C!ج䎼j3,\ YEJ4O2FT)1&.l{]* $@Z{81~LIMHhs -B*BIkmzfK*>U3"}f+ȍ6gvjצh6JE%H:e&-a2r\0ל"F -lx5s=?MDDMZ]Du+-#Ă ,<ߕ!`4VkШ *R,u:jdn9Ο췿т0[whZMH1(u܎Uh\P53pnXHlD(E/C+endstream endobj 98 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3079 >> stream x TWǫm*[Sa4ZDE.1c((n(YZ}UQqfv(qCbD812If>D'hnvf̜3NWUz{}2n %5=)cĩr?[INv-S[u"(Lޚ8CBdN:[pٲ7/SO 
ƨ7jh^O7g\ %%e6:q~lBs)z7<1[x!>,QR&7h:K$l] QqR0_@W( mu~&Q/>8rrq%0m"+`A \g33+:tۀ8ϠU_k$)p~BbN+٧rE{a(\+F/5ޝT~ÙLÄ47ZZlT]^C(6FvGxHH#P`-Y +(9f[gN3X*t95!5?+&ZWo5g;Ȩڲ?N2c$trr 0牌N3%p™dA~weuXuu^X}T~٭ IpC+KI}7]c­*M n觡M3wΚDRZmW*Y+h*A Jb?zYʼ>ŗӤTpK^Z V$ D'0`1%@O6K9yY&17' ) **(L+ v5NN tJ J-kٶh7y6FtXqH0ɢ=v}]PTtH7U F%{g-ROsuō|lծ#i6dÇ* U3q!; =!0ډ.*XRSm{,vTas^ՓugWpޘUi#&1)&,g{2K(O& D뒲ѿdLM&$'7w%Z&9haUXՑ1aB;)#o5K8,a ɐw0[ɺՒYLz4sȔ1=ٕ gv_|Y鼼 (:bh$iv'~WZӲ#x+ĕ˹8K{.T70xӎ醐m$Jqw.](m>hVKh4}0S2 6+aV⺁oI7fh`F L$R;LqXl2_߱`; \gZXGF4oڏ}t8' W%HWt]1c$Sn_}Mء<fI fy< ^y"H }T:?y08%i9[~}i_8w俿@%?!.ڑp 'qj}1[N*,%'5U#r qDVŶT6ko)d뤈S}o]-$ p28^5~]<%7k:u  cv-GqЂ#*rvoT8989iIIQendstream endobj 99 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2265 >> stream xV{TS1XKy2{SqЪRׇe@!$I< O  |Pbv[tt[{Z{m/]Ğu;q_c0KҳvmJFE1eݹ݋! /LF{(`HiLXV.6m^{Gx[E/e_N9)) ռ'yDT*KdeO)rދJL!(='y"otHZ%xYLaXX"Uʫ%2A: v`9Nlۃ=~e`ac2,l;c #q~S&?e&-޸f /Z n?_UqKR._ai5D5(c8 4;F޺ pc"Z)&BK(TF-X45p-;]dh$J)]}i/eq\[eEbz7} ;~%8:EU&&&,\p :EW 3@17RPjfE Z~LP*))v1_>GL|`GalRko>S=17Sq  ^W *$^L$>AЫ^pVqD1rR=L(&koqs:ڵ󀏸_KZYa^sFOӅks &򠓜y !MRf\ۢlx,JM&KW#Z]hD$J~eV@8=+[VWϨ3v8xPS%5ۧD ?=^B?8\C?\ЗxzQ-:ZZp.6Ihv7C4ۯ2/3jвԁ@)~h;7\c ڤ%Oob@UuV:uV_ 7ܱt-ت DKϼNhuvӔݱ#h434:Є{ (6Xgs5X\D,*6?Yf8y&(#tuD ^R.n3ݿf|ldjVix`(¿*8Tz3~nng>T-&EoYCMgWXQohFP1iw??A~{rG]۷"]VŠjC4ܰ.#  o4 a*cئLݛ#. 
?mr~T$tGZp!^˯N;67 PC>=p5>Ȭ%^ծoFCdžʨZxK%Mf|KkE& 斁TM@!(6!x1-' UԔT߸u7Wb3toVn=iwJ*ZBPl:<[g`׋_B~N>i$֫^4|ݫ- -؊dt߅` .>LOp _GO{huB*udZ:?R'6Hi*񒎢 _q9'tiRD2C" ^b|DԽ_Ү3+0wO`+#ڎ_kx:f-QVK榾^xO=C7314uQ{Xj[L=ڸ }EFNhF^A0\ C6; aiBs(=`WP6'HMF#$\P̪DRU$A,-YN,KJ\iILİ` endstream endobj 100 0 obj << /Filter /FlateDecode /Length 3706 >> stream x[Ko$O pN av%eKyxW>_V # >lX9?nU}sW&lw=l~hva-Zޞ^lP&էl=|ӽݩު rvwKl}fwbm}4/?9FQ{.؎FlHhEϮGJ&)g&tw N}гKrw|;4Rv||vwbK;^Q老N{eSwJv!mNևߝ`}wKC+۝a\նat*JwM%LѽUzML l|Vi4dlO{&;a`y׍wӿmNMǺiTWԠt%)}#62ɓX}C \뼻z>)K{ÝGt}, UuCl2bT)|9Y3PmAxR;Fs~S/sN"m`h)08*ujfkt|~b4No]C%Xur5`'46< Qo]\FB o}6 @-Y{x7ھ=?\&跇F*nT7v+-[ga&[-Lc[|1(dcٳ6zIJ $`<(;x׼xASo,/"qr󲏊ǥٽT d%(ȅi4[]ᝧ`sh yX c:{O $ej 0rIR>&TA-P3lrZ 1 <1"Y( &o;'V5`);jbdfri\Y-[uM^ u wޗ%;zS =j8Ԟm"1 wn!$Y.]l0iVa~?W$c8+Dǀ/ \Bd;5`y[3ʂ:KF mg(xpANE,H#RW j{z%g?yY?K:6ڨYniy :s_?bkwIR +n5;H[b24uN zrWeAS1։4|9Da)v.*'GoJiO@/R݉ΖˀL~dbf =RoxK*n`{96Fx@gV`Bg A%;YlKQKFb *hӈ⇁*Z(l?iQnJA{UzdASwUQCJÜ%8.$~X#+`N NPlp{ kf8q OuQd! Z A"s7o蛠@bE<.vxž7 wv7`vB nx;It%w"!R ;O\1kߵôg5fSф\D3(CN"Y XT, W"hBbKVD'm$BmCDEYsM)fV{_{ 'L4sMptȍ NAH@E mQ'/#S* l֥G)ZSA9? 5E Mϊ*5L  ;-=]ނ8%;#eeBK :,SnT9i***en!xUTԾ"Cc|0&t[ᖁO!euQ1My_5;%%4#O#8K>DJ\lKzϾL8s*b)刳>-K;6Lv6xU;NLFw7-Me$=3`Y`|'C\l𮹯y۝ 1(}RV)^ 6yRozGBAg~]Ia_6[; ^HeP.-].e"l!R@b6n2N8B[ ?B8_QL unP ͘K7p,1t\B lK:ov {>)Wnah8ztLd좃{='c?e5)+ gv6i S'3"K, ҉Gg-ikSeN1ޏ븂>?_-Dҭ .V{ ߯fy >PyW;s &PI6OW~Gi$R)_8'zv'~2f%_|?~^.v_byX{S?W%G&2i 'A~PPG<7xMހÕvgPp`>@3a@vr Y9P;aݰb|I/xS6-pcYPiW^iOqᅭ^ZRV=O81ZsRb8eUɀh^L/=xpѻ"dWN&G2+8/qf%Ⱦ~>ύ:jn/ s 6* P3V%%F&\}}=vMŊMX[@=,iYZ|S=9Mk0c%Ǚ6KpUMsxqRݦ#; |yʷ^~UUnL DcH e6q4'tSߥv]aD2!HoDRޣs;Ovbvny? ; 9$=Us.Wb'N':lR4-j>ֳe}BT+:ŅxhobcGaNw緌BSfx~Nb,"XXf[L4z DYKܠ1O,jfR1k O1 |e #Ą {HT2Oo+XڃZf)h5;[X a~B 2dQƙ([ jQN-7vc8Ʀz{: -,e<]tbn aEP`lba Ϋdpzl`؝X*? 
)/AY!U:!,gݲ)L2' /`Kfm}_r*fc"o)(OC V]iS(0O :-OHuX m極T iAp 8Kmy +yzzyv!^V^o_"DZ#qpq-z#T>L'k-qo Eٳ_XSᛀ&]_k%ә[L$Ez?ro-endstream endobj 101 0 obj << /Filter /FlateDecode /Length 2378 >> stream xZKokWwuTX'|?( (.D$cɖؒcٽ6ΐ<촸- /gg_}-|{vӌo~̐Fz:eL Wtr( \&}7@ɔl]8ƹCv,d~V_g_}ۭ $ކ/x1Wp'^j'GkfQYL"LpJxN]yѲ{ F,JH8Rfwx #ٽf≿ZW%`:7~Xj! .ߠ.=w2:C! RȋS$ yH8!w[! 6 y(B~XD 57{ u>C0s%\rdţe_z$cګ f8D`>;)`XN Wq*Zzm8 sǝE"GXw>hݺ0x&Z 䔘K]T]PFko&~.)3f!9 Xx PrVp#cZu#upBޘXuK@dh,eH n! KCt&u:ch1ulE5A83&]ܑ-שeM7P-!(TwGCO< 22.7^.N:mL2%) 8l 5.齃0 L0 o Y+z*@D4>~ÁT*b7Օ/cls<Ѝ(1I_סD?$K]jHFц[DYBH1gdLZ@>r{T"υ/tBJ> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 103 /ID [<615dd650602c27ac920efc2802ae0514><76f4ef667cc638337f14f6ed94439f6d>] >> stream xcb&F~0 $8Ja?oV <[@$^"@ HvsɶDrH`Y 9 i.d`YUIFDl59 endstream endobj startxref 74641 %%EOF spatstat/inst/doc/getstart.R0000644000176000001440000001154512252324024015651 0ustar ripleyusers### R code from vignette source 'getstart.Rnw' ################################################### ### code chunk number 1: getstart.Rnw:5-6 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: getstart.Rnw:23-30 ################################################### library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) ################################################### ### code chunk number 3: getstart.Rnw:54-56 ################################################### getOption("SweaveHooks")[["fig"]]() data(redwood) plot(redwood, pch=16, main="") ################################################### ### code chunk number 4: getstart.Rnw:77-79 ################################################### 
getOption("SweaveHooks")[["fig"]]() data(longleaf) plot(longleaf, main="") ################################################### ### code chunk number 5: getstart.Rnw:136-139 ################################################### data(finpines) mypattern <- unmark(finpines) mydata <- round(as.data.frame(finpines), 2) ################################################### ### code chunk number 6: getstart.Rnw:154-155 (eval = FALSE) ################################################### ## mydata <- read.csv("myfile.csv") ################################################### ### code chunk number 7: getstart.Rnw:165-166 ################################################### head(mydata) ################################################### ### code chunk number 8: getstart.Rnw:181-182 (eval = FALSE) ################################################### ## mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90)) ################################################### ### code chunk number 9: getstart.Rnw:185-186 (eval = FALSE) ################################################### ## ppp(x.coordinates, y.coordinates, x.range, y.range) ################################################### ### code chunk number 10: getstart.Rnw:195-196 ################################################### getOption("SweaveHooks")[["fig"]]() plot(mypattern) ################################################### ### code chunk number 11: getstart.Rnw:203-204 (eval = FALSE) ################################################### ## summary(mypattern) ################################################### ### code chunk number 12: getstart.Rnw:208-209 ################################################### options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1))) ################################################### ### code chunk number 13: getstart.Rnw:211-212 ################################################### getOption("SweaveHooks")[["fig"]]() plot(Kest(mypattern)) ################################################### ### 
code chunk number 14: getstart.Rnw:218-219 (eval = FALSE) ################################################### ## plot(envelope(mypattern,Kest)) ################################################### ### code chunk number 15: getstart.Rnw:221-222 ################################################### env <- envelope(mypattern,Kest, nsim=39) ################################################### ### code chunk number 16: getstart.Rnw:224-225 ################################################### getOption("SweaveHooks")[["fig"]]() plot(env, main="envelope(mypattern, Kest)") ################################################### ### code chunk number 17: getstart.Rnw:227-228 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 18: getstart.Rnw:234-235 ################################################### getOption("SweaveHooks")[["fig"]]() plot(density(mypattern)) ################################################### ### code chunk number 19: getstart.Rnw:245-246 (eval = FALSE) ################################################### ## marks(mypattern) <- mydata[, c(5,9)] ################################################### ### code chunk number 20: getstart.Rnw:248-249 ################################################### mypattern <-finpines ################################################### ### code chunk number 21: getstart.Rnw:252-253 (eval = FALSE) ################################################### ## plot(Smooth(mypattern)) ################################################### ### code chunk number 22: getstart.Rnw:256-257 ################################################### getOption("SweaveHooks")[["fig"]]() plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)") spatstat/inst/doc/BEGINNER.txt0000644000176000001440000000140712237642736015637 0ustar ripleyusers -== Welcome to the 'spatstat' package! 
==- For an overview of all capabilities, type help(spatstat) For a friendly introduction to spatstat, please read the document "Getting Started with Spatstat" (called a 'package vignette'). This can be accessed by typing help.start() and clicking on Packages -> spatstat -> User guides and Vignettes -> Getting started To handle spatial data in the 'shapefile' format, see the vignette "Handling shapefiles in the spatstat package", at the same location. For a complete two-day workshop on spatstat, please download the workshop notes at For news about new features in the latest version of spatstat, type latest.news (Press 'Q' to exit, on some computers) spatstat/inst/doc/shapefiles.R0000644000176000001440000001307512252324024016137 0ustar ripleyusers### R code from vignette source 'shapefiles.Rnw' ################################################### ### code chunk number 1: shapefiles.Rnw:7-8 ################################################### options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) ################################################### ### code chunk number 2: shapefiles.Rnw:23-29 ################################################### library(spatstat) options(useFancyQuotes=FALSE) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") ################################################### ### code chunk number 3: shapefiles.Rnw:104-105 (eval = FALSE) ################################################### ## library(maptools) ################################################### ### code chunk number 4: shapefiles.Rnw:109-110 (eval = FALSE) ################################################### ## x <- readShapeSpatial("mydata.shp") ################################################### ### code chunk number 5: shapefiles.Rnw:115-116 (eval = FALSE) ################################################### ## class(x) 
################################################### ### code chunk number 6: shapefiles.Rnw:131-135 ################################################### baltim <- columbus <- fylk <- list() class(baltim) <- "SpatialPointsDataFrame" class(columbus) <- "SpatialPolygonsDataFrame" class(fylk) <- "SpatialLinesDataFrame" ################################################### ### code chunk number 7: shapefiles.Rnw:137-141 (eval = FALSE) ################################################### ## setwd(system.file("shapes", package="maptools")) ## baltim <- readShapeSpatial("baltim.shp") ## columbus <- readShapeSpatial("columbus.shp") ## fylk <- readShapeSpatial("fylk-val.shp") ################################################### ### code chunk number 8: shapefiles.Rnw:143-146 ################################################### class(baltim) class(columbus) class(fylk) ################################################### ### code chunk number 9: shapefiles.Rnw:168-169 (eval = FALSE) ################################################### ## X <- X[W] ################################################### ### code chunk number 10: shapefiles.Rnw:186-187 (eval = FALSE) ################################################### ## y <- as(x, "ppp") ################################################### ### code chunk number 11: shapefiles.Rnw:198-200 (eval = FALSE) ################################################### ## balt <- as(baltim, "ppp") ## bdata <- slot(baltim, "data") ################################################### ### code chunk number 12: shapefiles.Rnw:248-249 (eval = FALSE) ################################################### ## out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) ################################################### ### code chunk number 13: shapefiles.Rnw:258-259 (eval = FALSE) ################################################### ## curvegroup <- lapply(out, function(z) { do.call("superimposePSP", z)}) ################################################### ### 
code chunk number 14: shapefiles.Rnw:299-303 (eval = FALSE) ################################################### ## out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) ## dat <- x@data ## for(i in seq(nrow(dat))) ## out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE]) ################################################### ### code chunk number 15: shapefiles.Rnw:324-326 ################################################### getOption("SweaveHooks")[["fig"]]() data(chorley) plot(as.owin(chorley), lwd=3, main="polygon") ################################################### ### code chunk number 16: shapefiles.Rnw:339-341 ################################################### getOption("SweaveHooks")[["fig"]]() data(demopat) plot(as.owin(demopat), col="blue", main="polygonal region") ################################################### ### code chunk number 17: shapefiles.Rnw:377-380 (eval = FALSE) ################################################### ## regions <- slot(x, "polygons") ## regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) }) ## windows <- lapply(regions, as.owin) ################################################### ### code chunk number 18: shapefiles.Rnw:385-386 (eval = FALSE) ################################################### ## te <- tess(tiles=windows) ################################################### ### code chunk number 19: shapefiles.Rnw:418-421 (eval = FALSE) ################################################### ## spatstat.options(checkpolygons=FALSE) ## y <- as(x, "owin") ## spatstat.options(checkpolygons=TRUE) ################################################### ### code chunk number 20: shapefiles.Rnw:438-439 (eval = FALSE) ################################################### ## y <- as(x, "SpatialPolygons") ################################################### ### code chunk number 21: shapefiles.Rnw:449-453 (eval = FALSE) ################################################### ## cp <- as(columbus, "SpatialPolygons") 
## cregions <- slot(cp, "polygons") ## cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) }) ## cwindows <- lapply(cregions, as.owin) ################################################### ### code chunk number 22: shapefiles.Rnw:463-465 (eval = FALSE) ################################################### ## ch <- hyperframe(window=cwindows) ## ch <- cbind.hyperframe(ch, columbus@data) spatstat/inst/doc/getstart.Rnw0000644000176000001440000003076012252324024016216 0ustar ripleyusers\documentclass[11pt]{article} % \VignetteIndexEntry{Getting Started with Spatstat} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Getting started with \texttt{spatstat}} \author{Adrian Baddeley and Rolf Turner} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle Welcome to \spst, a package in the \R\ language for analysing spatial point patterns. This document will help you to get started with \spst. It gives you a quick overview of \spst, and some cookbook recipes for doing basic calculations. \section*{What kind of data does \spst\ handle?} \Spst\ is mainly designed for analysing \emph{spatial point patterns}. For example, suppose you are an ecologist studying plant seedlings. You have pegged out a $10 \times 10$ metre rectangle for your survey. 
Inside the rectangle you identify all the seedlings of the species you want, and record their $(x,y)$ locations. You can plot the $(x,y)$ locations: <>= data(redwood) plot(redwood, pch=16, main="") @ This is a \emph{spatial point pattern} dataset. Methods for analysing this kind of data are summarised in the highly recommended book by Diggle \cite{digg03} and other references in the bibliography. \nocite{handbook10,bivapebegome08} Alternatively the points could be locations in one dimension (such as road accidents recorded on a road network) or in three dimensions (such as cells observed in 3D microscopy). You might also have recorded additional information about each seedling, such as its height, or the number of fronds. Such information, attached to each point in the point pattern, is called a \emph{mark} variable. For example, here is a stand of pine trees, with each tree marked by its diameter at breast height (dbh). The circle radii represent the dbh values (not to scale). <>= data(longleaf) plot(longleaf, main="") @ You might also have recorded supplementary data, such as the terrain elevation, which might serve as explanatory variables. These data can be in any format. \Spst\ does not usually provide capabilities for analysing such data in their own right, but \spst\ does allow such explanatory data to be taken into account in the analysis of a spatial point pattern. \Spst\ is \underline{\bf not} designed to handle point data where the $(x,y)$ locations are fixed (e.g.\ temperature records from the state capital cities in Australia) or where the different $(x,y)$ points represent the same object at different times (e.g.\ hourly locations of a tiger shark with a GPS tag). These are different statistical problems, for which you need different methodology. 
\section*{What can \spst\ do?} \Spst\ supports a very wide range of popular techniques for statistical analysis for spatial point patterns, for example \begin{itemize} \item kernel estimation of density/intensity \item quadrat counting and clustering indices \item detection of clustering using Ripley's $K$-function \item spatial logistic regression \item model-fitting \item Monte Carlo tests \end{itemize} as well as some advanced statistical techniques. \Spst\ is one of the largest packages available for \R, containing over 1000 commands. It is the product of 15 years of software development by leading researchers in spatial statistics. \section*{How do I start using \spst?} \begin{enumerate} \item Install \R\ on your computer \begin{quote} Go to \texttt{r-project.org} and follow the installation instructions. \end{quote} \item Install the \spst\ package in your \R\ system \begin{quote} Start \R\ and type \verb!install.packages("spatstat")!. If that doesn't work, go to \texttt{r-project.org} to learn how to install Contributed Packages. \end{quote} \item Start \R\ \item Type \texttt{library(spatstat)} to load the package. \item Type \texttt{help(spatstat)} for information. \end{enumerate} \section*{How do I get my data into \spst?} <>= data(finpines) mypattern <- unmark(finpines) mydata <- round(as.data.frame(finpines), 2) @ Here is a cookbook example. Suppose you've recorded the $(x,y)$ locations of seedlings, in an Excel spreadsheet. You should also have recorded the dimensions of the survey area in which the seedlings were mapped. \begin{enumerate} \item In Excel, save the spreadsheet into a comma-separated values (CSV) file. \item Start \R\ \item Read your data into \R\ using \texttt{read.csv}. \begin{quote} If your CSV file is called \texttt{myfile.csv} then you could type something like <>= mydata <- read.csv("myfile.csv") @ to read the data from the file and save them in an object called \texttt{mydata} (or whatever you want to call it). 
You may need to set various options to get this to work for your file format: type \texttt{help(read.csv)} for information. \end{quote} \item Check that \texttt{mydata} contains the data you expect. \begin{quote} For example, to see the first few rows of data from the spreadsheet, type <<>>= head(mydata) @ To select a particular column of data, you can type \texttt{mydata[,3]} to extract the third column, or \verb!mydata$x! to extract the column labelled \texttt{x}. \end{quote} \item Type \texttt{library(spatstat)} to load the \spst\ package \item Now convert the data to a point pattern object using the \spst\ command \texttt{ppp}. \begin{quote} Suppose that the \texttt{x} and \texttt{y} coordinates were stored in columns 3 and 7 of the spreadsheet. Suppose that the sampling plot was a rectangle, with the $x$ coordinates ranging from 100 to 200, and the $y$ coordinates ranging from 10 to 90. Then you would type <>= mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90)) @ The general form is <>= ppp(x.coordinates, y.coordinates, x.range, y.range) @ Note that this only stores the seedling locations. If you have additional columns of data (such as seedling height, seedling sex, etc) these can be added as \emph{marks}, later. \end{quote} \item Check that the point pattern looks right by plotting it: <>= plot(mypattern) @ \item Now you are ready to do some statistical analysis. 
Try the following: \begin{itemize} \item Basic summary of data: type <>= summary(mypattern) @ \item Ripley's $K$-function: <>= options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1))) @ <>= plot(Kest(mypattern)) @ For more information, type \texttt{help(Kest)} \item Envelopes of $K$-function: <>= plot(envelope(mypattern,Kest)) @ <>= env <- envelope(mypattern,Kest, nsim=39) @ <>= plot(env, main="envelope(mypattern, Kest)") @ <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ For more information, type \texttt{help(envelope)} \item kernel smoother of point density: <>= plot(density(mypattern)) @ For more information, type \texttt{help(density.ppp)} \end{itemize} \item Next if you have additional columns of data recording (for example) the seedling height and seedling sex, you can add these data as \emph{marks}. Suppose that columns 5 and 9 of the spreadsheet contained such values. Then do something like <>= marks(mypattern) <- mydata[, c(5,9)] @ <>= mypattern <-finpines @ Now you can try things like the kernel smoother of mark values: <>= plot(Smooth(mypattern)) @ \setkeys{Gin}{width=0.8\textwidth} <>= plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)") @ \setkeys{Gin}{width=0.4\textwidth} \item You are airborne! Now look at the workshop notes \cite{badd10wshop} for more hints. \end{enumerate} \section*{How do I find out which command to use?} Information sources for \spst\ include: \begin{itemize} \item the Quick Reference guide: a list of the most useful commands. \begin{quote} To view the quick reference guide, start \R, then type \texttt{library(spatstat)} and then \texttt{help(spatstat)}. Alternatively you can download a pdf of the Quick Reference guide from the website \texttt{www.spatstat.org} \end{quote} \item online help: \begin{quote} The online help files are useful --- they give detailed information and advice about each command. They are available when you are running \spst. 
To get help about a particular command \texttt{blah}, type \texttt{help(blah)}. There is a graphical help interface, which you can start by typing \texttt{help.start()}. Alternatively you can download a pdf of the entire manual (1000 pages!) from the website \texttt{www.spatstat.org}. \end{quote} \item workshop notes: \begin{quote} A complete set of notes from an Introductory Workshop is available from \texttt{www.csiro.au/resources/pf16h.html} or by visiting \texttt{www.spatstat.org} \end{quote} \item vignettes: \begin{quote} \Spst\ comes installed with several `vignettes' (introductory documents with examples) which can be accessed using the graphical help interface. They include a document about \texttt{Handling shapefiles}. \end{quote} \item website: \begin{quote} Visit the \spst\ package website \texttt{www.spatstat.org} \end{quote} \item forums: \begin{quote} Join the forum \texttt{R-sig-geo} by visiting \texttt{r-project.org}. Then email your questions to the forum. Alternatively you can ask the authors of the \spst\ package (their email addresses are given in the package documentation). \end{quote} \end{itemize} % The following is inserted from refs.bbl originally generated from refs.tex \begin{thebibliography}{10} \bibitem{badd10wshop} A. Baddeley. \newblock Analysing spatial point patterns in {{R}}. \newblock Technical report, CSIRO, 2010. \newblock Version 4. Available at {\texttt{www.csiro.au/resources/pf16h.html}}. \bibitem{bivapebegome08} R. Bivand, E.J. Pebesma, and V. G{\'{o}}mez-Rubio. \newblock {\em Applied spatial data analysis with {R}}. \newblock Springer, 2008. \bibitem{cres93} N.A.C. Cressie. \newblock {\em Statistics for Spatial Data}. \newblock {John Wiley and Sons}, {New York}, second edition, 1993. \bibitem{digg03} P.J. Diggle. \newblock {\em Statistical Analysis of Spatial Point Patterns}. \newblock Hodder Arnold, London, second edition, 2003. \bibitem{fortdale05} M.J. Fortin and M.R.T. Dale. 
\newblock {\em Spatial analysis: a guide for ecologists}. \newblock Cambridge University Press, Cambridge, UK, 2005. \bibitem{fothroge09handbook} A.S. Fotheringham and P.A. Rogers, editors. \newblock {\em The {SAGE} {H}andbook on {S}patial {A}nalysis}. \newblock SAGE Publications, London, 2009. \bibitem{gaetguyo09} C. Gaetan and X. Guyon. \newblock {\em Spatial statistics and modeling}. \newblock Springer, 2009. \newblock Translated by Kevin Bleakley. \bibitem{handbook10} A.E. Gelfand, P.J. Diggle, M. Fuentes, and P. Guttorp, editors. \newblock {\em Handbook of Spatial Statistics}. \newblock CRC Press, 2010. \bibitem{illietal08} J. Illian, A. Penttinen, H. Stoyan, and D. Stoyan. \newblock {\em Statistical Analysis and Modelling of Spatial Point Patterns}. \newblock John Wiley and Sons, Chichester, 2008. \bibitem{mollwaag04} J. M{\o}ller and R.P. Waagepetersen. \newblock {\em Statistical Inference and Simulation for Spatial Point Processes}. \newblock Chapman and Hall/CRC, Boca Raton, 2004. \bibitem{pfeietal08} D.U. Pfeiffer, T. Robinson, M. Stevenson, K. Stevens, D. Rogers, and A. Clements. \newblock {\em Spatial analysis in epidemiology}. \newblock Oxford University Press, Oxford, UK, 2008. \bibitem{wallgotw04} L.A. Waller and C.A. Gotway. \newblock {\em Applied spatial statistics for public health data}. \newblock Wiley, 2004. 
\end{thebibliography} \end{document} spatstat/inst/ratfor/0000755000176000001440000000000012237642736014433 5ustar ripleyusersspatstat/inst/ratfor/Makefile0000755000176000001440000000244412237642736016102 0ustar ripleyusers RATFOR = /home/adrian/bin/ratfor77 #RATFOR = /usr/local/bin/ratfor CPP = /usr/bin/cpp ########################################################## # Sources actually written by humans: RAT_SRC = dppll.r inxypOld.r C_DOMINIC = dinfty.c dwpure.c C_MISC = raster.h areadiff.c closepair.c connect.c corrections.c \ discarea.c distances.c distmapbin.c distseg.c \ exactdist.c exactPdist.c \ massdisthack.c poly2im.c trigraf.c utils.c xyseg.c C_MH = methas.h dist2.h areaint.c badgey.c dgs.c \ diggra.c dist2.c fexitc.c getcif.c geyer.c \ lookup.c methas.c stfcr.c \ straush.c straushm.c strauss.c straussm.c C_KEST = Kloop.h Kborder.c C_SRC = $(C_DOMINIC) $(C_MISC) $(C_MH) $(C_KEST) CC_SRC = PerfectStrauss.cc HUMAN = $(RAT_SRC) $(C_SRC) $(CC_SRC) Makefile ########################################################## # Source to be generated automatically: RAT_FOR = dppll.f inxypOld.f GENERATED = $(RAT_FOR) ###################################################### ########### TARGETS ################################ target: $(GENERATED) @echo -- Done ------- tar: tar cvf src.tar $(HUMAN) clean: rm $(GENERATED) -rm src.tar ####################################################### ######### RULES ################################## .r.f: $(RATFOR) -o $@ $? 
spatstat/inst/ratfor/inxypOld.r0000755000176000001440000000216312237642736016431 0ustar ripleyuserssubroutine inxyp(x,y,xp,yp,npts,nedges,score,onbndry) implicit double precision(a-h,o-z) dimension x(npts), y(npts), xp(nedges), yp(nedges), score(npts) logical first, onbndry(npts) zero = 0.0d0 half = 0.5d0 one = 1.0d0 do i = 1,nedges { x0 = xp(i) y0 = yp(i) if(i == nedges) { x1 = xp(1) y1 = yp(1) } else { x1 = xp(i+1) y1 = yp(i+1) } dx = x1 - x0 dy = y1 - y0 do j = 1,npts { xcrit = (x(j) - x0)*(x(j) - x1) if(xcrit <= zero) { if(xcrit == zero) { contrib = half } else { contrib = one } ycrit = y(j)*dx - x(j)*dy + x0*dy - y0*dx if(dx < 0) { if(ycrit >= zero) { score(j) = score(j) + contrib } onbndry(j) = onbndry(j) | (ycrit == zero) } else if(dx > zero) { if(ycrit < zero) { score(j) = score(j) - contrib } onbndry(j) = onbndry(j) | (ycrit == zero) } else { if(x(j) == x0) { ycrit = (y(j) - y0)*(y(j) - y1) } onbndry(j) = onbndry(j) | (ycrit <= zero) } } } } return end spatstat/inst/ratfor/dppll.r0000755000176000001440000000203312237642736015732 0ustar ripleyuserssubroutine dppll(x,y,l1,l2,l3,l4,np,nl,eps,mint,rslt,xmin,jmin) implicit double precision(a-h,o-z) dimension x(np), y(np), rslt(np,nl), xmin(np), jmin(np) double precision l1(nl), l2(nl), l3(nl), l4(nl) one = 1.d0 zero = 0.d0 do j = 1,nl { dx = l3(j) - l1(j) dy = l4(j) - l2(j) alen = sqrt(dx**2 + dy**2) if(alen .gt. eps) { co = dx/alen si = dy/alen } else { co = 0.5 si = 0.5 } do i = 1, np { xpx1 = x(i) - l1(j) ypy1 = y(i) - l2(j) xpx2 = x(i) - l3(j) ypy2 = y(i) - l4(j) d1 = xpx1**2 + ypy1**2 d2 = xpx2**2 + ypy2**2 dd = min(d1,d2) if(alen .gt. eps) { xpr = xpx1*co + ypy1*si if(xpr .lt. zero .or. xpr .gt. alen) { d3 = -one } else { ypr = - xpx1*si + ypy1*co d3 = ypr**2 } } else { d3 = -one } if(d3 .ge. zero) { dd = min(dd,d3) } sd =sqrt(dd) rslt(i,j) = sd if(mint.gt.0) { if(sd .lt. 
xmin(i)) { xmin(i) = sd if(mint.gt.1) { jmin(i) = j } } } } } return end spatstat/tests/0000755000176000001440000000000012237642736013323 5ustar ripleyusersspatstat/tests/badwindow.R0000755000176000001440000007760712237642736015450 0ustar ripleyusers"W" <- structure(list(type = "polygonal", xrange = c(486924, 491950), yrange = as.integer(c(6496537, 6506237)), bdry = list(structure(list( x = as.integer(c(486959, 487223, 487293, 487434, 487504, 487539, 487557, 488875, 488945, 490808, 490737, 490298, 490140, 490298, 490491, 490948, 491036, 491950, 491282, 491282, 491124, 491124, 491563, 491493, 491475, 491686, 491616, 490772, 490526, 489683, 489490, 489578, 489191, 488892, 488910, 488716, 488611, 488031, 487522, 487522, 487486, 487416, 487399, 487346, 487240, 487117, 487117, 487276, 487346, 487399, 487486, 487574, 487557, 487082, 486994, 487205, 487117, 487188, 487012, 486924)), y = as.integer(c(6497047, 6497012, 6497170, 6497187, 6497047, 6496959, 6496889, 6496924, 6496643, 6496643, 6496854, 6497644, 6498541, 6498857, 6497855, 6496854, 6496555, 6496537, 6500298, 6501546, 6501792, 6501985, 6502319, 6502740, 6503355, 6504375, 6505324, 6505675, 6506237, 6506237, 6505605, 6505359, 6505078, 6504023, 6503795, 6503812, 6504568, 6505201, 6505042, 6504919, 6504849, 6504884, 6504814, 6504832, 6504638, 6504515, 6503935, 6504006, 6503971, 6503865, 6503812, 6503777, 6503689, 6503303, 6502266, 6501159, 6500526, 6499437, 6498259, 6497029))), .Names = c("x", "y")), structure(list(x = as.integer(c(487186, 487182, 487186, 487186, 487126, 487126, 487156, 487156, 487186, 487186, 487117, 487126, 487156, 487140, 487156, 487156, 487186, 487181, 487186, 487186, 487216, 487194, 487205, 487187, 487216, 487216, 487186, 487186, 487158, 487186, 487156, 487156, 487147, 487156, 487096, 487096, 487090, 487096, 487066, 487066, 487096, 487096, 487156, 487156, 487066, 487066, 487036, 487036, 487015, 486994, 487064, 487066, 487066, 487066, 487156, 487156, 487126, 487126, 487096, 487096, 487066)), y = 
as.integer(c(6499396, 6499396, 6499426, 6499396, 6500589, 6500476, 6500476, 6500176, 6500176, 6499462, 6500526, 6500589, 6500686, 6500686, 6500805, 6500686, 6500986, 6500986, 6501021, 6500986, 6501076, 6501076, 6501159, 6501256, 6501256, 6501076, 6501406, 6501260, 6501406, 6501406, 6501466, 6501417, 6501466, 6501466, 6501766, 6501732, 6501766, 6501766, 6502936, 6502636, 6502636, 6502606, 6502606, 6502486, 6502486, 6502456, 6502456, 6502156, 6502156, 6502266, 6503086, 6503086, 6503112, 6503116, 6503116, 6503026, 6503026, 6502996, 6502996, 6502936, 6502936))), .Names = c("x", "y")), structure(list( x = as.integer(c(488956, 488956, 488926, 488926, 488896, 488896, 488866, 488866, 488836, 488836, 488806, 488806, 488776, 488776, 488686, 488686, 488716, 488716, 488626, 488626, 488596, 488596, 488566, 488566, 488536, 488536, 488416, 488416, 488446, 488446, 488416, 488416, 488446, 488446, 488476, 488476, 488506, 488506, 488536, 488536, 488566, 488566, 488536, 488536, 488566, 488566, 488536, 488536, 488566, 488566, 488596, 488596, 488566, 488566, 488596, 488596, 488626, 488626, 488656, 488656, 488686, 488686, 488716, 488716, 488776, 488776, 488806, 488806, 488836, 488836, 488866, 488866, 488926, 488926, 488986, 488986, 489046, 489046, 489136, 489136, 489106, 489106, 489076, 489076, 489046, 489046, 489016, 489016, 488986, 488986)), y = as.integer(c(6501496, 6501256, 6501256, 6501046, 6501046, 6500806, 6500806, 6500506, 6500506, 6500236, 6500236, 6499996, 6499996, 6499486, 6499486, 6499126, 6499126, 6499006, 6499006, 6499036, 6499036, 6499066, 6499066, 6499126, 6499126, 6499216, 6499216, 6499456, 6499456, 6499696, 6499696, 6499936, 6499936, 6500056, 6500056, 6500146, 6500146, 6500266, 6500266, 6500386, 6500386, 6500656, 6500656, 6500986, 6500986, 6501136, 6501136, 6501376, 6501376, 6501406, 6501406, 6501496, 6501496, 6501616, 6501616, 6501796, 6501796, 6502036, 6502036, 6502096, 6502096, 6502246, 6502246, 6502276, 6502276, 6502336, 6502336, 6502426, 6502426, 6502636, 6502636, 
6502666, 6502666, 6502696, 6502696, 6502726, 6502726, 6502756, 6502756, 6502096, 6502096, 6501976, 6501976, 6501826, 6501826, 6501736, 6501736, 6501586, 6501586, 6501496))), .Names = c("x", "y")), structure(list(x = as.integer(c(490216, 490216, 490246, 490246, 490306, 490306, 490216, 490216, 490186, 490186, 490036, 490036, 489946, 489946, 489916, 489916, 489826, 489826, 489796, 489796, 489706, 489706, 489736, 489736, 489766, 489766, 489796, 489796, 489946, 489946, 489976, 489976, 490006, 490006, 490036, 490036)), y = as.integer(c(6502426, 6502246, 6502246, 6502186, 6502186, 6501946, 6501946, 6502096, 6502096, 6502156, 6502156, 6502126, 6502126, 6502096, 6502096, 6502066, 6502066, 6502036, 6502036, 6501946, 6501946, 6502036, 6502036, 6502186, 6502186, 6502216, 6502216, 6502276, 6502276, 6502306, 6502306, 6502336, 6502336, 6502366, 6502366, 6502426))), .Names = c("x", "y")), structure(list(x = as.integer(c(488642, 488716, 488910, 488892, 488926, 488926, 488956, 488956, 488986, 488986, 489016, 489016, 489046, 489046, 489076, 489076, 489106, 489106, 489136, 489136, 489166, 489166, 489046, 489046, 488926, 488926, 488746, 488746, 488536, 488536, 488506, 488506, 488416, 488416, 488326, 488326, 488356, 488356, 488416, 488416, 488446, 488446, 488626, 488626, 488656, 488656, 488686, 488686, 488656, 488656, 488626, 488626, 488596, 488596, 488566, 488566, 488536, 488536, 488506, 488506, 488476, 488476)), y = as.integer(c(6504346, 6503812, 6503795, 6504023, 6504143, 6503806, 6503806, 6503686, 6503686, 6503566, 6503566, 6503476, 6503476, 6503386, 6503386, 6503296, 6503296, 6503206, 6503206, 6503086, 6503086, 6502846, 6502846, 6503086, 6503086, 6503236, 6503236, 6503266, 6503266, 6503296, 6503296, 6503326, 6503326, 6503386, 6503386, 6503506, 6503506, 6503536, 6503536, 6503566, 6503566, 6503656, 6503656, 6503746, 6503746, 6503776, 6503776, 6503956, 6503956, 6503986, 6503986, 6504046, 6504046, 6504076, 6504076, 6504106, 6504106, 6504166, 6504166, 6504226, 6504226, 6504346))), 
.Names = c("x", "y")), structure(list(x = as.integer(c(489886, 489886, 489916, 489916, 489736, 489736, 489706, 489706, 489676, 489676, 489796, 489796, 489916, 489916)), y = as.integer(c(6503386, 6503146, 6503146, 6503056, 6503056, 6503206, 6503206, 6503266, 6503266, 6503356, 6503356, 6503596, 6503596, 6503386))), .Names = c("x", "y")), structure(list(x = as.integer(c(490006, 489916, 489916, 490006)), y = as.integer(c(6505666, 6505666, 6505756, 6505756))), .Names = c("x", "y")), structure(list( x = c(487426.0001, 487396.0001, 487276.0001), y = as.integer(c(6504856, 6504796, 6504676))), .Names = c("x", "y")), structure(list( x = as.integer(c(490786, 490786, 491176, 491176, 491236, 491236, 491266, 491266, 491236, 491236, 491206, 491206, 491176, 491176, 491086, 491086, 490996, 490996, 490966, 490966, 490936, 490936, 490876, 490876, 490846, 490846, 490726, 490726, 490696, 490696)), y = as.integer(c(6505366, 6505336, 6505336, 6505276, 6505276, 6505126, 6505126, 6504976, 6504976, 6504916, 6504916, 6504886, 6504886, 6504856, 6504856, 6504886, 6504886, 6504916, 6504916, 6504946, 6504946, 6505006, 6505006, 6505186, 6505186, 6505246, 6505246, 6505276, 6505276, 6505366))), .Names = c("x", "y")), structure(list(x = c(487905.9999, 487905.9999, 487935.9999, 487935.9999, 487965.9999, 487965.9999, 487905.9999, 487905.9999, 487845.9999, 487845.9999, 487755.9999, 487755.9999, 487605.9999, 487605.9999, 487635.9999, 487635.9999, 487665.9999, 487665.9999, 487725.9999, 487725.9999, 487755.9999, 487755.9999, 487785.9999, 487785.9999, 487815.9999, 487815.9999), y = as.integer(c(6505066, 6505036, 6505036, 6505006, 6505006, 6504616, 6504616, 6504586, 6504586, 6504556, 6504556, 6504526, 6504526, 6504646, 6504646, 6504766, 6504766, 6504886, 6504886, 6504976, 6504976, 6505006, 6505006, 6505036, 6505036, 6505066))), .Names = c("x", "y")), structure(list(x = as.integer(c(491416, 491326, 491326, 491416)), y = as.integer(c(6504856, 6504856, 6505006, 6505006))), .Names = c("x", "y")), 
structure(list( x = as.integer(c(491386, 491266, 491266, 491386)), y = as.integer(c(6504736, 6504736, 6504826, 6504826 ))), .Names = c("x", "y")), structure(list(x = c(487456.0001, 487456.0001, 487366.0001, 487366.0001, 487306.0001, 487306.0001, 487336.0001, 487336.0001, 487396.0001, 487396.0001, 487486.0001, 487486.0001), y = as.integer(c(6504586, 6504436, 6504436, 6504466, 6504466, 6504556, 6504556, 6504586, 6504586, 6504676, 6504676, 6504586))), .Names = c("x", "y")), structure(list(x = as.integer(c(489226, 489226, 489256, 489256, 489286, 489286, 489106, 489106, 489136, 489136 )), y = as.integer(c(6504646, 6504616, 6504616, 6504556, 6504556, 6504466, 6504466, 6504586, 6504586, 6504646))), .Names = c("x", "y")), structure(list(x = c(488295.9999, 488295.9999, 488205.9999, 488205.9999, 488175.9999, 488175.9999, 488205.9999, 488205.9999, 488235.9999, 488235.9999, 488325.9999, 488325.9999 ), y = as.integer(c(6504406, 6504316, 6504316, 6504376, 6504376, 6504466, 6504466, 6504496, 6504496, 6504526, 6504526, 6504406))), .Names = c("x", "y")), structure(list( x = as.integer(c(490666, 490666, 490696, 490696, 490756, 490756, 490786, 490786, 490696, 490696, 490576, 490576, 490546, 490546)), y = as.integer(c(6504466, 6504376, 6504376, 6504316, 6504316, 6504256, 6504256, 6504166, 6504166, 6504226, 6504226, 6504286, 6504286, 6504466))), .Names = c("x", "y")), structure(list( x = as.integer(c(489346, 489346, 489406, 489406, 489526, 489526, 489496, 489496, 489586, 489586, 489646, 489646, 489706, 489706, 489676, 489676, 489586, 489586, 489496, 489496, 489346, 489346, 489346, 489376, 489376, 489346, 489346, 489256, 489256, 489226, 489226, 489196, 489196, 489076, 489076, 489046, 489046, 489016, 489016, 488986, 488986, 489106, 489106, 489076, 489076, 489196, 489196, 489226, 489226, 489256, 489256)), y = as.integer(c(6503986, 6504076, 6504076, 6504166, 6504166, 6504256, 6504256, 6504346, 6504346, 6504256, 6504256, 6504196, 6504196, 6504016, 6504016, 6503896, 6503896, 6503956, 
6503956, 6503986, 6503986, 6503986, 6503836, 6503836, 6503746, 6503746, 6503566, 6503566, 6503506, 6503506, 6503416, 6503416, 6503386, 6503386, 6503446, 6503446, 6503566, 6503566, 6503626, 6503626, 6503836, 6503836, 6504106, 6504106, 6504226, 6504226, 6504196, 6504196, 6504076, 6504076, 6503986))), .Names = c("x", "y")), structure(list( x = c(487935.9999, 487935.9999, 487965.9999, 487965.9999, 487845.9999, 487845.9999, 487815.9999, 487815.9999, 487845.9999, 487845.9999), y = as.integer(c(6504166, 6504136, 6504136, 6504016, 6504016, 6504046, 6504046, 6504136, 6504136, 6504166))), .Names = c("x", "y" )), structure(list(x = c(488595.9999, 488595.9999, 488625.9999, 488625.9999, 488595.9999, 488595.9999, 488505.9999, 488505.9999, 488475.9999, 488475.9999, 488505.9999, 488505.9999), y = as.integer(c(6504046, 6503986, 6503986, 6503896, 6503896, 6503716, 6503716, 6503806, 6503806, 6503986, 6503986, 6504046))), .Names = c("x", "y")), structure(list( x = c(487396.0001, 487486.0001, 487516.0001, 487126.0001, 487216.0001), y = as.integer(c(6503896, 6503836, 6503806, 6503956, 6503986))), .Names = c("x", "y" )), structure(list(x = c(488295.9999, 488295.9999, 488325.9999, 488325.9999, 488355.9999, 488355.9999, 488235.9999, 488235.9999, 488205.9999, 488205.9999, 488145.9999, 488145.9999, 488175.9999, 488175.9999), y = as.integer(c(6503806, 6503776, 6503776, 6503746, 6503746, 6503626, 6503626, 6503656, 6503656, 6503686, 6503686, 6503776, 6503776, 6503806))), .Names = c("x", "y")), structure(list(x = as.integer(c(491146, 491146, 491176, 491176, 491146, 491146, 491026, 491026, 491056, 491056)), y = as.integer(c(6503686, 6503626, 6503626, 6503536, 6503536, 6503476, 6503476, 6503656, 6503656, 6503686))), .Names = c("x", "y")), structure(list(x = c(487815.9999, 487815.9999, 487845.9999, 487845.9999, 487935.9999, 487935.9999, 487965.9999, 487965.9999, 488025.9999, 488025.9999, 488085.9999, 488085.9999, 487935.9999, 487935.9999, 487845.9999, 487845.9999, 487815.9999, 487815.9999, 
487785.9999, 487785.9999, 487695.9999, 487695.9999, 487605.9999, 487605.9999), y = as.integer(c(6503506, 6503476, 6503476, 6503386, 6503386, 6503356, 6503356, 6503296, 6503296, 6503236, 6503236, 6503116, 6503116, 6503146, 6503146, 6503176, 6503176, 6503206, 6503206, 6503386, 6503386, 6503356, 6503356, 6503506))), .Names = c("x", "y")), structure(list(x = as.integer(c(490036, 490036, 490096, 490096, 490066, 490066, 490096, 490096, 489976, 489976, 489946, 489946, 489916, 489916, 489946, 489946 )), y = as.integer(c(6503506, 6503386, 6503386, 6503266, 6503266, 6503176, 6503176, 6503026, 6503026, 6503086, 6503086, 6503146, 6503146, 6503386, 6503386, 6503506))), .Names = c("x", "y")), structure(list(x = as.integer(c(489496, 489406, 489406, 489496)), y = as.integer(c(6503356, 6503356, 6503446, 6503446))), .Names = c("x", "y")), structure(list( x = c(488385.9999, 488385.9999, 488415.9999, 488415.9999, 488325.9999, 488325.9999, 488295.9999, 488295.9999 ), y = as.integer(c(6503356, 6503326, 6503326, 6503236, 6503236, 6503266, 6503266, 6503356))), .Names = c("x", "y")), structure(list(x = as.integer(c(490726, 490636, 490636, 490726)), y = as.integer(c(6503206, 6503206, 6503326, 6503326))), .Names = c("x", "y")), structure(list( x = as.integer(c(489496, 489406, 489406, 489526, 489526, 489496)), y = as.integer(c(6503056, 6503056, 6503176, 6503176, 6503086, 6503086))), .Names = c("x", "y")), structure(list(x = as.integer(c(490726, 490726, 490756, 490756, 490666, 490666, 490636, 490636)), y = as.integer(c(6503086, 6502996, 6502996, 6502876, 6502876, 6502936, 6502936, 6503086))), .Names = c("x", "y")), structure(list(x = as.integer(c(491176, 491086, 491086, 491176)), y = as.integer(c(6502996, 6502996, 6503086, 6503086))), .Names = c("x", "y")), structure(list( x = c(487785.9999, 487785.9999, 488115.9999, 488115.9999, 488265.9999, 488265.9999, 488625.9999, 488625.9999, 488806, 488806, 488836, 488836, 488806, 488806, 488776, 488776, 488686, 488686, 488655.9999, 488655.9999, 
488505.9999, 488505.9999, 488475.9999, 488475.9999, 488415.9999, 488415.9999, 488355.9999, 488355.9999, 488295.9999, 488295.9999, 488325.9999, 488325.9999, 488415.9999, 488415.9999, 488445.9999, 488445.9999, 488475.9999, 488475.9999, 488505.9999, 488505.9999, 488535.9999, 488535.9999, 488565.9999, 488565.9999, 488595.9999, 488595.9999, 488655.9999, 488655.9999, 488806, 488806, 488776, 488776, 488746, 488746, 488686, 488686, 488625.9999, 488625.9999, 488535.9999, 488535.9999, 488505.9999, 488505.9999, 488475.9999, 488475.9999, 488415.9999, 488415.9999, 488385.9999, 488385.9999, 488325.9999, 488325.9999, 488295.9999, 488295.9999, 488265.9999, 488265.9999, 488205.9999, 488205.9999, 488175.9999, 488175.9999, 488085.9999, 488085.9999, 487995.9999, 487995.9999, 487875.9999, 487875.9999, 487785.9999, 487785.9999, 487755.9999, 487755.9999, 487725.9999, 487725.9999, 487755.9999, 487755.9999, 487815.9999, 487815.9999, 487785.9999, 487785.9999, 487665.9999, 487665.9999, 487605.9999, 487605.9999, 487575.9999, 487575.9999, 487545.9999, 487545.9999, 487516.0001, 487516.0001, 487456.0001, 487456.0001, 487486.0001, 487486.0001, 487545.9999, 487545.9999, 487516.0001, 487516.0001, 487545.9999, 487545.9999, 487605.9999, 487605.9999, 487635.9999, 487635.9999, 487665.9999, 487665.9999), y = as.integer(c(6503056, 6503026, 6503026, 6502996, 6502996, 6502936, 6502936, 6502906, 6502906, 6502876, 6502876, 6502786, 6502786, 6502636, 6502636, 6502606, 6502606, 6502576, 6502576, 6502546, 6502546, 6502516, 6502516, 6502486, 6502486, 6502456, 6502456, 6502396, 6502396, 6502306, 6502306, 6502216, 6502216, 6502246, 6502246, 6502276, 6502276, 6502306, 6502306, 6502336, 6502336, 6502366, 6502366, 6502426, 6502426, 6502456, 6502456, 6502486, 6502486, 6502396, 6502396, 6502366, 6502366, 6502306, 6502306, 6502246, 6502246, 6502186, 6502186, 6502156, 6502156, 6502126, 6502126, 6502006, 6502006, 6501976, 6501976, 6501946, 6501946, 6501886, 6501886, 6501856, 6501856, 6501706, 6501706, 6501676, 6501676, 
6501646, 6501646, 6501616, 6501616, 6501586, 6501586, 6501556, 6501556, 6501646, 6501646, 6501766, 6501766, 6501856, 6501856, 6501946, 6501946, 6502066, 6502066, 6502096, 6502096, 6502186, 6502186, 6502246, 6502246, 6502276, 6502276, 6502306, 6502306, 6502426, 6502426, 6502636, 6502636, 6502696, 6502696, 6502786, 6502786, 6502906, 6502906, 6502966, 6502966, 6502996, 6502996, 6503026, 6503026, 6503056))), .Names = c("x", "y")), structure(list( x = as.integer(c(489466, 489466, 489496, 489496, 489526, 489526, 489586, 489586, 489616, 489616, 489586, 489586, 489616, 489616, 489586, 489586, 489556, 489556, 489586, 489586, 489556, 489556, 489436, 489436, 489406, 489406, 489316, 489316, 489196, 489196, 489106, 489106, 489166, 489166, 489226, 489226, 489166, 489166, 489196, 489196, 489226, 489226, 489256, 489256, 489286, 489286, 489316, 489316, 489376, 489376)), y = as.integer(c(6502816, 6502786, 6502786, 6502756, 6502756, 6502726, 6502726, 6502696, 6502696, 6502486, 6502486, 6502366, 6502366, 6502156, 6502156, 6502096, 6502096, 6501976, 6501976, 6501796, 6501796, 6501766, 6501766, 6501646, 6501646, 6501616, 6501616, 6501526, 6501526, 6501586, 6501586, 6501856, 6501856, 6502096, 6502096, 6502246, 6502246, 6502426, 6502426, 6502486, 6502486, 6502576, 6502576, 6502606, 6502606, 6502726, 6502726, 6502786, 6502786, 6502816))), .Names = c("x", "y")), structure(list( x = c(487276.0001, 487276.0001, 487306.0001, 487306.0001, 487216.0001, 487216.0001, 487126.0001, 487126.0001, 487156.0001, 487156.0001, 487186.0001, 487186.0001 ), y = as.integer(c(6502336, 6502306, 6502306, 6502216, 6502216, 6502096, 6502096, 6502246, 6502246, 6502306, 6502306, 6502336))), .Names = c("x", "y")), structure(list( x = as.integer(c(490126, 490036, 490036, 490186, 490186, 490126)), y = as.integer(c(6501856, 6501856, 6501976, 6501976, 6501886, 6501886))), .Names = c("x", "y")), structure(list(x = as.integer(c(490756, 490666, 490666, 490756)), y = as.integer(c(6501406, 6501406, 6501496, 6501496))), .Names 
= c("x", "y")), structure(list( x = c(488115.9999, 488115.9999, 488145.9999, 488145.9999, 488115.9999, 488115.9999, 488055.9999, 488055.9999, 488085.9999, 488085.9999, 488115.9999, 488115.9999, 488145.9999, 488145.9999, 488235.9999, 488235.9999, 488295.9999, 488295.9999, 488385.9999, 488385.9999, 488355.9999, 488355.9999, 488325.9999, 488325.9999, 488355.9999, 488355.9999, 488235.9999, 488235.9999, 488145.9999, 488145.9999, 488205.9999, 488205.9999, 488115.9999, 488115.9999, 488085.9999, 488085.9999, 487995.9999, 487995.9999, 488025.9999, 488025.9999, 488055.9999, 488055.9999, 488115.9999, 488115.9999, 488145.9999, 488145.9999, 488175.9999, 488175.9999, 488055.9999, 488055.9999, 487965.9999, 487965.9999, 487875.9999, 487875.9999, 487845.9999, 487845.9999, 487815.9999, 487815.9999, 487785.9999, 487785.9999, 487845.9999, 487845.9999, 487725.9999, 487725.9999, 487635.9999, 487635.9999, 487665.9999, 487665.9999, 487635.9999, 487635.9999, 487755.9999, 487755.9999, 487785.9999, 487785.9999, 487815.9999, 487815.9999, 487845.9999, 487845.9999, 487815.9999, 487815.9999, 487845.9999, 487845.9999, 487905.9999, 487905.9999, 487995.9999, 487995.9999, 488025.9999, 488025.9999 ), y = as.integer(c(6501346, 6501316, 6501316, 6501076, 6501076, 6501016, 6501016, 6500866, 6500866, 6500836, 6500836, 6500746, 6500746, 6500716, 6500716, 6500776, 6500776, 6500926, 6500926, 6500776, 6500776, 6500656, 6500656, 6500566, 6500566, 6500476, 6500476, 6500506, 6500506, 6500416, 6500416, 6500326, 6500326, 6500296, 6500296, 6500206, 6500206, 6500116, 6500116, 6500026, 6500026, 6499846, 6499846, 6499786, 6499786, 6499696, 6499696, 6499606, 6499606, 6499636, 6499636, 6499606, 6499606, 6499636, 6499636, 6499726, 6499726, 6499786, 6499786, 6499936, 6499936, 6500026, 6500026, 6499996, 6499996, 6500086, 6500086, 6500356, 6500356, 6500446, 6500446, 6500566, 6500566, 6500656, 6500656, 6500746, 6500746, 6500896, 6500896, 6501076, 6501076, 6501166, 6501166, 6501286, 6501286, 6501316, 6501316, 6501346 ))), 
.Names = c("x", "y")), structure(list(x = as.integer(c(489226, 489136, 489136, 489226)), y = as.integer(c(6501046, 6501046, 6501196, 6501196))), .Names = c("x", "y")), structure(list( x = as.integer(c(490666, 490576, 490576, 490636, 490636, 490726, 490726, 490696, 490696, 490666)), y = as.integer(c(6500896, 6500896, 6501106, 6501106, 6501196, 6501196, 6501046, 6501046, 6501016, 6501016 ))), .Names = c("x", "y")), structure(list(x = as.integer(c(489646, 489646, 489676, 489676, 489556, 489556)), y = as.integer(c(6500926, 6500836, 6500836, 6500716, 6500716, 6500926))), .Names = c("x", "y")), structure(list(x = as.integer(c(488986, 488986, 489046, 489046, 489106, 489106, 489016, 489016, 488986, 488986, 488896, 488896)), y = as.integer(c(6500836, 6500776, 6500776, 6500626, 6500626, 6500446, 6500446, 6500416, 6500416, 6500356, 6500356, 6500836))), .Names = c("x", "y")), structure(list(x = c(488355.9999, 488355.9999, 488385.9999, 488385.9999, 488265.9999, 488265.9999, 488205.9999, 488205.9999, 488175.9999, 488175.9999, 488205.9999, 488205.9999 ), y = as.integer(c(6500296, 6500176, 6500176, 6500026, 6500026, 6500056, 6500056, 6500116, 6500116, 6500236, 6500236, 6500296))), .Names = c("x", "y")), structure(list( x = as.integer(c(489226, 489136, 489136, 489226)), y = as.integer(c(6500146, 6500146, 6500236, 6500236 ))), .Names = c("x", "y")), structure(list(x = as.integer(c(489226, 489046, 489046, 489106, 489106, 489136, 489136, 489226 )), y = as.integer(c(6499756, 6499756, 6499846, 6499846, 6499876, 6499876, 6499936, 6499936))), .Names = c("x", "y")), structure(list(x = c(487486.0001, 487396.0001, 487396.0001, 487486.0001), y = as.integer(c(6499666, 6499666, 6499756, 6499756))), .Names = c("x", "y")), structure(list(x = c(488385.9999, 488385.9999, 488415.9999, 488415.9999, 488385.9999, 488385.9999, 488295.9999, 488295.9999, 488265.9999, 488265.9999), y = as.integer(c(6499666, 6499636, 6499636, 6499546, 6499546, 6499486, 6499486, 6499576, 6499576, 6499666))), .Names = c("x", 
"y")), structure(list(x = c(487935.9999, 487935.9999, 487905.9999, 487905.9999, 487875.9999, 487875.9999, 487815.9999, 487815.9999, 487785.9999, 487785.9999, 487635.9999, 487635.9999, 487605.9999, 487605.9999, 487575.9999, 487575.9999, 487695.9999, 487695.9999, 487605.9999, 487605.9999, 487785.9999, 487785.9999, 487845.9999, 487845.9999), y = as.integer(c(6499546, 6499186, 6499186, 6499156, 6499156, 6499126, 6499126, 6499066, 6499066, 6498886, 6498886, 6499066, 6499066, 6499186, 6499186, 6499306, 6499306, 6499396, 6499396, 6499486, 6499486, 6499516, 6499516, 6499546))), .Names = c("x", "y")), structure(list(x = as.integer(c(489286, 489166, 489166, 489286)), y = as.integer(c(6499396, 6499396, 6499486, 6499486))), .Names = c("x", "y")), structure(list(x = c(488295.9999, 488295.9999, 488445.9999, 488445.9999, 488505.9999, 488505.9999, 488445.9999, 488445.9999, 488385.9999, 488385.9999, 488355.9999, 488355.9999, 488115.9999, 488115.9999, 488055.9999, 488055.9999, 488025.9999, 488025.9999, 487875.9999, 487875.9999, 487995.9999, 487995.9999, 488025.9999, 488025.9999, 488115.9999, 488115.9999, 488145.9999, 488145.9999, 488175.9999, 488175.9999, 488235.9999, 488235.9999, 488265.9999, 488265.9999, 488385.9999, 488385.9999, 488355.9999, 488355.9999, 488325.9999, 488325.9999), y = as.integer(c(6499036, 6498886, 6498886, 6498796, 6498796, 6498706, 6498706, 6498676, 6498676, 6498646, 6498646, 6498616, 6498616, 6498586, 6498586, 6498556, 6498556, 6498526, 6498526, 6498646, 6498646, 6498676, 6498676, 6498706, 6498706, 6498976, 6498976, 6499006, 6499006, 6499096, 6499096, 6499306, 6499306, 6499396, 6499396, 6499306, 6499306, 6499096, 6499096, 6499036 ))), .Names = c("x", "y")), structure(list(x = as.integer(c(489886, 489766, 489766, 489886)), y = as.integer(c(6499276, 6499276, 6499396, 6499396))), .Names = c("x", "y")), structure(list( x = as.integer(c(490156, 490156, 490186, 490186, 490156, 490096, 490096, 489976, 489976, 490066, 490066, 489766, 489766, 489736, 489736, 489766, 
489766, 489976, 489976, 490066, 490066)), y = as.integer(c(6499066, 6499006, 6499006, 6498766, 6498766, 6498556, 6498526, 6498526, 6498706, 6498706, 6498826, 6498826, 6498916, 6498916, 6499006, 6499006, 6499066, 6499066, 6499036, 6499036, 6499066))), .Names = c("x", "y")), structure(list( x = c(487755.9999, 487755.9999, 487665.9999, 487665.9999, 487635.9999, 487635.9999, 487516.0001, 487516.0001, 487486.0001, 487486.0001, 487396.0001, 487396.0001, 487336.0001, 487336.0001, 487575.9999, 487575.9999, 487815.9999, 487815.9999), y = as.integer(c(6498466, 6498256, 6498256, 6498226, 6498226, 6498196, 6498196, 6498226, 6498226, 6498376, 6498376, 6498406, 6498406, 6498526, 6498526, 6498556, 6498556, 6498466))), .Names = c("x", "y")), structure(list(x = as.integer(c(489316, 489226, 489226, 489316)), y = as.integer(c(6498106, 6498106, 6498226, 6498226))), .Names = c("x", "y")), structure(list( x = as.integer(c(490066, 489976, 489976, 490066)), y = as.integer(c(6497836, 6497836, 6497956, 6497956 ))), .Names = c("x", "y")), structure(list(x = as.integer(c(489436, 489346, 489346, 489466, 489466, 489436)), y = as.integer(c(6497536, 6497536, 6497926, 6497926, 6497596, 6497596))), .Names = c("x", "y")), structure(list(x = as.integer(c(490726, 490726, 490756, 490756, 490816, 490816, 490786, 490786, 490696, 490696, 490666, 490666, 490636, 490636, 490606, 490606, 490576, 490576)), y = as.integer(c(6497926, 6497656, 6497656, 6497596, 6497596, 6497506, 6497506, 6497476, 6497476, 6497536, 6497536, 6497656, 6497656, 6497746, 6497746, 6497776, 6497776, 6497926))), .Names = c("x", "y")), structure(list(x = as.integer(c(490156, 490156, 490186, 490186, 490216, 490216, 490336, 490336, 490306, 490306, 490246, 490246, 490096, 490096, 490066, 490066, 490036, 490036, 490066, 490066)), y = as.integer(c(6497746, 6497716, 6497716, 6497656, 6497656, 6497566, 6497566, 6497476, 6497476, 6497326, 6497326, 6497296, 6497296, 6497356, 6497356, 6497596, 6497596, 6497716, 6497716, 6497746))), .Names = c("x", 
"y")), structure(list(x = c(488025.9999, 487935.9999, 487935.9999, 488025.9999), y = as.integer(c(6497536, 6497536, 6497626, 6497626))), .Names = c("x", "y")), structure(list(x = as.integer(c(489466, 489346, 489346, 489376, 489376, 489526, 489526, 489466)), y = as.integer(c(6497206, 6497206, 6497446, 6497446, 6497506, 6497506, 6497296, 6497296))), .Names = c("x", "y")), structure(list(x = as.integer(c(490876, 490786, 490786, 490876)), y = as.integer(c(6497266, 6497266, 6497356, 6497356))), .Names = c("x", "y")), structure(list( x = as.integer(c(490936, 490936, 490996, 490996, 491026, 491026, 491086, 491086, 491206, 491206, 491116, 491116, 491086, 491086, 491056, 491056, 490996, 490996, 490936, 490936, 490906, 490906, 490876, 490876, 490846, 490846)), y = as.integer(c(6497236, 6497206, 6497206, 6497176, 6497176, 6496996, 6496996, 6496936, 6496936, 6496696, 6496696, 6496726, 6496726, 6496846, 6496846, 6496906, 6496906, 6496966, 6496966, 6497026, 6497026, 6497116, 6497116, 6497146, 6497146, 6497236))), .Names = c("x", "y")), structure(list(x = as.integer(c(490366, 490276, 490276, 490306, 490306, 490396, 490396, 490516, 490516, 490456, 490456, 490396, 490396, 490366)), y = as.integer(c(6496906, 6496906, 6497026, 6497026, 6497176, 6497176, 6497206, 6497206, 6497116, 6497116, 6497056, 6497056, 6497026, 6497026))), .Names = c("x", "y")), structure(list(x = c(487456.0001, 487486.0001, 487486.0001, 487545.9999, 487545.9999, 487216.0001, 487216.0001, 487126.0001, 487126.0001), y = as.integer(c(6497146, 6497116, 6497086, 6497086, 6496936, 6497026, 6497086, 6497086, 6497176))), .Names = c("x", "y")), structure(list( x = as.integer(c(489586, 489376, 489376, 489586)), y = as.integer(c(6496936, 6496936, 6497026, 6497026 ))), .Names = c("x", "y")))), .Names = c("type", "xrange", "yrange", "bdry"), class = "owin") spatstat/tests/alltests.R0000644000176000001440000014727512252030016015274 0ustar ripleyusers# nndist.R # Check that nndist and nnwhich give # results consistent with 
direct calculation from pairdist # Similarly for nncross and distfun require(spatstat) local({ eps <- sqrt(.Machine$double.eps) f <- function(mat,k) { apply(mat, 1, function(z,n) { sort(z)[n] }, n=k+1) } g <- function(mat,k) { apply(mat, 1, function(z,n) { order(z)[n] }, n=k+1) } # Two dimensions X <- runifpoint(42) nn <- nndist(X) nnP <- f(pairdist(X), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.ppp does not agree with pairdist") nn5 <- nndist(X, k=5) nn5P <- f(pairdist(X), 5) if(any(abs(nn5 - nn5P) > eps)) stop("nndist.ppp(k=5) does not agree with pairdist") nw <- nnwhich(X) nwP <- g(pairdist(X), 1) if(any(nw != nwP)) stop("nnwhich.ppp does not agree with pairdist") nw5 <- nnwhich(X, k=5) nw5P <- g(pairdist(X), 5) if(any(nw5 != nw5P)) stop("nnwhich.ppp(k=5) does not agree with pairdist") # Three dimensions X <- runifpoint3(42) nn <- nndist(X) nnP <- f(pairdist(X), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.pp3 does not agree with pairdist") nn5 <- nndist(X, k=5) nn5P <- f(pairdist(X), 5) if(any(abs(nn5 - nn5P) > eps)) stop("nndist.pp3(k=5) does not agree with pairdist") nw <- nnwhich(X) nwP <- g(pairdist(X), 1) if(any(nw != nwP)) stop("nnwhich.pp3 does not agree with pairdist") nw5 <- nnwhich(X, k=5) nw5P <- g(pairdist(X), 5) if(any(nw5 != nw5P)) stop("nnwhich.pp3(k=5) does not agree with pairdist") # m dimensions X <- runifpointx(42, boxx(c(0,1),c(0,1),c(0,1),c(0,1))) nn <- nndist(X) nnP <- f(pairdist(X), 1) if(any(abs(nn - nnP) > eps)) stop("nndist.ppx does not agree with pairdist") nn5 <- nndist(X, k=5) nn5P <- f(pairdist(X), 5) if(any(abs(nn5 - nn5P) > eps)) stop("nndist.ppx(k=5) does not agree with pairdist") nw <- nnwhich(X) nwP <- g(pairdist(X), 1) if(any(nw != nwP)) stop("nnwhich.ppx does not agree with pairdist") nw5 <- nnwhich(X, k=5) nw5P <- g(pairdist(X), 5) if(any(nw5 != nw5P)) stop("nnwhich.ppx(k=5) does not agree with pairdist") #### nncross in two dimensions X <- runifpoint(42) Y <- runifpoint(42, win=owin(c(1,2),c(1,2))) # default nncross nc 
<- nncross(X,Y) ncd <- nc$dist ncw <- nc$which cd <- crossdist(X,Y) cdd <- apply(cd, 1, min) cdw <- apply(cd, 1, which.min) if(any(abs(ncd - cdd) > eps)) stop("nncross()$dist does not agree with apply(crossdist(), 1, min)") if(any(ncw != cdw)) stop("nncross()$which does not agree with apply(crossdist(), 1, which.min)") # sort on x nc <- nncross(X,Y, sortby="x") ncd <- nc$dist ncw <- nc$which if(any(abs(ncd - cdd) > eps)) stop("nncross(sortby=x)$dist does not agree with apply(crossdist(), 1, min)") if(any(ncw != cdw)) stop("nncross(sortby=x)$which does not agree with apply(crossdist(), 1, which.min)") # pre-sorted on x Y <- Y[order(Y$x)] nc <- nncross(X,Y, is.sorted.Y=TRUE, sortby="x") ncd <- nc$dist ncw <- nc$which cd <- crossdist(X,Y) cdd <- apply(cd, 1, min) cdw <- apply(cd, 1, which.min) if(any(abs(ncd - cdd) > eps)) stop("For sorted data, nncross()$dist does not agree with apply(crossdist(), 1, min)") if(any(ncw != cdw)) stop("For sorted data, nncross()$which does not agree with apply(crossdist(), 1, which.min)") # sanity check for nncross with k > 1 ndw <- nncross(X, Y, k=1:4)$which if(any(is.na(ndw))) stop("NA's returned by nncross.ppp(k > 1)$which") ndw <- nncross(X, Y, k=1:4, what="which") if(any(is.na(ndw))) stop("NA's returned by nncross.ppp(k > 1, what='which')") # test of correctness for nncross with k > 1 flipcells <- flipxy(cells) calcwhich <- nncross(cells, flipcells, k=1:4, what="which") truewhich <- t(apply(crossdist(cells,flipcells), 1, order))[,1:4] if(any(calcwhich != truewhich)) stop("nncross(k > 1) gives wrong answer") # test of agreement between nngrid.h and knngrid.h # dimyx=23 (found by trial-and-error) ensures that there are no ties a <- as.matrix(nnmap(cells, what="which", dimyx=23)) b <- as.matrix(nnmap(cells, what="which", dimyx=23, k=1:2)[[1]]) if(any(a != b)) stop("algorithms in nngrid.h and knngrid.h disagree") }) require(spatstat) local({ Y <- split(urkiola) B <- Y$birch O <- Y$oak B.lam <- predict (ppm(B, ~polynom(x,y,2)), 
type="trend") O.lam <- predict (ppm(O, ~polynom(x,y,2)), type="trend") Kinhom(B, lambda=B.lam, correction="iso") Kinhom(B, lambda=B.lam, correction="border") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam) Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "iso") Kcross.inhom(urkiola, i="birch", j="oak", B.lam, O.lam, correction = "border") }) require(spatstat) local({ # critical R values that provoke GCC bug #323 a <- marktable(lansing, R=0.25) a <- marktable(lansing, R=0.21) a <- marktable(lansing, R=0.20) a <- marktable(lansing, R=0.10) }) # tests/NAinCov.R # Testing the response to the presence of NA's in covariates require(spatstat) local({ X <- runifpoint(42) Y <- as.im(function(x,y) { x+y }, owin()) Y[owin(c(0.2,0.4),c(0.2,0.4))] <- NA # fit model: should produce a warning but no failure misfit <- ppm(X, ~Y, covariates=list(Y=Y)) # prediction Z <- predict(misfit, type="trend") Z <- predict(misfit, type="se") # covariance matrix: all should be silent v <- vcov(misfit) ss <- vcov(misfit, what="internals") NULL }) # tests for agreement between C and interpreted code # for interpoint distances require(spatstat) local({ eps <- .Machine$double.eps * 4 # pairdist.ppp X <- rpoispp(42) dC <- pairdist(X, method="C") dR <- pairdist(X, method="interpreted") if(any(abs(dC - dR) > eps)) stop("Algorithms for pairdist() do not agree") dC <- pairdist(X, periodic=TRUE, method="C") dR <- pairdist(X, periodic=TRUE, method="interpreted") if(any(abs(dC - dR) > eps)) stop("Algorithms for pairdist(periodic=TRUE) do not agree") # crossdist.ppp Y <- rpoispp(42) dC <- crossdist(X, Y, method="C") dR <- crossdist(X, Y, method="interpreted") if(any(abs(dC - dR) > eps)) stop("Algorithms for crossdist() do not agree") dC <- crossdist(X, Y, periodic=TRUE, method="C") dR <- crossdist(X, Y, periodic=TRUE, method="interpreted") if(any(abs(dC - dR) > eps)) stop("Algorithms for crossdist(periodic=TRUE) do not agree") # nndist.ppp nnC <- nndist(X, method="C") nnI <- 
nndist(X, method="interpreted") if(any(abs(nnC - nnI) > eps)) stop("Algorithms for nndist() do not agree") nn3C <- nndist(X, k=3, method="C") nn3I <- nndist(X, k=3, method="interpreted") if(any(abs(nn3C - nn3I) > eps)) stop("Algorithms for nndist(k=3) do not agree") # nnwhich.ppp nwC <- nnwhich(X, method="C") nwI <- nnwhich(X, method="interpreted") if(any(nwC != nwI)) stop("Algorithms for nnwhich() do not agree") nw3C <- nnwhich(X, k=3, method="C") nw3I <- nnwhich(X, k=3, method="interpreted") if(any(nw3C != nw3I)) stop("Algorithms for nnwhich(k=3) do not agree") }) # ppmBadData.R # $Revision: 1.4 $ $Date: 2011/12/05 07:29:16 $ # Testing robustness of ppm and support functions # when data are rubbish require(spatstat) local({ # --------------------------------------------------- # from Rolf: very large proportion of data is NA SEED <- 42 K <- 101 A <- 500 X <- seq(0, A, length=K) G <- expand.grid(x=X, y=X) FOO <- function(x,y) { sin(x)^2 + cos(y)^2 } M1 <- im(matrix(FOO(G$x, G$y), K, K), xcol=X, yrow=X) M <- im(matrix(FOO(G$x, G$y), K, K)) BAR <- function(x) { exp(-6.618913 + 5.855337 * x - 8.432483 * x^2) } V <- im(BAR(M$v), xcol=X, yrow=X) # V <- eval.im(exp(-6.618913 + 5.855337 * M - 8.432483 * M^2)) set.seed(SEED) Y <- rpoispp(V) fY <- ppm(Y, ~cv + I(cv^2), covariates=list(cv=M), correction="translate") diagnose.ppm(fY) lurking(fY, covariate=as.im(function(x,y){x}, square(A)), type="raw") }) # -------------------------------------------------------- # from Andrew Bevan: numerical overflow, ill-conditioned Fisher information local({ SEED <- 42 nongranite<- owin(poly = list(x = c(0, 8500, 7000, 6400, 6400, 6700, 7000, 7200, 7300, 8000, 8100, 8800, 9500, 10000, 10000, 0), y = c(0, 0, 2000, 3800, 4000, 5000, 6500, 7400, 7500, 8000, 8100, 9000, 9500, 9600, 10000, 10000))) #Trend on raster grid rain <- as.im(X=function(x,y) { x^2 + y^2 }, W=nongranite, dimyx=100) #Generate a point pattern via a Lennard-Jones process set.seed(SEED) mod4<- rmhmodel(cif="lennard", 
par=list(beta=1, sigma=250, epsilon=2.2), trend=rain, w=nongranite) ljtr<- rmh(mod4, start=list(n.start=80), control=list(p=1, nrep=1e5)) #Fit a point process model to the pattern with rain as a covariate # NOTE INCORRECT TREND FORMULA ljtrmod <- ppm(ljtr, trend= ~ Z, interaction=NULL, covariates=list(Z=rain)) ss <- summary(ljtrmod) }) local({ # From Ege # Degenerate but non-null argument 'covariates' xx <- list() names(xx) <- character(0) fit <- ppm(cells, ~x, covariates = xx) st <- summary(fit) }) # # tests/ppmgam.R # # Test ppm with use.gam=TRUE # # $Revision: 1.2 $ $Date: 2013/03/01 08:46:34 $ # require(spatstat) local({ fit <- ppm(nztrees, ~s(x,y), use.gam=TRUE) mm <- model.matrix(fit) mf <- model.frame(fit) v <- vcov(fit) }) # # ppmlogi.R # # Tests of ppm(method='logi') # # $Revision: 1.2 $ Date$ # require(spatstat) local({ fit <- ppm(cells, ~x, method="logi") f <- fitted(fit) p <- predict(fit) fitS <- ppm(cells, ~x, Strauss(0.08), method="logi") fS <- fitted(fitS) pS <- predict(fitS) if(spatstat.options("allow.logi.influence")) { a <- leverage(fit) b <- influence(fit) d <- dfbetas(fit) aS <- leverage(fitS) bS <- influence(fitS) dS <- dfbetas(fitS) } }) # ppmmarkorder.R # $Revision: 1.2 $ $Date: 2011/12/05 07:29:16 $ # Test that predict.ppm, plot.ppm and plot.fitin # tolerate marks with levels that are not in alpha order # require(spatstat) local({ X <- amacrine levels(marks(X)) <- c("ZZZ", "AAA") fit <- ppm(X, ~marks, MultiStrauss(c("ZZZ","AAA"), matrix(0.06, 2, 2))) aa <- predict(fit, type="trend") bb <- predict(fit, type="cif") plot(fit) plot(fitin(fit)) }) # # tests/ppmscope.R # # Test things that might corrupt the internal format of ppm objects # # $Revision: 1.3 $ $Date: 2011/12/05 07:29:16 $ # # (1) Scoping problem that can arise when ppm splits the data require(spatstat) local({ fit <- ppm(bei, ~elev, covariates=bei.extra) mm <- model.matrix(fit) # (2) Fast update mechanism fit1 <- ppm(cells, ~x+y, Strauss(0.07)) fit2 <- update(fit1, ~y) fit3 <- 
update(fit2, ~x) }) # # tests/ppmtricks.R # # Test backdoor exits and hidden options in ppm # # $Revision: 1.1 $ $Date: 2013/06/17 06:54:51 $ # require(spatstat) local({ # (1) skip.border fit <- ppm(cells, ~1, Strauss(0.1), skip.border=TRUE) }) # # tests/prediction.R # # Things that might go wrong with predict() # # $Revision: 1.1 $ $Date: 2013/11/12 16:06:11 $ # require(spatstat) local({ # test of 'covfunargs' f <- function(x,y,a){ y - a } fit <- ppm(cells, ~x + f, covariates=list(f=f), covfunargs=list(a=1/2)) p <- predict(fit) }) # # tests/rmhAux.R # # $Revision: 1.1 $ $Date: 2013/02/18 10:41:27 $ # # For interactions which maintain 'auxiliary data', # verify that the auxiliary data are correctly updated. # # To do this we run rmh with nsave=1 so that the point pattern state # is saved after every iteration, then the algorithm is restarted, # and the auxiliary data are re-initialised. The final state must agree with # the result of simulation without saving. # ---------------------------------------------------- require(spatstat) local({ # Geyer: mod <- list(cif="geyer", par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=square(10)) set.seed(42) X.nosave <- rmh(model=mod, start=list(n.start=50), control=list(nrep=1e3, periodic=FALSE, expand=1)) set.seed(42) X.save <- rmh(model=mod, start=list(n.start=50), control=list(nrep=1e3, periodic=FALSE, expand=1, nburn=0, nsave=1)) stopifnot(npoints(X.save) == npoints(X.nosave)) stopifnot(max(nncross(X.save, X.nosave)$dist) == 0) stopifnot(max(nncross(X.nosave, X.save)$dist) == 0) }) # Test examples for rmh.default # run to reasonable length # and with tests for validity added # ---------------------------------------------------- require(spatstat) local({ if(!exists("nr")) nr <- 5e3 spatstat.options(expand=1.1) # Strauss process. 
mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X1.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr)) # Strauss process, conditioning on n = 80: X2.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(p=1,nrep=nr)) stopifnot(X2.strauss$n == 80) # test tracking mechanism X1.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(nrep=nr), track=TRUE) X2.strauss <- rmh(model=mod01,start=list(n.start=80), control=list(p=1,nrep=nr), track=TRUE) # Hard core process: mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10)) X3.hardcore <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr)) # Strauss process equal to pure hardcore: mod02 <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10)) X3.strauss <- rmh(model=mod02,start=list(n.start=60), control=list(nrep=nr)) # Strauss process in a polygonal window. x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42) y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33) mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07), w=owin(poly=list(x=x,y=y))) X4.strauss <- rmh(model=mod03,start=list(n.start=90), control=list(nrep=nr)) # Strauss process in a polygonal window, conditioning on n = 42. X5.strauss <- rmh(model=mod03,start=list(n.start=42), control=list(p=1,nrep=nr)) stopifnot(X5.strauss$n == 42) # Strauss process, starting off from X4.strauss, but with the # polygonal window replace by a rectangular one. At the end, # the generated pattern is clipped to the original polygonal window. 
xxx <- X4.strauss xxx$window <- as.owin(c(0,1,0,1)) X6.strauss <- rmh(model=mod03,start=list(x.start=xxx), control=list(nrep=nr)) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=c(0,10,0,10)) X1.straush <- rmh(model=mod04,start=list(n.start=70), control=list(nrep=nr)) # Another Strauss with hardcore (with a perhaps surprising result): mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5), w=c(0,250,0,250)) X2.straush <- rmh(model=mod05,start=list(n.start=250), control=list(nrep=nr)) # Pure hardcore (identical to X3.strauss). mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7), w=c(0,10,0,10)) X3.straush <- rmh(model=mod06,start=list(n.start=60), control=list(nrep=nr)) # Area-interaction, inhibitory mod.area <- list(cif="areaint",par=list(beta=2,eta=0.5,r=0.5), w=square(10)) X.area <- rmh(model=mod.area,start=list(n.start=60), control=list(nrep=nr)) # Area-interaction, clustered mod.area2 <- list(cif="areaint",par=list(beta=2,eta=1.5,r=0.5), w=square(10)) X.area2 <- rmh(model=mod.area2,start=list(n.start=60), control=list(nrep=nr)) # Area-interaction close to hard core set.seed(42) mod.area0 <- list(cif="areaint",par=list(beta=2,eta=1e-300,r=0.35), w=square(10)) X.area0 <- rmh(model=mod.area0,start=list(x.start=X3.hardcore), control=list(nrep=nr)) stopifnot(nndist(X.area0) > 0.6) # Soft core: w <- c(0,10,0,10) mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5), w=c(0,10,0,10)) X.sftcr <- rmh(model=mod07,start=list(n.start=70), control=list(nrep=nr)) # Diggle, Gates, and Stibbard: mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1)) X.dgs <- rmh(model=mod12,start=list(n.start=300), control=list(nrep=nr)) # Diggle-Gratton: mod13 <- list(cif="diggra", par=list(beta=1800,kappa=3,delta=0.02,rho=0.04), w=square(1)) X.diggra <- rmh(model=mod13,start=list(n.start=300), control=list(nrep=nr)) # Geyer: mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), 
w=c(0,10,0,10)) X1.geyer <- rmh(model=mod14,start=list(n.start=200), control=list(nrep=nr)) # Geyer; same as a Strauss process with parameters # (beta=2.25,gamma=0.16,r=0.7): mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000), w=c(0,10,0,10)) X2.geyer <- rmh(model=mod15,start=list(n.start=200), control=list(nrep=nr)) mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3)) data(redwood) X3.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(periodic=TRUE,nrep=nr)) # Geyer, starting from the redwood data set, simulating # on a torus, and conditioning on n: X4.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(p=1,periodic=TRUE,nrep=nr)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) X.lookup <- rmh(model=mod17,start=list(n.start=100), control=list(nrep=nr)) # Strauss with trend tr <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } beta <- 0.3 gmma <- 0.5 r <- 45 tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend mod17 <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r),w=c(0,250,0,250), trend=tr3) X1.strauss.trend <- rmh(model=mod17,start=list(n.start=90), control=list(nrep=nr)) }) # Things which should cause an error require(spatstat) local({ if(!exists("nv")) nv <- 0 if(!exists("nr")) nr <- 1e3 # Strauss with zero intensity and p = 1 mod0S <- list(cif="strauss",par=list(beta=0,gamma=0.6,r=0.7), w = square(3)) out <- try(X0S <- rmh(model=mod0S,start=list(n.start=80), control=list(p=1,nrep=nr,nverb=nv),verbose=FALSE)) if(!inherits(out, "try-error")) stop("Error not trapped (Strauss with zero intensity and p = 1) in tests/rmhErrors.R") }) # # tests/rmhExpand.R # # test decisions about expansion of simulation window # # 
$Revision: 1.2 $ $Date: 2011/12/05 07:29:16 $ # require(spatstat) local({ fit <- ppm(cells, ~x) # check rmhmodel.ppm mod <- rmhmodel(fit) wsim <- as.rectangle(mod$trend) if(!identical(wsim, as.owin(cells))) stop("Expansion occurred improperly in rmhmodel.ppm") }) # # tests of rmh, running multitype point processes # require(spatstat) local({ if(!exists("nr")) nr <- 5e3 if(!exists("nv")) nv <- 0 spatstat.options(expand=1.1) # Multitype Poisson modp2 <- list(cif="poisson", par=list(beta=2), types=letters[1:3], w = square(10)) Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1)) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r), w=c(0,250,0,250)) X1.straussm <- rmh(model=mod08,start=list(n.start=80), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) # Multitype Strauss conditioning upon the total number # of points being 80: X2.straussm <- rmh(model=mod08,start=list(n.start=80), control=list(p=1,ptypes=c(0.75,0.25),nrep=nr, nverb=nv)) stopifnot(X2.straussm$n == 80) # Conditioning upon the number of points of type 1 being 60 # and the number of points of type 2 being 20: X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)), control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) stopifnot(all(table(X3.straussm$marks) == c(60,20))) # Multitype Strauss hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250)) X.straushm <- rmh(model=mod09,start=list(n.start=80), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- 
list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) X1.straushm.trend <- rmh(model=mod10,start=list(n.start=350), control=list(ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) # Multitype Strauss hardcore with trends for each type, given as images: bigwin <- square(250) i1 <- as.im(tr3, bigwin) i2 <- as.im(tr4, bigwin) mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=bigwin, trend=list(i1,i2)) X2.straushm.trend <- rmh(model=mod11,start=list(n.start=350), control=list(ptypes=c(0.75,0.25),expand=1, nrep=nr,nverb=nv)) ####################################################################### ############ checks on distribution of output ####################### ####################################################################### checkp <- function(p, context, testname, failmessage, pcrit=0.01) { if(missing(failmessage)) failmessage <- paste("output failed", testname) if(p < pcrit) warning(paste(context, ",", failmessage), call.=FALSE) cat(paste("\n", context, ",", testname, "has p-value", signif(p,4), "\n")) } # Multitype Strauss code; output is multitype Poisson beta <- 100 * c(1,1) ri <- matrix(0.07, 2, 2) gmma <- matrix(1, 2, 2) # no interaction tr1 <- function(x,y){ rep(1, length(x)) } tr2 <- function(x,y){ rep(2, length(x)) } mod <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=ri), w=owin(), trend=list(tr1,tr2)) X <- rmh(mod, start=list(n.start=0), control=list(nrep=1e6)) # The model is Poisson with intensity 100 for type 1 and 200 for type 2. # Total number of points is Poisson (300) # Marks are i.i.d. with P(type 1) = 1/3, P(type 2) = 2/3. 
# Test whether the total intensity looks right # p <- ppois(X$n, 300) p.val <- 2 * min(p, 1-p) checkp(p.val, "In multitype Poisson simulation", "test whether total number of points has required mean value") # Test whether the mark distribution looks right ta <- table(X$marks) cat("Frequencies of marks:") print(ta) checkp(chisq.test(ta, p = c(1,2)/3)$p.value, "In multitype Poisson simulation", "chi-squared goodness-of-fit test for mark distribution (1/3, 2/3)") ##### #### multitype Strauss code; fixall=TRUE; #### output is multinomial process with nonuniform locations #### the.context <- "In nonuniform multinomial simulation" beta <- 100 * c(1,1) ri <- matrix(0.07, 2, 2) gmma <- matrix(1, 2, 2) # no interaction tr1 <- function(x,y){ ifelse(x < 0.5, 0, 2) } tr2 <- function(x,y){ ifelse(y < 0.5, 1, 3) } # cdf of these distributions Fx1 <- function(x) { ifelse(x < 0.5, 0, ifelse(x < 1, 2 * x - 1, 1)) } Fy2 <- function(y) { ifelse(y < 0, 0, ifelse(y < 0.5, y/2, ifelse(y < 1, (1/2 + 3 * (y-1/2))/2, 1))) } mod <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=ri), w=owin(), trend=list(tr1,tr2)) X <- rmh(mod, start=list(n.start=c(50,50)), control=list(nrep=1e6, expand=1, p=1, fixall=TRUE)) # The model is Poisson # Mean number of type 1 points = 100 # Mean number of type 2 points = 200 # Total intensity = 300 # Marks are i.i.d. 
with P(type 1) = 1/3, P(type 2) = 2/3 # Test whether the coordinates look OK Y <- split(X) X1 <- Y[[names(Y)[1]]] X2 <- Y[[names(Y)[2]]] checkp(ks.test(X1$y, "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of y coordinates of type 1 points") if(any(X1$x < 0.5)) { warning(paste(the.context, ",", "x-coordinates of type 1 points are IMPOSSIBLE"), call.=FALSE) } else { checkp(ks.test(Fx1(X1$x), "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of transformed x coordinates of type 1 points") } checkp(ks.test(X2$x, "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of x coordinates of type 2 points") checkp(ks.test(Fy2(X2$y), "punif")$p.value, the.context, "Kolmogorov-Smirnov test of uniformity of transformed y coordinates of type 2 points") }) # # tests/rmhTrend.R # # Problems with trend images (rmhmodel.ppm or rmhEngine) # require(spatstat) local({ set.seed(42) # Bug folder 37 of 8 feb 2011 # rmhmodel.ppm -> predict.ppm # + rmhResolveTypes -> is.subset.owin data(demopat) Z <- rescale(demopat, 7000) X <- unmark(Z) X1 <- split(Z)[[1]] Int <- density(X,dimyx=200) Lint <- eval.im(log(npoints(X1)*Int/npoints(X))) M <- as.owin(Int) MR <- intersect.owin(M,scalardilate(M,0.5,origin="midpoint")) X1 <- X1[MR] Fut <- ppm(X1,~offset(Lint),covariates=list(Lint=Lint), inter=BadGey(r=c(0.03,0.05),sat=3)) Y <- rmh(Fut,control=list(expand=M,nrep=1e3), verbose=FALSE) }) # strange boundary cases require(spatstat) local({ if(!exists("nv")) nv <- 0 if(!exists("nr")) nr <- 5e3 # Poisson process cat("Poisson\n") modP <- list(cif="poisson",par=list(beta=10), w = square(3)) XP <- rmh(model = modP, start = list(n.start=25), control=list(nrep=nr,nverb=nv)) # Poisson process case of Strauss cat("\nPoisson case of Strauss\n") modPS <- list(cif="strauss",par=list(beta=10,gamma=1,r=0.7), w = square(3)) XPS <- rmh(model=modPS, start=list(n.start=25), control=list(nrep=nr,nverb=nv)) # Strauss with zero intensity cat("\nStrauss with zero 
intensity\n") mod0S <- list(cif="strauss",par=list(beta=0,gamma=0.6,r=0.7), w = square(3)) X0S <- rmh(model=mod0S,start=list(n.start=80), control=list(nrep=nr,nverb=nv)) stopifnot(X0S$n == 0) # Poisson with zero intensity cat("\nPoisson with zero intensity\n") mod0P <- list(cif="poisson",par=list(beta=0), w = square(3)) X0P <- rmh(model = mod0P, start = list(n.start=25), control=list(nrep=nr,nverb=nv)) # Poisson conditioned on zero points cat("\nPoisson conditioned on zero points\n") modp <- list(cif="poisson", par=list(beta=2), w = square(10)) Xp <- rmh(modp, start=list(n.start=0), control=list(p=1, nrep=nr)) stopifnot(Xp$n == 0) # Multitype Poisson conditioned on zero points cat("\nMultitype Poisson conditioned on zero points\n") modp2 <- list(cif="poisson", par=list(beta=2), types=letters[1:3], w = square(10)) Xp2 <- rmh(modp2, start=list(n.start=0), control=list(p=1, nrep=nr)) stopifnot(is.marked(Xp2)) stopifnot(Xp2$n == 0) # Multitype Poisson conditioned on zero points of each type cat("\nMultitype Poisson conditioned on zero points of each type\n") Xp2fix <- rmh(modp2, start=list(n.start=c(0,0,0)), control=list(p=1, fixall=TRUE, nrep=nr)) stopifnot(is.marked(Xp2fix)) stopifnot(Xp2fix$n == 0) }) # # tests of rmhmodel.ppm # require(spatstat) local({ f <- ppm(cells) m <- rmhmodel(f) f <- ppm(cells, ~x) m <- rmhmodel(f) f <- ppm(cells, ~1, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells, ~1, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) f <- ppm(cells, ~1, Hardcore(0.07)) m <- rmhmodel(f) f <- ppm(cells, ~1, DiggleGratton(0.05,0.1)) m <- rmhmodel(f) f <- ppm(cells, ~1, Softcore(0.5), correction="isotropic") m <- rmhmodel(f) f <- ppm(cells, ~1, Geyer(0.07,2)) m <- rmhmodel(f) f <- ppm(cells, ~1, BadGey(c(0.07,0.1,0.13),2)) m <- rmhmodel(f) f <- ppm(cells, ~1, PairPiece(r = c(0.05, 0.1, 0.2))) m <- rmhmodel(f) f <- ppm(cells, ~1, AreaInter(r=0.06)) m <- rmhmodel(f) # multitype r <- matrix(0.07, 2, 2) f <- ppm(amacrine, ~1, MultiStrauss(c("off","on"),r)) m <- 
rmhmodel(f) h <- matrix(min(nndist(amacrine))/2, 2, 2) f <- ppm(amacrine, ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) diag(r) <- NA diag(h) <- NA f <- ppm(amacrine, ~1, MultiStrauss(c("off","on"),r)) m <- rmhmodel(f) f <- ppm(amacrine, ~1, MultiStraussHard(c("off","on"),r, h)) m <- rmhmodel(f) # multitype data, interaction not dependent on type f <- ppm(amacrine, ~marks, Strauss(0.05)) m <- rmhmodel(f) # trends f <- ppm(cells, ~x, Strauss(0.1)) m <- rmhmodel(f) f <- ppm(cells, ~y, StraussHard(r=0.1,hc=0.05)) m <- rmhmodel(f) f <- ppm(cells, ~x+y, Hardcore(0.07)) m <- rmhmodel(f) f <- ppm(cells, ~polynom(x,y,2), Softcore(0.5), correction="isotropic") m <- rmhmodel(f) # covariates Z <- as.im(function(x,y){ x^2+y^2 }, as.owin(cells)) f <- ppm(cells, ~z, covariates=list(z=Z)) m <- rmhmodel(f) m <- rmhmodel(f, control=list(p=1)) Zim <- as.im(Z, as.owin(cells)) f <- ppm(cells, ~z, covariates=list(z=Zim)) m <- rmhmodel(f) Z <- as.im(function(x,y){ x^2+y }, as.owin(amacrine)) f <- ppm(amacrine, ~z + marks, covariates=list(z=Z)) m <- rmhmodel(f) m <- rmhmodel(f, control=list(p=1)) m <- rmhmodel(f, control=list(p=1,fixall=TRUE)) Zim <- as.im(Z, as.owin(amacrine)) f <- ppm(amacrine, ~z + marks, covariates=list(z=Zim)) m <- rmhmodel(f) }) # # tests/rmhmodelHybrids.R # # Test that rmhmodel.ppm and rmhmodel.default # work on Hybrid interaction models # # $Revision: 1.3 $ $Date: 2013/01/29 02:12:13 $ # require(spatstat) local({ # ......... rmhmodel.ppm ....................... fit1 <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2), C=Geyer(0.15, 1))) m1 <- rmhmodel(fit1) m1 reach(m1) # Test of handling 'IsOffset' data(cells) fit2 <- ppm(cells, ~1, Hybrid(H=Hardcore(0.05), G=Geyer(0.15, 2))) rmhmodel(fit2) # Test of handling Poisson components fit3 <- ppm(cells, ~1, Hybrid(P=Poisson(), S=Strauss(0.05))) X3 <- rmh(fit3, control=list(nrep=1e3,expand=1), verbose=FALSE) # ............ rmhmodel.default ............................ 
modH <- list(cif=c("strauss","geyer"), par=list(list(beta=50,gamma=0.5, r=0.1), list(beta=1, gamma=0.7, r=0.2, sat=2)), w = square(1)) rmodH <- rmhmodel(modH) rmodH reach(rmodH) # test handling of Poisson components modHP <- list(cif=c("poisson","strauss"), par=list(list(beta=5), list(beta=10,gamma=0.5, r=0.1)), w = square(1)) rmodHP <- rmhmodel(modHP) rmodHP reach(rmodHP) modPP <- list(cif=c("poisson","poisson"), par=list(list(beta=5), list(beta=10)), w = square(1)) rmodPP <- rmhmodel(modPP) rmodPP reach(rmodPP) }) # # tests/rmh.ppm.R # # $Revision: 1.1 $ $Date: 2012/10/14 07:24:21 $ # # Examples removed from rmh.ppm.Rd # stripped down to minimal tests of validity # require(spatstat) local({ op <- spatstat.options() spatstat.options(rmh.nrep=10, npixel=10, ndummy.min=10) spatstat.options(project.fast=TRUE) Nrep <- 10 X <- swedishpines # Poisson process fit <- ppm(X, ~1, Poisson()) Xsim <- rmh(fit) # Strauss process fit <- ppm(X, ~1, Strauss(r=7)) Xsim <- rmh(fit) # Strauss process simulated on a larger window # then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) # Strauss - hard core process # fit <- ppm(X, ~1, StraussHard(r=7,hc=2)) # Xsim <- rmh(fit, start=list(n.start=X$n)) # Geyer saturation process # fit <- ppm(X, ~1, Geyer(r=7,sat=2)) # Xsim <- rmh(fit, start=list(n.start=X$n)) # Area-interaction process fit <- ppm(X, ~1, AreaInter(r=7)) Xsim <- rmh(fit, start=list(n.start=X$n)) # soft core interaction process # X <- quadscheme(X, nd=50) # fit <- ppm(X, ~1, Softcore(kappa=0.1), correction="isotropic") # Xsim <- rmh(fit, start=list(n.start=X$n)) # Diggle-Gratton pairwise interaction model # fit <- ppm(cells, ~1, DiggleGratton(0.05, 0.1)) # Xsim <- rmh(fit, start=list(n.start=cells$n)) # plot(Xsim, main="simulation from fitted Diggle-Gratton model") X <- rSSI(0.05, 100) # piecewise-constant pairwise interaction function fit <- ppm(X, ~1, PairPiece(seq(0.02, 0.1, by=0.01))) Xsim <- rmh(fit) # marked point pattern 
Y <- amacrine # marked Poisson models fit <- ppm(Y) Ysim <- rmh(fit) fit <- ppm(Y,~marks) Ysim <- rmh(fit) fit <- ppm(Y,~x) Ysim <- rmh(fit) # fit <- ppm(Y,~polynom(x,2)) # Ysim <- rmh(fit) fit <- ppm(Y,~marks+x) Ysim <- rmh(fit) # fit <- ppm(Y,~marks+polynom(x,2)) # Ysim <- rmh(fit) # multitype Strauss models MS <- MultiStrauss(types = levels(Y$marks), radii=matrix(0.07, ncol=2, nrow=2)) # fit <- ppm(Y,~marks*polynom(x,2), MS) fit <- ppm(Y,~marks*x, MS) Ysim <- rmh(fit) spatstat.options(op) }) # fvproblems.R require(spatstat) # This appears in the workshop notes # Problem detected by Martin Bratschi local({ Jdif <- function(X, ..., i) { Jidot <- Jdot(X, ..., i=i) J <- Jest(X, ...) dif <- eval.fv(Jidot - J) return(dif) } Z <- Jdif(amacrine, i="on") }) # # Test mathlegend code # local({ K <- Kest(cells) plot(K) plot(K, . ~ r) plot(K, . - theo ~ r) plot(K, sqrt(./pi) ~ r) plot(K, cbind(iso, theo) ~ r) plot(K, cbind(iso, theo) - theo ~ r) plot(K, sqrt(cbind(iso, theo)/pi) ~ r) plot(K, cbind(iso/2, -theo) ~ r) plot(K, cbind(iso/2, trans/2) - theo ~ r) # test expansion of .x and .y plot(K, . ~ .x) plot(K, . 
- theo ~ .x) plot(K, .y - theo ~ .x) plot(K, sqrt(.y) - sqrt(theo) ~ .x) # problems with parsing weird strings in levels(marks(X)) # noted by Ulf Mehlig levels(marks(amacrine)) <- c("Nastricreechia krorluppia", "Homo habilis") plot(Kcross(amacrine)) plot(alltypes(amacrine, "K")) plot(alltypes(amacrine, "J")) plot(alltypes(amacrine, pcfcross)) }) # test of case where mark levels contain illegal characters require(spatstat) local({ hyphenated <- c("a", "not-a") spaced <- c("U", "non U") suffixed <- c("a+", "a*") charred <- c("+", "*") irad <- matrix(0.1, 2,2) hrad <- matrix(0.005, 2, 2) tryit <- function(types, X, irad, hrad) { levels(marks(X)) <- types fit <- ppm(X, ~marks + polynom(x,y,2), MultiStraussHard(types=types,iradii=irad,hradii=hrad)) print(fit) print(coef(fit)) val <- fitted(fit) pred <- predict(fit) return(invisible(NULL)) } tryit(hyphenated, amacrine, irad, hrad) tryit(spaced, amacrine, irad, hrad) tryit(suffixed, amacrine, irad, hrad) tryit(charred, amacrine, irad, hrad) }) # # test cases where there are no (rows or columns of) marks # require(spatstat) local({ n <- npoints(cells) df <- data.frame(x=1:n, y=factor(sample(letters, n, replace=TRUE))) nocolumns <- c(FALSE, FALSE) norows <- rep(FALSE, n) X <- cells marks(X) <- df marks(X) <- df[,1] marks(X) <- df[,nocolumns] Z <- Y <- X[integer(0)] marks(Y) <- df[norows,] stopifnot(is.marked(Y)) marks(Z) <- df[norows,nocolumns] stopifnot(!is.marked(Z)) }) # checks validity of fast C implementation of Geyer interaction require(spatstat) local({ X <- redwood Q <- quadscheme(X) U <- union.quad(Q) EP <- equalpairs.quad(Q) G <- Geyer(0.11, 2) # The value r=0.11 is chosen to avoid hardware numerical effects (gcc bug 323). # It avoids being close any value of pairdist(redwood). # The nearest such values are 0.1077.. and 0.1131.. 
# By contrast if r = 0.1 there are values differing from 0.1 by 3e-17 a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match") # ... # and again for a non-integer value of 'sat' # (spotted by Thordis Linda Thorarinsdottir) G <- Geyer(0.11, 2.5) a <- pairsat.family$eval(X,U,EP,G$pot,G$par,"border") b <- G$fasteval(X,U,EP,G$pot,G$par,"border") if(!all(a==b)) stop("Results of Geyer()$fasteval and pairsat.family$eval do not match when sat is not an integer") }) require(spatstat) local({ co <- as.ppp(corners(letterR), letterR, check=FALSE) co[letterR] }) # tests/segments.R # $Revision: 1.7 $ $Date: 2011/12/05 07:29:16 $ require(spatstat) local({ # pointed out by Jeff Laake W <- owin() X <- psp(x0=.25,x1=.25,y0=0,y1=1,window=W) X[W] # migrated from 'lpp' X <- psp(runif(10),runif(10),runif(10),runif(10), window=owin()) Z <- as.mask.psp(X) Z <- pixellate(X) # test of distppll pointed out by Ang Qi Wei p <- matrix(c(1.5, 0), 1, 2) l <- matrix(c(0,0,1,0,1,0,2,0), 2, 4, byrow=T) a <- distppll(p, l, mintype=2, method="interpreted") b <- distppll(p, l, mintype=2, method="Fortran") d <- distppll(p, l, mintype=2, method="C") if(a$min.which != b$min.which) stop("conflict between Fortran and interpreted code in distppll") if(a$min.which != d$min.which) stop("conflict between C and interpreted code in distppll") # tests of pixellate.psp -> seg2pixL ns <- 50 out <- numeric(ns) for(i in 1:ns) { X <- psp(runif(1), runif(1), runif(1), runif(1), window=owin()) len <- lengths.psp(X) dlen <- sum(pixellate(X)$v) out[i] <- if(len > 1e-7) dlen/len else 1 } if(diff(range(out)) > 0.01) stop(paste( "pixellate.psp test 1: relative error [", paste(diff(range(out)), collapse=", "), "]")) # Michael Sumner's test examples set.seed(33) n <- 2001 co <- cbind(runif(n), runif(n)) ow <- owin() X <- psp(co[-n,1], co[-n,2], co[-1,1], co[-1,2], window=ow) s1 <- sum(pixellate(X)) 
s2 <- sum(lengths.psp(X)) if(abs(s1 - s2)/s2 > 0.01) { stop(paste("pixellate.psp test 2:", "sum(pixellate(X)) = ", s1, "!=", s2, "= sum(lengths.psp(X))")) } wts <- 1/(lengths.psp(X) * X$n) s1 <- sum(pixellate(X, weights=wts)) if(abs(s1-1) > 0.01) { stop(paste("pixellate.psp test 3:", "sum(pixellate(X, weights))=", s1, " (should be 1)")) } X <- psp(0, 0, 0.01, 0.001, window=owin()) s1 <- sum(pixellate(X)) s2 <- sum(lengths.psp(X)) if(abs(s1 - s2)/s2 > 0.01) { stop(paste("pixellate.psp test 4:", "sum(pixellate(X)) = ", s1, "!=", s2, "= sum(lengths.psp(X))")) } X <- psp(0, 0, 0.001, 0.001, window=owin()) s1 <- sum(pixellate(X)) s2 <- sum(lengths.psp(X)) if(abs(s1 - s2)/s2 > 0.01) { stop(paste("pixellate.psp test 5:", "sum(pixellate(X)) = ", s1, "!=", s2, "= sum(lengths.psp(X))")) } }) # test for step() operation # require(spatstat) local({ Z <- as.im(function(x,y){ x^3 - y^2 }, nztrees$window) fitP <- ppm(nztrees, ~x+y+Z, covariates=list(Z=Z)) step(fitP) fitS <- update(fitP, Strauss(7)) step(fitS) fitM <- ppm(amacrine, ~ marks*(x+y), MultiStrauss(types=levels(marks(amacrine)), radii=matrix(0.04, 2, 2))) step(fitM) }) require(spatstat) source("badwindow.R") owinpolycheck(W,verbose=FALSE) # # tests/hobjects.R # # Validity of methods for ppm(... 
method="ho") # require(spatstat) local({ fit <- ppm(cells, ~1, Strauss(0.1), method="ho", nsim=10) fitx <- ppm(cells, ~offset(x), Strauss(0.1), method="ho", nsim=10) a <- AIC(fit) ax <- AIC(fitx) f <- fitted(fit) fx <- fitted(fitx) p <- predict(fit) px <- predict(fitx) }) # check kstest with strange data require(spatstat) local({ # Marked point patterns with some marks not represented AC <- split(ants, un=FALSE)$Cataglyphis AM <- split(ants, un=FALSE)$Messor DM <- distmap(AM) # should produce a warning, rather than a crash: kstest(AC, DM) # should be OK: kstest(unmark(AC), DM) # linear networks X <- runiflpp(20, simplenet) fit <- lppm(X, ~1) kstest(fit, "y") }) # # tests/kppm.R # # $Revision: 1.6 $ $Date: 2012/04/08 03:22:20 $ # # Test functionality of kppm that depends on RandomFields # require(spatstat) local({ if(require(RandomFields) && RandomFieldsSafe()) { fit0 <- kppm(redwood, ~1, "LGCP") simulate(fit0) fit <- kppm(redwood, ~x, "LGCP", covmodel=list(model="matern", nu=0.3), control=list(maxit=5)) simulate(fit) # ... 
and Abdollah's code fit <- kppm(redwood, ~x, cluster="Cauchy", statistic="K") simulate(fit) } }) # temporary test file for localpcfmatrix require(spatstat) local({ a <- localpcfmatrix(redwood) a plot(a) a[, 3:5] }) # check for various bugs related to factor conversions require(spatstat) local({ # make a factor image m <- factor(rep(letters[1:4], 4)) Z <- im(m, xcol=1:4, yrow=1:4) # make a point pattern set.seed(42) X <- runifpoint(20, win=as.owin(Z)) # look up the image at the points of X # (a) internal ans1 <- lookup.im(Z, X$x, X$y) stopifnot(is.factor(ans1)) # (b) user level ans2 <- Z[X] stopifnot(is.factor(ans2)) # (c) turn the image into a tessellation # and apply quadratcount V <- tess(image = Z) quadratcount(X, tess=V) }) # # tests/splitpea.R # # Check behaviour of split.ppp etc # # Thanks to Marcelino de la Cruz # # $Revision: 1.8 $ $Date: 2013/10/06 08:44:28 $ # require(spatstat) local({ W <- square(8) X <- ppp(c(2.98, 4.58, 7.27, 1.61, 7.19), c(7.56, 5.29, 5.03, 0.49, 1.65), window=W) Z <- quadrats(W, 4, 4) Yall <- split(X, Z, drop=FALSE) Ydrop <- split(X, Z, drop=TRUE) P <- Yall[[1]] if(!all(inside.owin(P$x, P$y, P$window))) stop("Black hole detected when drop=FALSE") P <- Ydrop[[1]] if(!all(inside.owin(P$x, P$y, P$window))) stop("Black hole detected when drop=TRUE") Ydrop[[1]] <- P[1] split(X, Z, drop=TRUE) <- Ydrop # test NA handling Zbad <- quadrats(square(4), 2, 2) Ybdrop <- split(X, Zbad, drop=TRUE) Yball <- split(X, Zbad, drop=FALSE) # From Marcelino set.seed(1) W<- square(10) # the big window puntos<- rpoispp(0.5, win=W) data(letterR) r00 <- letterR r05 <- shift(letterR,c(0,5)) r50 <- shift(letterR,c(5,0)) r55 <- shift(letterR,c(5,5)) tessr4 <- tess(tiles=list(r00, r05,r50,r55)) puntosr4 <- split(puntos, tessr4, drop=TRUE) split(puntos, tessr4, drop=TRUE) <- puntosr4 }) # # tests/imageops.R # # $Revision: 1.5 $ $Date: 2011/12/05 07:29:16 $ # require(spatstat) local({ A <- as.im(owin()) B <- as.im(owin(c(1.1, 1.9), c(0,1))) Z <- imcov(A, B) 
stopifnot(abs(max(Z) - 0.8) < 0.1) }) # tests/triplets.R # test code for triplet interaction # $Revision: 1.4 $ $Date: 2012/07/12 02:43:32 $ require(spatstat) local({ fit <- ppm(redwood, ~1, Triplets(0.1)) fit suffstat(fit) # hard core (zero triangles, coefficient is NA) fit0 <- ppm(cells, ~1, Triplets(0.05)) fit0 suffstat(fit0) # bug case (1 triangle in data) fit1 <- ppm(cells, ~1, Triplets(0.15)) fit1 suffstat(fit1) }) # # tests/project.ppm.R # # $Revision: 1.3 $ $Date: 2012/10/22 03:12:08 $ # # Tests of projection mechanism # require(spatstat) local({ # a very unidentifiable model fit <- ppm(cells, ~Z, Strauss(1e-06), covariates=list(Z=0)) project.ppm(fit) # multitype fit2 <- ppm(amacrine, ~1, MultiStrauss(types=c("off", "on"), radii=matrix(1e-06, 2, 2))) project.ppm(fit2) # hybrids r0 <- min(nndist(redwood)) ra <- 1.25 * r0 rb <- 0.8 * r0 f <- ppm(redwood, ~1, Hybrid(A=Strauss(ra), B=Geyer(0.1, 2)), project=TRUE) f <- ppm(redwood, ~1, Hybrid(A=Strauss(rb), B=Geyer(0.1, 2)), project=TRUE) f <- ppm(redwood, ~1, Hybrid(A=Strauss(ra), B=Strauss(0.1)), project=TRUE) f <- ppm(redwood, ~1, Hybrid(A=Strauss(rb), B=Strauss(0.1)), project=TRUE) f <- ppm(redwood, ~1, Hybrid(A=Hardcore(rb), B=Strauss(0.1)), project=TRUE) f <- ppm(redwood, ~1, Hybrid(A=Hardcore(rb), B=Geyer(0.1, 2)), project=TRUE) f <- ppm(redwood, ~1, Hybrid(A=Geyer(rb, 1), B=Strauss(0.1)), project=TRUE) }) # # tests/hyperframe.R # # test "[.hyperframe" etc # # $Revision: 1.2 $ $Date: 2012/01/31 11:04:44 $ # lambda <- runif(4, min=50, max=100) X <- lapply(as.list(lambda), function(x) { rpoispp(x) }) h <- hyperframe(lambda=lambda, X=X) h$lambda2 <- lambda^2 h[, "lambda3"] <- lambda^3 h[, "Y"] <- X h[, "X"] <- lapply(X, flipxy) h[, c("X", "Y")] <- hyperframe(X=X, Y=X) # check fast code for Kest require(spatstat) local({ Kb <- Kest(cells, nlarge=0) Ku <- Kest(cells, correction="none") Kbu <- Kest(cells, correction=c("none", "border")) }) # # tests/vcovppm.R # # Check validity of vcov.ppm algorithms # # Thanks 
to Ege Rubak # # $Revision: 1.4 $ $Date: 2013/09/20 09:01:34 $ # require(spatstat) local({ set.seed(42) X <- rStrauss(200, .5, .05) model <- ppm(X, inter = Strauss(.05)) b <- vcov(model, generic = TRUE, algorithm = "basic") v <- vcov(model, generic = TRUE, algorithm = "vector") vc <- vcov(model, generic = TRUE, algorithm = "vectorclip") vn <- vcov(model, generic = FALSE) disagree <- function(x, y, tol=1e-7) { max(abs(x-y)) > tol } asymmetric <- function(x) { disagree(x, t(x)) } if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm") if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm") if(asymmetric(vn)) stop("Non-symmetric matrix produced by vcov.ppm Strauss algorithm") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' ") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' ") if(disagree(vn, vc)) stop("Disagreement between vcov.ppm generic and Strauss algorithms") # Geyer code xx <- c(0.7375956, 0.6851697, 0.6399788, 0.6188382) yy <- c(0.5816040, 0.6456319, 0.5150633, 0.6191592) Y <- ppp(xx, yy, window=square(1)) modelY <- ppm(Y, ~1, Geyer(0.1, 1)) b <- vcov(modelY, generic = TRUE, algorithm = "basic") v <- vcov(modelY, generic = TRUE, algorithm = "vector") vc <- vcov(modelY, generic = TRUE, algorithm = "vectorclip") if(asymmetric(b)) stop("Non-symmetric matrix produced by vcov.ppm 'basic' algorithm for Geyer model") if(asymmetric(v)) stop("Non-symmetric matrix produced by vcov.ppm 'vector' algorithm for Geyer model") if(asymmetric(vc)) stop("Non-symmetric matrix produced by vcov.ppm 'vectorclip' algorithm for Geyer model") if(disagree(v, b)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'basic' for Geyer model") if(disagree(v, vc)) stop("Disagreement between vcov.ppm algorithms 'vector' and 'vectorclip' for Geyer model") 
# test of pairwise.family$delta2 modelZ <- ppm(amacrine, ~1, MultiStrauss(radii=matrix(0.1, 2, 2))) b <- vcov(modelZ) g <- vcov(modelZ, generic=TRUE) if(disagree(b, g)) stop("Disagreement between vcov.ppm algorithms for MultiStrauss model") }) # tests/windows.R # Tests of owin geometry code require(spatstat) local({ # Ege Rubak spotted this problem in 1.28-1 A <- as.owin(ants) B <- dilation(A, 140) if(!is.subset.owin(A, B)) stop("is.subset.owin fails in polygonal case") # thanks to Tom Rosenbaum A <- shift(square(3), origin="midpoint") B <- shift(square(1), origin="midpoint") AB <- setminus.owin(A, B) D <- shift(square(2), origin="midpoint") if(is.subset.owin(D,AB)) stop("is.subset.owin fails for polygons with holes") }) # # test addvar options # X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X, ~y) addvar(model, "x", crosscheck=TRUE) addvar(model, "x", bw.input="quad") w <- square(0.5) addvar(model, "x", subregion=w) addvar(model, "x", subregion=w, bw.input="points") # additional test of parres X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X, ~x+y) # options in parres parres(model, "x") parres(model, "x", bw.input="quad") w <- square(0.5) parres(model, "x", subregion=w) parres(model, "x", subregion=w, bw.input="quad") # check whether 'update.ppm' has messed up internals mod2 <- update(model, ~x) parres(mod2, "x") # # tests/lppstuff.R # # Tests for lpp code # # $Revision: 1.2 $ $Date: 2013/01/23 08:20:41 $ require(spatstat) local({ # check 'normalise' option in linearKinhom X <- rpoislpp(5, simplenet) fit <- lppm(X, ~x) K <- linearKinhom(X, lambda=fit, normalise=FALSE) plot(K) g <- linearpcfinhom(X, lambda=fit, normalise=FALSE) plot(g) K <- linearKinhom(X, lambda=fit, normalise=TRUE) plot(K) g <- linearpcfinhom(X, lambda=fit, normalise=TRUE) plot(g) # check empty patterns OK X <- runiflpp(0, simplenet) print(X) }) # # tests/density.R # # Test behaviour of density methods and inhomogeneous summary functions # # $Revision: 1.1 $ $Date: 2013/02/26 
09:13:52 $ # require(spatstat) local({ lam <- density(redwood) K <- Kinhom(redwood, lam) lamX <- density(redwood, at="points") KX <- Kinhom(redwood, lamX) }) # # tests/slrm.R # # $Revision: 1.1 $ $Date: 2013/04/19 10:14:52 $ # # Test slrm fitting and prediction when there are NA's # require(spatstat) local({ X <- copper$SouthPoints W <- owin(poly=list(x=c(0,35,35,1),y=c(1,1,150,150))) Y <- X[W] fit <- slrm(Y ~ x+y) pred <- predict(fit) }) # tests/linalgeb.R # checks validity of linear algebra code # $Revision: 1.2 $ $Date: 2013/04/18 09:14:37 $ require(spatstat) local({ p <- 3 n <- 4 x <- array(as.numeric(1:(p * n * n)), dim=c(p, n, n)) w <- matrix(1:(n*n), n, n) y <- matrix(numeric(p * p), p, p) for(i in 1:n) for(j in (1:n)[-i]) y <- y + w[i,j] * outer(x[,i,j], x[,j,i]) z <- sumsymouter(x, w) if(!identical(y,z)) stop("sumsymouter gives incorrect result") }) # # tests/undoc.R # # $Revision: 1.1 $ $Date: 2013/07/25 10:26:09 $ # # Test undocumented hacks, etc require(spatstat) local({ # pixellate.ppp accepts a data frame of weights pixellate(cells, weights=data.frame(a=1:42, b=42:1)) }) # # tests/mppm.R # # Basic tests of mppm # # $Revision: 1.1 $ $Date: 2013/11/10 08:59:08 $ # require(spatstat) local({ # test interaction formulae and subfits fit1 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~ifelse(group=="control", po, str)) fit2 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), str=Strauss(0.1)), iformula=~id * str) fit3 <- mppm(Points ~ group, simba, hyperframe(po=Poisson(), pie=PairPiece(c(0.05,0.1))), iformula=~I((group=="control") * po) + I((group=="treatment") * pie)) fit1 fit2 fit3 subfits(fit1) subfits(fit2) subfits(fit3) # test handling of offsets and zero cif values in mppm data(waterstriders) H <- hyperframe(Y = waterstriders) mppm(Y ~ 1, data=H, Hardcore(1.5)) mppm(Y ~ 1, data=H, StraussHard(7, 1.5)) }) # tests/ppx.R # # Test operations for ppx objects # # $Revision: 1.1 $ $Date: 2013/11/19 03:36:27 $ # 
require(spatstat) local({ df <- data.frame(x=c(1,2,2,1), y=c(1,2,3,1), z=c(2,3,4,2)) X <- ppx(data=df, coord.type=rep("s", 3), domain=box3()) unique(X) duplicated(X) multiplicity(X) }) spatstat/src/0000755000176000001440000000000012252324034012731 5ustar ripleyusersspatstat/src/linnncross.h0000644000176000001440000000625712252324034015304 0ustar ripleyusers/* linnncross.h Function body definitions with macros $Revision: 1.1 $ $Date: 2013/10/21 02:01:39 $ Macros used: FNAME name of function EXCLU whether serial numbers are provided WHICH whether 'nnwhich' is required */ void FNAME(np, xp, yp, /* data points 'from' */ nq, xq, yq, /* data points 'to' */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ psegmap, /* map from data points to segments */ qsegmap, /* map from data points to segments */ #ifdef EXCLU idP, idQ, /* serial numbers for patterns p and q */ #endif huge, /* value taken as infinity */ /* OUTPUT */ #ifdef WHICH nndist, /* nearest neighbour distance for each point */ nnwhich /* identifies nearest neighbour */ #else nndist, /* nearest neighbour distance for each point */ #endif ) int *np, *nq, *nv, *ns; int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */ #ifdef EXCLU int *idP, *idQ; #endif double *xp, *yp, *xq, *yq, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *nndist; /* nearest neighbour distance for each point */ #ifdef WHICH int *nnwhich; /* identifies nearest neighbour */ #endif { int Np, Nq, Nv, i, j; int segPi, segQj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xqj, yqj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; #ifdef EXCLU int idPi; #endif #ifdef WHICH int whichmin; #endif Np = *np; Nq = *nq; Nv = *nv; hugevalue = *huge; /* initialise nn distances */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; #ifdef WHICH nnwhich[i] = -1; #endif } /* main loop */ for(i = 0; i < Np; i++) { xpi = xp[i]; 
ypi = yp[i]; #ifdef EXCLU idPi = idP[i]; #endif segPi = psegmap[i]; nbi1 = from[segPi]; nbi2 = to[segPi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = nndist[i]; #ifdef WHICH whichmin = nnwhich[i]; #endif for(j = 0; j < Nq; j++) { #ifdef EXCLU if(idQ[j] != idPi) { #endif xqj = xq[j]; yqj = yq[j]; segQj = qsegmap[j]; /* compute path distance between i and j */ if(segPi == segQj) { /* points i and j lie on the same segment; use Euclidean distance */ d = EUCLID(xpi, ypi, xqj, yqj); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segQj]; nbj2 = to[segQj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xqj, yqj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn for point i */ if(d < dmin) { dmin = d; #ifdef WHICH whichmin = j; #endif } #ifdef EXCLU } #endif } /* commit nn distance for point i */ nndist[i] = dmin; #ifdef WHICH nnwhich[i] = whichmin; #endif } } spatstat/src/linnndist.c0000755000176000001440000001167412252324034015113 0ustar ripleyusers#include /* linnndist.c Shortest-path distances between nearest neighbours in linear network $Revision: 1.1 $ $Date: 2013/10/21 02:01:14 $ linnndist linnnwhich */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #define ANSWER(I,J) answer[(J) + Np * (I)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void linnndist(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ huge, /* value taken as infinity */ /* OUTPUT */ answer /* nearest neighbour distance for each point */ ) int *np, *nv, 
*ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *answer; /* vector of output values */ { int Np, Nv, i, j, Np1; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; Np = *np; Nv = *nv; Np1 = Np - 1; hugevalue = *huge; /* initialise nn distances */ for(i = 0; i < Np; i++) answer[i] = hugevalue; /* main loop */ for(i = 0; i < Np1; i++) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = answer[i]; for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; /* compute path distance between i and j */ if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn distance for point i */ if(d < dmin) dmin = d; /* update nn distance for point j */ if(d < answer[j]) answer[j] = d; } /* commit nn distance for point i */ answer[i] = dmin; } } void linnnwhich(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ huge, /* value taken as infinity */ /* OUTPUT */ nndist, /* nearest neighbour distance for each 
point */ nnwhich /* identifies nearest neighbour */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *huge; double *dpath; /* matrix */ double *nndist; /* vector of output values */ int *nnwhich; /* vector of output values */ { int Np, Nv, i, j, Np1; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; double dmin, hugevalue; int whichmin; Np = *np; Nv = *nv; Np1 = Np - 1; hugevalue = *huge; /* initialise nn distances and identifiers */ for(i = 0; i < Np; i++) { nndist[i] = hugevalue; nnwhich[i] = -1; } /* main loop */ for(i = 0; i < Np1; i++) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); dmin = nndist[i]; whichmin = nnwhich[i]; for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* OK, distance between i and j is d */ /* update nn for point i */ if(d < dmin) { dmin = d; whichmin = j; } /* update nn for point j */ if(d < nndist[j]) { nndist[j] = d; nnwhich[j] = i; } } /* commit nn for point i */ nndist[i] = dmin; nnwhich[i] = whichmin; } } spatstat/src/linpairdist.c0000755000176000001440000000430112252324034015420 0ustar ripleyusers#include #include #include 
"chunkloop.h" /* linpairdist.c Shortest-path distances between each pair of points in linear network $Revision: 1.5 $ $Date: 2012/10/12 10:21:46 $ linpairdist */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void linpairdist(np, xp, yp, /* data points */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ segmap, /* map from data points to segments */ /* OUTPUT */ answer /* shortest path distances between points */ ) int *np, *nv, *ns; int *from, *to, *segmap; /* integer vectors (mappings) */ double *xp, *yp, *xv, *yv; /* vectors of coordinates */ double *dpath, *answer; /* matrices */ { int Np, Nv, i, j, Np1, maxchunk; int segi, segj, nbi1, nbi2, nbj1, nbj2; double d, xpi, ypi, xpj, ypj, dXi1, dXi2, d1Xj, d2Xj, d11, d12, d21, d22; Np = *np; Nv = *nv; Np1 = Np - 1; OUTERCHUNKLOOP(i, Np1, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Np1, maxchunk, 1024) { xpi = xp[i]; ypi = yp[i]; segi = segmap[i]; nbi1 = from[segi]; nbi2 = to[segi]; dXi1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dXi2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); for(j = i+1; j < Np; j++) { xpj = xp[j]; ypj = yp[j]; segj = segmap[j]; if(segi == segj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xpj, 2) + pow(ypi - ypj, 2)); } else { /* Shortest path from i to j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[segj]; nbj2 = to[segj]; d1Xj = EUCLID(xv[nbj1], yv[nbj1], xpj, ypj); d2Xj = EUCLID(xv[nbj2], yv[nbj2], xpj, ypj); d11 = dXi1 + DPATH(nbi1,nbj1) + d1Xj; d12 = dXi1 + DPATH(nbi1,nbj2) + d2Xj; d21 = dXi2 + DPATH(nbi2,nbj1) + d1Xj; d22 = dXi2 + DPATH(nbi2,nbj2) + d2Xj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* write */ ANSWER(i,j) = ANSWER(j,i) = d; } ANSWER(i,i) = 0; } } } 
spatstat/src/closepair.c0000755000176000001440000002322412252324034015064 0ustar ripleyusers/* closepair.c $Revision: 1.31 $ $Date: 2013/11/22 01:01:12 $ Assumes point pattern is sorted in increasing order of x coordinate paircount() count the total number of pairs (i, j) with distance < rmax Cclosepaircounts count for each i the number of j with distance < rmax crosscount() count number of close pairs in two patterns (note: Ccrosspaircounts is defined in Estrauss.c) duplicatedxy() find duplicated (x,y) pairs Fclosepairs() extract close pairs of coordinates .C interface - output vectors have Fixed length Fcrosspairs() extract close pairs in two patterns .C interface - output vectors have Fixed length Vclosepairs() extract close pairs of coordinates .Call interface - output vectors have Variable length Vcrosspairs() extract close pairs in two patterns .Call interface - output vectors have Variable length */ #include #include #include #define OK 0 #define ERR_OVERFLOW 1 #define ERR_ALLOC 2 #define FAILED(X) ((void *)(X) == (void *)NULL) #define intRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (int *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(int)) #define dblRealloc(PTR, OLDLENGTH, NEWLENGTH) \ (double *) S_realloc((char *) PTR, NEWLENGTH, OLDLENGTH, sizeof(double)) double sqrt(); /* count TOTAL number of close pairs */ void paircount(nxy, x, y, rmaxi, count) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ /* output */ int *count; { int n, maxchunk, i, j, counted; double xi, yi, rmax, r2max, dx, dy, a; n = *nxy; rmax = *rmaxi; r2max = rmax * rmax; *count = counted = 0; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards from i */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 
0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) ++counted; } } if(i + 1 < n) { /* scan forwards from i */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) ++counted; } } /* end loop over i */ } } *count = counted; } /* count for each i the number of j closer than distance r */ void Cclosepaircounts(nxy, x, y, rmaxi, counts) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ /* output VECTOR, assumed initialised to 0 */ int *counts; { int n, maxchunk, i, j; double xi, yi, rmax, r2max, dx, dy, a; n = *nxy; rmax = *rmaxi; r2max = rmax * rmax; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards from i */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) (counts[i])++; } } if(i + 1 < n) { /* scan forwards from i */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; a = r2max - dx * dx; if(a < 0) break; dy = y[j] - yi; a -= dy * dy; if(a >= 0) (counts[i])++; } } /* end loop over i */ } } } /* analogue for two different point patterns */ void crosscount(nn1, x1, y1, nn2, x2, y2, rmaxi, count) /* inputs */ int *nn1, *nn2; double *x1, *y1, *x2, *y2, *rmaxi; /* output */ int *count; { int n1, n2, maxchunk, i, j, jleft, counted; double x1i, y1i, rmax, r2max, xleft, dx, dy, a; n1 = *nn1; n2 = *nn2; rmax = *rmaxi; r2max = rmax * rmax; *count = counted = 0; if(n1 == 0 || n2 == 0) return; jleft = 0; i = 0; maxchunk = 0; while(i < n1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n1) maxchunk = n1; for(; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; /* adjust starting index */ xleft = x1i - rmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from 
j=jleft until dx > rmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; a = r2max - dx * dx; if(a < 0) break; dy = y2[j] - y1i; a -= dy * dy; if(a > 0) ++counted; } } } *count = counted; } /* Find duplicated locations xx, yy are not sorted */ void duplicatedxy(n, x, y, out) /* inputs */ int *n; double *x, *y; /* output */ int *out; /* logical vector */ { int m, i, j; double xi, yi; m = *n; for(i = 1; i < m; i++) { R_CheckUserInterrupt(); xi = x[i]; yi = y[i]; for(j = 0; j < i; j++) if((x[j] == xi) && (y[j] == yi)) break; if(j == i) out[i] = 0; else out[i] = 1; } } /* ............... fixed output length .............. */ void Fclosepairs(nxy, x, y, r, noutmax, nout, iout, jout, xiout, yiout, xjout, yjout, dxout, dyout, dout, status) /* inputs */ int *nxy, *noutmax; double *x, *y, *r; /* outputs */ int *nout, *iout, *jout; double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout; int *status; { int n, k, kmax, maxchunk, i, j; double xi, yi, rmax, r2max, dx, dy, dx2, d2; n = *nxy; rmax = *r; r2max = rmax * rmax; *status = OK; *nout = 0; k = 0; /* k is the next available storage location and also the current length of the list */ kmax = *noutmax; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backwards */ for(j = i - 1; j >= 0; j++) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > r2max) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { *nout = k; *status = ERR_OVERFLOW; return; } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); ++k; } } } if(i + 1 < n) { /* scan forwards */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > r2max) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add 
this (i, j) pair to output */ if(k >= kmax) { *nout = k; *status = ERR_OVERFLOW; return; } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); ++k; } } } } } *nout = k; } void Fcrosspairs(nn1, x1, y1, nn2, x2, y2, rmaxi, noutmax, nout, iout, jout, xiout, yiout, xjout, yjout, dxout, dyout, dout, status) /* inputs */ int *nn1, *nn2, *noutmax; double *x1, *y1, *x2, *y2, *rmaxi; /* outputs */ int *nout, *iout, *jout; double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout; int *status; { int n1, n2, maxchunk, k, kmax, i, j, jleft; double x1i, y1i, rmax, r2max, xleft, dx, dy, dx2, d2; n1 = *nn1; n2 = *nn2; rmax = *rmaxi; r2max = rmax * rmax; *status = OK; *nout = 0; k = 0; /* k is the next available storage location and also the current length of the list */ kmax = *noutmax; if(n1 == 0 || n2 == 0) return; jleft = 0; i = 0; maxchunk = 0; while(i < n1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n1) maxchunk = n1; for(; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; /* adjust starting position jleft */ xleft = x1i - rmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from j=jleft until dx > rmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; dx2 = dx * dx; if(dx2 > r2max) break; dy = y2[j] - y1i; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { *nout = k; *status = ERR_OVERFLOW; return; } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; xiout[k] = x1i; yiout[k] = y1i; xjout[k] = x2[j]; yjout[k] = y2[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); ++k; } } } } *nout = k; } /* ........ versions that return variable-length vectors ......... 
*/ /* return i, j only */ #define CLOSEFUN VcloseIJpairs #define CROSSFUN VcrossIJpairs #undef THRESH #undef COORDS #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS /* return i, j, xi, yi, xj, yj, dx, dy, d */ #define CLOSEFUN Vclosepairs #define CROSSFUN Vcrosspairs #undef THRESH #define COORDS #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS /* return i, j, t where t = 1{d < s} */ #define CLOSEFUN Vclosethresh #define CROSSFUN Vcrossthresh #define THRESH #undef COORDS #include "closefuns.h" #undef CLOSEFUN #undef CROSSFUN #undef THRESH #undef COORDS spatstat/src/mhloop.h0000755000176000001440000002342512252324034014411 0ustar ripleyusers /* mhloop.h This file contains the iteration loop for the Metropolis-Hastings algorithm methas.c It is #included several times in methas.c with different #defines for the following variables MH_MARKED whether the simulation is marked (= the variable 'marked' is TRUE) MH_SINGLE whether there is a single interaction (as opposed to a hybrid of several interactions) MH_TRACKING whether to save transition history MH_DEBUG whether to print debug information MH_SNOOP whether to run visual debugger $Revision: 1.18 $ $Date: 2013/05/27 02:09:10 $ */ #ifndef MH_DEBUG #define MH_DEBUG NO #endif OUTERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(irep, algo.nrep, maxchunk, 1024) { #if MH_DEBUG Rprintf("iteration %d\n", irep); #endif if(verb) { /* print progress message every nverb iterations */ iverb = irep + 1 + algo.nrep0; if((iverb % algo.nverb) == 0) Rprintf("iteration %d\n", iverb); } itype = REJECT; nfree = state.npts - algo.ncond; /* number of 'free' points */ /* ................ generate proposal ..................... 
*/ /* Shift or birth/death: */ if(unif_rand() > algo.p) { #if MH_DEBUG Rprintf("propose birth or death\n"); #endif /* Birth/death: */ if(unif_rand() > algo.q) { /* Propose birth: */ birthprop.u = xpropose[irep]; birthprop.v = ypropose[irep]; #if MH_MARKED birthprop.mrk = mpropose[irep]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose birth at (%lf, %lf) with mark %d\n", birthprop.u, birthprop.v, birthprop.mrk); #else Rprintf("propose birth at (%lf, %lf)\n", birthprop.u, birthprop.v); #endif #endif /* evaluate conditional intensity */ #if MH_MARKED betavalue = model.beta[birthprop.mrk]; #endif #if MH_SINGLE anumer = betavalue * (*(thecif.eval))(birthprop, state, thecdata); #else anumer = betavalue; for(k = 0; k < Ncif; k++) anumer *= (*(cif[k].eval))(birthprop, state, cdata[k]); #endif adenom = qnodds*(nfree+1); #if MH_DEBUG Rprintf("cif = %lf, Hastings ratio = %lf\n", anumer, anumer/adenom); #endif /* accept/reject */ if(unif_rand() * adenom < anumer) { #if MH_DEBUG Rprintf("accepted birth\n"); #endif itype = BIRTH; /* Birth proposal accepted. */ } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &birthprop, anumer, adenom, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = BIRTH; history.accepted[irep] = (itype == REJECT) ? 
0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = anumer; history.denominator[irep] = adenom; #endif } #endif } else if(nfree > 0) { /* Propose death: */ ix = floor(nfree * unif_rand()); if(ix < 0) ix = 0; ix = algo.ncond + ix; if(ix >= state.npts) ix = state.npts - 1; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif #if MH_DEBUG #if MH_MARKED Rprintf("propose death of point %d = (%lf, %lf) with mark %d\n", ix, deathprop.u, deathprop.v, deathprop.mrk); #else Rprintf("propose death of point %d = (%lf, %lf)\n", ix, deathprop.u, deathprop.v); #endif #endif /* evaluate conditional intensity */ #if MH_MARKED betavalue = model.beta[deathprop.mrk]; #endif #if MH_SINGLE adenom = betavalue * (*(thecif.eval))(deathprop, state, thecdata); #else adenom = betavalue; for(k = 0; k < Ncif; k++) adenom *= (*(cif[k].eval))(deathprop, state, cdata[k]); #endif anumer = qnodds * nfree; #if MH_DEBUG Rprintf("cif = %lf, Hastings ratio = %lf\n", adenom, anumer/adenom); #endif /* accept/reject */ if(unif_rand() * adenom < anumer) { #if MH_DEBUG Rprintf("accepted death\n"); #endif itype = DEATH; /* Death proposal accepted. */ } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &deathprop, anumer, adenom, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = DEATH; history.accepted[irep] = (itype == REJECT) ? 
0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = anumer; history.denominator[irep] = adenom; #endif } #endif } } else if(nfree > 0) { /* Propose shift: */ /* point to be shifted */ ix = floor(nfree * unif_rand()); if(ix < 0) ix = 0; ix = algo.ncond + ix; if(ix >= state.npts) ix = state.npts - 1; deathprop.ix = ix; deathprop.u = state.x[ix]; deathprop.v = state.y[ix]; #if MH_MARKED deathprop.mrk = state.marks[ix]; #endif /* where to shift */ shiftprop.u = xpropose[irep]; shiftprop.v = ypropose[irep]; #if MH_MARKED shiftprop.mrk = (algo.fixall) ? deathprop.mrk : mpropose[irep]; #endif shiftprop.ix = ix; #if MH_DEBUG #if MH_MARKED Rprintf("propose shift of point %d = (%lf, %lf)[mark %d] to (%lf, %lf)[mark %d]\n", ix, deathprop.u, deathprop.v, deathprop.mrk, shiftprop.u, shiftprop.v, shiftprop.mrk); #else Rprintf("propose shift of point %d = (%lf, %lf) to (%lf, %lf)\n", ix, deathprop.u, deathprop.v, shiftprop.u, shiftprop.v); #endif #endif /* evaluate cif in two stages */ #if MH_SINGLE cvd = (*(thecif.eval))(deathprop, state, thecdata); cvn = (*(thecif.eval))(shiftprop, state, thecdata); #else cvd = cvn = 1.0; for(k = 0; k < Ncif; k++) { cvd *= (*(cif[k].eval))(deathprop, state, cdata[k]); cvn *= (*(cif[k].eval))(shiftprop, state, cdata[k]); } #endif #if MH_MARKED if(!algo.fixall) { cvn *= model.beta[shiftprop.mrk]; cvd *= model.beta[deathprop.mrk]; } #endif #if MH_DEBUG Rprintf("cif[old] = %lf, cif[new] = %lf, Hastings ratio = %lf\n", cvd, cvn, cvn/cvd); #endif /* accept/reject */ if(unif_rand() * cvd < cvn) { #if MH_DEBUG Rprintf("accepted shift\n"); #endif itype = SHIFT; /* Shift proposal accepted . */ } #if MH_SNOOP /* visual debug */ mhsnoop(&snooper, irep, &algo, &state, &shiftprop, cvn, cvd, &itype); #endif #if MH_TRACKING /* save transition history */ if(irep < history.nmax) { history.n++; history.proptype[irep] = SHIFT; history.accepted[irep] = (itype == REJECT) ? 
0 : 1; #ifdef HISTORY_INCLUDES_RATIO history.numerator[irep] = cvn; history.denominator[irep] = cvd; #endif } #endif } if(itype != REJECT) { /* ....... implement the transition ............ */ if(itype == BIRTH) { /* Birth transition */ /* add point at (u,v) */ #if MH_DEBUG #if MH_MARKED Rprintf("implementing birth at (%lf, %lf) with mark %d\n", birthprop.u, birthprop.v, birthprop.mrk); #else Rprintf("implementing birth at (%lf, %lf)\n", birthprop.u, birthprop.v); #endif #endif if(state.npts + 1 > state.npmax) { #if MH_DEBUG Rprintf("!!!!!!!!!!! storage overflow !!!!!!!!!!!!!!!!!\n"); #endif /* storage overflow; allocate more storage */ Nmore = 2 * state.npmax; state.x = (double *) S_realloc((char *) state.x, Nmore, state.npmax, sizeof(double)); state.y = (double *) S_realloc((char *) state.y, Nmore, state.npmax, sizeof(double)); #if MH_MARKED state.marks = (int *) S_realloc((char *) state.marks, Nmore, state.npmax, sizeof(int)); #endif state.npmax = Nmore; /* call the initialiser again, to allocate additional space */ #if MH_SINGLE thecdata = (*(thecif.init))(state, model, algo); #else model.ipar = iparvector; for(k = 0; k < Ncif; k++) { if(k > 0) model.ipar += plength[k-1]; cdata[k] = (*(cif[k].init))(state, model, algo); } #endif #if MH_DEBUG Rprintf("........... 
storage extended .................\n"); #endif } if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, birthprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, birthprop, cdata[k]); } #endif } /* Now add point */ state.x[state.npts] = birthprop.u; state.y[state.npts] = birthprop.v; #if MH_MARKED state.marks[state.npts] = birthprop.mrk; #endif state.npts = state.npts + 1; #if MH_DEBUG Rprintf("\tnpts=%d\n", state.npts); #endif } else if(itype==DEATH) { /* Death transition */ /* delete point x[ix], y[ix] */ if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, deathprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, deathprop, cdata[k]); } #endif } ix = deathprop.ix; state.npts = state.npts - 1; #if MH_DEBUG Rprintf("implementing death of point %d\n", ix); Rprintf("\tnpts=%d\n", state.npts); #endif if(ix < state.npts) { for(j = ix; j < state.npts; j++) { state.x[j] = state.x[j+1]; state.y[j] = state.y[j+1]; #if MH_MARKED state.marks[j] = state.marks[j+1]; #endif } } } else { /* Shift transition */ /* Shift (x[ix], y[ix]) to (u,v) */ #if MH_DEBUG #if MH_MARKED Rprintf("implementing shift from %d = (%lf, %lf)[%d] to (%lf, %lf)[%d]\n", deathprop.ix, deathprop.u, deathprop.v, deathprop.mrk, shiftprop.u, shiftprop.v, shiftprop.mrk); #else Rprintf("implementing shift from %d = (%lf, %lf) to (%lf, %lf)\n", deathprop.ix, deathprop.u, deathprop.v, shiftprop.u, shiftprop.v); Rprintf("\tnpts=%d\n", state.npts); #endif #endif if(mustupdate) { /* Update auxiliary variables first */ #if MH_SINGLE (*(thecif.update))(state, shiftprop, thecdata); #else for(k = 0; k < Ncif; k++) { if(needupd[k]) (*(cif[k].update))(state, shiftprop, cdata[k]); } #endif } ix = shiftprop.ix; state.x[ix] = shiftprop.u; state.y[ix] = shiftprop.v; #if MH_MARKED state.marks[ix] = shiftprop.mrk; #endif } #if MH_DEBUG } else { Rprintf("rejected\n"); 
#endif } } } spatstat/src/dppll.f0000755000176000001440000000244112252324034014217 0ustar ripleyusersC Output from Public domain Ratfor, version 1.0 subroutine dppll(x,y,l1,l2,l3,l4,np,nl,eps,mint,rslt,xmin,jmin) implicit double precision(a-h,o-z) dimension x(np), y(np), rslt(np,nl), xmin(np), jmin(np) double precision l1(nl), l2(nl), l3(nl), l4(nl) one = 1.d0 zero = 0.d0 do23000 j = 1,nl dx = l3(j) - l1(j) dy = l4(j) - l2(j) alen = sqrt(dx**2 + dy**2) if(alen .gt. eps)then co = dx/alen si = dy/alen else co = 0.5 si = 0.5 endif do23004 i = 1, np xpx1 = x(i) - l1(j) ypy1 = y(i) - l2(j) xpx2 = x(i) - l3(j) ypy2 = y(i) - l4(j) d1 = xpx1**2 + ypy1**2 d2 = xpx2**2 + ypy2**2 dd = min(d1,d2) if(alen .gt. eps)then xpr = xpx1*co + ypy1*si if(xpr .lt. zero .or. xpr .gt. alen)then d3 = -one else ypr = - xpx1*si + ypy1*co d3 = ypr**2 endif else d3 = -one endif if(d3 .ge. zero)then dd = min(dd,d3) endif sd =sqrt(dd) rslt(i,j) = sd if(mint.gt.0)then if(sd .lt. xmin(i))then xmin(i) = sd if(mint.gt.1)then jmin(i) = j endif endif endif 23004 continue 23005 continue 23000 continue 23001 continue return end spatstat/src/distances.c0000755000176000001440000002327412252324034015065 0ustar ripleyusers/* distances.c Distances between pairs of points $Revision: 1.30 $ $Date: 2013/11/03 03:35:34 $ Cpairdist Pairwise distances Cpair2dist Pairwise distances squared CpairPdist Pairwise distances with periodic correction CpairP2dist Pairwise distances squared, with periodic correction Ccrossdist Pairwise distances for two sets of points Ccross2dist Pairwise distances squared, for two sets of points CcrossPdist Pairwise distances for two sets of points, periodic correction Cmatchxy Find matches between two sets of points */ #include #include #include "chunkloop.h" double sqrt(); void Cpairdist(n, x, y, squared, d) /* inputs */ int *n; double *x, *y; int *squared; /* output */ double *d; { void Cpair1dist(), Cpair2dist(); if(*squared == 0) { Cpair1dist(n, x, y, d); } else { Cpair2dist(n, x, y, 
d); } } void Cpair1dist(n, x, y, d) /* inputs */ int *n; double *x, *y; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dist = sqrt( dx * dx + dy * dy ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } /* squared distances */ void Cpair2dist(n, x, y, d) /* inputs */ int *n; double *x, *y; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dist = dx * dx + dy * dy; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } void Ccrossdist(nfrom, xfrom, yfrom, nto, xto, yto, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto; int *squared; /* output */ double *d; { void Ccross1dist(), Ccross2dist(); if(*squared == 0) { Ccross1dist(nfrom, xfrom, yfrom, nto, xto, yto, d); } else { Ccross2dist(nfrom, xfrom, yfrom, nto, xto, yto, d); } } void Ccross1dist(nfrom, xfrom, yfrom, nto, xto, yto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy; nf = *nfrom; nt = *nto; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = 
yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; *dptr = sqrt( dx * dx + dy * dy ); } } } } /* squared distances */ void Ccross2dist(nfrom, xfrom, yfrom, nto, xto, yto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy; nf = *nfrom; nt = *nto; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; *dptr = dx * dx + dy * dy; } } } } /* distances with periodic correction */ void CpairPdist(n, x, y, xwidth, yheight, squared, d) /* inputs */ int *n; double *x, *y, *xwidth, *yheight; int *squared; /* output */ double *d; { void CpairP1dist(), CpairP2dist(); if(*squared == 0) { CpairP1dist(n, x, y, xwidth, yheight, d); } else { CpairP2dist(n, x, y, xwidth, yheight, d); } } void CpairP1dist(n, x, y, xwidth, yheight, d) /* inputs */ int *n; double *x, *y, *xwidth, *yheight; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dx2, dy2, dx2p, dy2p, dist, wide, high; npoints = *n; wide = *xwidth; high = *yheight; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dist = sqrt( dx2p + dy2p ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } /* same function 
without the sqrt */ void CpairP2dist(n, x, y, xwidth, yheight, d) /* inputs */ int *n; double *x, *y, *xwidth, *yheight; /* output */ double *d; { int i, j, npoints, maxchunk; double *dp; double xi, yi, dx, dy, dx2, dy2, dx2p, dy2p, dist, wide, high; npoints = *n; wide = *xwidth; high = *yheight; /* set d[0,0] = 0 */ *d = 0.0; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { xi = x[i]; yi = y[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dist = dx2p + dy2p; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } } void CcrossPdist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight; int *squared; /* output */ double *d; { void CcrossP1dist(), CcrossP2dist(); if(*squared == 0) { CcrossP1dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d); } else { CcrossP2dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d); } } void CcrossP1dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy, dx2, dy2, dx2p, dy2p, wide, high; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dx2p = dx * dx; 
dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; *dptr = sqrt( dx2p + dy2p ); } } } } void CcrossP2dist(nfrom, xfrom, yfrom, nto, xto, yto, xwidth, yheight, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *xto, *yto, *xwidth, *yheight; /* output */ double *d; { int i, j, nf, nt, maxchunk; double *dptr; double xj, yj, dx, dy, dx2, dy2, dx2p, dy2p, wide, high; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; dptr = d; OUTERCHUNKLOOP(j, nt, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nt, maxchunk, 16384) { xj = xto[j]; yj = yto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; *dptr = dx2p + dy2p; } } } } /* matchxy Find matches between two lists of points */ void Cmatchxy(na, xa, ya, nb, xb, yb, match) /* inputs */ int *na, *nb; double *xa, *ya, *xb, *yb; /* output */ int *match; /* match[i] = j+1 if xb[j], yb[j] matches xa[i], ya[i] */ /* match[i] = 0 if no such point matches xa[i], ya[i] */ { int i, j, Na, Nb, maxchunk; double xai, yai; Na = *na; Nb = *nb; OUTERCHUNKLOOP(i, Na, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Na, maxchunk, 16384) { xai = xa[i]; yai = ya[i]; match[i] = 0; for (j=0; j < Nb; j++) { if(xai == xb[j] && yai == yb[j]) { match[i] = j+1; break; } } } } } spatstat/src/functable.h0000755000176000001440000000310212252324034015044 0ustar ripleyusers/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Definitions of C structures for spatial statistics function estimates. 
Usually the estimates are of the form f^(x) = a^(x)/b^(x); we store f^ and also a^ and b^ to cater for applications with replicated data. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ typedef struct Ftable { /* double precision function table */ double t0; double t1; int n; /* number of entries */ double *f; double *num; /* f[i] = num[i]/denom[i] */ double *denom; } Ftable; typedef struct Itable { /* integer count table e.g for histograms */ double t0; double t1; int n; int *num; int *denom; /* usually p[i] = num[i]/denom[i] */ } Itable; typedef struct H4table { /* Four histograms, for censored data */ double t0; double t1; int n; int *obs; /* observed lifetimes: o_i = min(t_i, c_i) */ int *nco; /* uncensored lifetimes: o_i for which t_i <= c_i */ int *cen; /* censoring times: c_i */ int *ncc; /* censor times of uncensored data: c_i for which t_i <= c_i */ int upperobs; /* number of o_i that exceed t1 */ int uppercen; /* number of c_i that exceed t1 */ } H4table; spatstat/src/triplets.c0000644000176000001440000000615512252324034014752 0ustar ripleyusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Triplets process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Triplets { double gamma; double r; double loggamma; double r2; double *period; int hard; int per; int *neighbour; /* scratch list of neighbours of current point */ int Nmax; /* length of scratch space allocated */ } Triplets; /* initialiser function */ Cdata *tripletsinit(state, model, algo) State 
state; Model model; Algor algo; { /* create storage for model parameters */ Triplets *triplets; triplets = (Triplets *) R_alloc(1, sizeof(Triplets)); /* create scratch space */ triplets->Nmax = 1024; triplets->neighbour = (int *) R_alloc(1024, sizeof(int)); /* Interpret model parameters*/ triplets->gamma = model.ipar[0]; triplets->r = model.ipar[1]; /* No longer passed as r^2 */ triplets->r2 = triplets->r * triplets->r; triplets->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Triplets gamma=%lf, r=%lf\n", triplets->gamma, triplets->r); #endif /* is the model numerically equivalent to hard core ? */ triplets->hard = (triplets->gamma < DOUBLE_EPS); triplets->loggamma = (triplets->hard) ? 0 : log(triplets->gamma); /* periodic boundary conditions? */ triplets->per = (model.period[0] > 0.0); return((Cdata *) triplets); } /* conditional intensity evaluator */ double tripletscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, j, k, nj, nk, N, Nmax, Nmore, N1; int *neighbour; double *x, *y; double u, v; double r2, d2, cifval; Triplets *triplets; triplets = (Triplets *) cdata; r2 = triplets->r2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); neighbour = triplets->neighbour; Nmax = triplets->Nmax; N = 0; /* compile list of neighbours */ for(j=0; j < npts; j++) { if(j != ix) { d2 = dist2either(u,v,x[j],y[j],triplets->period); if(d2 < r2) { /* add j to list of neighbours of current point */ if(N >= Nmax) { /* storage space overflow: reallocate */ Nmore = 2 * Nmax; triplets->neighbour = neighbour = (int *) S_realloc((char *) triplets->neighbour, Nmore, Nmax, sizeof(int)); triplets->Nmax = Nmax = Nmore; } neighbour[N] = j; N++; } } } /* count r-close (ordered) pairs of neighbours */ kount = 0; if(N > 1) { N1 = N - 1; for(j = 0; j < N1; j++) { nj = neighbour[j]; for(k = j+1; k < N; k++) { nk = neighbour[k]; if(nj != nk) { d2 = 
dist2either(x[nj],y[nj],x[nk],y[nk],triplets->period); if(d2 < r2) kount++; } } } } if(triplets->hard) { if(kount > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((triplets->loggamma) * kount); #ifdef MHDEBUG Rprintf("triplet count=%d cif=%lf\n", kount, cifval); #endif return cifval; } Cifns TripletsCifns = { &tripletsinit, &tripletscif, (updafunptr) NULL, NO}; spatstat/src/loccumx.h0000644000176000001440000000371012252324034014555 0ustar ripleyusers/* loccumx.h C template for loccum.c grid-to-data functions $Revision: 1.5 $ $Date: 2012/11/10 06:13:52 $ macros: FNAME function name NULVAL initial value (empty sum = 0, empty product = 1) INC(A,B) increment operation A += B or A *= B */ void FNAME(ntest, xtest, ytest, ndata, xdata, ydata, vdata, nr, rmax, ans) /* inputs */ int *ntest, *ndata, *nr; double *xtest, *ytest, *xdata, *ydata, *vdata; double *rmax; /* output */ double *ans; /* matrix of column vectors of functions for each point of first pattern */ { int Ntest, Ndata, Nr, Nans; double Rmax; int i, j, k, jleft, kmin, maxchunk, columnstart; double Rmax2, rstep, xtesti, ytesti, xleft; double dx, dy, dx2, d2, d, contrib; Ntest = *ntest; Ndata = *ndata; Nr = *nr; Rmax = *rmax; if(Ntest == 0) return; Nans = Nr * Ntest; /* initialise products to 1 */ OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Nans, maxchunk, 8196) { ans[k] = NULVAL; } } if(Ndata == 0) return; rstep = Rmax/(Nr-1); Rmax2 = Rmax * Rmax; jleft = 0; OUTERCHUNKLOOP(i, Ntest, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ntest, maxchunk, 8196) { xtesti = xtest[i]; ytesti = ytest[i]; columnstart = Nr * i; /* start position for f_i(.) 
in 'ans' */ /* adjust starting point */ xleft = xtesti - Rmax; while((xdata[jleft] < xleft) && (jleft+1 < Ndata)) ++jleft; /* process from jleft until |dx| > Rmax */ for(j=jleft; j < Ndata; j++) { dx = xdata[j] - xtesti; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = ydata[j] - ytesti; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = vdata[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } } } spatstat/src/constants.h0000644000176000001440000000051512252324034015117 0ustar ripleyusers/* constants.h Ensure that required constants are defined (Insurance against flaky installations) $Revision: 1.1 $ $Date: 2013/08/09 08:14:15 $ */ #ifndef M_PI_2 #define M_PI_2 1.570796326794897 #endif #ifndef M_PI #define M_PI 3.141592653589793 #endif #ifndef M_2PI #define M_2PI 6.283185307179586 #endif spatstat/src/inxypOld.f0000755000176000001440000000273412252324034014717 0ustar ripleyusersC Output from Public domain Ratfor, version 1.0 subroutine inxyp(x,y,xp,yp,npts,nedges,score,onbndry) implicit double precision(a-h,o-z) dimension x(npts), y(npts), xp(nedges), yp(nedges), score(npts) logical first, onbndry(npts) zero = 0.0d0 half = 0.5d0 one = 1.0d0 do23000 i = 1,nedges x0 = xp(i) y0 = yp(i) if(i .eq. nedges)then x1 = xp(1) y1 = yp(1) else x1 = xp(i+1) y1 = yp(i+1) endif dx = x1 - x0 dy = y1 - y0 do23004 j = 1,npts xcrit = (x(j) - x0)*(x(j) - x1) if(xcrit .le. zero)then if(xcrit .eq. zero)then contrib = half else contrib = one endif ycrit = y(j)*dx - x(j)*dy + x0*dy - y0*dx if(dx .lt. 0)then if(ycrit .ge. zero)then score(j) = score(j) + contrib endif onbndry(j) = onbndry(j) .or. (ycrit .eq. zero) else if(dx .gt. zero)then if(ycrit .lt. zero)then score(j) = score(j) - contrib endif onbndry(j) = onbndry(j) .or. (ycrit .eq. zero) else if(x(j) .eq. x0)then ycrit = (y(j) - y0)*(y(j) - y1) endif onbndry(j) = onbndry(j) .or. (ycrit .le. 
zero) endif endif endif 23004 continue 23005 continue 23000 continue 23001 continue return end spatstat/src/Kborder.c0000755000176000001440000000155112252324034014472 0ustar ripleyusers#include #include #include /* Kborder.c Efficient computation of border-corrected estimates of K for large datasets KborderI() Estimates K function, returns integer numerator & denominator KborderD() Estimates K function, returns double precision numerator & denominator Kwborder() Estimates Kinhom. Functions require (x,y) data to be sorted in ascending order of x and expect r values to be equally spaced and starting at zero $Revision: 1.4 $ $Date: 2013/05/27 02:09:10 $ */ #undef WEIGHTED #define FNAME KborderI #define OUTTYPE int #include "Kborder.h" #undef FNAME #undef OUTTYPE #define FNAME KborderD #define OUTTYPE double #include "Kborder.h" #undef FNAME #undef OUTTYPE #define FNAME Kwborder #define WEIGHTED #define OUTTYPE double #include "Kborder.h" spatstat/src/knnXdist.h0000644000176000001440000001501312252324034014704 0ustar ripleyusers #if (1 == 0) /* knnXdist.h Code template for C functions supporting nncross for k-nearest neighbours (k > 1) THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED IN ASCENDING ORDER OF y COORDINATE This code is #included multiple times in knndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if exclusion mechanism is used Either or both DIST and WHICH may be defined. When EXCLUDE is defined, code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.10 $ $Date: 2013/12/10 03:29:55 $ */ #endif void FNAME(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; int *kmax; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int npoints1, npoints2, nk, nk1; int maxchunk, i, jleft, jright, jwhich, lastjwhich, unsorted, k, k1; double d2, d2minK, x1i, y1i, dx, dy, dy2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif #ifdef EXCLUDE int id1i; #endif #ifdef TRACER int kk; #endif npoints1 = *n1; npoints2 = *n2; nk = *kmax; nk1 = nk - 1; hu = *huge; hu2 = hu * hu; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints1) maxchunk = npoints1; for(; i < maxchunk; i++) { /* initialise nn distances and indices */ d2minK = hu2; jwhich = -1; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } x1i = x1[i]; y1i = y1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif #ifdef TRACER Rprintf("i=%d : (%lf, %lf) ..................... 
\n", i, x1i, y1i); #endif if(lastjwhich < npoints2) { #ifdef TRACER Rprintf("\tForward search from lastjwhich=%d:\n", lastjwhich); #endif /* search forward from previous nearest neighbour */ for(jright = lastjwhich; jright < npoints2; ++jright) { #ifdef TRACER Rprintf("\tjright=%d \t (%lf, %lf)\n", jright, x2[jright], y2[jright]); #endif dy = y2[jright] - y1i; dy2 = dy * dy; #ifdef TRACER Rprintf("\t\t dy2=%lf,\t d2minK=%lf\n", dy2, d2minK); #endif if(dy2 > d2minK) /* note that dy2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jright] != id1i) { #ifdef TRACER Rprintf("\t\t %d and %d are not identical\n", i, jright); #endif #endif dx = x2[jright] - x1i; d2 = dx * dx + dy2; #ifdef TRACER Rprintf("\t\t d2=%lf\n", d2); #endif if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ #ifdef TRACER Rprintf("\t\t overwrite d2min[nk1]=%lf by d2=%lf\n", d2min[nk1], d2); #endif d2min[nk1] = d2; jwhich = jright; #ifdef WHICH which[nk1] = jright; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } #ifdef TRACER Rprintf("\t\t sorted nn distances:\n"); for(kk = 0; kk < nk; kk++) Rprintf("\t\t d2min[%d] = %lf\n", kk, d2min[kk]); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; #ifdef TRACER Rprintf("\t\t d2minK=%lf\n", d2minK); #endif } #ifdef EXCLUDE } #endif } /* end forward search */ #ifdef TRACER Rprintf("\tEnd forward search\n"); #endif } if(lastjwhich > 0) { #ifdef TRACER Rprintf("\tBackward search from lastjwhich=%d:\n", lastjwhich); #endif /* search backward from previous nearest neighbour */ for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { #ifdef TRACER Rprintf("\tjleft=%d \t (%lf, %lf)\n", jleft, x2[jleft], y2[jleft]); #endif dy = y1i - y2[jleft]; dy2 = 
dy * dy; #ifdef TRACER Rprintf("\t\t dy2=%lf,\t d2minK=%lf\n", dy2, d2minK); #endif if(dy2 > d2minK) /* note that dy2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jleft] != id1i) { #ifdef TRACER Rprintf("\t\t %d and %d are not identical\n", i, jleft); #endif #endif dx = x2[jleft] - x1i; d2 = dx * dx + dy2; #ifdef TRACER Rprintf("\t\t d2=%lf\n", d2); #endif if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ #ifdef TRACER Rprintf("\t\t overwrite d2min[nk1]=%lf by d2=%lf\n", d2min[nk1], d2); #endif d2min[nk1] = d2; jwhich = jleft; #ifdef WHICH which[nk1] = jleft; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } #ifdef TRACER Rprintf("\t\t sorted nn distances:\n"); for(kk = 0; kk < nk; kk++) Rprintf("\t\t d2min[%d] = %lf\n", kk, d2min[kk]); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; #ifdef TRACER Rprintf("\t\t d2minK=%lf\n", d2minK); #endif } #ifdef EXCLUDE } #endif } /* end backward search */ #ifdef TRACER Rprintf("\tEnd backward search\n"); #endif } /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[nk * i + k] = which[k] + 1; /* R indexing */ #endif } /* save index of last neighbour encountered */ lastjwhich = jwhich; /* end of loop over points i */ } } } spatstat/src/nn3Ddist.h0000644000176000001440000000364412252324034014577 0ustar ripleyusers/* nn3Ddist.h Code template for nearest-neighbour algorithms for 3D point patterns Input is a single point pattern - supports 'nndist' and 'nnwhich' This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns 
distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. THE FOLLOWING CODE ASSUMES THAT THE POINT PATTERN IS SORTED IN ASCENDING ORDER OF THE z COORDINATE $Revision: 1.5 $ $Date: 2013/06/28 10:38:46 $ */ void FNAME(n, x, y, z, nnd, nnwhich, huge) /* inputs */ int *n; double *x, *y, *z, *huge; /* outputs */ double *nnd; int *nnwhich; { int npoints, i, j, maxchunk; double d2, d2min, xi, yi, zi, dx, dy, dz, dz2, hu, hu2; #ifdef WHICH int which; #endif hu = *huge; hu2 = hu * hu; npoints = *n; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { d2min = hu2; #ifdef WHICH which = -1; #endif xi = x[i]; yi = y[i]; zi = z[i]; /* search backward */ if(i > 0){ for(j = i - 1; j >= 0; --j) { dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2min) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; #ifdef WHICH which = j; #endif } } } /* search forward */ if(i < npoints - 1) { for(j = i + 1; j < npoints; ++j) { dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2min) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; #ifdef WHICH which = j; #endif } } } #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH /* convert to R indexing */ nnwhich[i] = which + 1; #endif } } } spatstat/src/Kborder.h0000755000176000001440000001066312252324034014503 0ustar ripleyusers/* Kborder.h Code template for K function estimators in Kborder.c Variables: FNAME function name OUTTYPE storage type of the output vectors ('int' or 'double') WEIGHTED #defined for weighted (inhom) K function Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.11 $ $Date: 2013/09/18 04:06:59 $ */ void FNAME( nxy, x, y, #ifdef WEIGHTED w, #endif b, nr, rmax, numer, denom) /* inputs */ int *nxy, *nr; double *x, *y, *b, *rmax; #ifdef WEIGHTED double *w; #endif /* 
outputs */ OUTTYPE *numer, *denom; { int i, j, l, n, nt, n1, nt1, lmin, lmax, maxchunk; double dt, tmax, xi, yi, bi, maxsearch, max2search; double bratio, dratio, dij, dij2, dx, dy, dx2; OUTTYPE *numerLowAccum, *numerHighAccum, *denomAccum; OUTTYPE naccum, daccum; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WI wi #define WJ wj #define WIJ wij #else #define ZERO 0 #define WI 1 #define WJ 1 #define WIJ 1 #endif n = *nxy; nt = *nr; n1 = n - 1; nt1 = nt - 1; dt = (*rmax)/(nt-1); tmax = *rmax; /* initialise */ numerLowAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); numerHighAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); denomAccum = (OUTTYPE *) R_alloc(nt, sizeof(OUTTYPE)); for(l = 0; l < nt; l++) numer[l] = denom[l] = numerLowAccum[l] = numerHighAccum[l] = denomAccum[l] = ZERO; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { /* -------- DENOMINATOR -------------*/ bi = b[i]; #ifdef WEIGHTED wi = w[i]; #endif /* increment denominator for all r < b[i] */ bratio = bi/dt; /* lmax is the largest integer STRICTLY less than bratio */ lmax = (int) ceil(bratio) - 1; lmax = (lmax <= nt1) ? lmax : nt1; /* effectively increment entries 0 to lmax */ if(lmax >= 0) denomAccum[lmax] += WI; /* ---------- NUMERATOR -----------*/ /* scan through points (x[j],y[j]) */ xi = x[i]; yi = y[i]; maxsearch = (bi < tmax) ? 
bi : tmax; max2search = maxsearch * maxsearch; /* scan backward from i-1 until |x[j]-x[i]| > maxsearch or until we run out */ if(i > 0) { for(j=i-1; j >= 0; j--) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= max2search) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < max2search) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r such that dij <= r < bi */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmax >= lmin) { #ifdef WEIGHTED wij = wi * wj; #endif numerLowAccum[lmin] += WIJ; numerHighAccum[lmax] += WIJ; } } } } /* scan forward from i+1 until x[j]-x[i] > maxsearch or until we run out */ if(i < n1) { for(j=i+1; j < n; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= max2search) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < max2search) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r such that dij <= r < bi */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmax >= lmin) { #ifdef WEIGHTED wij = wi * wj; #endif numerLowAccum[lmin] += WIJ; numerHighAccum[lmax] += WIJ; } } } } } } /* Now use the accumulated values to compute the numerator and denominator. The value of denomAccum[l] should be added to denom[k] for all k <= l. numerHighAccum[l] should be added to numer[k] for all k <=l numerLowAccum[l] should then be subtracted from numer[k] for k <= l. 
*/ for(l=nt1, naccum=daccum=ZERO; l>=0; l--) { daccum += denomAccum[l]; denom[l] = daccum; naccum += numerHighAccum[l]; numer[l] = naccum; naccum -= numerLowAccum[l]; } } #undef ZERO #undef WI #undef WJ #undef WIJ spatstat/src/Knone.h0000644000176000001440000000567212252324034014166 0ustar ripleyusers/* Knone.h Code template for K function estimators in Knone.c Variables: FNAME function name OUTTYPE storage type of the output 'numer' ('int' or 'double') WEIGHTED #defined for weighted (inhom) K function Copyright (C) Adrian Baddeley, Julian Gilbey and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.6 $ $Date: 2013/09/18 04:08:26 $ */ void FNAME( nxy, x, y, #ifdef WEIGHTED w, #endif nr, rmax, numer) /* inputs */ int *nxy, *nr; double *x, *y, *rmax; #ifdef WEIGHTED double *w; #endif /* output */ OUTTYPE *numer; { int i, j, l, n, nt, n1, lmin, lmax, maxchunk; double dt, tmax, tmax2, xi, yi; double dratio, dij, dij2, dx, dy, dx2; #ifdef WEIGHTED double wi, wj, wij; #endif #ifdef WEIGHTED #define ZERO 0.0 #define WI wi #define WJ wj #define WIJ wij #else #define ZERO 0 #define WI 1 #define WJ 1 #define WIJ 1 #endif n = *nxy; nt = *nr; n1 = n - 1; lmax = nt - 1; dt = (*rmax)/(nt-1); tmax = *rmax; tmax2 = tmax * tmax; /* initialise */ for(l = 0; l < nt; l++) numer[l] = ZERO; if(n == 0) return; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { #ifdef WEIGHTED wi = w[i]; #endif xi = x[i]; yi = y[i]; /* scan backward from i-1 until x[j] < x[i] -tmax or until we run out */ if(i > 0) { for(j=i-1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= tmax2) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < tmax2) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r >= dij */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* effectively increment entries lmin 
to lmax inclusive */ if(lmin <= lmax) { #ifdef WEIGHTED wij = wi * wj; #endif numer[lmin] += WIJ; } } } } /* scan forward from i+1 until x[j] > x[i] + tmax or until we run out */ if(i < n1) { for(j=i+1; j < n; j++) { /* squared interpoint distance */ dx = x[j] - xi; dx2 = dx * dx; if(dx2 >= tmax2) break; dy = y[j] - yi; dij2 = dx2 + dy * dy; if(dij2 < tmax2) { #ifdef WEIGHTED wj = w[j]; #endif /* increment numerator for all r >= dij */ dij = (double) sqrt(dij2); dratio = dij/dt; /* smallest integer greater than or equal to dratio */ lmin = (int) ceil(dratio); /* increment entries lmin to lmax inclusive */ if(lmin <= lmax) { #ifdef WEIGHTED wij = wi * wj; #endif numer[lmin] += WIJ; } } } } } } /* Now accumulate the numerator. */ if(nt > 1) for(l=1; l < nt; l++) numer[l] += numer[l-1]; } #undef ZERO #undef WI #undef WJ #undef WIJ spatstat/src/linalg.c0000755000176000001440000000716012252324034014352 0ustar ripleyusers/* linalg.c Home made linear algebra Yes, really $Revision: 1.9 $ $Date: 2013/09/25 06:07:24 $ Csumouter Cwsumouter Cquadform Csumsymouter Cwsumsymouter */ #include #include #include "chunkloop.h" /* ............... 
matrices ..............................*/ /* Csumouter computes the sum of outer products of columns of x y = sum[j] (x[,j] %o% x[,j]) */ void Csumouter(x, n, p, y) double *x; /* p by n matrix */ int *n, *p; double *y; /* output matrix p by p, initialised to zero */ { int N, P; register int i, j, k, maxchunk; register double xij, xkj; register double *xcolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { xcolj = x + j * P; for(i = 0; i < P; i++) { xij = xcolj[i]; for(k = 0; k < P; k++) { xkj = xcolj[k]; y[k * P + i] += xij * xkj; } } } } } /* Cwsumouter computes the weighted sum of outer products of columns of x y = sum[j] (w[j] * x[,j] %o% x[,j]) */ void Cwsumouter(x, n, p, w, y) double *x; /* p by n matrix */ int *n, *p; double *w; /* weight vector, length n */ double *y; /* output matrix p by p, initialised to zero */ { int N, P; register int i, j, k, maxchunk; register double wj, xij, wjxij, xkj; register double *xcolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { wj = w[j]; xcolj = x + j * P; for(i = 0; i < P; i++) { xij = xcolj[i]; wjxij = wj * xij; for(k = 0; k < P; k++) { xkj = xcolj[k]; y[k * P + i] += wjxij * xkj; } } } } } /* computes the quadratic form values y[j] = x[,j] %*% v %*% t(x[,j]) */ void Cquadform(x, n, p, v, y) double *x; /* p by n matrix */ int *n, *p; double *v; /* p by p matrix */ double *y; /* output vector, length n */ { int N, P; register int i, j, k, maxchunk; register double xij, xkj, vik, yj; register double *xcolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { xcolj = x + j * P; yj = 0; for(i = 0; i < P; i++) { xij = xcolj[i]; for(k = 0; k < P; k++) { xkj = xcolj[k]; vik = v[k * P + i]; yj += xij * vik * xkj; } } y[j] = yj; } } } /* computes the bilinear form values z[j] = x[,j] %*% v %*% t(y[,j]) */ void Cbiform(x, y, 
n, p, v, z) double *x, *y; /* p by n matrices */ int *n, *p; double *v; /* p by p matrix */ double *z; /* output vector, length n */ { int N, P; register int i, j, k, maxchunk; register double xij, vik, ykj, zj; register double *xcolj, *ycolj; N = *n; P = *p; OUTERCHUNKLOOP(j, N, maxchunk, 2048) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, N, maxchunk, 2048) { xcolj = x + j * P; ycolj = y + j * P; zj = 0; for(i = 0; i < P; i++) { xij = xcolj[i]; for(k = 0; k < P; k++) { ykj = ycolj[k]; vik = v[k * P + i]; zj += xij * vik * ykj; } } z[j] = zj; } } } /* ............... 3D arrays ...................... */ #undef FNAME #undef WEIGHTED /* sumsymouter computes the sum of outer products x[,i,j] %o% x[,j,i] over all pairs i, j */ #define FNAME Csumsymouter #include "sumsymouter.h" #undef FNAME /* wsumsymouter computes the weighted sum of outer products w[i,j] * (x[,i,j] %o% x[,j,i]) over all pairs i, j */ #define FNAME Cwsumsymouter #define WEIGHTED #include "sumsymouter.h" #undef FNAME #undef WEIGHTED spatstat/src/nn3DdistX.h0000644000176000001440000000520412252324034014721 0ustar ripleyusers/* nn3DdistX.h Code template for nearest-neighbour algorithms for 3D point patterns Input is two point patterns - supports 'nncross' This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if the two patterns may include common points (which are not to be counted as neighbours) Either or both DIST and WHICH may be defined. THE FOLLOWING CODE ASSUMES THAT BOTH POINT PATTERNS ARE SORTED IN ASCENDING ORDER OF THE z COORDINATE If EXCLUDE is #defined, Code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. 
$Revision: 1.5 $ $Date: 2013/09/20 10:01:25 $ */ void FNAME(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge) /* inputs */ int *n1, *n2, *id1, *id2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; /* outputs */ double *nnd; int *nnwhich; { int npoints1, npoints2, i, j, jwhich, lastjwhich; double d2, d2min, x1i, y1i, z1i, dx, dy, dz, dz2, hu, hu2; #ifdef EXCLUDE int id1i; #endif hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; for(i = 0; i < npoints1; i++) { R_CheckUserInterrupt(); d2min = hu2; jwhich = -1; x1i = x1[i]; y1i = y1[i]; z1i = z1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif /* search backward from previous nearest neighbour */ if(lastjwhich > 0) { for(j = lastjwhich - 1; j >= 0; --j) { dz = z2[j] - z1i; dz2 = dz * dz; if(dz2 > d2min) break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[j] != id1i) { #endif dx = x2[j] - x1i; dy = y2[j] - y1i; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; jwhich = j; } #ifdef EXCLUDE } #endif } } /* search forward from previous nearest neighbour */ if(lastjwhich < npoints2) { for(j = lastjwhich; j < npoints2; ++j) { dz = z2[j] - z1i; dz2 = dz * dz; if(dz2 > d2min) break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[j] != id1i) { #endif dx = x2[j] - x1i; dy = y2[j] - y1i; d2 = dx * dx + dy * dy + dz2; if (d2 < d2min) { d2min = d2; jwhich = j; } #ifdef EXCLUDE } #endif } } #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH /* convert to R indexing */ nnwhich[i] = jwhich + 1; #endif lastjwhich = jwhich; } } spatstat/src/closefuns.h0000644000176000001440000003154112252324034015107 0ustar ripleyusers/* closefuns.h Function definitions to be #included in closepair.c several times with different values of macros. 
Macros used: CLOSEFUN name of function for 'closepairs' CROSSFUN name of function for 'crosspairs' COORDS if defined, also return xi, yi, xj, yj, dx, dy, d THRESH if defined, also return 1(d < s) $Revision: 1.3 $ $Date: 2013/05/22 10:21:28 $ */ SEXP CLOSEFUN(SEXP xx, SEXP yy, SEXP rr, #ifdef THRESH SEXP ss, #endif SEXP nguess) { double *x, *y; double xi, yi, rmax, r2max, dx, dy, d2; int n, k, kmax, kmaxold, maxchunk, i, j, m; /* local storage */ int *iout, *jout; /* R objects in return value */ SEXP Out, iOut, jOut; /* external storage pointers */ int *iOutP, *jOutP; #ifdef COORDS double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout; SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut, dOut; double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP, *dOutP; #endif #ifdef THRESH double s, s2; int *tout; SEXP tOut; int *tOutP; #endif /* protect R objects from garbage collector */ PROTECT(xx = AS_NUMERIC(xx)); PROTECT(yy = AS_NUMERIC(yy)); PROTECT(rr = AS_NUMERIC(rr)); PROTECT(nguess = AS_INTEGER(nguess)); #ifdef THRESH PROTECT(ss = AS_NUMERIC(ss)); #define NINPUTS 5 #else #define NINPUTS 4 #endif /* Translate arguments from R to C */ x = NUMERIC_POINTER(xx); y = NUMERIC_POINTER(yy); n = LENGTH(xx); rmax = *(NUMERIC_POINTER(rr)); kmax = *(INTEGER_POINTER(nguess)); r2max = rmax * rmax; #ifdef THRESH s = *(NUMERIC_POINTER(ss)); s2 = s * s; #endif k = 0; /* k is the next available storage location and also the current length of the list */ if(n > 0 && kmax > 0) { /* allocate space */ iout = (int *) R_alloc(kmax, sizeof(int)); jout = (int *) R_alloc(kmax, sizeof(int)); #ifdef COORDS xiout = (double *) R_alloc(kmax, sizeof(double)); yiout = (double *) R_alloc(kmax, sizeof(double)); xjout = (double *) R_alloc(kmax, sizeof(double)); yjout = (double *) R_alloc(kmax, sizeof(double)); dxout = (double *) R_alloc(kmax, sizeof(double)); dyout = (double *) R_alloc(kmax, sizeof(double)); dout = (double *) R_alloc(kmax, sizeof(double)); #endif #ifdef THRESH tout = (int *) 
R_alloc(kmax, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < n) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n) maxchunk = n; for(; i < maxchunk; i++) { xi = x[i]; yi = y[i]; if(i > 0) { /* scan backward */ for(j = i - 1; j >= 0; j--) { dx = x[j] - xi; if(dx < -rmax) break; dy = y[j] - yi; d2 = dx * dx + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { /* overflow; allocate more space */ kmaxold = kmax; kmax = 2 * kmax; iout = intRealloc(iout, kmaxold, kmax); jout = intRealloc(jout, kmaxold, kmax); #ifdef COORDS xiout = dblRealloc(xiout, kmaxold, kmax); yiout = dblRealloc(yiout, kmaxold, kmax); xjout = dblRealloc(xjout, kmaxold, kmax); yjout = dblRealloc(yjout, kmaxold, kmax); dxout = dblRealloc(dxout, kmaxold, kmax); dyout = dblRealloc(dyout, kmaxold, kmax); dout = dblRealloc(dout, kmaxold, kmax); #endif #ifdef THRESH tout = intRealloc(tout, kmaxold, kmax); #endif } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; #ifdef COORDS xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); #endif #ifdef THRESH tout[k] = (d2 <= s2) ? 
1 : 0; #endif ++k; } } } if(i + 1 < n) { /* scan forward */ for(j = i + 1; j < n; j++) { dx = x[j] - xi; if(dx > rmax) break; dy = y[j] - yi; d2 = dx * dx + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(k >= kmax) { /* overflow; allocate more space */ kmaxold = kmax; kmax = 2 * kmax; iout = intRealloc(iout, kmaxold, kmax); jout = intRealloc(jout, kmaxold, kmax); #ifdef COORDS xiout = dblRealloc(xiout, kmaxold, kmax); yiout = dblRealloc(yiout, kmaxold, kmax); xjout = dblRealloc(xjout, kmaxold, kmax); yjout = dblRealloc(yjout, kmaxold, kmax); dxout = dblRealloc(dxout, kmaxold, kmax); dyout = dblRealloc(dyout, kmaxold, kmax); dout = dblRealloc(dout, kmaxold, kmax); #endif #ifdef THRESH tout = intRealloc(tout, kmaxold, kmax); #endif } jout[k] = j + 1; /* R indexing */ iout[k] = i + 1; #ifdef COORDS xiout[k] = xi; yiout[k] = yi; xjout[k] = x[j]; yjout[k] = y[j]; dxout[k] = dx; dyout[k] = dy; dout[k] = sqrt(d2); #endif #ifdef THRESH tout[k] = (d2 <= s2) ? 1 : 0; #endif ++k; } } } /* end of i loop */ } } } /* return a list of vectors */ PROTECT(iOut = NEW_INTEGER(k)); PROTECT(jOut = NEW_INTEGER(k)); #ifdef COORDS PROTECT(xiOut = NEW_NUMERIC(k)); PROTECT(yiOut = NEW_NUMERIC(k)); PROTECT(xjOut = NEW_NUMERIC(k)); PROTECT(yjOut = NEW_NUMERIC(k)); PROTECT(dxOut = NEW_NUMERIC(k)); PROTECT(dyOut = NEW_NUMERIC(k)); PROTECT(dOut = NEW_NUMERIC(k)); #endif #ifdef THRESH PROTECT(tOut = NEW_INTEGER(k)); #endif if(k > 0) { iOutP = INTEGER_POINTER(iOut); jOutP = INTEGER_POINTER(jOut); #ifdef COORDS xiOutP = NUMERIC_POINTER(xiOut); yiOutP = NUMERIC_POINTER(yiOut); xjOutP = NUMERIC_POINTER(xjOut); yjOutP = NUMERIC_POINTER(yjOut); dxOutP = NUMERIC_POINTER(dxOut); dyOutP = NUMERIC_POINTER(dyOut); dOutP = NUMERIC_POINTER(dOut); #endif #ifdef THRESH tOutP = INTEGER_POINTER(tOut); #endif for(m = 0; m < k; m++) { iOutP[m] = iout[m]; jOutP[m] = jout[m]; #ifdef COORDS xiOutP[m] = xiout[m]; yiOutP[m] = yiout[m]; xjOutP[m] = xjout[m]; yjOutP[m] = yjout[m]; dxOutP[m] = dxout[m]; 
dyOutP[m] = dyout[m]; dOutP[m] = dout[m]; #endif #ifdef THRESH tOutP[m] = tout[m]; #endif } } #define HEAD 2 #ifdef THRESH #define MIDDLE 1 #else #define MIDDLE 0 #endif #ifdef COORDS #define TAIL 7 #else #define TAIL 0 #endif PROTECT(Out = NEW_LIST(HEAD+MIDDLE+TAIL)); SET_VECTOR_ELT(Out, 0, iOut); SET_VECTOR_ELT(Out, 1, jOut); #ifdef THRESH SET_VECTOR_ELT(Out, HEAD, tOut); #endif #ifdef COORDS SET_VECTOR_ELT(Out, HEAD+MIDDLE, xiOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+1, yiOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+2, xjOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+3, yjOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+4, dxOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+5, dyOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+6, dOut); #endif UNPROTECT(NINPUTS+1+HEAD+MIDDLE+TAIL); /* 1 is for 'Out' itself */ return(Out); } #undef NINPUTS #undef HEAD #undef MIDDLE #undef TAIL SEXP CROSSFUN(SEXP xx1, SEXP yy1, SEXP xx2, SEXP yy2, SEXP rr, #ifdef THRESH SEXP ss, #endif SEXP nguess) { /* input vectors */ double *x1, *y1, *x2, *y2; /* lengths */ int n1, n2, nout, noutmax, noutmaxold, maxchunk; /* distance parameter */ double rmax, r2max; /* indices */ int i, j, jleft, m; /* temporary values */ double x1i, y1i, xleft, dx, dy, dx2, d2; /* local storage */ int *iout, *jout; /* R objects in return value */ SEXP Out, iOut, jOut; /* external storage pointers */ int *iOutP, *jOutP; #ifdef COORDS SEXP xiOut, yiOut, xjOut, yjOut, dxOut, dyOut, dOut; double *xiOutP, *yiOutP, *xjOutP, *yjOutP, *dxOutP, *dyOutP, *dOutP; double *xiout, *yiout, *xjout, *yjout, *dxout, *dyout, *dout; #endif #ifdef THRESH double s, s2; int *tout; SEXP tOut; int *tOutP; #endif /* protect R objects from garbage collector */ PROTECT(xx1 = AS_NUMERIC(xx1)); PROTECT(yy1 = AS_NUMERIC(yy1)); PROTECT(xx2 = AS_NUMERIC(xx2)); PROTECT(yy2 = AS_NUMERIC(yy2)); PROTECT(rr = AS_NUMERIC(rr)); PROTECT(nguess = AS_INTEGER(nguess)); #ifdef THRESH PROTECT(ss = AS_NUMERIC(ss)); #define NINPUTS 7 #else #define NINPUTS 6 #endif /* Translate arguments from R to C */ x1 = 
NUMERIC_POINTER(xx1); y1 = NUMERIC_POINTER(yy1); x2 = NUMERIC_POINTER(xx2); y2 = NUMERIC_POINTER(yy2); n1 = LENGTH(xx1); n2 = LENGTH(xx2); rmax = *(NUMERIC_POINTER(rr)); noutmax = *(INTEGER_POINTER(nguess)); r2max = rmax * rmax; #ifdef THRESH s = *(NUMERIC_POINTER(ss)); s2 = s * s; #endif nout = 0; /* nout is the next available storage location and also the current length of the list */ if(n1 > 0 && n2 > 0 && noutmax > 0) { /* allocate space */ iout = (int *) R_alloc(noutmax, sizeof(int)); jout = (int *) R_alloc(noutmax, sizeof(int)); #ifdef COORDS xiout = (double *) R_alloc(noutmax, sizeof(double)); yiout = (double *) R_alloc(noutmax, sizeof(double)); xjout = (double *) R_alloc(noutmax, sizeof(double)); yjout = (double *) R_alloc(noutmax, sizeof(double)); dxout = (double *) R_alloc(noutmax, sizeof(double)); dyout = (double *) R_alloc(noutmax, sizeof(double)); dout = (double *) R_alloc(noutmax, sizeof(double)); #endif #ifdef THRESH tout = (int *) R_alloc(noutmax, sizeof(int)); #endif jleft = 0; i = 0; maxchunk = 0; while(i < n1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > n1) maxchunk = n1; for( ; i < maxchunk; i++) { x1i = x1[i]; y1i = y1[i]; /* adjust starting point jleft */ xleft = x1i - rmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from j = jleft until dx > rmax */ for(j=jleft; j < n2; j++) { /* squared interpoint distance */ dx = x2[j] - x1i; if(dx > rmax) break; dx2 = dx * dx; dy = y2[j] - y1i; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* add this (i, j) pair to output */ if(nout >= noutmax) { /* overflow; allocate more space */ noutmaxold = noutmax; noutmax = 2 * noutmax; iout = intRealloc(iout, noutmaxold, noutmax); jout = intRealloc(jout, noutmaxold, noutmax); #ifdef COORDS xiout = dblRealloc(xiout, noutmaxold, noutmax); yiout = dblRealloc(yiout, noutmaxold, noutmax); xjout = dblRealloc(xjout, noutmaxold, noutmax); yjout = dblRealloc(yjout, noutmaxold, noutmax); dxout = dblRealloc(dxout, noutmaxold, noutmax); dyout = 
dblRealloc(dyout, noutmaxold, noutmax); dout = dblRealloc(dout, noutmaxold, noutmax); #endif #ifdef THRESH tout = intRealloc(tout, noutmaxold, noutmax); #endif } iout[nout] = i + 1; /* R indexing */ jout[nout] = j + 1; #ifdef COORDS xiout[nout] = x1i; yiout[nout] = y1i; xjout[nout] = x2[j]; yjout[nout] = y2[j]; dxout[nout] = dx; dyout[nout] = dy; dout[nout] = sqrt(d2); #endif #ifdef THRESH tout[nout] = (d2 <= s2) ? 1 : 0; #endif ++nout; } } } } } /* return a list of vectors */ PROTECT(iOut = NEW_INTEGER(nout)); PROTECT(jOut = NEW_INTEGER(nout)); #ifdef COORDS PROTECT(xiOut = NEW_NUMERIC(nout)); PROTECT(yiOut = NEW_NUMERIC(nout)); PROTECT(xjOut = NEW_NUMERIC(nout)); PROTECT(yjOut = NEW_NUMERIC(nout)); PROTECT(dxOut = NEW_NUMERIC(nout)); PROTECT(dyOut = NEW_NUMERIC(nout)); PROTECT(dOut = NEW_NUMERIC(nout)); #endif #ifdef THRESH PROTECT(tOut = NEW_INTEGER(nout)); #endif if(nout > 0) { iOutP = INTEGER_POINTER(iOut); jOutP = INTEGER_POINTER(jOut); #ifdef COORDS xiOutP = NUMERIC_POINTER(xiOut); yiOutP = NUMERIC_POINTER(yiOut); xjOutP = NUMERIC_POINTER(xjOut); yjOutP = NUMERIC_POINTER(yjOut); dxOutP = NUMERIC_POINTER(dxOut); dyOutP = NUMERIC_POINTER(dyOut); dOutP = NUMERIC_POINTER(dOut); #endif #ifdef THRESH tOutP = INTEGER_POINTER(tOut); #endif for(m = 0; m < nout; m++) { iOutP[m] = iout[m]; jOutP[m] = jout[m]; #ifdef COORDS xiOutP[m] = xiout[m]; yiOutP[m] = yiout[m]; xjOutP[m] = xjout[m]; yjOutP[m] = yjout[m]; dxOutP[m] = dxout[m]; dyOutP[m] = dyout[m]; dOutP[m] = dout[m]; #endif #ifdef THRESH tOutP[m] = tout[m]; #endif } } #define HEAD 2 #ifdef THRESH #define MIDDLE 1 #else #define MIDDLE 0 #endif #ifdef COORDS #define TAIL 7 #else #define TAIL 0 #endif PROTECT(Out = NEW_LIST(HEAD+MIDDLE+TAIL)); SET_VECTOR_ELT(Out, 0, iOut); SET_VECTOR_ELT(Out, 1, jOut); #ifdef THRESH SET_VECTOR_ELT(Out, HEAD, tOut); #endif #ifdef COORDS SET_VECTOR_ELT(Out, HEAD+MIDDLE, xiOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+1, yiOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+2, xjOut); SET_VECTOR_ELT(Out, 
HEAD+MIDDLE+3, yjOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+4, dxOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+5, dyOut); SET_VECTOR_ELT(Out, HEAD+MIDDLE+6, dOut); #endif UNPROTECT(NINPUTS+1+HEAD+MIDDLE+TAIL); /* 1 is for 'Out' itself */ return(Out); } #undef NINPUTS #undef HEAD #undef MIDDLE #undef TAIL spatstat/src/dist2.c0000755000176000001440000000377712252324034014143 0ustar ripleyusers# include #include #include "yesno.h" /* dist2: squared distance in torus dist2thresh: faster code for testing whether dist2 < r2 dist2Mthresh: same as dist2thresh, but does not assume the points are within one period of each other. */ double dist2(u,v,x,y,period) double u, v, x, y; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, d2; /* points are assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; dxp = wide - dx; a = (dx < dxp)? dx : dxp; dy = v - y; if(dy < 0.0) dy = -dy; dyp = high - dy; b = (dy < dyp)? dy : dyp; d2 = a * a + b * b; return d2; } double dist2either(u,v,x,y,period) double u, v, x, y; double *period; { if(period[0] < 0.0) return pow(u-x,2) + pow(v-y,2); return(dist2(u,v,x,y,period)); } int dist2thresh(u,v,x,y,period,r2) double u, v, x, y, r2; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, residue; /* points are assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; dxp = wide - dx; a = (dx < dxp) ? dx : dxp; residue = r2 - a * a; if(residue <= 0.0) return NO; dy = v - y; if(dy < 0.0) dy = -dy; dyp = high - dy; b = (dy < dyp) ? dy : dyp; if (residue > b * b) return YES; return NO; } int dist2Mthresh(u,v,x,y,period,r2) double u, v, x, y, r2; double *period; { double wide, high, dx, dy, dxp, dyp, a, b, residue; /* points are NOT assumed to lie within one period of each other */ wide = period[0]; high = period[1]; dx = u - x; if(dx < 0.0) dx = -dx; while(dx > wide) dx -= wide; dxp = wide - dx; a = (dx < dxp) ? 
dx : dxp; residue = r2 - a * a; if(residue < 0.0) return NO; dy = v - y; if(dy < 0.0) dy = -dy; while(dy > high) dy -= high; dyp = high - dy; b = (dy < dyp) ? dy : dyp; if (residue >= b * b) return YES; return NO; } spatstat/src/utils.c0000755000176000001440000000073612252324034014246 0ustar ripleyusers/* utils.c $Revision: 1.2 $ $Date: 2006/10/19 10:22:21 $ Small utilities */ void drevcumsum(double *x, int *nx) { int i; double sumx; double *xp; i = *nx - 1; xp = x + i; sumx = *xp; while(i > 0) { --i; --xp; sumx += *xp; *xp = sumx; } } void irevcumsum(int *x, int *nx) { int i; int sumx; int *xp; i = *nx - 1; xp = x + i; sumx = *xp; while(i > 0) { --i; --xp; sumx += *xp; *xp = sumx; } } spatstat/src/nndistX.h0000644000176000001440000000606512252324034014540 0ustar ripleyusers #if (1 == 0) /* nndistX.h Code template for C functions supporting nncross THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED IN ASCENDING ORDER OF y COORDINATE This code is #included multiple times in nndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if exclusion mechanism is used Either or both DIST and WHICH may be defined. When EXCLUDE is defined, code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012 Licence: GPL >= 2 $Revision: 1.5 $ $Date: 2013/09/18 04:49:18 $ */ #endif void FNAME(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int npoints1, npoints2, maxchunk, i, jleft, jright, jwhich, lastjwhich; double d2, d2min, x1i, y1i, dx, dy, dy2, hu, hu2; #ifdef EXCLUDE int id1i; #endif hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints1) maxchunk = npoints1; for(; i < maxchunk; i++) { d2min = hu2; jwhich = -1; x1i = x1[i]; y1i = y1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif if(lastjwhich < npoints2) { /* search forward from previous nearest neighbour */ for(jright = lastjwhich; jright < npoints2; ++jright) { dy = y2[jright] - y1i; dy2 = dy * dy; if(dy2 > d2min) /* note that dy2 >= d2min could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jright] != id1i) { #endif dx = x2[jright] - x1i; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; jwhich = jright; } #ifdef EXCLUDE } #endif } /* end forward search */ } if(lastjwhich > 0) { /* search backward from previous nearest neighbour */ for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dy = y1i - y2[jleft]; dy2 = dy * dy; if(dy2 > d2min) /* note that dy2 >= d2min could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jleft] != id1i) { #endif dx = x2[jleft] - x1i; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; jwhich = jleft; } #ifdef EXCLUDE } #endif } /* end backward search */ } /* commit values */ #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH nnwhich[i] = jwhich + 1; /* R indexing 
*/ #endif lastjwhich = jwhich; } } } spatstat/src/exactdist.c0000755000176000001440000001460512252324034015076 0ustar ripleyusers/* exactdist.c Exact distance transform of a point pattern (used to estimate the empty space function F) $Revision: 1.12 $ $Date: 2011/09/20 07:36:17 $ Author: Adrian Baddeley Sketch of functionality: the 'data' are a finite list of points in R^2 (x,y coordinates) and the 'output' is a real valued image whose entries are distances, with the value for each pixel equalling the distance from that pixel to the nearest point of the data pattern. Routines: exact_dt_R() interface to R exact_dt() implementation of distance transform dist_to_bdry() compute distance to edge of image frame shape_raster() initialise a Raster structure The appropriate calling sequence for exact_dt_R() is exemplified in 'exactdt.R' */ #undef DEBUG #include #include "raster.h" #ifdef DEBUG #include #endif void shape_raster(ras,data,xmin,ymin,xmax,ymax,nrow,ncol,mrow,mcol) Raster *ras; /* the raster structure to be initialised */ void *data; int nrow, ncol; /* absolute dimensions of storage array */ int mrow, mcol; /* margins clipped off */ /* e.g. 
valid width is ncol - 2*mcol columns */ double xmin, ymin, /* image dimensions in R^2 after clipping */ xmax, ymax; { ras->data = data; ras->nrow = nrow; ras->ncol = ncol; ras->length = nrow * ncol; ras->rmin = mrow; ras->rmax = nrow - mrow - 1; ras->cmin = mcol; ras->cmax = ncol - mcol - 1; ras->x0 = ras->xmin = xmin; ras->x1 = ras->xmax = xmax; ras->y0 = ras->ymin = ymin; ras->y1 = ras->ymax = ymax; ras->xstep = (xmax-xmin)/(ncol - 2 * mcol - 1); ras->ystep = (ymax-ymin)/(nrow - 2 * mrow - 1); /* Rprintf("xstep,ystep = %lf,%lf\n", ras->xstep,ras->ystep); */ } void exact_dt(x, y, npt, dist, index) double *x, *y; /* data points */ int npt; Raster *dist; /* exact distance to nearest point */ Raster *index; /* which point x[i],y[i] is closest */ { int i,j,k,l,m; double d; int ii; double dd; /* double bdiag; */ /* initialise rasters */ #define UNDEFINED -1 #define Is_Defined(I) (I >= 0) #define Is_Undefined(I) (I < 0) Clear(*index,int,UNDEFINED) d = 2.0 * DistanceSquared(dist->xmin,dist->ymin,dist->xmax,dist->ymax); Clear(*dist,double,d) /* If the list of data points is empty, ... 
exit now */ if(npt == 0) return; for(i = 0; i < npt; i++) { /* Rprintf("%ld -> (%lf,%lf)\n", i, x[i], y[i]); */ j = RowIndex(*dist,y[i]); k = ColIndex(*dist,x[i]); /* if(!Inside(*dist,j,k)) Rprintf("(%ld,%ld) out of bounds\n",j,k); else if (!Inside(*dist,j+1,k+1)) Rprintf("(%ld+1,%ld+1) out of bounds\n",j,k); */ for(l = j; l <= j+1; l++) for(m = k; m <= k+1; m++) { d = DistanceToSquared(x[i],y[i],*index,l,m); if( Is_Undefined(Entry(*index,l,m,int)) || Entry(*dist,l,m,double) > d) { /* Rprintf("writing (%ld,%ld) -> %ld\t%lf\n", l,m,i,d); */ Entry(*index,l,m,int) = i; Entry(*dist,l,m,double) = d; /* Rprintf("checking: %ld, %lf\n", Entry(*index,l,m,int), Entry(*dist,l,m,double)); */ } } } /* for(j = 0; j <= index->nrow; j++) for(k = 0; k <= index->ncol; k++) Rprintf("[%ld,%ld] %ld\t%lf\n", j,k,Entry(*index,j,k,int),Entry(*dist,j,k,double)); */ /* how to update the distance values */ #define COMPARE(ROW,COL,RR,CC) \ d = Entry(*dist,ROW,COL,double); \ ii = Entry(*index,RR,CC,int); \ /* Rprintf(" %lf\t (%ld,%ld) |-> %ld\n", d, RR, CC, ii); */ \ if(Is_Defined(ii) /* && ii < npt */ \ && Entry(*dist,RR,CC,double) < d) { \ dd = DistanceSquared(x[ii],y[ii],Xpos(*index,COL),Ypos(*index,ROW)); \ if(dd < d) { \ /* Rprintf("(%ld,%ld) <- %ld\n", ROW, COL, ii); */ \ Entry(*index,ROW,COL,int) = ii; \ Entry(*dist,ROW,COL,double) = dd; \ /* Rprintf("checking: %ld, %lf\n", Entry(*index,ROW,COL,int), Entry(*dist,ROW,COL,double)); */\ } \ } /* bound on diagonal step distance */ /* bdiag = sqrt(index->xstep * index->xstep + index->ystep * index->ystep); */ /* forward pass */ for(j = index->rmin; j <= index->rmax; j++) for(k = index->cmin; k <= index->cmax; k++) { /* Rprintf("Neighbourhood of (%ld,%ld):\n", j,k); */ COMPARE(j,k, j-1,k-1) COMPARE(j,k, j-1, k) COMPARE(j,k, j-1,k+1) COMPARE(j,k, j, k-1) } /* backward pass */ for(j = index->rmax; j >= index->rmin; j--) for(k = index->cmax; k >= index->cmin; k--) { COMPARE(j,k, j+1,k+1) COMPARE(j,k, j+1, k) COMPARE(j,k, j+1,k-1) COMPARE(j,k, j, 
k+1) } /* take square roots of the distances^2 */ for(j = index->rmin; j <= index->rmax; j++) for(k = index->cmin; k <= index->cmax; k++) Entry(*dist,j,k,double) = sqrt(Entry(*dist,j,k,double)); } #define MIN(A,B) (((A) < (B)) ? (A) : (B)) void dist_to_bdry(d) /* compute distance to boundary from each raster point */ Raster *d; /* of course this is easy for a rectangular grid but we implement it in C for ease of future modification */ { int j, k; double x, y, xd, yd; for(j = d->rmin; j <= d->rmax;j++) { y = Ypos(*d,j); yd = MIN(y - d->ymin, d->ymax - y); for(k = d->cmin; k <= d->cmax;k++) { x = Xpos(*d,k); xd = MIN(x - d->xmin, d->xmax - x); Entry(*d,j,k,double) = MIN(xd,yd); } } } /* R interface */ void exact_dt_R(x, y, npt, xmin, ymin, xmax, ymax, nr, nc, mr, mc, distances, indices, boundary) double *x, *y; /* input data points */ int *npt; double *xmin, *ymin, *xmax, *ymax; /* guaranteed bounding box */ int *nr, *nc; /* desired raster dimensions EXCLUDING margins */ int *mr, *mc; /* margins */ /* output arrays */ double *distances; /* distance to nearest point */ int *indices; /* index to nearest point */ double *boundary; /* distance to boundary */ { Raster dist, index, bdist; int mrow, mcol, nrow, ncol; mrow = *mr; mcol = *mc; /* full dimensions */ nrow = *nr + 2 * mrow; ncol = *nc + 2 * mcol; shape_raster( &dist, (void *) distances,*xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &index, (void *) indices, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); exact_dt(x, y, (int) *npt, &dist, &index); dist_to_bdry(&bdist); } spatstat/src/corrections.c0000755000176000001440000002203612252324034015435 0ustar ripleyusers/* corrections.c Edge corrections $Revision: 1.12 $ $Date: 2013/05/27 02:09:10 $ */ #include #include #include #include #include "chunkloop.h" #include "yesno.h" #include "constants.h" #undef DEBUG /* This constant is defined in Rmath.h */ #define 
TWOPI M_2PI #define MIN(A,B) (((A) < (B)) ? (A) : (B)) #define BETWEEN(X,X0,X1) (((X) - (X0)) * ((X) - (X1)) <= 0) #define UNDER(X,Y,X0,Y0,X1,Y1) \ (((Y1) - (Y0)) * ((X) - (X0)) >= ((Y) - (Y0)) * ((X1)- (X0))) #define UNDERNEATH(X,Y,X0,Y0,X1,Y1) \ (((X0) < (X1)) ? UNDER(X,Y,X0,Y0,X1,Y1) : UNDER(X,Y,X1,Y1,X0,Y0)) #define TESTINSIDE(X,Y,X0,Y0,X1,Y1) \ (BETWEEN(X,X0,X1) && UNDERNEATH(X, Y, X0, Y0, X1, Y1)) void ripleybox(nx, x, y, rmat, nr, xmin, ymin, xmax, ymax, epsilon, out) /* inputs */ int *nx, *nr; /* dimensions */ double *x, *y; /* coordinate vectors of length nx */ double *rmat; /* matrix nx by nr */ double *xmin, *ymin, *xmax, *ymax; /* box dimensions */ double *epsilon; /* threshold for proximity to corner */ /* output */ double *out; /* output matrix nx by nr */ { int i, j, n, m, ijpos, ncor, maxchunk; double xx, yy, x0, y0, x1, y1, dL, dR, dU, dD, aL, aU, aD, aR, rij; double cL, cU, cD, cR, bLU, bLD, bRU, bRD, bUL, bUR, bDL, bDR; double corner, extang; double eps; n = *nx; m = *nr; x0 = *xmin; y0 = *ymin; x1 = *xmax; y1 = *ymax; eps = *epsilon; OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xx = x[i]; yy = y[i]; /* perpendicular distance from point to each edge of rectangle L = left, R = right, D = down, U = up */ dL = xx - x0; dR = x1 - xx; dD = yy - y0; dU = y1 - yy; /* test for corner of the rectangle */ #define ABS(X) (((X) >= 0) ? (X) : (-X)) #define SMALL(X) ((ABS(X) < eps) ? 1 : 0) ncor = SMALL(dL) + SMALL(dR) + SMALL(dD) + SMALL(dU); corner = (ncor >= 2) ? 
YES : NO; /* angle between - perpendicular to edge of rectangle and - line from point to corner of rectangle */ bLU = atan2(dU, dL); bLD = atan2(dD, dL); bRU = atan2(dU, dR); bRD = atan2(dD, dR); bUL = atan2(dL, dU); bUR = atan2(dR, dU); bDL = atan2(dL, dD); bDR = atan2(dR, dD); for(j = 0; j < m; j++) { ijpos = j * n + i; rij = rmat[ijpos]; #ifdef DEBUG Rprintf("rij = %lf\n", rij); #endif /* half the angle subtended by the intersection between the circle of radius r[i,j] centred on point i and each edge of the rectangle (prolonged to an infinite line) */ aL = (dL < rij) ? acos(dL/rij) : 0.0; aR = (dR < rij) ? acos(dR/rij) : 0.0; aD = (dD < rij) ? acos(dD/rij) : 0.0; aU = (dU < rij) ? acos(dU/rij) : 0.0; #ifdef DEBUG Rprintf("aL = %lf\n", aL); Rprintf("aR = %lf\n", aR); Rprintf("aD = %lf\n", aD); Rprintf("aU = %lf\n", aU); #endif /* apply maxima */ cL = MIN(aL, bLU) + MIN(aL, bLD); cR = MIN(aR, bRU) + MIN(aR, bRD); cU = MIN(aU, bUL) + MIN(aU, bUR); cD = MIN(aD, bDL) + MIN(aD, bDR); #ifdef DEBUG Rprintf("cL = %lf\n", cL); Rprintf("cR = %lf\n", cR); Rprintf("cD = %lf\n", cD); Rprintf("cU = %lf\n", cU); #endif /* total exterior angle over 2 pi */ extang = (cL + cR + cU + cD)/TWOPI; /* add pi/2 for corners */ if(corner) extang += 1/4; #ifdef DEBUG Rprintf("extang = %lf\n", extang); #endif /* OK, now compute weight */ out[ijpos] = 1 / (1 - extang); } } } } void ripleypoly(nc, xc, yc, nr, rmat, nseg, x0, y0, x1, y1, out) /* inputs */ int *nc, *nr, *nseg; double *xc, *yc, *rmat; double *x0, *y0, *x1, *y1; /* output */ double *out; { int n, m, i, j, k, l, nradperpt, ncut, nchanges, maxchunk; double xcentre, ycentre, xx0, yy0, xx1, yy1, xx01, yy01; double x, y, radius, radius2, dx0, dx1, dy0; double a, b, c, t, det, sqrtdet, tmp; double theta[6], delta[7], tmid[7]; double xtest, ytest, contrib, total; n = *nc; nradperpt = *nr; m = *nseg; OUTERCHUNKLOOP(i, n, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 16384) { xcentre = xc[i]; ycentre = yc[i]; 
#ifdef DEBUG Rprintf("centre = (%lf, %lf)\n", xcentre, ycentre); #endif for(j = 0; j < nradperpt; j++) { radius = rmat[ j * n + i]; radius2 = radius * radius; #ifdef DEBUG Rprintf("radius = %lf\n", radius); #endif total = 0.0; for(k=0; k < m; k++) { #ifdef DEBUG Rprintf("k = %d\n", k); #endif ncut = 0; xx0 = x0[k]; yy0 = y0[k]; xx1 = x1[k]; yy1 = y1[k]; #ifdef DEBUG Rprintf("(%lf,%lf) to (%lf,%lf)\n", xx0, yy0, xx1, yy1); #endif /* intersection with left edge */ dx0 = xx0 - xcentre; det = radius2 - dx0 * dx0; if(det > 0) { sqrtdet = sqrt(det); y = ycentre + sqrtdet; if(y < yy0) { theta[ncut] = atan2(y - ycentre, dx0); #ifdef DEBUG Rprintf("cut left at theta= %lf\n", theta[ncut]); #endif ncut++; } y = ycentre - sqrtdet; if(y < yy0) { theta[ncut] = atan2(y-ycentre, dx0); #ifdef DEBUG Rprintf("cut left at theta= %lf\n", theta[ncut]); #endif ncut++; } } else if(det == 0) { if(ycentre < yy0) { theta[ncut] = atan2(0.0, dx0); #ifdef DEBUG Rprintf("tangent left at theta= %lf\n", theta[ncut]); #endif ncut++; } } /* intersection with right edge */ dx1 = xx1 - xcentre; det = radius2 - dx1 * dx1; if(det > 0) { sqrtdet = sqrt(det); y = ycentre + sqrtdet; if(y < yy1) { theta[ncut] = atan2(y - ycentre, dx1); #ifdef DEBUG Rprintf("cut right at theta= %lf\n", theta[ncut]); #endif ncut++; } y = ycentre - sqrtdet; if(y < yy1) { theta[ncut] = atan2(y - ycentre, dx1); #ifdef DEBUG Rprintf("cut right at theta= %lf\n", theta[ncut]); #endif ncut++; } } else if(det == 0) { if(ycentre < yy1) { theta[ncut] = atan2(0.0, dx1); #ifdef DEBUG Rprintf("tangent right at theta= %lf\n", theta[ncut]); #endif ncut++; } } /* intersection with top segment */ xx01 = xx1 - xx0; yy01 = yy1 - yy0; dy0 = yy0 - ycentre; a = xx01 * xx01 + yy01 * yy01; b = 2 * (xx01 * dx0 + yy01 * dy0); c = dx0 * dx0 + dy0 * dy0 - radius2; det = b * b - 4 * a * c; if(det > 0) { sqrtdet = sqrt(det); t = (sqrtdet - b)/(2 * a); if(t >= 0 && t <= 1) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = atan2(y - ycentre, x - 
xcentre); #ifdef DEBUG Rprintf("hits segment: t = %lf, theta = %lf\n", t, theta[ncut]); #endif ++ncut; } t = (-sqrtdet - b)/(2 * a); if(t >= 0 && t <= 1) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = atan2(y - ycentre, x - xcentre); #ifdef DEBUG Rprintf("hits segment: t = %lf, theta = %lf\n", t, theta[ncut]); #endif ++ncut; } } else if(det == 0) { t = - b/(2 * a); if(t >= 0 && t <= 1) { x = xx0 + t * xx01; y = yy0 + t * yy01; theta[ncut] = atan2(y - ycentre, x - xcentre); #ifdef DEBUG Rprintf("tangent to segment: t = %lf, theta = %lf\n", t, theta[ncut]); #endif ++ncut; } } /* for safety, force all angles to be in range [0, 2 * pi] */ if(ncut > 0) for(l = 0; l < ncut; l++) if(theta[l] < 0) theta[l] += TWOPI; /* sort angles */ if(ncut > 1) { do { nchanges = 0; for(l = 0; l < ncut - 1; l++) { if(theta[l] > theta[l+1]) { /* swap */ ++nchanges; tmp = theta[l]; theta[l] = theta[l+1]; theta[l+1] = tmp; } } } while(nchanges > 0); } #ifdef DEBUG if(ncut > 0) { for(l = 0; l < ncut; l++) Rprintf("theta[%d] = %lf\n", l, theta[l]); } #endif /* compute length of circumference inside polygon */ if(ncut == 0) { /* entire circle is either in or out */ xtest = xcentre + radius; ytest = ycentre; if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) contrib = TWOPI; else contrib = 0.0; } else { /* find midpoints and lengths of pieces (adding theta = ) */ delta[0] = theta[0]; tmid[0] = theta[0]/2; if(ncut > 1) { for(l = 1; l < ncut; l++) { delta[l] = theta[l] - theta[l-1]; tmid[l] = (theta[l] + theta[l-1])/2; } } delta[ncut] = TWOPI - theta[ncut - 1]; tmid[ncut] = (TWOPI + theta[ncut-1])/2; contrib = 0.0; for(l = 0; l <= ncut; l++) { #ifdef DEBUG Rprintf("delta[%d] = %lf\n", l, delta[l]); #endif xtest = xcentre + radius * cos(tmid[l]); ytest = ycentre + radius * sin(tmid[l]); if(TESTINSIDE(xtest, ytest, xx0, yy0, xx1, yy1)) { contrib += delta[l]; #ifdef DEBUG Rprintf("... inside\n"); } else { Rprintf("... 
outside\n"); #endif } } } /* multiply by sign of trapezium */ if(xx0 < xx1) contrib *= -1; #ifdef DEBUG Rprintf("contrib = %lf\n", contrib); #endif total += contrib; } out[ j * n + i] = total; #ifdef DEBUG Rprintf("total = %lf\n", total); #endif } } } } spatstat/src/PerfectDiggleGratton.h0000644000176000001440000001342112252324034017146 0ustar ripleyusers // ........................... Diggle-Gratton process .......................... // $Revision: 1.5 $ $Date: 2012/03/10 11:22:56 $ class DiggleGrattonProcess : public PointProcess { public: double beta, delta, rho, kappa, rhominusdelta, deltasquared, rhosquared; DiggleGrattonProcess(double xmin, double xmax, double ymin, double ymax, double b, double d, double r, double k); ~DiggleGrattonProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; DiggleGrattonProcess::DiggleGrattonProcess(double xmin, double xmax, double ymin, double ymax, double b, double d, double r, double k) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; delta = d; rho = r; kappa = k; deltasquared = delta * delta; rhosquared = rho * rho; rhominusdelta = rho - delta; InteractionRange = rho; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double DiggleGrattonProcess::Interaction(double dsquared) { double rtn, dist, t; rtn = 1; if(dsquared < rhosquared) { if(dsquared < deltasquared) { rtn = 0; } else { dist = sqrt(dsquared); t = (dist - delta)/rhominusdelta; rtn = pow(t, kappa); } } return(rtn); } void DiggleGrattonProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void DiggleGrattonProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = 
Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating DiggleGrattonProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating DiggleGrattonProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating DiggleGrattonProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectDiggleGratton(SEXP beta, SEXP delta, SEXP rho, SEXP kappa, SEXP xrange, SEXP yrange) { // input parameters double Beta, Delta, Rho, Kappa, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(delta = AS_NUMERIC(delta)); PROTECT(rho = AS_NUMERIC(rho)); PROTECT(kappa = AS_NUMERIC(kappa)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 6 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Delta = *(NUMERIC_POINTER(delta)); Rho = *(NUMERIC_POINTER(rho)); Kappa = *(NUMERIC_POINTER(kappa)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ 
Rho); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ Rho); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise DiggleGratton point process DiggleGrattonProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Delta,Rho,Kappa); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(10); // 6 arguments plus xout, yout, nout, out return(out); } } spatstat/src/Ediggra.c0000755000176000001440000000354412252324034014450 0ustar ripleyusers#include #include #include "chunkloop.h" /* Ediggra.c $Revision: 1.4 $ $Date: 2012/03/28 05:55:50 $ C implementation of 'eval' for DiggleGratton interaction (exponentiated) Assumes point patterns are sorted in increasing order of x coordinate */ double sqrt(); void Ediggra(nnsource, xsource, ysource, idsource, nntarget, xtarget, ytarget, idtarget, ddelta, rrho, values) /* 
inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget; int *idsource, *idtarget; double *ddelta, *rrho; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft, idsourcej; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double delta, rho, delta2, rho2, rhominusdelta; double product; nsource = *nnsource; ntarget = *nntarget; delta = *ddelta; rho = *rrho; rho2 = rho * rho; delta2 = delta * delta; rhominusdelta = rho - delta; if(nsource == 0 || ntarget == 0) return; ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { product = 1; xsourcej = xsource[j]; ysourcej = ysource[j]; idsourcej = idsource[j]; /* adjust starting point */ xleft = xsourcej - rho; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process until dx > rho (or until product is zero) */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > rho2) break; if(idtarget[i] != idsourcej) { dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= rho2) { if(d2 <= delta2) { product = 0; break; } else product *= (sqrt(d2) - delta)/rhominusdelta; } } } values[j] = product; } } } spatstat/src/nndist.h0000644000176000001440000000405112252324034014401 0ustar ripleyusers/* nndist.h Code template for C functions supporting nndist and nnwhich (k=1) THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER This code is #included multiple times in nndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012 Licence: GPL >= 2 $Revision: 1.2 $ $Date: 2012/03/14 02:37:27 $ */ void FNAME(n, x, y, #ifdef DIST nnd, #endif #ifdef WHICH nnwhich, #endif huge) /* inputs */ int *n; double *x, *y, *huge; /* outputs */ #ifdef DIST double *nnd; #endif #ifdef WHICH int *nnwhich; #endif { int npoints, i, maxchunk, left, right; double d2, d2min, xi, yi, dx, dy, dy2, hu, hu2; #ifdef WHICH int which; #endif hu = *huge; hu2 = hu * hu; npoints = *n; /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints) maxchunk = npoints; for(; i < maxchunk; i++) { d2min = hu2; #ifdef WHICH which = -1; #endif xi = x[i]; yi = y[i]; if(i < npoints - 1) { /* search forward */ for(right = i + 1; right < npoints; ++right) { dy = y[right] - yi; dy2 = dy * dy; if(dy2 > d2min) break; dx = x[right] - xi; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; #ifdef WHICH which = right; #endif } } } if(i > 0){ /* search backward */ for(left = i - 1; left >= 0; --left) { dy = yi - y[left]; dy2 = dy * dy; if(dy2 > d2min) break; dx = x[left] - xi; d2 = dx * dx + dy2; if (d2 < d2min) { d2min = d2; #ifdef WHICH which = left; #endif } } } #ifdef DIST nnd[i] = sqrt(d2min); #endif #ifdef WHICH nnwhich[i] = which + 1; /* R indexing */ #endif } } } spatstat/src/localpcf.h0000755000176000001440000000453512252324034014677 0ustar ripleyusers/* localpcf.h Source template for versions of local pair correlation Requires variable: WEIGHTED Assumes point patterns are sorted in increasing order of x coordinate $Revision: 1.5 $ $Date: 2012/03/27 04:50:04 $ */ #ifdef WEIGHTED #define FNAME locWpcfx #else #define FNAME locpcfx #endif void FNAME(nn1, x1, y1, id1, nn2, x2, y2, id2, #ifdef WEIGHTED w2, #endif nnr, rmaxi, del, pcf) /* inputs */ int *nn1, *nn2, *nnr; double *x1, *y1, *x2, *y2; int *id1, *id2; double *rmaxi, *del; #ifdef WEIGHTED double *w2; #endif /* output */ double *pcf; 
/* matrix of column vectors of pcf's for each point of first pattern */ { int n1, n2, nr, i, j, k, jleft, kmin, kmax, id1i, maxchunk; double x1i, y1i, rmax, delta, xleft, dx, dy, dx2; double d2, d2max, dmax, d; double rstep, rvalue, frac, contrib, weight, coef; n1 = *nn1; n2 = *nn2; nr = *nnr; rmax = *rmaxi; delta = *del; dmax = rmax + delta; /* maximum relevant value of interpoint distance */ d2max = dmax * dmax; rstep = rmax/(nr-1); coef = 3.0 /(4.0 * delta); if(n1 == 0 || n2 == 0) return; jleft = 0; OUTERCHUNKLOOP(i, n1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n1, maxchunk, 8196) { x1i = x1[i]; y1i = y1[i]; id1i = id1[i]; /* adjust starting point */ xleft = x1i - dmax; while((x2[jleft] < xleft) && (jleft+1 < n2)) ++jleft; /* process from jleft until |dx| > dmax */ for(j=jleft; j < n2; j++) { dx = x2[j] - x1i; dx2 = dx * dx; if(dx2 > d2max) break; dy = y2[j] - y1i; d2 = dx2 + dy * dy; if(d2 <= d2max && id2[j] != id1i) { d = sqrt(d2); kmin = (int) floor((d-delta)/rstep); kmax = (int) ceil((d+delta)/rstep); if(kmin <= nr-1 && kmax >= 0) { /* nonempty intersection with range of r values */ /* compute intersection */ if(kmin < 0) kmin = 0; if(kmax >= nr) kmax = nr-1; /* */ weight = coef/d; #ifdef WEIGHTED weight = weight * w2[j]; #endif for(k = kmin; k <= kmax; k++) { rvalue = k * rstep; frac = (d - rvalue)/delta; /* Epanechnikov kernel with halfwidth delta */ contrib = (1 - frac * frac); if(contrib > 0) pcf[k + nr * i] += contrib * weight; } } } } } } } #undef FNAME spatstat/src/PerfectStraussHard.h0000644000176000001440000001276412252324034016670 0ustar ripleyusers // ..................... Strauss-Hardcore process .......................... 
// $Revision: 1.2 $ $Date: 2012/03/10 11:23:17 $ class StraussHardProcess : public PointProcess { public: double beta, gamma, H, R, Hsquared, Rsquared; StraussHardProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri, double Hc); ~StraussHardProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); // void CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // void CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p); // void Beta(struct Point2 *TempCell); // void CalcBeta(Point2Pattern *p2p); }; StraussHardProcess::StraussHardProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri, double Hc) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; gamma = g; R = Ri; H = Hc; Rsquared = R * R; Hsquared = H * H; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double StraussHardProcess::Interaction(double dsquared) { if(dsquared >= Rsquared) return(1.0); if(dsquared >= Hsquared) return(gamma); return(0.0); } void StraussHardProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void StraussHardProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating StraussHardProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating StraussHardProcess Poisson 
3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating StraussHardProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectStraussHard(SEXP beta, SEXP gamma, SEXP r, SEXP hc, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, H, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; PointProcess *TheProcess; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(hc = AS_NUMERIC(hc)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 6 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); H = *(NUMERIC_POINTER(hc)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise StraussHard point process StraussHardProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, Gamma, R, H); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise 
random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(10); // 6 arguments plus xout, yout, nout, out return(out); } } spatstat/src/massdisthack.c0000755000176000001440000000355612252324034015567 0ustar ripleyusers/* HACKED from R-2.0.1/src/appl/massdist.c by Adrian Baddeley Changes indicated by 'AB' */ /* * R : A Computer Language for Statistical Data Analysis * Copyright (C) 1996-2004 Robert Gentleman and Ross Ihaka and the * R Development Core Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

#ifdef HAVE_CONFIG_H
#include
#endif
#include
#include
/* NOTE(review): the targets of the #include directives above were lost
   when this file was extracted (angle-bracketed header names stripped);
   restore them from the upstream massdisthack.c before compiling. */

/* massdisthack:
   Weighted linear binning.  For each finite x[i], the mass xmass[i] is
   split linearly between the two grid cells of y that bracket x[i],
   on a regular grid of *ny cells spanning [*xlow, *xhigh].
   Inputs:
     x, nx        data values and their count
     xmass        per-point weights (the 'AB' addition vs. R's massdist)
     xlow, xhigh  grid range
   Output:
     y, ny        accumulated mass per grid cell (y is zeroed first)
   Points falling one cell outside either end of the grid contribute
   only their in-range fraction; points further out are dropped. */
void massdisthack(double *x, int *nx,
		  double *xmass, /* AB: new variable */
		  double *xlow, double *xhigh,
		  double *y, int *ny)
{
  double fx, xdelta, xmi, xpos;	/* AB */
  int i, ix, ixmax, ixmin;

  ixmin = 0;
  ixmax = *ny - 2;
  /* AB: line deleted */
  xdelta = (*xhigh - *xlow) / (*ny - 1);  /* grid cell width */

  /* clear the output accumulator */
  for(i=0; i < *ny ; i++)
    y[i] = 0;

  for(i=0; i < *nx ; i++) {
    if(R_FINITE(x[i])) {
      /* fractional grid position of x[i]; ix = left cell, fx = offset in [0,1) */
      xpos = (x[i] - *xlow) / xdelta;
      ix = floor(xpos);
      fx = xpos - ix;
      xmi = xmass[i];	/* AB: new line */
      if(ixmin <= ix && ix <= ixmax) {
	/* interior point: split mass between cells ix and ix+1 */
	y[ix] += (1 - fx) * xmi; /* AB */
	y[ix + 1] += fx * xmi;   /* AB */
      }
      else if(ix == -1) {
	/* just below the grid: only the right-hand fraction lands in cell 0 */
	y[0] += fx * xmi;  /* AB */
      }
      else if(ix == ixmax + 1) {
	/* just above the grid: only the left-hand fraction lands in the last cell */
	y[ix] += (1 - fx) * xmi;  /* AB */
      }
    }
  }
  /* AB: lines deleted */
}
spatstat/src/mhsnoop.c0000644000176000001440000001064712252324034014570 0ustar ripleyusers#include
#include
#include
#include "methas.h"
#include "mhsnoopdef.h"

/*
  mhsnoop.c

  $Revision: 1.8 $  $Date: 2013/05/27 02:09:10 $

  support for visual debugger in RMH
*/

/*
  To switch on debugging code,
  insert the line:   #define MH_DEBUG YES
*/
#ifndef MH_DEBUG
#define MH_DEBUG NO
#endif

/* initmhsnoop:
   Initialise the Snoop state 's' from the R environment 'env'.
   The snooper is active iff 'env' is actually an R environment;
   when active, the callback expression is looked up as the variable
   'callbackexpr' in 'env' and cached in s->expr.
   The first stop is scheduled at iteration 0; stopping on a proposal
   type is initially disabled (NO_TYPE). */
void initmhsnoop(Snoop *s, SEXP env) {
  s->active = isEnvironment(env);
  s->nextstop = 0;        /* stop at iteration 0 */
  s->nexttype = NO_TYPE;  /* deactivated */
  if(s->active) {
    s->env = env;
    s->expr = findVar(install("callbackexpr"), env);
  } else {
    s->env = s->expr = R_NilValue;
  }
}

/* mhsnoop:
   Called once per Metropolis-Hastings iteration; when the iteration
   number or proposal type matches the scheduled stop, copies the current
   state and proposal into the R environment, runs the R callback, and
   reads back the (possibly user-modified) outcome '*itype' and the next
   stopping time/type. */
void mhsnoop(Snoop *s,
	     int irep,
	     Algor *algo,
	     State *state,
	     Propo *prop,
	     double numer,
	     double denom,
	     int *itype)
{
  SEXP e;
  int npts, j;
  /* passed from C to R before debugger */
  SEXP Sirep, Sx, Sy, Sm, Sproptype, Sproplocn, Spropmark, Spropindx;
  SEXP Snumer, Sdenom, Sitype;
  double *Px, *Py, *Pproplocn;
  int *Pm;
  /* passed from R to C after debugger */
  SEXP Sinxt, Stnxt,
SitypeUser; #if MH_DEBUG Rprintf("mhsnoop called at iteration %d\n", irep); #endif if(!(s->active)) return; #if MH_DEBUG Rprintf("mhsnoop is active\n"); #endif /* execute when the simulation reaches the next stopping time: a specified iteration number 'nextstop' or a specified proposal type 'nexttype' */ if(irep != s->nextstop && prop->itype != s->nexttype) return; #if MH_DEBUG Rprintf("debug triggered\n"); #endif /* environment for communication with R */ e = s->env; /* copy data to R */ /* copy iteration number */ PROTECT(Sirep = NEW_INTEGER(1)); *(INTEGER_POINTER(Sirep)) = irep; setVar(install("irep"), Sirep, e); UNPROTECT(1); /* copy (x,y) coordinates */ npts = state->npts; PROTECT(Sx = NEW_NUMERIC(npts)); PROTECT(Sy = NEW_NUMERIC(npts)); Px = NUMERIC_POINTER(Sx); Py = NUMERIC_POINTER(Sy); for(j = 0; j < npts; j++) { Px[j] = state->x[j]; Py[j] = state->y[j]; } setVar(install("xcoords"), Sx, e); setVar(install("ycoords"), Sy, e); UNPROTECT(2); /* copy marks */ if(state->ismarked) { PROTECT(Sm = NEW_INTEGER(npts)); Pm = INTEGER_POINTER(Sm); for(j = 0; j < npts; j++) { Pm[j] = state->marks[j]; } setVar(install("mcodes"), Sm, e); UNPROTECT(1); } /* proposal type */ PROTECT(Sproptype = NEW_INTEGER(1)); *(INTEGER_POINTER(Sproptype)) = prop->itype; setVar(install("proptype"), Sproptype, e); UNPROTECT(1); /* proposal coordinates */ PROTECT(Sproplocn = NEW_NUMERIC(2)); Pproplocn = NUMERIC_POINTER(Sproplocn); Pproplocn[0] = prop->u; Pproplocn[1] = prop->v; setVar(install("proplocn"), Sproplocn, e); UNPROTECT(1); /* proposal mark value */ if(state->ismarked) { PROTECT(Spropmark = NEW_INTEGER(1)); *(INTEGER_POINTER(Spropmark)) = prop->mrk; setVar(install("propmark"), Spropmark, e); UNPROTECT(1); } /* proposal point index */ PROTECT(Spropindx = NEW_INTEGER(1)); *(INTEGER_POINTER(Spropindx)) = prop->ix; setVar(install("propindx"), Spropindx, e); UNPROTECT(1); /* Metropolis-Hastings numerator and denominator */ PROTECT(Snumer = NEW_NUMERIC(1)); PROTECT(Sdenom = 
NEW_NUMERIC(1)); *(NUMERIC_POINTER(Snumer)) = numer; *(NUMERIC_POINTER(Sdenom)) = denom; setVar(install("numerator"), Snumer, e); setVar(install("denominator"), Sdenom, e); UNPROTECT(2); /* tentative outcome of proposal */ PROTECT(Sitype = NEW_INTEGER(1)); *(INTEGER_POINTER(Sitype)) = *itype; setVar(install("itype"), Sitype, e); UNPROTECT(1); /* ..... call visual debugger */ #if MH_DEBUG Rprintf("executing callback\n"); #endif eval(s->expr, s->env); /* update outcome of proposal */ SitypeUser = findVar(install("itype"), e); *itype = *(INTEGER_POINTER(SitypeUser)); #if MH_DEBUG Rprintf("Assigning itype = %d\n", *itype); #endif /* update stopping time */ Sinxt = findVar(install("inxt"), e); s->nextstop = *(INTEGER_POINTER(Sinxt)); Stnxt = findVar(install("tnxt"), e); s->nexttype = *(INTEGER_POINTER(Stnxt)); #if MH_DEBUG if(s->nextstop >= 0) Rprintf("Next stop: iteration %d\n", s->nextstop); if(s->nexttype >= 0) { if(s->nexttype == BIRTH) Rprintf("Next stop: first birth proposal\n"); if(s->nexttype == DEATH) Rprintf("Next stop: first death proposal\n"); if(s->nexttype == SHIFT) Rprintf("Next stop: first shift proposal\n"); } #endif return; } spatstat/src/xyseg.c0000755000176000001440000005263612252324034014253 0ustar ripleyusers/* xyseg.c Computation with line segments xysegint compute intersections between line segments $Revision: 1.19 $ $Date: 2013/09/18 04:59:17 $ */ #include #include #include #include #include #include "chunkloop.h" #define NIETS -1.0 #undef DEBUG #define INSIDE01(X,E) (X * (1.0 - X) >= -E) /* --------------- PAIRS OF PSP OBJECTS ---------------------- */ /* xysegint Determines intersections between each pair of line segments drawn from two lists of line segments. Line segments are given as x0, y0, dx, dy where (x0,y0) is the first endpoint and (dx, dy) is the vector from the first to the second endpoint. Points along a line segment are represented in parametric coordinates, (x,y) = (x0, y0) + t * (dx, dy). 
Output from xysegint() consists of five matrices xx, yy, ta, tb, ok. The (i,j)-th entries in these matrices give information about the intersection between the i-th segment in list 'a' and the j-th segment in list 'b'. The information is ok[i,j] = 1 if there is an intersection = 0 if not xx[i,j] = x coordinate of intersection yy[i,j] = y coordinate of intersection ta[i,j] = parameter of intersection point relative to i-th segment in list 'a' tb[i,j] = parameter of intersection point relative to j-th segment in list 'b' */ void xysegint(na, x0a, y0a, dxa, dya, nb, x0b, y0b, dxb, dyb, eps, xx, yy, ta, tb, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *xx, *yy, *ta, *tb; int *ok; { int i, j, ma, mb, ijpos, maxchunk; double determinant, absdet, diffx, diffy, tta, ttb, epsilon; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { for(i = 0; i < ma; i++) { ijpos = j * ma + i; ok[ijpos] = 0; xx[ijpos] = yy[ijpos] = ta[ijpos] = tb[ijpos] = NIETS; determinant = dxb[j] * dya[i] - dyb[j] * dxa[i]; absdet = (determinant > 0) ? 
determinant : -determinant; #ifdef DEBUG Rprintf("i = %d, j = %d\n", i, j); Rprintf("segment A[i]: (%lf, %lf) to (%lf, %lf)\n", x0a[i], y0a[i], x0a[i] + dxa[i], y0a[i] + dya[i]); Rprintf("segment B[j]: (%lf, %lf) to (%lf, %lf)\n", x0b[j], y0b[j], x0b[j] + dxb[j], y0b[j] + dyb[j]); Rprintf("determinant=%lf\n", determinant); #endif if(absdet > epsilon) { diffx = (x0b[j] - x0a[i])/determinant; diffy = (y0b[j] - y0a[i])/determinant; ta[ijpos] = tta = - dyb[j] * diffx + dxb[j] * diffy; tb[ijpos] = ttb = - dya[i] * diffx + dxa[i] * diffy; #ifdef DEBUG Rprintf("ta = %lf, tb = %lf\n", tta, ttb); #endif if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ ok[ijpos] = 1; xx[ijpos] = x0a[i] + tta * dxa[i]; yy[ijpos] = y0a[i] + tta * dya[i]; #ifdef DEBUG Rprintf("segments intersect at (%lf, %lf)\n", xx[ijpos], yy[ijpos]); #endif } } } } } } /* Stripped-down version of xysegint that just returns logical matrix */ void xysi(na, x0a, y0a, dxa, dya, nb, x0b, y0b, dxb, dyb, eps, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ int *ok; { int i, j, ma, mb, ijpos, maxchunk; double determinant, absdet, diffx, diffy, tta, ttb, epsilon; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { for(i = 0; i < ma; i++) { ijpos = j * ma + i; ok[ijpos] = 0; determinant = dxb[j] * dya[i] - dyb[j] * dxa[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0b[j] - x0a[i])/determinant; diffy = (y0b[j] - y0a[i])/determinant; tta = - dyb[j] * diffx + dxb[j] * diffy; ttb = - dya[i] * diffx + dxa[i] * diffy; if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ ok[ijpos] = 1; } } } } } } /* Test whether there is at least one intersection */ void xysiANY(na, x0a, y0a, dxa, dya, nb, x0b, y0b, dxb, dyb, eps, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *x0a, *y0a, *dxa, *dya, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* output (single logical value) */ int *ok; { int i, j, ma, mb, maxchunk; double determinant, absdet, diffx, diffy, tta, ttb, epsilon; *ok = 0; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { for(i = 0; i < ma; i++) { determinant = dxb[j] * dya[i] - dyb[j] * dxa[i]; absdet = (determinant > 0) ? determinant : -determinant; if(absdet > epsilon) { diffx = (x0b[j] - x0a[i])/determinant; diffy = (y0b[j] - y0a[i])/determinant; tta = - dyb[j] * diffx + dxb[j] * diffy; ttb = - dya[i] * diffx + dxa[i] * diffy; if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ *ok = 1; return; } } } } } } /* Analogue of xysegint when segments in list 'a' are infinite vertical lines */ void xysegVslice(na, xa, nb, x0b, y0b, dxb, dyb, eps, yy, ok) /* inputs (vectors of coordinates) */ int *na, *nb; double *xa, *x0b, *y0b, *dxb, *dyb; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *yy; int *ok; { int i, j, ma, mb, ijpos, maxchunk; double diffx0, diffx1, width, abswidth, epsilon; int notvertical; ma = *na; mb = *nb; epsilon = *eps; OUTERCHUNKLOOP(j, mb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mb, maxchunk, 8196) { /* determine whether segment j is nearly vertical */ width = dxb[j]; abswidth = (width > 0) ? 
width : -width; notvertical = (abswidth <= epsilon); for(i = 0; i < ma; i++) { ijpos = j * ma + i; ok[ijpos] = 0; yy[ijpos] = NIETS; /* test whether vertical line i separates endpoints of segment j */ diffx0 = xa[i] - x0b[j]; diffx1 = diffx0 - width; if(diffx0 * diffx1 <= 0) { /* intersection */ ok[ijpos] = 1; /* compute y-coordinate of intersection point */ if(notvertical) { yy[ijpos] = y0b[j] + diffx0 * dyb[j]/width; } else { /* vertical or nearly-vertical segment: pick midpoint */ yy[ijpos] = y0b[j] + dyb[j]/2.0; } } } } } } /* -------------- ONE PSP OBJECT ---------------------------- */ /* Similar to xysegint, but computes intersections between all pairs of segments in a single list, excluding the diagonal comparisons of course */ void xysegXint(n, x0, y0, dx, dy, eps, xx, yy, ti, tj, ok) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *xx, *yy, *ti, *tj; int *ok; { int i, j, m, mm1, ijpos, jipos, iipos, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; m = *n; epsilon = *eps; mm1 = m - 1; OUTERCHUNKLOOP(j, mm1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm1, maxchunk, 8196) { for(i = j+1; i < m; i++) { ijpos = j * m + i; jipos = i * m + j; ok[ijpos] = ok[jipos] = 0; xx[ijpos] = yy[ijpos] = ti[ijpos] = ti[jipos] = NIETS; xx[jipos] = yy[jipos] = tj[ijpos] = tj[jipos] = NIETS; determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0[j] - x0[i])/determinant; diffy = (y0[j] - y0[i])/determinant; ti[ijpos] = tti = - dy[j] * diffx + dx[j] * diffy; tj[ijpos] = ttj = - dy[i] * diffx + dx[i] * diffy; tj[jipos] = ti[ijpos]; ti[jipos] = tj[ijpos]; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { ok[ijpos] = ok[jipos] = 1; xx[ijpos] = xx[jipos] = x0[i] + tti * dx[i]; yy[ijpos] = yy[jipos] = y0[i] + tti * dy[i]; } } } } } /* assign diagonal */ for(i = 0; i < m; i++) { iipos = i * m + i; ok[iipos] = 0; xx[iipos] = yy[iipos] = ti[iipos] = tj[iipos] = NIETS; } } /* Reduced version of xysegXint that returns logical matrix 'ok' only */ void xysxi(n, x0, y0, dx, dy, eps, ok) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ int *ok; { int i, j, m, mm1, ijpos, jipos, iipos, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; m = *n; epsilon = *eps; mm1 = m - 1; OUTERCHUNKLOOP(j, mm1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm1, maxchunk, 8196) { for(i = j+1; i < m; i++) { ijpos = j * m + i; jipos = i * m + j; ok[ijpos] = ok[jipos] = 0; determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0[j] - x0[i])/determinant; diffy = (y0[j] - y0[i])/determinant; tti = - dy[j] * diffx + dx[j] * diffy; ttj = - dy[i] * diffx + dx[i] * diffy; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { ok[ijpos] = ok[jipos] = 1; } } } } } /* assign diagonal */ for(i = 0; i < m; i++) { iipos = i * m + i; ok[iipos] = 0; } } /* ---------------------- ONE CLOSED POLYGON ------------------------ */ /* Identify self-intersections in a closed polygon (Similar to xysegXint, but does not compare segments which are cyclically adjacent in the list) */ void Cxypolyselfint(n, x0, y0, dx, dy, eps, xx, yy, ti, tj, ok) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* input (tolerance for determinant) */ double *eps; /* outputs (matrices) */ double *xx, *yy, *ti, *tj; int *ok; { int i, j, k, m, m2, mm1, mm2, mstop, ijpos, jipos, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; m = *n; epsilon = *eps; m2 = m * m; /* initialise matrices */ for(k = 0; k < m2; k++) { ok[k] = 0; xx[k] = yy[k] = ti[k] = tj[k] = NIETS; } if(m <= 2) return; /* Compare j with j+2, j+3, ...., m-1 Don't compare 0 with m-1 */ mm1 = m - 1; mm2 = m - 2; OUTERCHUNKLOOP(j, mm2, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm2, maxchunk, 8196) { mstop = (j > 0) ? m : mm1; for(i = j+2; i < mstop; i++) { ijpos = j * m + i; jipos = i * m + j; determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = (x0[j] - x0[i])/determinant; diffy = (y0[j] - y0[i])/determinant; ti[ijpos] = tti = - dy[j] * diffx + dx[j] * diffy; tj[ijpos] = ttj = - dy[i] * diffx + dx[i] * diffy; tj[jipos] = ti[ijpos]; ti[jipos] = tj[ijpos]; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { ok[ijpos] = ok[jipos] = 1; xx[ijpos] = xx[jipos] = x0[i] + tti * dx[i]; yy[ijpos] = yy[jipos] = y0[i] + tti * dy[i]; } } } } } } /* Just determines whether there is self-intersection (exits quicker & uses less space) */ void xypsi(n, x0, y0, dx, dy, xsep, ysep, eps, proper, answer) /* inputs (vectors of coordinates) */ int *n; double *x0, *y0, *dx, *dy; /* inputs (distances beyond which intersection is impossible) */ double *xsep, *ysep; /* input (tolerance for determinant) */ double *eps; /* input (flag) */ int *proper; /* output */ int *answer; { int i, j, m, mm1, mm2, mstop, prop, maxchunk; double determinant, absdet, diffx, diffy, tti, ttj, epsilon; double Xsep, Ysep; m = *n; prop = *proper; Xsep = *xsep; Ysep = *ysep; epsilon = *eps; *answer = 0; if(m <= 2) return; /* Compare j with j+2, j+3, ...., m-1 Don't compare 0 with m-1 */ mm1 = m - 1; mm2 = m - 2; OUTERCHUNKLOOP(j, mm2, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, mm2, maxchunk, 8196) { mstop = (j > 0) ? m : mm1; for(i = j+2; i < mstop; i++) { diffx = x0[j] - x0[i]; diffy = y0[j] - y0[i]; if(diffx < Xsep && diffx > -Xsep && diffy < Ysep && diffy > -Ysep) { determinant = dx[j] * dy[i] - dy[j] * dx[i]; absdet = (determinant > 0) ? 
determinant : -determinant; if(absdet > epsilon) { diffx = diffx/determinant; diffy = diffy/determinant; tti = - dy[j] * diffx + dx[j] * diffy; ttj = - dy[i] * diffx + dx[i] * diffy; if(INSIDE01(tti, epsilon) && INSIDE01(ttj, epsilon)) { /* intersection occurs */ if(prop == 0 || (tti != 0.0 && tti != 1.0) || (ttj != 0.0 && ttj != 1.0)) { /* proper intersection */ *answer = 1; return; } } } } } } } } /* ---------------- .Call INTERFACE --------------------------- Analogues of functions above, but using the .Call interface and dynamic storage allocation, to save space. */ SEXP Cxysegint(SEXP x0a, SEXP y0a, SEXP dxa, SEXP dya, SEXP x0b, SEXP y0b, SEXP dxb, SEXP dyb, SEXP eps) { int i, j, k, na, nb; double determinant, absdet, diffx, diffy, tta, ttb; int nout, noutmax, newmax, maxchunk; double epsilon; double *x0A, *y0A, *dxA, *dyA, *x0B, *y0B, *dxB, *dyB; double *ta, *tb, *x, *y; int *ia, *jb; SEXP out, iAout, jBout, tAout, tBout, xout, yout; double *tAoutP, *tBoutP, *xoutP, *youtP; int *iAoutP, *jBoutP; PROTECT(x0a = AS_NUMERIC(x0a)); PROTECT(y0a = AS_NUMERIC(y0a)); PROTECT(dxa = AS_NUMERIC(dxa)); PROTECT(dya = AS_NUMERIC(dya)); PROTECT(x0b = AS_NUMERIC(x0b)); PROTECT(y0b = AS_NUMERIC(y0b)); PROTECT(dxb = AS_NUMERIC(dxb)); PROTECT(dyb = AS_NUMERIC(dyb)); PROTECT(eps = AS_NUMERIC(eps)); /* that's 9 protected */ /* get pointers */ x0A = NUMERIC_POINTER(x0a); y0A = NUMERIC_POINTER(y0a); dxA = NUMERIC_POINTER(dxa); dyA = NUMERIC_POINTER(dya); x0B = NUMERIC_POINTER(x0b); y0B = NUMERIC_POINTER(y0b); dxB = NUMERIC_POINTER(dxb); dyB = NUMERIC_POINTER(dyb); /* determine length of vectors */ na = LENGTH(x0a); nb = LENGTH(x0b); epsilon = *(NUMERIC_POINTER(eps)); /* guess amount of storage required for output */ noutmax = (na > nb) ? 
na : nb; nout = 0; ia = (int *) R_alloc(noutmax, sizeof(int)); jb = (int *) R_alloc(noutmax, sizeof(int)); ta = (double *) R_alloc(noutmax, sizeof(double)); tb = (double *) R_alloc(noutmax, sizeof(double)); x = (double *) R_alloc(noutmax, sizeof(double)); y = (double *) R_alloc(noutmax, sizeof(double)); /* scan data and collect intersections */ OUTERCHUNKLOOP(j, nb, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nb, maxchunk, 8196) { for(i = 0; i < na; i++) { determinant = dxB[j] * dyA[i] - dyB[j] * dxA[i]; absdet = (determinant > 0) ? determinant : -determinant; #ifdef DEBUG Rprintf("i = %d, j = %d\n", i, j); Rprintf("segment A[i]: (%lf, %lf) to (%lf, %lf)\n", x0A[i], y0A[i], x0A[i] + dxA[i], y0A[i] + dyA[i]); Rprintf("segment B[j]: (%lf, %lf) to (%lf, %lf)\n", x0B[j], y0B[j], x0B[j] + dxB[j], y0B[j] + dyB[j]); Rprintf("determinant=%lf\n", determinant); #endif if(absdet > epsilon) { diffx = (x0B[j] - x0A[i])/determinant; diffy = (y0B[j] - y0A[i])/determinant; tta = - dyB[j] * diffx + dxB[j] * diffy; ttb = - dyA[i] * diffx + dxA[i] * diffy; #ifdef DEBUG Rprintf("ta = %lf, tb = %lf\n", tta, ttb); #endif if(INSIDE01(tta, epsilon) && INSIDE01(ttb, epsilon)) { /* intersection */ if(nout >= noutmax) { /* storage overflow - increase space */ newmax = 4 * noutmax; ia = (int *) S_realloc((char *) ia, newmax, noutmax, sizeof(int)); jb = (int *) S_realloc((char *) jb, newmax, noutmax, sizeof(int)); ta = (double *) S_realloc((char *) ta, newmax, noutmax, sizeof(double)); tb = (double *) S_realloc((char *) tb, newmax, noutmax, sizeof(double)); x = (double *) S_realloc((char *) x, newmax, noutmax, sizeof(double)); y = (double *) S_realloc((char *) y, newmax, noutmax, sizeof(double)); noutmax = newmax; } ta[nout] = tta; tb[nout] = ttb; ia[nout] = i; jb[nout] = j; x[nout] = x0A[i] + tta * dxA[i]; y[nout] = y0A[i] + tta * dyA[i]; #ifdef DEBUG Rprintf("segments intersect at (%lf, %lf)\n", x[nout], y[nout]); #endif ++nout; } } } } } /* pack up */ PROTECT(iAout = 
NEW_INTEGER(nout)); PROTECT(jBout = NEW_INTEGER(nout)); PROTECT(tAout = NEW_NUMERIC(nout)); PROTECT(tBout = NEW_NUMERIC(nout)); PROTECT(xout = NEW_NUMERIC(nout)); PROTECT(yout = NEW_NUMERIC(nout)); /* 9 + 6 = 15 protected */ iAoutP = INTEGER_POINTER(iAout); jBoutP = INTEGER_POINTER(jBout); tAoutP = NUMERIC_POINTER(tAout); tBoutP = NUMERIC_POINTER(tBout); xoutP = NUMERIC_POINTER(xout); youtP = NUMERIC_POINTER(yout); for(k = 0; k < nout; k++) { iAoutP[k] = ia[k]; jBoutP[k] = jb[k]; tAoutP[k] = ta[k]; tBoutP[k] = tb[k]; xoutP[k] = x[k]; youtP[k] = y[k]; } PROTECT(out = NEW_LIST(6)); /* 15 + 1 = 16 protected */ SET_VECTOR_ELT(out, 0, iAout); SET_VECTOR_ELT(out, 1, jBout); SET_VECTOR_ELT(out, 2, tAout); SET_VECTOR_ELT(out, 3, tBout); SET_VECTOR_ELT(out, 4, xout); SET_VECTOR_ELT(out, 5, yout); UNPROTECT(16); return(out); } /* Similar to Cxysegint, but computes intersections between all pairs of segments in a single list, excluding the diagonal comparisons of course */ SEXP CxysegXint(SEXP x0, SEXP y0, SEXP dx, SEXP dy, SEXP eps) { int i, j, k, n, n1; double determinant, absdet, diffx, diffy, tti, ttj; int nout, noutmax, newmax, maxchunk; double epsilon; double *X0, *Y0, *Dx, *Dy; double *ti, *tj, *x, *y; int *ii, *jj; SEXP out, iout, jout, tiout, tjout, xout, yout; double *tioutP, *tjoutP, *xoutP, *youtP; int *ioutP, *joutP; PROTECT(x0 = AS_NUMERIC(x0)); PROTECT(y0 = AS_NUMERIC(y0)); PROTECT(dx = AS_NUMERIC(dx)); PROTECT(dy = AS_NUMERIC(dy)); PROTECT(eps = AS_NUMERIC(eps)); /* that's 5 protected */ /* get pointers */ X0 = NUMERIC_POINTER(x0); Y0 = NUMERIC_POINTER(y0); Dx = NUMERIC_POINTER(dx); Dy = NUMERIC_POINTER(dy); /* determine length of vectors */ n = LENGTH(x0); epsilon = *(NUMERIC_POINTER(eps)); /* guess amount of storage required for output */ noutmax = n; nout = 0; ii = (int *) R_alloc(noutmax, sizeof(int)); jj = (int *) R_alloc(noutmax, sizeof(int)); ti = (double *) R_alloc(noutmax, sizeof(double)); tj = (double *) R_alloc(noutmax, sizeof(double)); x = (double 
*) R_alloc(noutmax, sizeof(double)); y = (double *) R_alloc(noutmax, sizeof(double)); /* scan data */ n1 = n - 1; OUTERCHUNKLOOP(j, n1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, n1, maxchunk, 8196) { for(i = j+1; i < n; i++) { determinant = Dx[j] * Dy[i] - Dy[j] * Dx[i]; absdet = (determinant > 0) ? determinant : -determinant; if(absdet > epsilon) { diffx = (X0[j] - X0[i])/determinant; diffy = (Y0[j] - Y0[i])/determinant; tti = - Dy[j] * diffx + Dx[j] * diffy; ttj = - Dy[i] * diffx + Dx[i] * diffy; if(INSIDE01(tti,epsilon) && INSIDE01(ttj,epsilon)) { /* intersection */ if(nout >= noutmax) { /* storage overflow - increase space */ newmax = 4 * noutmax; ii = (int *) S_realloc((char *) ii, newmax, noutmax, sizeof(int)); jj = (int *) S_realloc((char *) jj, newmax, noutmax, sizeof(int)); ti = (double *) S_realloc((char *) ti, newmax, noutmax, sizeof(double)); tj = (double *) S_realloc((char *) tj, newmax, noutmax, sizeof(double)); x = (double *) S_realloc((char *) x, newmax, noutmax, sizeof(double)); y = (double *) S_realloc((char *) y, newmax, noutmax, sizeof(double)); noutmax = newmax; } ti[nout] = tti; tj[nout] = ttj; ii[nout] = i; jj[nout] = j; x[nout] = X0[i] + tti * Dx[i]; y[nout] = Y0[i] + tti * Dy[i]; ++nout; } } } } } /* pack up */ PROTECT(iout = NEW_INTEGER(nout)); PROTECT(jout = NEW_INTEGER(nout)); PROTECT(tiout = NEW_NUMERIC(nout)); PROTECT(tjout = NEW_NUMERIC(nout)); PROTECT(xout = NEW_NUMERIC(nout)); PROTECT(yout = NEW_NUMERIC(nout)); /* 5 + 6 = 11 protected */ ioutP = INTEGER_POINTER(iout); joutP = INTEGER_POINTER(jout); tioutP = NUMERIC_POINTER(tiout); tjoutP = NUMERIC_POINTER(tjout); xoutP = NUMERIC_POINTER(xout); youtP = NUMERIC_POINTER(yout); for(k = 0; k < nout; k++) { ioutP[k] = ii[k]; joutP[k] = jj[k]; tioutP[k] = ti[k]; tjoutP[k] = tj[k]; xoutP[k] = x[k]; youtP[k] = y[k]; } PROTECT(out = NEW_LIST(6)); /* 11 + 1 = 12 protected */ SET_VECTOR_ELT(out, 0, iout); SET_VECTOR_ELT(out, 1, jout); SET_VECTOR_ELT(out, 2, tiout); 
SET_VECTOR_ELT(out, 3, tjout); SET_VECTOR_ELT(out, 4, xout); SET_VECTOR_ELT(out, 5, yout); UNPROTECT(12); return(out); } spatstat/src/knndistance.c0000644000176000001440000001043312252324034015377 0ustar ripleyusers/* knndistance.c K-th Nearest Neighbour Distances between points Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 Licence: GNU Public Licence >= 2 $Revision: 1.8 $ $Date: 2013/12/10 03:29:45 $ Function definitions are #included from knndist.h and knnXdist.h THE FOLLOWING FUNCTIONS ASSUME THAT y IS SORTED IN ASCENDING ORDER SINGLE LIST: knndsort k-th nearest neighbour distances knnwhich k-th nearest neighbours knnsort k-th nearest neighbours and their distances ONE LIST TO ANOTHER LIST: knnXdist Nearest neighbour distance from one list to another knnXwhich Nearest neighbour ID from one list to another knnX Nearest neighbour ID & distance from one list to another ONE LIST TO ANOTHER OVERLAPPING LIST: knnXEdist Nearest neighbour distance from one list to another, overlapping knnXEwhich Nearest neighbour ID from one list to another, overlapping knnXE Nearest neighbour ID & distance */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */ /* ------------------- one point pattern X --------------------- */ /* knndsort nearest neighbours 1:kmax returns distances only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knndsort #define DIST #include "knndist.h" /* knnwhich nearest neighbours 1:kmax returns identifiers only */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnwhich #define WHICH #include "knndist.h" /* knnsort nearest neighbours 1:kmax returns distances and indices */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnsort #define DIST #define WHICH #include "knndist.h" /* --------------- two distinct point patterns X and Y --------------- */ /* general interface */ 
/*
  knnXinterface
  General .C() entry point for k-nearest-neighbour computations from one
  point pattern X = (x1, y1) to another pattern Y = (x2, y2).

  Dispatches to one of six worker routines according to three option flags:
     *exclude   - nonzero: X and Y may share points; pairs with id1[i] == id2[j]
                  are excluded (the knnXE* family), otherwise the knnX* family
     *wantdist  - nonzero: nearest-neighbour distances are required
     *wantwhich - nonzero: nearest-neighbour identifiers are required
  The workers are generated below by repeatedly #including the template
  "knnXdist.h" under different FNAME/DIST/WHICH/EXCLUDE macro settings.

  Inputs:
     n1, x1, y1, id1 - length and coordinates/ids of pattern X
     n2, x2, y2, id2 - length and coordinates/ids of pattern Y
     kmax            - number of neighbours required (1..kmax)
     huge            - a large value used to initialise distances
  Outputs:
     nnd, nnwhich    - result arrays; which of them is actually filled
                       depends on the flags.
                       NOTE(review): presumably each holds kmax values per
                       point of X - confirm against knnXdist.h / the R caller.

  Note: if both *wantdist and *wantwhich are zero, no worker is called and
  the outputs are left untouched (grounded in the if/else chain below).

  Workers assume the Y coordinates are sorted in ascending order
  (stated in the file header comment).
*/
void knnXinterface(n1, x1, y1, id1,
                   n2, x2, y2, id2,
                   kmax,
                   exclude, wantdist, wantwhich,
                   nnd, nnwhich,
                   huge)
     /* inputs */
     int *n1, *n2;
     double *x1, *y1, *x2, *y2, *huge;
     int *id1, *id2;
     int *kmax;
     /* options */
     int *exclude, *wantdist, *wantwhich;
     /* outputs */
     double *nnd;
     int *nnwhich;
     /* some inputs + outputs are not used in all functions */
{
  /* forward declarations of the template-generated workers */
  void knnX(), knnXdist(), knnXwhich();
  void knnXE(), knnXEdist(), knnXEwhich();
  int ex, di, wh;
  /* normalise the option flags to 0/1 */
  ex = (*exclude != 0);
  di = (*wantdist != 0);
  wh = (*wantwhich != 0);
  if(!ex) {
    /* X and Y are disjoint patterns */
    if(di && wh) {
      knnX(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
    } else if(di) {
      knnXdist(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
    } else if(wh) {
      knnXwhich(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
    }
  } else {
    /* X and Y may overlap: identical points are not neighbours */
    if(di && wh) {
      knnXE(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
    } else if(di) {
      knnXEdist(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
    } else if(wh) {
      knnXEwhich(n1, x1, y1, id1, n2, x2, y2, id2, kmax, nnd, nnwhich, huge);
    }
  }
}

/* Turn off the debugging tracer in knnXdist.h */
#undef TRACER

/*
   Generate the worker functions by instantiating the template knnXdist.h:
     FNAME   - name of the generated function
     DIST    - defined => fill the distance output nnd
     WHICH   - defined => fill the identifier output nnwhich
     EXCLUDE - defined => skip pairs with matching id codes
*/

/* knnXdist returns distances only */
#undef FNAME
#undef DIST
#undef WHICH
#undef EXCLUDE
#define FNAME knnXdist
#define DIST
#include "knnXdist.h"

/* knnXwhich returns identifiers only */
#undef FNAME
#undef DIST
#undef WHICH
#undef EXCLUDE
#define FNAME knnXwhich
#define WHICH
#include "knnXdist.h"

/* knnX returns distances and indices */
#undef FNAME
#undef DIST
#undef WHICH
#undef EXCLUDE
#define FNAME knnX
#define DIST
#define WHICH
#include "knnXdist.h"

/* --------------- overlapping point patterns X and Y --------------- */

/* knnXEdist returns distances only */
#undef FNAME
#undef DIST
#undef WHICH
#undef EXCLUDE
#define FNAME knnXEdist
#define DIST
#define EXCLUDE
#include "knnXdist.h"

/* knnXEwhich returns identifiers only */
#undef FNAME
#undef DIST
#undef WHICH
#undef EXCLUDE
#define FNAME knnXEwhich
#define WHICH
#define EXCLUDE
#include "knnXdist.h"

/*
knnXE returns distances and indices */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME knnXE #define DIST #define WHICH #define EXCLUDE #include "knnXdist.h" spatstat/src/geom3.h0000755000176000001440000000041012252324034014112 0ustar ripleyusers/* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Definitions for 3D geometrical structures */ typedef struct Point { double x; double y; double z; } Point; typedef struct Box { double x0; double x1; double y0; double y1; double z0; double z1; } Box; spatstat/src/lineardisc.c0000755000176000001440000001732612252324034015226 0ustar ripleyusers#include #include #include "chunkloop.h" /* lineardisc.c Disc of radius r in linear network $Revision: 1.10 $ $Date: 2013/05/27 02:09:10 $ */ #define DPATH(I,J) dpath[(J) + Nv * (I)] #include "yesno.h" #undef DEBUG void lineardisc(f, seg, /* centre of disc (local coords) */ r, /* radius of disc */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ lengths, /* segment lengths */ allinside, boundary, dxv, nendpoints) int *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *f, *r; int *seg; double *xv, *yv; /* vectors of coordinates of vertices */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of segment lengths */ /* OUTPUTS */ int *allinside, *boundary; /* vectors of status for each segment */ double *dxv; /* vector of distances for each vertex */ int *nendpoints; { int Nv, Ns; double f0, rad; int seg0; int i, A, B, fromi, toi, allin, bdry, reachable, nends, maxchunk; double length0, dxA, dxB, dxAvi, dxBvi, residue; double *resid; int *covered; Nv = *nv; Ns = *ns; f0 = *f; seg0 = *seg; rad = *r; /* endpoints of segment containing centre */ A = from[seg0]; B = to[seg0]; /* distances from x to A and B */ length0 = lengths[seg0]; dxA = f0 * length0; dxB = (1-f0) * length0; /* visit vertices */ covered = (int *) R_alloc((size_t) Nv, 
sizeof(int)); resid = (double *) R_alloc((size_t) Nv, sizeof(double)); OUTERCHUNKLOOP(i, Nv, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nv, maxchunk, 16384) { /* distance going through A */ dxAvi = dxA + DPATH(A,i); /* distance going through B */ dxBvi = dxB + DPATH(B,i); /* shortest path distance to this vertex */ dxv[i] = (dxAvi < dxBvi) ? dxAvi : dxBvi; /* distance left to 'spend' from this vertex */ residue = rad - dxv[i]; resid[i] = (residue > 0)? residue : 0; /* determine whether vertex i is inside the disc of radius r */ covered[i] = (residue >= 0); } } /* Now visit line segments. */ nends = 0; OUTERCHUNKLOOP(i, Ns, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 16384) { /* Determine which line segments are completely inside the disc, and which cross the boundary. */ if(i == seg0) { /* initial segment: disc starts from centre (x, y) */ allin = covered[A] && covered[B]; bdry = !allin; if(bdry) { if(!covered[A]) nends++; if(!covered[B]) nends++; } } else { /* another segment: disc extends in from either endpoint */ fromi = from[i]; toi = to[i]; reachable = (covered[fromi] || covered[toi]); if(reachable) { allin = covered[fromi] && covered[toi] && (resid[fromi] + resid[toi] >= lengths[i]); bdry = !allin; } else allin = bdry = NO; if(bdry) { if(covered[fromi]) nends++; if(covered[toi]) nends++; } } allinside[i] = allin; boundary[i] = bdry; } } *nendpoints = nends; } /* ------------------------------------------------- */ /* count endpoints of several discs in a network */ /* ------------------------------------------------- */ void Ccountends(np, f, seg, /* centres of discs (local coords) */ r, /* radii of discs */ nv, xv, yv, /* network vertices */ ns, from, to, /* network segments */ dpath, /* shortest path distances between vertices */ lengths, /* segment lengths */ toler, /* tolerance */ nendpoints /* output counts of endpoints */ ) int *np, *nv, *ns; int *from, *to; /* integer vectors (mappings) */ double *f, 
*r; int *seg; double *xv, *yv; /* vectors of coordinates of vertices */ double *dpath; /* matrix of shortest path distances between vertices */ double *lengths; /* vector of segment lengths */ double *toler; /* tolerance for merging endpoints and vertices */ /* OUTPUT */ int *nendpoints; { int Np, Nv, Ns; double f0, rad; int seg0; int i, m, A, B, fromi, toi, reachable, nends, maxchunk, covfrom, covto, allin; double length0, dxA, dxB, dxAvi, dxBvi, dxvi, residue, resfrom, resto, tol; double *resid; int *covered, *terminal; Np = *np; Nv = *nv; Ns = *ns; tol = *toler; covered = (int *) R_alloc((size_t) Nv, sizeof(int)); terminal = (int *) R_alloc((size_t) Nv, sizeof(int)); resid = (double *) R_alloc((size_t) Nv, sizeof(double)); /* loop over centre points */ OUTERCHUNKLOOP(m, Np, maxchunk, 256) { R_CheckUserInterrupt(); INNERCHUNKLOOP(m, Np, maxchunk, 256) { f0 = f[m]; seg0 = seg[m]; rad = r[m]; #ifdef DEBUG Rprintf("\nCentre point %d lies in segment %d\n", m, seg0); #endif /* endpoints of segment containing centre */ A = from[seg0]; B = to[seg0]; /* distances from centre to A and B */ length0 = lengths[seg0]; dxA = f0 * length0; dxB = (1-f0) * length0; nends = 0; /* visit vertices */ for(i = 0; i < Nv; i++) { /* distance going through A */ dxAvi = dxA + DPATH(A,i); /* distance going through B */ dxBvi = dxB + DPATH(B,i); /* shortest path distance to this vertex */ dxvi = (dxAvi < dxBvi) ? dxAvi : dxBvi; /* distance left to 'spend' from this vertex */ residue = rad - dxvi; if(residue > tol) { resid[i] = residue; covered[i] = YES; terminal[i] = NO; } else if(residue < -tol) { resid[i] = 0; covered[i] = terminal[i] = NO; } else { /* vertex is within 'tol' of an endpoint - deem it to be one */ resid[i] = 0; covered[i] = terminal[i] = YES; /* vertex is an endpoint of disc */ ++nends; } } #ifdef DEBUG Rprintf("%d terminal endpoints\n", nends); #endif /* Now visit line segments to count any endpoints that are interior to the segments. 
*/ for(i = 0; i < Ns; i++) { /* Determine which line segments are completely inside the disc, and which cross the boundary. */ if(i == seg0) { /* initial segment: disc starts from (x0, y0) */ if(!covered[A]) nends++; if(!covered[B]) nends++; #ifdef DEBUG if(!covered[A]) Rprintf("A not covered\n"); if(!covered[B]) Rprintf("B not covered\n"); #endif } else { /* another segment: disc extends in from either endpoint */ fromi = from[i]; toi = to[i]; covfrom = covered[fromi]; covto = covered[toi]; resfrom = resid[fromi]; resto = resid[toi]; reachable = covfrom || covto; #ifdef DEBUG residue = resfrom + resto - lengths[i]; Rprintf("%d: %s %s: %lf + %lf - %lf = %lf sign %s\n", i, (terminal[fromi]) ? "T" : ((covfrom) ? "Y" : "N"), (terminal[toi]) ? "T" : ((covto) ? "Y" : "N"), resfrom, resto, lengths[i], residue, (residue < 0) ? "-" : ((residue > 0) ? "+" : "0")); #endif if(reachable) { residue = resfrom + resto - lengths[i]; allin = covfrom && covto && (residue >= 0); #ifdef DEBUG if(allin) { Rprintf("Covered\n"); } else if((terminal[fromi] || terminal[toi]) && (residue >= - tol * lengths[i])) { Rprintf("Deemed to be covered\n"); } else Rprintf("Reachable\n"); #endif allin = allin || ((terminal[fromi] || terminal[toi]) && (residue >= - tol)); if(!allin) { /* segment is not entirely covered by disc - infer endpoint(s) in interior of segment */ if(covfrom && !terminal[fromi]) nends++; if(covto && !terminal[toi]) nends++; #ifdef DEBUG if(covfrom && !terminal[fromi]) Rprintf("fromi => end\n"); if(covto && !terminal[toi]) Rprintf("toi => end\n"); #endif } } } } nendpoints[m] = nends; } } } spatstat/src/nndistance.c0000755000176000001440000001054512252324034015233 0ustar ripleyusers/* nndistance.c Nearest Neighbour Distances between points Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2012 Licence: GNU Public Licence >= 2 $Revision: 1.21 $ $Date: 2013/11/03 03:36:27 $ THE FOLLOWING FUNCTIONS ASSUME THAT y IS SORTED IN ASCENDING ORDER SINGLE LIST: 
nndistsort Nearest neighbour distances nnwhichsort Nearest neighbours nnsort Nearest neighbours & distances ONE LIST TO ANOTHER LIST: nnXdist Nearest neighbour distance from one list to another nnXwhich Nearest neighbour ID from one list to another nnX Nearest neighbour ID & distance from one list to another ONE LIST TO ANOTHER OVERLAPPING LIST: nnXEdist Nearest neighbour distance from one list to another, overlapping nnXEwhich Nearest neighbour ID from one list to another, overlapping nnXE Nearest neighbour ID & distance */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER */ /* ------------------- one point pattern X --------------------- */ /* nndistsort: nearest neighbour distances */ #undef FNAME #undef DIST #undef WHICH #define FNAME nndistsort #define DIST #include "nndist.h" /* nnwhichsort: id of nearest neighbour */ #undef FNAME #undef DIST #undef WHICH #define FNAME nnwhichsort #define WHICH #include "nndist.h" /* nnsort: distance & id of nearest neighbour */ #undef FNAME #undef DIST #undef WHICH #define FNAME nnsort #define DIST #define WHICH #include "nndist.h" /* --------------- two distinct point patterns X and Y ----------------- */ /* general interface */ void nnXinterface(n1, x1, y1, id1, n2, x2, y2, id2, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *x2, *y2, *huge; int *id1, *id2; /* options */ int *exclude, *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; { void nnX(), nnXdist(), nnXwhich(); void nnXE(), nnXEdist(), nnXEwhich(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { nnX(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(di) { nnXdist(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(wh) { nnXwhich(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } } else { if(di && wh) { nnXE(n1, 
x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(di) { nnXEdist(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } else if(wh) { nnXEwhich(n1, x1, y1, id1, n2, x2, y2, id2, nnd, nnwhich, huge); } } } /* nnXdist: nearest neighbour distance (from each point of X to the nearest point of Y) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXdist #define DIST #include "nndistX.h" /* nnXwhich: nearest neighbour id */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXwhich #define WHICH #include "nndistX.h" /* nnX: nearest neighbour distance and id */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnX #define DIST #define WHICH #include "nndistX.h" /* --------------- two point patterns X and Y with common points --------- */ /* Code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. */ /* nnXEdist: similar to nnXdist but allows X and Y to include common points (which are not to be counted as neighbours) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXEdist #define DIST #define EXCLUDE #include "nndistX.h" /* nnXEwhich: similar to nnXwhich but allows X and Y to include common points (which are not to be counted as neighbours) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXEwhich #define WHICH #define EXCLUDE #include "nndistX.h" /* nnXE: similar to nnX but allows X and Y to include common points (which are not to be counted as neighbours) */ #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE #define FNAME nnXE #define DIST #define WHICH #define EXCLUDE #include "nndistX.h" spatstat/src/dinfty.c0000755000176000001440000000677612252324034014415 0ustar ripleyusers/* dinfty.c $Revision: 1.6 $ $Date: 2011/09/20 07:42:18 $ Code by Dominic Schuhmacher Modified by Adrian Baddeley */ #include #include #define COST(I,J) (d)[n * (J) + (I)] int arraymax(int 
*a, int n); void swap(int i, int j, int *a); int largestmobpos(int *mobile, int *current, int *collectvals, int n); /* ------------ The main function ----------------------------- */ void dinfty_R(int *d, int *num, int *assignment) { int i,j; /* indices */ int lmp, lmq; /* largest mobile position and its neighbor */ int newmax; int n, currmin; int *current, *travel, *mobile, *assig, *distrelev, *collectvals; n = *num; /* scratch space */ assig = (int *) R_alloc((long) n, sizeof(int)); travel = (int *) R_alloc((long) n, sizeof(int)); mobile = (int *) R_alloc((long) n, sizeof(int)); current = (int *) R_alloc((long) n, sizeof(int)); distrelev = (int *) R_alloc((long) n, sizeof(int)); collectvals = (int *) R_alloc((long) (n * n), sizeof(int)); /* */ /* We use the Johnson-Trotter Algorithm for listing permutations */ /* */ /* Initialize the algorithm */ for (i = 0; i < n; i++) { travel[i] = -1; /* all numbers traveling to the left */ mobile[i] = 1; /* all numbers mobile */ current[i] = i; /* current permutation is the identity */ assig[i] = i; /* best permutation up to now is the identity */ distrelev[i] = COST(i, i); /* pick relevant entries in the cost matrix */ } currmin = arraymax(distrelev, n); /* minimal max up to now */ /* The main loop */ while(arraymax(mobile, n) == 1) { lmp = largestmobpos(mobile, current, collectvals, n); lmq = lmp + travel[lmp]; swap(lmp, lmq, current); swap(lmp, lmq, travel); for (i = 0; i < n; i++) { if (current[i] > current[lmq]) travel[i] = -travel[i]; j = i + travel[i]; if (j < 0 || j > n-1 || current[i] < current[j]) mobile[i] = 0; else mobile[i] = 1; distrelev[i] = COST(i, current[i]); } /* Calculation of new maximal value */ newmax = arraymax(distrelev, n); if (newmax < currmin) { currmin = newmax; for (i = 0; i < n; i++) { assig[i] = current[i]; } } } /* For testing: print distance from within C program Rprintf("Prohorov distance is %d\n", currmin); */ /* "Return" the final assignment */ for (i = 0; i < n; i++) { assignment[i] = 
assig[i] + 1; } } /* ------------------------------------------------------------*/ /* Maximal element of an integer array */ int arraymax(int *a, int n) { int i, amax; if(n < 1) return(-1); amax = a[0]; if(n > 1) for(i = 0; i < n; i++) if(a[i] > amax) amax = a[i]; return(amax); } /* Swap elements i and j in array a */ void swap(int i, int j, int *a) { int v; v = a[i]; a[i] = a[j]; a[j] = v; } /* Return index of largest mobile number in current */ int largestmobpos(int *mobile, int *current, int *collectvals, int n) { int i,j, maxval; j = 0; for (i = 0; i < n; i++) { if (mobile[i] == 1) { collectvals[j] = current[i]; j++; } } maxval = arraymax(collectvals, j); for (i = 0; i < n; i++) { if (current[i] == maxval) { return(i); } } error("Internal error: largestmobpos failed"); return(0); } spatstat/src/PerfectHardcore.h0000644000176000001440000001137712252324034016153 0ustar ripleyusers // ........................... Hardcore process .......................... // $Revision: 1.4 $ $Date: 2012/03/10 11:23:09 $ class HardcoreProcess : public PointProcess { public: double beta, R, Rsquared; HardcoreProcess(double xmin, double xmax, double ymin, double ymax, double b, double Ri); ~HardcoreProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; HardcoreProcess::HardcoreProcess(double xmin, double xmax, double ymin, double ymax, double b, double Ri) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; R = Ri; Rsquared = R * R; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double HardcoreProcess::Interaction(double dsquared) { double rtn; rtn = 1; if(dsquared < Rsquared) rtn = 0; return(rtn); } void HardcoreProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void 
HardcoreProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating HardcoreProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating HardcoreProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating HardcoreProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... extern "C" { SEXP PerfectHardcore(SEXP beta, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; long int StartTime, EndTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 4 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); R = *(NUMERIC_POINTER(r)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Hardcore point process HardcoreProcess 
ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(8); // 4 arguments plus xout, yout, nout, out return(out); } } spatstat/src/geyer.c0000755000176000001440000002363212252324034014221 0ustar ripleyusers#include #include #include #include "methas.h" #include "dist2.h" void fexitc(const char *msg); #undef MH_DEBUG /* Conditional intensity function for a Geyer saturation process. 
*/ typedef struct Geyer { /* model parameters */ double gamma; double r; double s; /* transformations of the parameters */ double r2; double loggamma; int hard; /* periodic distance */ double *period; int per; /* auxiliary counts */ int *aux; #ifdef MH_DEBUG int *freshaux; int prevtype; #endif } Geyer; Cdata *geyerinit(state, model, algo) State state; Model model; Algor algo; { int i, j, n1; Geyer *geyer; double r2; double *period; DECLARE_CLOSE_VARS; geyer = (Geyer *) R_alloc(1, sizeof(Geyer)); /* Interpret model parameters*/ geyer->gamma = model.ipar[0]; geyer->r = model.ipar[1]; /* not squared any more */ geyer->s = model.ipar[2]; geyer->r2 = geyer->r * geyer->r; #ifdef MHDEBUG Rprintf("Initialising Geyer gamma=%lf, r=%lf, sat=%lf\n", geyer->gamma, geyer->r, geyer->s); #endif /* is the model numerically equivalent to hard core ? */ geyer->hard = (geyer->gamma < DOUBLE_EPS); geyer->loggamma = (geyer->hard) ? 0 : log(geyer->gamma); /* periodic boundary conditions? */ geyer->period = model.period; geyer->per = (model.period[0] > 0.0); /* allocate storage for auxiliary counts */ geyer->aux = (int *) R_alloc((size_t) state.npmax, sizeof(int)); #ifdef MH_DEBUG geyer->freshaux = (int *) R_alloc((size_t) state.npmax, sizeof(int)); geyer->prevtype = -42; #endif r2 = geyer->r2; /* Initialise auxiliary counts */ for(i = 0; i < state.npmax; i++) geyer->aux[i] = 0; if(geyer->per) { /* periodic */ period = geyer->period; if(state.npts > 1) { n1 = state.npts - 1; for(i = 0; i < n1; i++) { for(j = i+1; j < state.npts; j++) { if(CLOSE_PERIODIC(state.x[i], state.y[i], state.x[j], state.y[j], period, r2)) { geyer->aux[i] += 1; geyer->aux[j] += 1; } } } } } else { /* Euclidean distance */ if(state.npts > 1) { n1 = state.npts - 1; for(i = 0; i < n1; i++) { for(j = i+1; j < state.npts; j++) { if(CLOSE(state.x[i], state.y[i], state.x[j], state.y[j], r2)) { geyer->aux[i] += 1; geyer->aux[j] += 1; } } } } } return((Cdata *) geyer); } double geyercif(prop, state, cdata) Propo prop; State 
state; Cdata *cdata; { int ix, j, npts, tee; double u, v, r2, s; double w, a, b, f, cifval; double *x, *y; int *aux; double *period; Geyer *geyer; DECLARE_CLOSE_VARS; geyer = (Geyer *) cdata; npts = state.npts; if(npts==0) return ((double) 1.0); x = state.x; y = state.y; u = prop.u; v = prop.v; ix = prop.ix; r2 = geyer->r2; s = geyer->s; period = geyer->period; aux = geyer->aux; /* tee = neighbour count at the point in question; w = sum of changes in (saturated) neighbour counts at other points */ tee = w = 0.0; if(prop.itype == BIRTH) { if(geyer->per) { /* periodic distance */ for(j=0; j 1) /* j is not saturated after addition of (u,v) */ w = w + 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w = w + f; } } } else { /* Euclidean distance */ for(j=0; j 1) /* j is not saturated after addition of (u,v) */ w = w + 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w = w + f; } } } } else if(prop.itype == DEATH) { tee = aux[ix]; if(geyer->per) { /* Periodic distance */ for(j=0; j 0) /* j is not saturated */ w = w + 1; /* deletion of 'ix' decreases count by 1 */ else { f = f+1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w = w + f; } } } } } else { /* Euclidean distance */ for(j=0; j 0) /* j was not saturated */ w = w + 1; /* deletion of 'ix' decreases count by 1 */ else { f = f+1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w = w + f; } } } } } } else if(prop.itype == SHIFT) { /* Compute the cif at the new point, not the ratio of new/old */ if(geyer->per) { /* Periodic distance */ for(j=0; j= b) w = w + 1; } } } else { /* Euclidean distance */ for(j=0; j= b) w = w + 1; } } } } w = w + ((tee < s) ? 
tee : s); if(geyer->hard) { if(tee > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp(geyer->loggamma*w); return cifval; } void geyerupd(state, prop, cdata) State state; Propo prop; Cdata *cdata; { /* Declare other variables */ int ix, npts, j; int oldclose, newclose; double u, v, xix, yix, r2; double *x, *y; int *aux; double *period; Geyer *geyer; #ifdef MH_DEBUG int *freshaux; int i; int oc, nc; #endif DECLARE_CLOSE_VARS; geyer = (Geyer *) cdata; period = geyer->period; aux = geyer->aux; r2 = geyer->r2; x = state.x; y = state.y; npts = state.npts; #ifdef MH_DEBUG /* ........................ debugging cross-check ................ */ /* recompute 'aux' values afresh */ freshaux = geyer->freshaux; for(i = 0; i < state.npts; i++) freshaux[i] = 0; if(geyer->per) { /* periodic */ for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(i == j) continue; if(CLOSE_PERIODIC(state.x[i], state.y[i], state.x[j], state.y[j], period, r2)) freshaux[i] += 1; } } } else { /* Euclidean distance */ for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(i == j) continue; if(CLOSE(state.x[i], state.y[i], state.x[j], state.y[j], r2)) freshaux[i] += 1; } } } /* Check agreement with 'aux' */ for(j = 0; j < state.npts; j++) { if(aux[j] != freshaux[j]) { Rprintf("\n\taux[%d] = %d, freshaux[%d] = %d\n", j, aux[j], j, freshaux[j]); Rprintf("\tnpts = %d\n", state.npts); Rprintf("\tperiod = (%lf, %lf)\n", period[0], period[1]); if(geyer->prevtype == BIRTH) error("updaux failed after BIRTH"); if(geyer->prevtype == DEATH) error("updaux failed after DEATH"); if(geyer->prevtype == SHIFT) error("updaux failed after SHIFT"); error("updaux failed at start"); } } /* OK. Record type of this transition */ geyer->prevtype = prop.itype; /* ................ end debug cross-check ................ 
*/ #endif if(prop.itype == BIRTH) { /* Birth */ u = prop.u; v = prop.v; /* initialise auxiliary counter for new point */ aux[npts] = 0; /* update all auxiliary counters */ if(geyer->per) { /* periodic distance */ for(j=0; j < npts; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { aux[j] += 1; aux[npts] += 1; } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { aux[j] += 1; aux[npts] += 1; } } } } else if(prop.itype == DEATH) { /* Death */ ix = prop.ix; u = x[ix]; v = y[ix]; /* decrement auxiliary counter for each point */ if(geyer->per) { /* periodic distance */ for(j=0; j= ix) aux[j-1] = aux[j]; } } else { /* Euclidean distance */ for(j=0; j= ix) aux[j-1] = aux[j]; } } } else if(prop.itype == SHIFT) { /* Shift */ u = prop.u; v = prop.v; ix = prop.ix; xix = x[ix]; yix = y[ix]; /* recompute auxiliary counter for point 'ix' */ aux[ix] = 0; /* update auxiliary counters for other points */ if(geyer->per) { for(j=0; j #include #include #include "chunkloop.h" /* localpcf.c $Revision: 1.3 $ $Date: 2013/05/27 02:09:10 $ Assumes point patterns are sorted in increasing order of x coordinate */ #undef WEIGHTED #include "localpcf.h" #define WEIGHTED 1 #include "localpcf.h" spatstat/src/seg2pix.c0000755000176000001440000002223712252324034014467 0ustar ripleyusers#include #include #include #include #include "chunkloop.h" #undef DEBUG /* seg2pix.c Discretise line segment on pixel grid seg2pixI pixel value is indicator = 1 if any line crosses pixel seg2pixL pixel value is total (weighted) length of lines inside pixel (rescale R data so that pixels are integer) pixels numbered 0, ..., nx-1 and 0, ..., ny-1 with boundaries at x=0, x=nx, y=0, y=ny. 
*/ #define V(I,J) out[(I) + (J) * (Ny)] int clamp(k, n0, n1) int k, n0, n1; { int m; m = k; if(m < n0) m = n0; if(m > n1) m = n1; return(m); } void seg2pixI(ns,x0,y0,x1,y1,nx,ny,out) int *ns; /* number of segments */ double *x0,*y0,*x1,*y1; /* coordinates of segment endpoints */ int *nx, *ny; /* dimensions of pixel array (columns, rows) */ int *out; { int Ns, Nx, Ny, i, j, k, m, m0, m1, mmin, mmax, maxchunk; double x0i, x1i, y0i, y1i, dx, dy; double leni; double xleft, yleft, xright, yright, slope; double xstart, ystart, xfinish, yfinish; int mleft, mright, kstart, kfinish, kmin, kmax; Ns = *ns; Nx = *nx; Ny = *ny; for(k = 0; k < Ny - 1; k++) for(j = 0; j < Nx - 1; j++) V(k, j) = 0; OUTERCHUNKLOOP(i, Ns, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 8196) { x0i = x0[i]; y0i = y0[i]; x1i = x1[i]; y1i = y1[i]; dx = x1i - x0i; dy = y1i - y0i; leni = hypot(dx, dy); #ifdef DEBUG Rprintf("(%lf, %lf) to (%lf, %lf)\n", x0i, y0i, x1i, y1i); #endif if(leni < 0.001) { /* tiny segment */ #ifdef DEBUG Rprintf("tiny\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); V(j,k) = 1; } else if(floor(x1i) == floor(x0i) && floor(y1i) == floor(y0i)) { /* contained in one cell */ #ifdef DEBUG Rprintf("contained in one cell\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); V(j,k) = 1; } else if(floor(y1i) == floor(y0i)) { /* horizontal */ #ifdef DEBUG Rprintf("horizontal\n"); #endif j = clamp((int) floor(y1i), 0, Ny-1); m0 = clamp((int) floor(x0i), 0, Nx-1); m1 = clamp((int) floor(x1i), 0, Nx-1); mmin = (m0 < m1) ? m0: m1; mmax = (m0 < m1) ? 
m1: m0; #ifdef DEBUG Rprintf("row %d: columns [%d, %d]\n", j, mmin, mmax); #endif for(k = mmin; k <= mmax; k++) V(j,k) = 1; } else if(floor(x1i) == floor(x0i)) { /* vertical */ #ifdef DEBUG Rprintf("vertical\n"); #endif k = clamp((int) floor(x1i), 0, Nx-1); m0 = clamp((int) floor(y0i), 0, Ny-1); m1 = clamp((int) floor(y1i), 0, Ny-1); mmin = (m0 < m1) ? m0: m1; mmax = (m0 < m1) ? m1: m0; #ifdef DEBUG Rprintf("column %d: rows [%d, %d]\n", k, mmin, mmax); #endif for(j = mmin; j <= mmax; j++) V(j,k) = 1; } else { /* general case */ #ifdef DEBUG Rprintf("general\n"); #endif if(x1i > x0i) { xleft = x0i; yleft = y0i; xright = x1i; yright = y1i; } else { xleft = x1i; yleft = y1i; xright = x0i; yright = y0i; } slope = (yright - yleft)/(xright - xleft); mleft = clamp((int) floor(xleft), 0, Nx-1); mright = clamp((int) floor(xright), 0, Nx-1); #ifdef DEBUG Rprintf("column range [%d, %d]\n", mleft, mright); #endif /* treat each vertical slice */ for(m = mleft; m <= mright; m++) { if(m == mleft) { xstart = xleft; ystart = yleft; } else { xstart = m; ystart = yleft + slope * (xstart - xleft); } if(m == mright) { xfinish = xright; yfinish = yright; } else { xfinish = m+1; yfinish = yleft + slope * (xfinish - xleft); } kstart = clamp((int) floor(ystart), 0, Ny-1); kfinish = clamp((int) floor(yfinish), 0, Ny-1); kmin = (kstart < kfinish) ? kstart : kfinish; kmax = (kstart < kfinish) ? 
kfinish : kstart; #ifdef DEBUG Rprintf("column %d: rows [%d, %d]\n", m, kmin, kmax); #endif for(k = kmin; k <= kmax; k++) V(k, m) = 1; } } /* end of if-else */ } } #ifdef DEBUG Rprintf("done\n"); #endif } void seg2pixL(ns,x0,y0,x1,y1,weights,pixwidth,pixheight,nx,ny,out) int *ns; double *x0,*y0,*x1,*y1,*weights; /* segment coordinates and weights */ double *pixwidth, *pixheight; /* original pixel dimensions */ int *nx, *ny; double *out; /* output matrix */ { int Ns, Nx, Ny, i, j, k, m, mmin, mmax, maxchunk; double x0i, x1i, y0i, y1i; double leni; double xleft, yleft, xright, yright, slope, scalesecant; double xlow, xhigh, ylow, yhigh, invslope, scalecosecant; double xstart, ystart, xfinish, yfinish; double xxx0, xxx1, yyy0, yyy1; int mleft, mright, kstart, kfinish, kmin, kmax; double pwidth, pheight, pwidth2, pheight2; double wti; Ns = *ns; Nx = *nx; Ny = *ny; /* one scaled x unit = 'pwidth' original x units one scaled y unit = 'pheight' original y units */ pwidth = *pixwidth; pheight = *pixheight; pwidth2 = pwidth * pwidth; pheight2 = pheight * pheight; /* zero the matrix */ for(k = 0; k < Ny - 1; k++) for(j = 0; j < Nx - 1; j++) V(k, j) = 0; OUTERCHUNKLOOP(i, Ns, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Ns, maxchunk, 8196) { x0i = x0[i]; y0i = y0[i]; x1i = x1[i]; y1i = y1[i]; wti = weights[i]; leni = sqrt(pwidth2 * pow(x1i - x0i, 2) + pheight2 * pow(y1i-y0i, 2)); #ifdef DEBUG Rprintf("(%lf, %lf) to (%lf, %lf), length %lf\n", x0i, y0i, x1i, y1i, leni); #endif if(leni < 0.001) { /* tiny segment */ #ifdef DEBUG Rprintf("tiny\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); V(j,k) += wti * leni; } else if(floor(x1i) == floor(x0i) && floor(y1i) == floor(y0i)) { /* contained in one cell */ #ifdef DEBUG Rprintf("contained in one cell\n"); #endif k = clamp((int) floor(x0i), 0, Nx-1); j = clamp((int) floor(y0i), 0, Ny-1); V(j,k) += wti * leni; } else if(floor(y1i) == floor(y0i)) { /* horizontal */ #ifdef DEBUG 
Rprintf("horizontal\n"); #endif j = clamp((int) floor(y1i), 0, Ny-1); if(x1i > x0i) { xleft = x0i; yleft = y0i; xright = x1i; yright = y1i; } else { xleft = x1i; yleft = y1i; xright = x0i; yright = y0i; } mmin = clamp((int) floor(xleft), 0, Nx-1); mmax = clamp((int) floor(xright), 0, Nx-1); slope = (yright - yleft)/(xright - xleft); scalesecant = wti * sqrt(pwidth2 + slope * slope * pheight2); /* For this slope, one scaled x unit means 'pwidth' original x units and slope * pheight original y units i.e. line length sqrt(pwidth^2 + slope^2 * pheight^2) */ for(k = mmin; k <= mmax; k++) { xstart = (k == mmin) ? xleft : k; xfinish = (k == mmax) ? xright : (k+1); V(j,k) += (xfinish - xstart) * scalesecant; } } else if(floor(x1i) == floor(x0i)) { /* vertical */ #ifdef DEBUG Rprintf("vertical\n"); #endif k = clamp((int) floor(x1i), 0, Nx-1); if(y1i > y0i) { xlow = x0i; ylow = y0i; xhigh = x1i; yhigh = y1i; } else { xlow = x1i; ylow = y1i; xhigh = x0i; yhigh = y0i; } mmin = clamp((int) floor(ylow), 0, Ny-1); mmax = clamp((int) floor(yhigh), 0, Ny-1); invslope = (xhigh - xlow)/(yhigh - ylow); scalecosecant = wti * sqrt(pheight2 + invslope * invslope * pwidth2); #ifdef DEBUG Rprintf("i = %d\n", i); Rprintf("inverse slope = %lf\n", invslope); Rprintf("scaled cosecant = %lf\n", scalecosecant); #endif /* For this slope, one scaled y unit means 'pheight' original y units and invslope * pwidth original x units i.e. line length sqrt(pheight^2 + invslope^2 * pwidth^2) */ for(j = mmin; j <= mmax; j++) { ystart = (j == mmin)? ylow : j; yfinish = (j == mmax)? 
yhigh : (j+1); V(j,k) += (yfinish - ystart) * scalecosecant; } } else { /* general case */ #ifdef DEBUG Rprintf("general\n"); #endif if(x1i > x0i) { xleft = x0i; yleft = y0i; xright = x1i; yright = y1i; } else { xleft = x1i; yleft = y1i; xright = x0i; yright = y0i; } slope = (yright - yleft)/(xright - xleft); mleft = clamp((int) floor(xleft), 0, Nx-1); mright = clamp((int) floor(xright), 0, Nx-1); #ifdef DEBUG Rprintf("column range [%d, %d]\n", mleft, mright); #endif /* treat each vertical slice */ for(m = mleft; m <= mright; m++) { if(m == mleft) { xstart = xleft; ystart = yleft; } else { xstart = m; ystart = yleft + slope * (xstart - xleft); } if(m == mright) { xfinish = xright; yfinish = yright; } else { xfinish = m+1; yfinish = yleft + slope * (xfinish - xleft); } kstart = clamp((int) floor(ystart), 0, Ny-1); kfinish = clamp((int) floor(yfinish), 0, Ny-1); if(ystart < yfinish) { kmin = kstart; kmax = kfinish; ylow = ystart; yhigh = yfinish; } else { kmin = kfinish; kmax = kstart; ylow = yfinish; yhigh = ystart; } #ifdef DEBUG Rprintf("column %d: rows [%d, %d]\n", m, kmin, kmax); #endif for(k = kmin; k <= kmax; k++) { yyy0 = (k == kmin) ? ylow : k; yyy1 = (k == kmax) ? 
yhigh : (k+1); xxx0 = xstart + (yyy0 - ystart)/slope; xxx1 = xstart + (yyy1 - ystart)/slope; V(k, m) += wti * sqrt(pow(yyy1 - yyy0, 2) * pheight2 + pow(xxx1 - xxx0, 2) * pwidth2); } } } } } #ifdef DEBUG Rprintf("done.\n"); #endif } spatstat/src/lincrossdist.c0000644000176000001440000000456612252324034015630 0ustar ripleyusers#include #include #include "chunkloop.h" /* lincrossdist.c Shortest-path distances between pairs of points in linear network $Revision: 1.3 $ $Date: 2012/10/13 03:45:41 $ lincrossdist */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) void lincrossdist(np, xp, yp, /* data points from which distances are measured */ nq, xq, yq, /* data points to which distances are measured */ nv, xv, yv, /* network vertices */ ns, from, to, /* segments */ dpath, /* shortest path distances between vertices */ psegmap, /* map from data points to segments */ qsegmap, /* map from data points to segments */ /* OUTPUT */ answer /* shortest path distances between points */ ) int *np, *nq, *nv, *ns; int *from, *to, *psegmap, *qsegmap; /* integer vectors (mappings) */ double *xp, *yp, *xq, *yq, *xv, *yv; /* vectors of coordinates */ double *dpath, *answer; /* matrices */ { int Np, Nq, Nv, i, j, maxchunk; int Psegi, Qsegj, nbi1, nbi2, nbj1, nbj2; double xpi, ypi, xqj, yqj; double d, dPiV1, dPiV2, dV1Qj, dV2Qj, d11, d12, d21, d22; Np = *np; Nq = *nq; Nv = *nv; OUTERCHUNKLOOP(i, Np, maxchunk, 1024) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Np, maxchunk, 1024) { xpi = xp[i]; ypi = yp[i]; Psegi = psegmap[i]; nbi1 = from[Psegi]; nbi2 = to[Psegi]; dPiV1 = EUCLID(xpi, ypi, xv[nbi1], yv[nbi1]); dPiV2 = EUCLID(xpi, ypi, xv[nbi2], yv[nbi2]); for(j = 0; j < Nq; j++) { xqj = xq[j]; yqj = yq[j]; Qsegj = qsegmap[j]; if(Psegi == Qsegj) { /* points i and j lie on the same segment; use Euclidean distance */ d = sqrt(pow(xpi - xqj, 2) + pow(ypi - yqj, 2)); } else { /* Shortest path from i to 
j passes through ends of segments; Calculate shortest of 4 possible paths from i to j */ nbj1 = from[Qsegj]; nbj2 = to[Qsegj]; dV1Qj = EUCLID(xv[nbj1], yv[nbj1], xqj, yqj); dV2Qj = EUCLID(xv[nbj2], yv[nbj2], xqj, yqj); d11 = dPiV1 + DPATH(nbi1,nbj1) + dV1Qj; d12 = dPiV1 + DPATH(nbi1,nbj2) + dV2Qj; d21 = dPiV2 + DPATH(nbi2,nbj1) + dV1Qj; d22 = dPiV2 + DPATH(nbi2,nbj2) + dV2Qj; d = d11; if(d12 < d) d = d12; if(d21 < d) d = d21; if(d22 < d) d = d22; } /* write */ ANSWER(i,j) = d; } } } } spatstat/src/Estrauss.c0000755000176000001440000000306412252324034014714 0ustar ripleyusers#include #include #include "chunkloop.h" /* Estrauss.c $Revision: 1.3 $ $Date: 2012/03/28 05:56:24 $ C implementation of 'eval' for Strauss interaction Calculates number of data points within distance r of each quadrature point (when 'source' = quadrature points, 'target' = data points) Assumes point patterns are sorted in increasing order of x coordinate */ double sqrt(); void Ccrosspaircounts(nnsource, xsource, ysource, nntarget, xtarget, ytarget, rrmax, counts) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget, *rrmax; /* output */ int *counts; { int nsource, ntarget, maxchunk, j, i, ileft, counted; double xsourcej, ysourcej, rmax, r2max, xleft, dx, dy, dx2, d2; nsource = *nnsource; ntarget = *nntarget; rmax = *rrmax; r2max = rmax * rmax; if(nsource == 0 || ntarget == 0) return; ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { counted = 0; xsourcej = xsource[j]; ysourcej = ysource[j]; /* adjust starting point */ xleft = xsourcej - rmax; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft to iright */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > r2max) break; dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= r2max) ++counted; } counts[j] = counted; } } } 
spatstat/src/methas.c0000755000176000001440000002643712252324034014375 0ustar ripleyusers#include #include #include #include "methas.h" #include "chunkloop.h" #include "mhsnoop.h" void fexitc(const char *msg); /* To switch on debugging code, insert the line: #define MH_DEBUG YES */ #ifndef MH_DEBUG #define MH_DEBUG NO #endif /* This is the value of 'ix' when we are proposing a birth. It must be equal to -1 so that NONE+1 = 0. */ #define NONE -1 extern Cifns getcif(char *); SEXP xmethas( SEXP ncif, SEXP cifname, SEXP beta, SEXP ipar, SEXP iparlen, SEXP period, SEXP xprop, SEXP yprop, SEXP mprop, SEXP ntypes, SEXP nrep, SEXP p, SEXP q, SEXP nverb, SEXP nrep0, SEXP x, SEXP y, SEXP marks, SEXP ncond, SEXP fixall, SEXP track, SEXP snoopenv) { char *cifstring; double cvd, cvn, qnodds, anumer, adenom, betavalue; double *iparvector; int verb, marked, mustupdate, itype; int nfree; int irep, ix, j, maxchunk, iverb; int Ncif; int *plength; long Nmore; double *xx, *yy, *xpropose, *ypropose; int *mm, *mpropose, *pp, *aa; SEXP out, xout, yout, mout, pout, aout; int tracking, ntrack; #ifdef HISTORY_INCLUDES_RATIO SEXP numout, denout; double *nn, *dd; #endif State state; Model model; Algor algo; Propo birthprop, deathprop, shiftprop; History history; Snoop snooper; /* The following variables are used only for a non-hybrid interaction */ Cifns thecif; /* cif structure */ Cdata *thecdata; /* pointer to initialised cif data block */ /* The following variables are used only for a hybrid interaction */ Cifns *cif; /* vector of cif structures */ Cdata **cdata; /* vector of pointers to initialised cif data blocks */ int *needupd; /* vector of logical values */ int k; /* loop index for cif's */ /* =================== Protect R objects from garbage collector ======= */ PROTECT(ncif = AS_INTEGER(ncif)); PROTECT(cifname = AS_CHARACTER(cifname)); PROTECT(beta = AS_NUMERIC(beta)); PROTECT(ipar = AS_NUMERIC(ipar)); PROTECT(iparlen = AS_INTEGER(iparlen)); PROTECT(period = AS_NUMERIC(period)); 
PROTECT(xprop = AS_NUMERIC(xprop)); PROTECT(yprop = AS_NUMERIC(yprop)); PROTECT(mprop = AS_INTEGER(mprop)); PROTECT(ntypes = AS_INTEGER(ntypes)); PROTECT(nrep = AS_INTEGER(nrep)); PROTECT( p = AS_NUMERIC(p)); PROTECT( q = AS_NUMERIC(q)); PROTECT(nverb = AS_INTEGER(nverb)); PROTECT(nrep0 = AS_INTEGER(nrep0)); PROTECT( x = AS_NUMERIC(x)); PROTECT( y = AS_NUMERIC(y)); PROTECT( marks = AS_INTEGER(marks)); PROTECT(fixall = AS_INTEGER(fixall)); PROTECT(ncond = AS_INTEGER(ncond)); PROTECT(track = AS_INTEGER(track)); /* that's 21 protected objects */ /* =================== Translate arguments from R to C ================ */ /* Ncif is the number of cif's plength[i] is the number of interaction parameters in the i-th cif */ Ncif = *(INTEGER_POINTER(ncif)); plength = INTEGER_POINTER(iparlen); /* copy RMH algorithm parameters */ algo.nrep = *(INTEGER_POINTER(nrep)); algo.nverb = *(INTEGER_POINTER(nverb)); algo.nrep0 = *(INTEGER_POINTER(nrep0)); algo.p = *(NUMERIC_POINTER(p)); algo.q = *(NUMERIC_POINTER(q)); algo.fixall = ((*(INTEGER_POINTER(fixall))) == 1); algo.ncond = *(INTEGER_POINTER(ncond)); /* copy model parameters without interpreting them */ model.beta = NUMERIC_POINTER(beta); model.ipar = iparvector = NUMERIC_POINTER(ipar); model.period = NUMERIC_POINTER(period); model.ntypes = *(INTEGER_POINTER(ntypes)); state.ismarked = marked = (model.ntypes > 1); /* copy initial state */ state.npts = LENGTH(x); state.npmax = 4 * ((state.npts > 256) ? 
state.npts : 256); state.x = (double *) R_alloc(state.npmax, sizeof(double)); state.y = (double *) R_alloc(state.npmax, sizeof(double)); xx = NUMERIC_POINTER(x); yy = NUMERIC_POINTER(y); if(marked) { state.marks =(int *) R_alloc(state.npmax, sizeof(int)); mm = INTEGER_POINTER(marks); } if(!marked) { for(j = 0; j < state.npts; j++) { state.x[j] = xx[j]; state.y[j] = yy[j]; } } else { for(j = 0; j < state.npts; j++) { state.x[j] = xx[j]; state.y[j] = yy[j]; state.marks[j] = mm[j]; } } #if MH_DEBUG Rprintf("\tnpts=%d\n", state.npts); #endif /* access proposal data */ xpropose = NUMERIC_POINTER(xprop); ypropose = NUMERIC_POINTER(yprop); mpropose = INTEGER_POINTER(mprop); /* we need to initialise 'mpropose' to keep compilers happy. mpropose is only used for marked patterns. Note 'mprop' is always a valid pointer */ /* ================= Allocate space for cifs etc ========== */ if(Ncif > 1) { cif = (Cifns *) R_alloc(Ncif, sizeof(Cifns)); cdata = (Cdata **) R_alloc(Ncif, sizeof(Cdata *)); needupd = (int *) R_alloc(Ncif, sizeof(int)); } else { /* Keep the compiler happy */ cif = (Cifns *) R_alloc(1, sizeof(Cifns)); cdata = (Cdata **) R_alloc(1, sizeof(Cdata *)); needupd = (int *) R_alloc(1, sizeof(int)); } /* ================= Determine process to be simulated ========== */ /* Get the cif's */ if(Ncif == 1) { cifstring = (char *) STRING_VALUE(cifname); thecif = getcif(cifstring); mustupdate = NEED_UPDATE(thecif); if(thecif.marked && !marked) fexitc("cif is for a marked point process, but proposal data are not marked points; bailing out."); /* Keep compiler happy*/ cif[0] = thecif; needupd[0] = mustupdate; } else { mustupdate = NO; for(k = 0; k < Ncif; k++) { cifstring = (char *) CHAR(STRING_ELT(cifname, k)); cif[k] = getcif(cifstring); needupd[k] = NEED_UPDATE(cif[k]); if(needupd[k]) mustupdate = YES; if(cif[k].marked && !marked) fexitc("component cif is for a marked point process, but proposal data are not marked points; bailing out."); } } /* ============= Initialise 
transition history ========== */ tracking = (*(INTEGER_POINTER(track)) != 0); /* Initialise even if not needed, to placate the compiler */ if(tracking) { history.nmax = algo.nrep; } else { history.nmax = 1; } history.n = 0; history.proptype = (int *) R_alloc(history.nmax, sizeof(int)); history.accepted = (int *) R_alloc(history.nmax, sizeof(int)); #ifdef HISTORY_INCLUDES_RATIO history.numerator = (double *) R_alloc(history.nmax, sizeof(double)); history.denominator = (double *) R_alloc(history.nmax, sizeof(double)); #endif /* ============= Visual debugging ========== */ /* Active if 'snoopenv' is an environment */ #if MH_DEBUG Rprintf("Initialising mhsnoop\n"); #endif initmhsnoop(&snooper, snoopenv); #if MH_DEBUG Rprintf("Initialised\n"); if(snooper.active) Rprintf("Debugger is active.\n"); #endif /* ================= Initialise algorithm ==================== */ /* Interpret the model parameters and initialise auxiliary data */ if(Ncif == 1) { thecdata = (*(thecif.init))(state, model, algo); /* keep compiler happy */ cdata[0] = thecdata; } else { for(k = 0; k < Ncif; k++) { if(k > 0) model.ipar += plength[k-1]; cdata[k] = (*(cif[k].init))(state, model, algo); } } /* Set the fixed elements of the proposal objects */ birthprop.itype = BIRTH; deathprop.itype = DEATH; shiftprop.itype = SHIFT; birthprop.ix = NONE; if(!marked) birthprop.mrk = deathprop.mrk = shiftprop.mrk = NONE; /* Set up some constants */ verb = (algo.nverb !=0); qnodds = (1.0 - algo.q)/algo.q; /* Set value of beta for unmarked process */ /* (Overwritten for marked process, but keeps compiler happy) */ betavalue = model.beta[0]; /* ============= Run Metropolis-Hastings ================== */ /* Initialise random number generator */ GetRNGstate(); /* Here comes the code for the M-H loop. The basic code (in mhloop.h) is #included many times using different options The C preprocessor descends through a chain of files mhv1.h, mhv2.h, ... to enumerate all possible combinations of flags. 
*/ #include "mhv1.h" /* relinquish random number generator */ PutRNGstate(); /* ============= Done ================== */ /* Create space for output, and copy final state */ /* Point coordinates */ PROTECT(xout = NEW_NUMERIC(state.npts)); PROTECT(yout = NEW_NUMERIC(state.npts)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); for(j = 0; j < state.npts; j++) { xx[j] = state.x[j]; yy[j] = state.y[j]; } /* Marks */ if(marked) { PROTECT(mout = NEW_INTEGER(state.npts)); mm = INTEGER_POINTER(mout); for(j = 0; j < state.npts; j++) mm[j] = state.marks[j]; } else { /* Keep the compiler happy */ PROTECT(mout = NEW_INTEGER(1)); mm = INTEGER_POINTER(mout); mm[0] = 0; } /* Transition history */ if(tracking) { PROTECT(pout = NEW_INTEGER(algo.nrep)); PROTECT(aout = NEW_INTEGER(algo.nrep)); pp = INTEGER_POINTER(pout); aa = INTEGER_POINTER(aout); for(j = 0; j < algo.nrep; j++) { pp[j] = history.proptype[j]; aa[j] = history.accepted[j]; } #ifdef HISTORY_INCLUDES_RATIO PROTECT(numout = NEW_NUMERIC(algo.nrep)); PROTECT(denout = NEW_NUMERIC(algo.nrep)); nn = NUMERIC_POINTER(numout); dd = NUMERIC_POINTER(denout); for(j = 0; j < algo.nrep; j++) { nn[j] = history.numerator[j]; dd[j] = history.denominator[j]; } #endif } else { /* Keep the compiler happy */ PROTECT(pout = NEW_INTEGER(1)); PROTECT(aout = NEW_INTEGER(1)); pp = INTEGER_POINTER(pout); aa = INTEGER_POINTER(aout); pp[0] = aa[0] = 0; #ifdef HISTORY_INCLUDES_RATIO PROTECT(numout = NEW_NUMERIC(1)); PROTECT(denout = NEW_NUMERIC(1)); nn = NUMERIC_POINTER(numout); dd = NUMERIC_POINTER(denout); nn[0] = dd[0] = 0; #endif } /* Pack up into list object for return */ if(!tracking) { /* no transition history */ if(!marked) { PROTECT(out = NEW_LIST(2)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); } else { PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, mout); } } else { /* transition history */ if(!marked) { #ifdef HISTORY_INCLUDES_RATIO PROTECT(out = 
NEW_LIST(6)); #else PROTECT(out = NEW_LIST(4)); #endif SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, pout); SET_VECTOR_ELT(out, 3, aout); #ifdef HISTORY_INCLUDES_RATIO SET_VECTOR_ELT(out, 4, numout); SET_VECTOR_ELT(out, 5, denout); #endif } else { #ifdef HISTORY_INCLUDES_RATIO PROTECT(out = NEW_LIST(7)); #else PROTECT(out = NEW_LIST(5)); #endif SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, mout); SET_VECTOR_ELT(out, 3, pout); SET_VECTOR_ELT(out, 4, aout); #ifdef HISTORY_INCLUDES_RATIO SET_VECTOR_ELT(out, 5, numout); SET_VECTOR_ELT(out, 6, denout); #endif } } #ifdef HISTORY_INCLUDES_RATIO UNPROTECT(29); /* 21 arguments plus xout, yout, mout, pout, aout, out, numout, denout */ #else UNPROTECT(27); /* 21 arguments plus xout, yout, mout, pout, aout, out */ #endif return(out); } spatstat/src/dwpure.c0000755000176000001440000002320212252324034014405 0ustar ripleyusers/* dwpure.c $Revision: 1.5 $ $Date: 2011/09/20 07:54:53 $ Code by Dominic Schuhmacher */ #include #include #include typedef struct State { int n1, n2; /* vectors of length n1 (rows) and n2 (cols) */ int *rowmass, *colmass; /* mass to be moved from row / to col */ int *rowlab, *collab; /* row and col labels (specify previous node (row for collab, col for rowlab)) */ int *rowflow, *colflow; /* second component of labels (specify flow through current node) */ int *rowsurplus, *colsurplus; /* the surplus in each row/col under the current flow */ int *dualu, *dualv; /* vectors of dual variables (u for rows, v for cols) */ int *rowhelper, *colhelper; /* helping vector to store intermediate results */ /* could be local in initcost at the moment */ /* n by n matrices */ int *d; /* matrix of costs */ int *flowmatrix; /* matrix of flows */ int *arcmatrix; /* matrix of arcs for restriced primal problem (1 if arc, 0 if no arc) should be unsigned char to save memory however need to workout problem with R_alloc first (see below) */ /* n*n vector */ 
int *collectvals; } State; #define COST(I,J,STATE,NVALUE) ((STATE)->d)[(NVALUE) * (J) + (I)] #define FLOW(I,J,STATE,NVALUE) ((STATE)->flowmatrix)[(NVALUE) * (J) + (I)] #define ARC(I,J,STATE,NVALUE) ((STATE)->arcmatrix)[(NVALUE) * (J) + (I)] #define MIN(A,B) ((A)<(B) ? (A) : (B)) int arraysum(int *a, int n); int arraymin(int *a, int n); void initvalues(State *state); void maxflow(State *state); void updateduals(State *state); void augmentflow(int startcol, State *state); /* ------------ The main function ----------------------------- */ void dwpure(int *d, int *rmass, int *cmass, int *numr, int *numc, int *flowmatrix) { int i,j; /* indices */ int n1,n2; unsigned char feasible = 0; /* boolean for main loop */ State state; /* inputs */ state.n1 = n1 = *numr; state.n2 = n2 = *numc; state.d = d; state.rowmass = rmass; state.colmass = cmass; /* scratch space */ state.rowlab = (int *) R_alloc((long) n1, sizeof(int)); state.collab = (int *) R_alloc((long) n2, sizeof(int)); state.rowflow = (int *) R_alloc((long) n1, sizeof(int)); state.colflow = (int *) R_alloc((long) n2, sizeof(int)); state.rowsurplus = (int *) R_alloc((long) n1, sizeof(int)); state.colsurplus = (int *) R_alloc((long) n2, sizeof(int)); state.dualu = (int *) R_alloc((long) n1, sizeof(int)); state.dualv = (int *) R_alloc((long) n2, sizeof(int)); state.rowhelper = (int *) R_alloc((long) n1, sizeof(int)); state.colhelper = (int *) R_alloc((long) n2, sizeof(int)); state.flowmatrix = (int *) R_alloc((long) (n1 * n2), sizeof(int)); state.arcmatrix = (int *) R_alloc((long) (n1 * n2), sizeof(int)); state.collectvals = (int *) R_alloc((long) (n1 * n2), sizeof(int)); for (i = 0; i < n1; ++i) { for (j = 0; j < n2; ++j) { state.flowmatrix[(n1)*(j) + i] = 0; state.arcmatrix[(n1)*(j) + i] = 0; state.collectvals[(n1)*(j) + i] = 0; } } for (i = 0; i < n1; ++i) { state.rowlab[i] = 0; state.rowflow[i] = 0; state.rowsurplus[i] = 0; state.dualu[i] = 0; state.rowhelper[i] = 0; } for (j = 0; j < n2; ++j) { state.collab[j] = 0; 
state.colflow[j] = 0; state.colsurplus[j] = 0; state.dualv[j] = 0; state.colhelper[j] = 0; } /* Initialize dual variables, arcmatrix, and surpluses */ initvalues(&state); /* For testing: print out cost matrix for (i = 0; i < n1; ++i) { for (j = 0; j < n2; ++j) { Rprintf("%d ", COST(i, j, &state, n1)); } Rprintf("\n"); } */ /* The main loop */ while(feasible == 0) { maxflow(&state); if (arraysum(state.rowsurplus, n1) > 0) { updateduals(&state); /* also updates arcmatrix */ } else { feasible = 1; } } /* "Return" the final flowmatrix */ for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { flowmatrix[n1*j+i] = state.flowmatrix[n1*j+i]; } } } /* ------------ Functions called by dwpure_R ------------------------- */ /* Sum of integer array */ int arraysum(int *a, int n) { int i; int asum = 0; for (i = 0; i < n; i++) asum += a[i]; return(asum); } /* Minimal element of an integer array */ int arraymin(int *a, int n) { int i, amin; if (n < 1) return(-1); amin = a[0]; if (n > 1) for (i = 0; i < n; i++) if (a[i] < amin) amin = a[i]; return(amin); } /* Initialize cost matrix: subtract in each row its minimal entry (from all the entries in the row), then subtract in each column its minimal entry (from all the entries in the column) */ void initvalues(State *state) { int i,j,n1,n2; n1 = state->n1; n2 = state->n2; /* Initial surpluses; can I do this shorter? 
later on surpluses are updated in flow augmentation step */ for (i = 0; i < n1; i++) state->rowsurplus[i] = state->rowmass[i]; for (j = 0; j < n2; j++) state->colsurplus[j] = state->colmass[j]; for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) state->colhelper[j] = COST(i, j, state, n1); state->dualu[i] = arraymin(state->colhelper, n2); } for (j = 0; j < n2; j++) { for (i = 0; i < n1; i++) state->rowhelper[i] = COST(i, j, state, n1) - state->dualu[i]; state->dualv[j] = arraymin(state->rowhelper, n1); } for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { if (COST(i, j, state, n1) == state->dualu[i] + state->dualv[j]) ARC(i, j, state, n1) = 1; else ARC(i, j, state, n1) = 0; } } } /* Maximize the flow on the (zeros of the) current cost matrix */ void maxflow(State *state) { int breakthrough; /* col. no. in which breakthrough occurs */ unsigned char labelfound = 1; /* 0 if no more labels can be found */ int i,j,n1,n2; n1 = state->n1; n2 = state->n2; while (labelfound == 1) { breakthrough = -1; /* initialize labels */ for (i = 0; i < n1; i++) { if (state->rowsurplus[i] > 0) { state->rowlab[i] = -5; state->rowflow[i] = state->rowsurplus[i]; } else { state->rowlab[i] = -1; /* setting rowflow to zero isn't necessary! */ } } for (j = 0; j < n2; j++) state->collab[j] = -1; /* setting colflow to zero isn't necessary! */ /* -1 means "no index", -5 means "source label" (rows only) */ while (labelfound == 1 && breakthrough == -1) { labelfound = 0; /* label unlabeled column j that permits flow from some labeled row i */ /* ("permits flow" means arcmatrix[i][j] = 1). 
Do so for every j */ for (i = 0; i < n1; i++) { if (state->rowlab[i] != -1) { for (j = 0; j < n2; j++) { if (ARC(i, j, state, n1) == 1 && state->collab[j] == -1) { state->collab[j] = i; state->colflow[j] = state->rowflow[i]; labelfound = 1; if (state->colsurplus[j] > 0 && breakthrough == -1) breakthrough = j; } } } } /* label unlabeled row i that already sends flow to some labeled col j */ /* ("already sends" means flowmatrix[i][j] > 0). Do so for every i */ for (j = 0; j < n2; j++) { if (state->collab[j] != -1) { for (i = 0; i < n1; i++) { if (FLOW(i, j, state, n1) > 0 && state->rowlab[i] == -1) { state->rowlab[i] = j; state->rowflow[i] = MIN(state->colflow[j],FLOW(i, j, state, n1)); labelfound = 1; } } } } } if (breakthrough != -1) augmentflow(breakthrough, state); } } /* Update the dual variables (called if solution of restricted primal is not feasible for the original problem): determine the minimum over the submatrix given by all labeled rows and unlabeled columns, and subtract it from all labeled rows and add it to all labeled columns. 
*/ void updateduals(State *state) { int i,j,n1,n2,mini; int count = 0; n1 = state->n1; n2 = state->n2; for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { if (state->rowlab[i] != -1 && state->collab[j] == -1) { state->collectvals[count] = COST(i, j, state, n1) - state->dualu[i] - state->dualv[j]; count++; } } } mini = arraymin(state->collectvals, count); for (i = 0; i < n1; i++) { if (state->rowlab[i] != -1) state->dualu[i] += mini; } for (j = 0; j < n2; j++){ if (state->collab[j] != -1) state->dualv[j] -= mini; } for (i = 0; i < n1; i++) { for (j = 0; j < n2; j++) { if (COST(i, j, state, n1) == state->dualu[i] + state->dualv[j]) ARC(i, j, state, n1) = 1; else ARC(i, j, state, n1) = 0; } } } /* Augment the flow on the graph given by arcmatrix (by aug) according to the row and column labels starting in column startcol */ /* Adjust the surpluses while we're at it (first row and last col have -aug) */ void augmentflow(int startcol, State *state) { int k,l,aug,n1; /* int i,j,k,l,aug,n1,n2; */ n1 = state->n1; l = startcol; aug = MIN(state->colflow[l], state->colsurplus[l]); state->colsurplus[l] -= aug; k = state->collab[l]; FLOW(k, l, state, n1) += aug; l = state->rowlab[k]; while (l != -5) { FLOW(k, l, state, n1) -= aug; k = state->collab[l]; FLOW(k, l, state, n1) += aug; l = state->rowlab[k]; } state->rowsurplus[k] -= aug; } spatstat/src/distmapbin.c0000755000176000001440000000644412252324034015242 0ustar ripleyusers/* distmapbin.c Distance transform of a discrete binary image (8-connected path metric) $Revision: 1.6 $ $Date: 2011/11/20 03:34:16 $ */ #include #include "raster.h" #include void dist_to_bdry(); void shape_raster(); void distmap_bin(in, dist) Raster *in; /* input: binary image */ Raster *dist; /* output: distance to nearest point */ /* rasters must have been dimensioned by shape_raster() and must all have identical dimensions and margins */ { int j,k; double d, dnew; double xstep, ystep, diagstep, huge; int rmin, rmax, cmin, cmax; /* distances between 
neighbouring pixels */ xstep = in->xstep; ystep = in->ystep; diagstep = sqrt(xstep * xstep + ystep * ystep); if(xstep < 0) xstep = -xstep; if(ystep < 0) ystep = -ystep; /* effectively infinite distance */ huge = 2.0 * Distance(dist->xmin,dist->ymin,dist->xmax,dist->ymax); /* image boundaries */ rmin = in->rmin; rmax = in->rmax; cmin = in->cmin; cmax = in->cmax; #define DISTANCE(ROW, COL) Entry(*dist, ROW, COL, double) #define MASKTRUE(ROW, COL) (Entry(*in, ROW, COL, int) != 0) #define MASKFALSE(ROW, COL) (Entry(*in, ROW, COL, int) == 0) #define UPDATE(D, ROW, COL, STEP) \ dnew = STEP + DISTANCE(ROW, COL); \ if(D > dnew) D = dnew /* initialise edges to boundary condition */ for(j = rmin-1; j <= rmax+1; j++) { DISTANCE(j, cmin-1) = (MASKTRUE(j, cmin-1)) ? 0.0 : huge; DISTANCE(j, cmax+1) = (MASKTRUE(j, cmax+1)) ? 0.0 : huge; } for(k = cmin-1; k <= cmax+1; k++) { DISTANCE(rmin-1, k) = (MASKTRUE(rmin-1, k)) ? 0.0 : huge; DISTANCE(rmax+1, k) = (MASKTRUE(rmax+1, k)) ? 0.0 : huge; } /* forward pass */ for(j = rmin; j <= rmax; j++) { R_CheckUserInterrupt(); for(k = cmin; k <= cmax; k++) { if(MASKTRUE(j, k)) d = DISTANCE(j, k) = 0.0; else { d = huge; UPDATE(d, j-1, k-1, diagstep); UPDATE(d, j-1, k, ystep); UPDATE(d, j-1, k+1, diagstep); UPDATE(d, j, k-1, xstep); DISTANCE(j,k) = d; } } } /* backward pass */ for(j = rmax; j >= rmin; j--) { R_CheckUserInterrupt(); for(k = cmax; k >= cmin; k--) { if(MASKFALSE(j,k)) { d = DISTANCE(j,k); UPDATE(d, j+1, k+1, diagstep); UPDATE(d, j+1, k, ystep); UPDATE(d, j+1, k-1, diagstep); UPDATE(d, j, k+1, xstep); DISTANCE(j,k) = d; } } } } /* R interface */ void distmapbin(xmin, ymin, xmax, ymax, nr, nc, in, distances, boundary) double *xmin, *ymin, *xmax, *ymax; /* x, y dimensions */ int *nr, *nc; /* raster dimensions EXCLUDING margin of 1 on each side */ int *in; /* input: binary image */ double *distances; /* output: distance to nearest point */ double *boundary; /* output: distance to boundary of rectangle */ /* all images must have 
identical dimensions including a margin of 1 on each side */ { Raster data, dist, bdist; shape_raster( &data, (void *) in, *xmin,*ymin,*xmax,*ymax, *nr+2, *nc+2, 1, 1); shape_raster( &dist, (void *) distances,*xmin,*ymin,*xmax,*ymax, *nr+2,*nc+2,1,1); shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax, *nr+2,*nc+2,1,1); distmap_bin(&data, &dist); dist_to_bdry(&bdist); } spatstat/src/dist2.h0000755000176000001440000000412012252324034014127 0ustar ripleyusers/* dist2.h External declarations for the functions defined in dist2.c and In-line cpp macros for similar purposes $Revision: 1.18 $ $Date: 2013/02/21 00:24:27 $ */ double dist2(double u, double v, double x, double y, double *period); double dist2either(double u, double v, double x, double y, double *period); int dist2thresh(double u, double v, double x, double y, double *period, double r2); int dist2Mthresh(double u, double v, double x, double y, double *period, double r2); /* Efficient macros to test closeness of points */ /* These must be declared (note: some files e.g. straush.c use 'RESIDUE' explicitly) */ #define DECLARE_CLOSE_VARS \ register double DX, DY, DXP, DYP, RESIDUE #define DECLARE_CLOSE_D2_VARS \ register double DX, DY, DXP, DYP, DX2 #define CLOSE(U,V,X,Y,R2) \ ((DX = X - U), \ (RESIDUE = R2 - DX * DX), \ ((RESIDUE > 0.0) && \ ((DY = Y - V), (RESIDUE > DY * DY)))) #define CLOSE_D2(U,V,X,Y,R2,D2) \ ((DX = X - U), \ (DX2 = DX * DX), \ (DX2 < R2) && (((DY = Y - V), \ (D2 = DX2 + DY * DY), \ (D2 < R2)))) /* The following calculates X mod P, but it works only if X \in [-P, P] so that X is the difference between two values that lie in an interval of length P */ #define CLOSE_PERIODIC(U,V,X,Y,PERIOD,R2) \ ((DX = X - U), \ (DX = (DX < 0.0) ? -DX : DX), \ (DXP = (PERIOD)[0] - DX), \ (DX = (DX < DXP) ? DX : DXP), \ (RESIDUE = R2 - DX * DX), \ ((RESIDUE > 0.0) && ((DY = Y - V), \ (DY = (DY < 0.0) ? -DY : DY), \ (DYP = (PERIOD)[1] - DY), \ (DY = (DY < DYP) ? 
DY : DYP), \ (RESIDUE > DY * DY) ))) #define CLOSE_PERIODIC_D2(U,V,X,Y,PERIOD,R2,D2) \ ((DX = X - U), \ (DX = (DX < 0.0) ? -DX : DX), \ (DXP = (PERIOD)[0] - DX), \ (DX = (DX < DXP) ? DX : DXP), \ (D2 = DX * DX), \ ((D2 < R2) && ((DY = Y - V), \ (DY = (DY < 0.0) ? -DY : DY), \ (DYP = (PERIOD)[1] - DY), \ (DY = (DY < DYP) ? DY : DYP), \ (D2 += DY * DY), \ (D2 < R2) ))) spatstat/src/dgs.c0000755000176000001440000000505612252324034013663 0ustar ripleyusers#include #include #include #include "methas.h" #include "dist2.h" #include "constants.h" /* Conditional intensity computation for Diggle-Gates-Stibbard process */ /* Conditional intensity function for a pairwise interaction point process with interaction function as given by e(t) = sin^2(pi*t/2*rho) for t < rho = 1 for t >= rho (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 74, 1987, pages 763 -- 770.) */ #define PION2 M_PI_2 /* pi/2 defined in Rmath.h */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Dgs { double rho; double rho2; double pion2rho; double *period; int per; } Dgs; /* initialiser function */ Cdata *dgsinit(state, model, algo) State state; Model model; Algor algo; { Dgs *dgs; /* allocate storage */ dgs = (Dgs *) R_alloc(1, sizeof(Dgs)); /* Interpret model parameters*/ dgs->rho = model.ipar[0]; dgs->period = model.period; /* constants */ dgs->rho2 = pow(dgs->rho, 2); dgs->pion2rho = PION2/dgs->rho; /* periodic boundary conditions? 
*/ dgs->per = (model.period[0] > 0.0); return((Cdata *) dgs); } /* conditional intensity evaluator */ double dgscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, r2, pairprod, cifval; Dgs *dgs; DECLARE_CLOSE_D2_VARS; dgs = (Dgs *) cdata; r2 = dgs->rho2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = pairprod = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(dgs->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],dgs->period,r2,d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } } if(ixp1 < npts) { for(j=ixp1; jperiod,r2,d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], r2, d2)) pairprod *= sin(dgs->pion2rho * sqrt(d2)); } } if(ixp1 < npts) { for(j=ixp1; jpion2rho * sqrt(d2)); } } } /* sin to sin^2 */ cifval = pairprod * pairprod; return cifval; } Cifns DgsCifns = { &dgsinit, &dgscif, (updafunptr) NULL, NO}; spatstat/src/distan3.c0000755000176000001440000002436312252324034014455 0ustar ripleyusers/* distan3.c Distances between pairs of 3D points $Revision: 1.3 $ $Date: 2013/11/03 03:34:15 $ D3pairdist Pairwise distances D3pair2dist Pairwise distances squared D3pairPdist Pairwise distances with periodic correction D3pairP2dist Pairwise distances squared, with periodic correction D3crossdist Pairwise distances for two sets of points D3cross2dist Pairwise distances squared, for two sets of points D3crossPdist Pairwise distances for two sets of points, periodic correction matchxyz Find matches between two sets of points */ #include /* #include */ double sqrt(); void D3pairdist(n, x, y, z, squared, d) /* inputs */ int *n; double *x, *y, *z; int *squared; /* output */ double *d; { void D3pair1dist(), D3pair2dist(); if(*squared == 0) { D3pair1dist(n, 
x, y, z, d); } else { D3pair2dist(n, x, y, z, d); } } void D3pair1dist(n, x, y, z, d) /* inputs */ int *n; double *x, *y, *z; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dist = sqrt( dx * dx + dy * dy + dz * dz ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } /* squared distances */ void D3pair2dist(n, x, y, z, d) /* inputs */ int *n; double *x, *y, *z; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dist; npoints = *n; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dist = dx * dx + dy * dy + dz * dz; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } void D3crossdist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto; int *squared; /* output */ double *d; { void D3cross1dist(), D3cross2dist(); if(*squared == 0) { D3cross1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d); } else { D3cross2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d); } } void D3cross1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz; nf = *nfrom; nt = *nto; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, 
dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; *dptr = sqrt( dx * dx + dy * dy + dz * dz ); } } } /* squared distances */ void D3cross2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz; nf = *nfrom; nt = *nto; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; *dptr = dx * dx + dy * dy + dz * dz; } } } /* distances with periodic correction */ void D3pairPdist(n, x, y, z, xwidth, yheight, zdepth, squared, d) /* inputs */ int *n; double *x, *y, *z, *xwidth, *yheight, *zdepth; int *squared; /* output */ double *d; { void D3pairP1dist(), D3pairP2dist(); if(*squared == 0) { D3pairP1dist(n, x, y, z, xwidth, yheight, zdepth, d); } else { D3pairP2dist(n, x, y, z, xwidth, yheight, zdepth, d); } } void D3pairP1dist(n, x, y, z, xwidth, yheight, zdepth, d) /* inputs */ int *n; double *x, *y, *z, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, dist, wide, high, deep; npoints = *n; wide = *xwidth; high = *yheight; deep = *zdepth; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dz + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < 
dz2p) dz2p = dz2; dist = sqrt( dx2p + dy2p + dz2p ); /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } /* same function without the sqrt */ void D3pairP2dist(n, x, y, z, xwidth, yheight, zdepth, d) /* inputs */ int *n; double *x, *y, *z, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, npoints; double *dp; double xi, yi, zi, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, dist, wide, high, deep; npoints = *n; wide = *xwidth; high = *yheight; deep = *zdepth; /* set d[0,0] = 0 */ *d = 0.0; for (i=1; i < npoints; i++) { xi = x[i]; yi = y[i]; zi = z[i]; /* point at the start of column i */ dp = d + i * npoints; /* set diagonal to zero */ dp[i] = 0.0; for (j=0; j < i; j++) { dx = x[j] - xi; dy = y[j] - yi; dz = z[j] - zi; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dz + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dist = dx2p + dy2p + dz2p; /* upper triangle */ *dp = dist; ++dp; /* lower triangle */ d[ j * npoints + i] = dist; } } } void D3crossPdist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, squared, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth; int *squared; /* output */ double *d; { void D3crossP1dist(), D3crossP2dist(); if(*squared == 0) { D3crossP1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d); } else { D3crossP2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d); } } void D3crossP1dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, 
*xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, wide, high, deep; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; deep = *zdepth; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dy + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; *dptr = sqrt( dx2p + dy2p + dz2p ); } } } void D3crossP2dist(nfrom, xfrom, yfrom, zfrom, nto, xto, yto, zto, xwidth, yheight, zdepth, d) /* inputs */ int *nto, *nfrom; double *xfrom, *yfrom, *zfrom, *xto, *yto, *zto, *xwidth, *yheight, *zdepth; /* output */ double *d; { int i, j, nf, nt; double *dptr; double xj, yj, zj, dx, dy, dz, dx2, dy2, dz2, dx2p, dy2p, dz2p, wide, high, deep; nf = *nfrom; nt = *nto; wide = *xwidth; high = *yheight; deep = *zdepth; dptr = d; for (j=0; j < nt; j++) { xj = xto[j]; yj = yto[j]; zj = zto[j]; for(i = 0; i < nf; i++, dptr++) { dx = xj - xfrom[i]; dy = yj - yfrom[i]; dz = zj - zfrom[i]; dx2p = dx * dx; dy2p = dy * dy; dz2p = dz * dz; dx2 = (dx - wide) * (dx - wide); dy2 = (dy - high) * (dy - high); dz2 = (dz - deep) * (dz - deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; dx2 = (dx + wide) * (dx + wide); dy2 = (dy + high) * (dy + high); dz2 = (dy + deep) * (dz + deep); if(dx2 < dx2p) dx2p = dx2; if(dy2 < dy2p) dy2p = dy2; if(dz2 < dz2p) dz2p = dz2; *dptr = dx2p + dy2p + dz2p; } } } /* matchxyz Find matches between two lists of points */ void matchxyz(na, xa, ya, za, nb, xb, yb, zb, match) /* 
inputs */ int *na, *nb; double *xa, *ya, *za, *xb, *yb, *zb; /* output */ int *match; { int i, j, Na, Nb; double xai, yai, zai; Na = *na; Nb = *nb; for (i=1; i < Na; i++) { xai = xa[i]; yai = ya[i]; zai = za[i]; match[i] = 0; for (j=0; j < Nb; j++) if(xai == xb[j] && yai == yb[j] && zai == zb[i]) { match[i] = j; break; } } } spatstat/src/Knone.c0000644000176000001440000000154712252324034014156 0ustar ripleyusers#include #include #include /* Knone.c Efficient computation of uncorrected estimates of K for large datasets KnoneI() Estimates K function, returns integer numerator KnoneD() Estimates K function, returns double precision numerator Kwnone() Estimates Kinhom, returns double precision numerator Functions require (x,y) data to be sorted in ascending order of x and expect r values to be equally spaced and starting at zero $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ */ #undef WEIGHTED #define FNAME KnoneI #define OUTTYPE int #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME KnoneD #define OUTTYPE double #include "Knone.h" #undef FNAME #undef OUTTYPE #define FNAME Kwnone #define WEIGHTED #define OUTTYPE double #include "Knone.h" spatstat/src/PerfectDGS.h0000644000176000001440000001231512252324034015032 0ustar ripleyusers // ........................... Diggle-Gates-Stibbard process ................ 
// $Revision: 1.3 $ $Date: 2012/03/10 11:22:50 $ #ifndef PI #define PI 3.14159265358979 #endif class DgsProcess : public PointProcess { public: double beta, rho, rhosquared; DgsProcess(double xmin, double xmax, double ymin, double ymax, double b, double r); ~DgsProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); }; DgsProcess::DgsProcess(double xmin, double xmax, double ymin, double ymax, double b, double r) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; rho = r; rhosquared = rho * rho; InteractionRange = rho; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double DgsProcess::Interaction(double dsquared) { double rtn, dist, t; rtn = 1; if(dsquared < rhosquared) { dist = sqrt(dsquared); t = sin((PI/2) * dist/rho); rtn = t * t; } return(rtn); } void DgsProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void DgsProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating DgsProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating DgsProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; TempPoint->R = slumptal(); //Rprintf("Generating DgsProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } // ........................... Interface to R .......................... 
extern "C" {
  SEXP PerfectDGS(SEXP beta,
		  SEXP rho,
		  SEXP xrange,
		  SEXP yrange) {
    // input parameters
    double Beta, Rho, Xmin, Xmax, Ymin, Ymax;
    double *Xrange, *Yrange;
    // internal
    int xcells, ycells;
    long int StartTime, EndTime;
    // output
    int noutmax;
    SEXP xout, yout, nout, out;
    double *xx, *yy;
    int *nn;

    // protect arguments from garbage collector
    PROTECT(beta   = AS_NUMERIC(beta));
    PROTECT(rho    = AS_NUMERIC(rho));
    PROTECT(xrange = AS_NUMERIC(xrange));
    PROTECT(yrange = AS_NUMERIC(yrange));
    // that's 4 protected objects

    // extract arguments
    Beta   = *(NUMERIC_POINTER(beta));
    Rho    = *(NUMERIC_POINTER(rho));
    Xrange = NUMERIC_POINTER(xrange);
    Xmin   = Xrange[0];
    Xmax   = Xrange[1];
    Yrange = NUMERIC_POINTER(yrange);
    Ymin   = Yrange[0];
    Ymax   = Yrange[1];

    // compute cell array size
    // FIX: the argument extraction and cell-size computation above were
    // duplicated verbatim (the first copy even left ycells unclamped
    // before everything was recomputed).  The redundant first copy has
    // been removed; behaviour is unchanged.
    xcells = (int) floor((Xmax-Xmin)/ Rho);
    if(xcells > 9) xcells = 9;
    if(xcells < 1) xcells = 1;
    ycells = (int) floor((Ymax-Ymin)/ Rho);
    if(ycells > 9) ycells = 9;
    if(ycells < 1) ycells = 1;

#ifdef DBGS
    Rprintf("xcells %d   ycells %d\n",xcells,ycells);
    Rprintf("Initialising\n");
#endif

    // Initialise Diggle-Gates-Stibbard point process
    DgsProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax,Beta,Rho);

    // Initialise point pattern
    Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells);
    // parameters: min x, max x, min y, max y, "cells" in x and y direction
    // used for speeding up neighbour counting, 9 is max here

#ifdef DBGS
    Rprintf("Initialisation complete\n");
#endif

    // Synchronise random number generator
    GetRNGstate();

    // Initialise perfect sampler
    Sampler PerfectSampler(&ExampleProcess);

    // Perform perfect sampling
    PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime);

    // Synchronise random number generator
PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); // pack up into output list PROTECT(out = NEW_LIST(3)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); // return UNPROTECT(8); // 4 arguments plus xout, yout, nout, out return(out); } } spatstat/src/sphefrac.c0000755000176000001440000000622612252324034014701 0ustar ripleyusers#include #include #include "geom3.h" /* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ Routine for calculating surface area of sphere intersected with box # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2013 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif static double pi = 3.141592653589793; /* Factor of 4 * pi * r * r IS ALREADY TAKEN OUT */ double sphesfrac(point, box, r) Point *point; Box *box; double r; { double sum, p[4], q[4]; double a1(), a2(), a3(); int i, j; p[1] = point->x - box->x0; p[2] = point->y - box->y0; p[3] = point->z - box->z0; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += a1(p[i],r) + a1(q[i],r); #ifdef DEBUG Rprintf("i = %d, a1 = %f, a1 = %f\n", i, a1(p[i],r), a1(q[i],r)); #endif } DBG("Past a1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= a2(p[i], p[j], r) + a2(p[i], q[j], r) + a2(q[i], p[j], r) + a2(q[i], q[j], r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past a2", sum) sum += a3(p[1], p[2], p[3], r) + a3(p[1], p[2], q[3], r); DBG("sum", sum) sum += a3(p[1], q[2], p[3], r) + a3(p[1], q[2], q[3], r); DBG("sum", sum) sum += a3(q[1], p[2], p[3], r) + a3(q[1], p[2], q[3], r); DBG("sum", sum) sum += a3(q[1], q[2], p[3], r) + a3(q[1], q[2], q[3], r); DBG("Past a3", sum) return(1 - sum); } double a1(t, r) double t, r; { /* This is the function A1 divided by 4 pi r^2 */ if(t >= r) return(0.0); return((1 - t/r) * 0.5); } double a2(t1, t2, r) double t1, t2, r; { double c2(); /* This is A2 divided by 4 pi r^2 because c2 is C divided by pi */ return(c2( t1 / r, t2 / r) / 2.0); } double a3(t1, t2, t3, r) double t1, t2, t3, r; { double c3(); /* This is A3 divided by 4 pi r^2 because c3 is C divided by pi */ return(c3(t1 / r, t2 / r, t3 / r) / 4.0); } double c2(a, b) double a, b; { double z, z2; double c2(); /* This is the function C(a, b, 0) divided by pi - assumes a, b > 0 */ if( ( z2 = 1.0 - a * a - b * b) < 0.0 ) return(0.0); z = sqrt(z2); return((atan2(z, a * b) - a * atan2(z, b) - b * atan2(z, a)) / pi); } double c3(a, b, c) double 
a, b, c; { double za, zb, zc, sum; /* This is C(a,b,c) divided by pi. Arguments assumed > 0 */ if(a * a + b * b + c * c >= 1.0) return(0.0); za = sqrt(1 - b * b - c * c); zb = sqrt(1 - a * a - c * c); zc = sqrt(1 - a * a - b * b); sum = atan2(zb, a * c) + atan2(za, b * c) + atan2(zc, a * b) - a * atan2(zb, c) + a * atan2(b, zc) - b * atan2(za, c) + b * atan2(a, zc) - c * atan2(zb, a) + c * atan2(b, za); return(sum / pi - 1); } spatstat/src/connect.c0000755000176000001440000000627112252324034014537 0ustar ripleyusers/* connect.c Connected component transforms cocoImage: connected component transform of a discrete binary image (8-connected topology) cocoGraph: connected component labels for a discrete graph specified by a list of edges $Revision: 1.8 $ $Date: 2013/05/27 02:09:10 $ */ #include #include #include #include #include "raster.h" void shape_raster(); #include "yesno.h" /* workhorse function for cocoImage */ void comcommer(im) Raster *im; /* raster must have been dimensioned by shape_raster() */ /* Pixel values assumed to be 0 in background, and distinct nonzero integers in foreground */ { int j,k; int rmin, rmax, cmin, cmax; int label, curlabel, minlabel; int nchanged; /* image boundaries */ rmin = im->rmin; rmax = im->rmax; cmin = im->cmin; cmax = im->cmax; #define ENTRY(ROW, COL) Entry(*im, ROW, COL, int) #define UPDATE(ROW,COL,BEST,NEW) \ NEW = ENTRY(ROW, COL); \ if(NEW != 0 && NEW < BEST) \ BEST = NEW nchanged = 1; while(nchanged >0) { nchanged = 0; R_CheckUserInterrupt(); for(j = rmin; j <= rmax; j++) { for(k = cmin; k <= cmax; k++) { curlabel = ENTRY(j, k); if(curlabel != 0) { minlabel = curlabel; UPDATE(j-1, k-1, minlabel, label); UPDATE(j-1, k, minlabel, label); UPDATE(j-1, k+1, minlabel, label); UPDATE(j, k-1, minlabel, label); UPDATE(j, k, minlabel, label); UPDATE(j, k+1, minlabel, label); UPDATE(j+1, k-1, minlabel, label); UPDATE(j+1, k, minlabel, label); UPDATE(j+1, k+1, minlabel, label); if(minlabel < curlabel) { ENTRY(j, k) = minlabel; 
nchanged++; } } } } } } void cocoImage(mat, nr, nc) int *mat; /* input: binary image */ int *nr, *nc; /* raster dimensions EXCLUDING margin of 1 on each side */ { Raster im; shape_raster( &im, (void *) mat, (double) 1, (double) 1, (double) *nc, (double) *nr, *nr+2, *nc+2, 1, 1); comcommer(&im); } void cocoGraph(nv, ne, ie, je, label, status) /* inputs */ int *nv; /* number of graph vertices */ int *ne; /* number of edges */ int *ie, *je; /* vectors of indices of ends of each edge */ /* output */ int *label; /* vector of component labels for each vertex */ /* Component label is lowest serial number of any vertex in the connected component */ int *status; /* 0 if OK, 1 if overflow */ { int Nv, Ne, i, j, k, niter, labi, labj, changed; Nv = *nv; Ne = *ne; /* initialise labels */ for(k = 0; k < Nv; k++) label[k] = k; for(niter = 0; niter < Nv; niter++) { R_CheckUserInterrupt(); changed = NO; for(k = 0; k < Ne; k++) { i = ie[k]; j = je[k]; labi = label[i]; labj = label[j]; if(labi < labj) { label[j] = labi; changed = YES; } else if(labj < labi) { label[i] = labj; changed = YES; } } if(!changed) { /* algorithm has converged */ *status = 0; return; } } /* error exit */ *status = 1; return; } spatstat/src/mhv1.h0000644000176000001440000000040112252324034013750 0ustar ripleyusers/* mhv1.h marked or unmarked simulation */ #undef MH_MARKED if(marked) { /* marked process */ #define MH_MARKED YES #include "mhv2.h" #undef MH_MARKED } else { /* unmarked process */ #define MH_MARKED NO #include "mhv2.h" #undef MH_MARKED } spatstat/src/knndist.h0000644000176000001440000000744312252324034014564 0ustar ripleyusers/* knndist.h Code template for C functions supporting knndist and knnwhich THE FOLLOWING CODE ASSUMES THAT y IS SORTED IN ASCENDING ORDER This code is #included multiple times in knndistance.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and 
WHICH may be defined. Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2012 Licence: GPL >= 2 $Revision: 1.3 $ $Date: 2013/05/27 02:09:10 $ */ void FNAME(n, kmax, x, y, #ifdef DIST nnd, #endif #ifdef WHICH nnwhich, #endif huge) /* inputs */ int *n, *kmax; double *x, *y, *huge; /* output matrices (npoints * kmax) in ROW MAJOR order */ #ifdef DIST double *nnd; #endif #ifdef WHICH int *nnwhich; #endif { int npoints, maxchunk, nk, nk1, i, k, k1, left, right, unsorted; double d2, d2minK, xi, yi, dx, dy, dy2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif hu = *huge; hu2 = hu * hu; npoints = *n; nk = *kmax; nk1 = nk - 1; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints) maxchunk = npoints; for(; i < maxchunk; i++) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances and indices */ d2minK = hu2; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } xi = x[i]; yi = y[i]; /* search backward */ for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L"); #endif dy = yi - y[left]; dy2 = dy * dy; if(dy2 > d2minK) break; dx = x[left] - xi; d2 = dx * dx + dy2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = left; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } /* search forward */ for(right = i + 1; right < npoints; ++right) 
{ #ifdef SPATSTAT_DEBUG Rprintf("R"); #endif dy = y[right] - yi; dy2 = dy * dy; if(dy2 > d2minK) break; dx = x[right] - xi; d2 = dx * dx + dy2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = right; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } /* search finished for point i */ #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[nk * i + k] = which[k] + 1; /* R indexing */ #endif } /* end of i loop */ } } } spatstat/src/mhsnoopdef.h0000644000176000001440000000104112252324034015240 0ustar ripleyusers/* mhsnoopdef.h Define structure 'Snoop' containing visual debugger parameters and state $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ */ #ifndef R_INTERNALS_H_ #include #endif typedef struct Snoop { int active; /* true or false */ int nextstop; /* jump to iteration number 'nextstop' */ int nexttype; /* jump to the next proposal of type 'nexttype' */ SEXP env; /* environment for exchanging data with R */ SEXP expr; /* callback expression for visual debugger */ } Snoop; #define NO_TYPE -1 spatstat/src/inxyp.c0000755000176000001440000000301412252324034014245 0ustar ripleyusers/* inxyp.c Point-in-polygon test NB: relative to other versions, 'score' is multiplied by 2 (and is an integer) $Revision: 1.7 $ $Date: 2013/09/18 04:20:13 $ */ #include #include "chunkloop.h" void inxyp(x,y,xp,yp,npts,nedges,score,onbndry) /* inputs */ double *x, *y; /* points to be tested */ int *npts; double *xp, *yp; /* polygon vertices */ int *nedges; /* outputs */ int *score; int *onbndry; { 
int i, j, Npts, Nedges, Ne1, contrib, maxchunk; double x0, y0, x1, y1, dx, dy, xj, yj, xcrit, ycrit; Npts = *npts; Nedges = *nedges; Ne1 = Nedges - 1; x0 = xp[Ne1]; y0 = yp[Ne1]; OUTERCHUNKLOOP(i, Nedges, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nedges, maxchunk, 16384) { /* visit edge (x0,y0) -> (x1,y1) */ x1 = xp[i]; y1 = yp[i]; dx = x1 - x0; dy = y1 - y0; for(j = 0; j < Npts; j++) { xj = x[j]; yj = y[j]; xcrit = (xj - x0) * (xj - x1); if(xcrit <= 0) { if(xcrit == 0) { contrib = 1; } else { contrib = 2; } ycrit = yj * dx - xj * dy + x0 * dy - y0 * dx; if(dx < 0) { if(ycrit >= 0) score[j] += contrib; onbndry[j] = onbndry[j] | (ycrit == 0); } else if(dx > 0) { if(ycrit < 0) score[j] -= contrib; onbndry[j] = onbndry[j] | (ycrit == 0); } else { if(xj == x0) ycrit = (yj - y0) * (yj - y1); onbndry[j] = onbndry[j] | (ycrit <= 0); } } } /* next edge */ x0 = x1; y0 = y1; } } } spatstat/src/Ediggatsti.c0000755000176000001440000000325212252324034015166 0ustar ripleyusers#include #include #include #include "chunkloop.h" #include "constants.h" /* Ediggatsti.c $Revision: 1.2 $ $Date: 2012/03/28 05:55:38 $ C implementation of 'eval' for DiggleGatesStibbard interaction Assumes point patterns are sorted in increasing order of x coordinate */ void Ediggatsti(nnsource, xsource, ysource, idsource, nntarget, xtarget, ytarget, idtarget, rrho, values) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget; int *idsource, *idtarget; double *rrho; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft, idsourcej; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rho, rho2, coef, product; nsource = *nnsource; ntarget = *nntarget; rho = *rrho; rho2 = rho * rho; coef = M_PI_2/rho; if(nsource == 0 || ntarget == 0) return; ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 65536) { product = 1; xsourcej = xsource[j]; ysourcej = ysource[j]; idsourcej 
= idsource[j]; /* adjust starting position */ xleft = xsourcej - rho; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft until dx > rho */ for(i=ileft; i < ntarget; i++) { dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > rho2) break; if(idtarget[i] != idsourcej) { dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= rho2) product *= sin(sqrt(d2) * coef); } } values[j] = log(product * product); } } } spatstat/src/lookup.c0000755000176000001440000001170612252324034014416 0ustar ripleyusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity function for a general pairwise interaction process with the pairwise interaction function given by a ``lookup table'', passed through the par argument. */ /* For debugging code, insert the line: #define DEBUG 1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Lookup { int nlook; int equisp; double delta; double rmax; double r2max; double *h; /* values of pair interaction */ double *r; /* r values if not equally spaced */ double *r2; /* r^2 values if not equally spaced */ double *period; int per; } Lookup; /* initialiser function */ Cdata *lookupinit(state, model, algo) State state; Model model; Algor algo; { int i, nlook; double ri; Lookup *lookup; lookup = (Lookup *) R_alloc(1, sizeof(Lookup)); /* Interpret model parameters*/ lookup->nlook = nlook = model.ipar[0]; lookup->equisp = (model.ipar[1] > 0); lookup->delta = model.ipar[2]; lookup->rmax = model.ipar[3]; lookup->r2max = pow(lookup->rmax, 2); /* periodic boundary conditions? */ lookup->period = model.period; lookup->per = (model.period[0] > 0.0); /* If the r-values are equispaced only the h vector is included in ``par'' after ``rmax''; the entries of h then consist of h[0] = par[5], h[1] = par[6], ..., h[k-1] = par[4+k], ..., h[nlook-1] = par[4+nlook]. 
If the r-values are NOT equispaced then the individual r values are needed and these are included as r[0] = par[5+nlook], r[1] = par[6+nlook], ..., r[k-1] = par[4+nlook+k], ..., r[nlook-1] = par[4+2*nlook]. */ lookup->h = (double *) R_alloc((size_t) nlook, sizeof(double)); for(i = 0; i < nlook; i++) lookup->h[i] = model.ipar[4+i]; if(!(lookup->equisp)) { lookup->r = (double *) R_alloc((size_t) nlook, sizeof(double)); lookup->r2 = (double *) R_alloc((size_t) nlook, sizeof(double)); for(i = 0; i < nlook; i++) { ri = lookup->r[i] = model.ipar[4+nlook+i]; lookup->r2[i] = ri * ri; } } #ifdef DEBUG Rprintf("Exiting lookupinit: nlook=%d, equisp=%d\n", nlook, lookup->equisp); #endif return((Cdata *) lookup); } /* conditional intensity evaluator */ double lookupcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, nlook, k, kk, ix, ixp1, j; double *x, *y; double u, v; double r2max, d2, d, delta, cifval, ux, vy; Lookup *lookup; lookup = (Lookup *) cdata; r2max = lookup->r2max; delta = lookup->delta; nlook = lookup->nlook; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(lookup->equisp) { /* equispaced r values */ if(lookup->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { d = sqrt(dist2(u,v,x[j],y[j],lookup->period)); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jperiod)); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { d = hypot(u - x[j], v-y[j]); k = floor(d/delta); if(k < nlook) { if(k < 0) k = 0; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jh[k]; } } } } } else { /* non-equispaced r values */ if(lookup->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = 
dist2(u,v,x[j],y[j],lookup->period); if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jperiod); if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { ux = u - x[j]; vy = v - y[j]; d2 = ux * ux + vy * vy; if(d2 < r2max) { for(kk = 0; kk < nlook && lookup->r2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } if(ixp1 < npts) { for(j=ixp1; jr2[kk] <= d2; kk++) ; k = (kk == 0) ? 0 : kk-1; cifval *= lookup->h[k]; } } } } } return cifval; } Cifns LookupCifns = { &lookupinit, &lookupcif, (updafunptr) NULL, NO}; spatstat/src/straushm.c0000755000176000001440000001550112252324034014750 0ustar ripleyusers#include #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Strauss hardcore process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiStraussHard { int ntypes; double *gamma; /* gamma[i,j] = gamma[i+ntypes*j] for i,j = 0... ntypes-1 */ double *rad; /* rad[i,j] = rad[j+ntypes*i] for i,j = 0... ntypes-1 */ double *hc; /* hc[i,j] = hc[j+ntypes*i] for i,j = 0... 
ntypes-1 */ double *rad2; /* squared radii */ double *hc2; /* squared radii */ double *rad2hc2; /* r^2 - h^2 */ double range2; /* square of interaction range */ double *loggamma; /* logs of gamma[i,j] */ double *period; int *hard; /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */ int *kount; /* space for kounting pairs of each type */ int per; } MultiStraussHard; /* initialiser function */ Cdata *straushminit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2, hard; double g, r, h, r2, h2, logg, range2; MultiStraussHard *multistrausshard; multistrausshard = (MultiStraussHard *) R_alloc(1, sizeof(MultiStraussHard)); multistrausshard->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multistrausshard->gamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->rad = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hc = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multistrausshard->rad2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->rad2hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->loggamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrausshard->hard = (int *) R_alloc((size_t) n2, sizeof(int)); /* Allocate scratch space for counts of each pair of types */ multistrausshard->kount = (int *) R_alloc((size_t) n2, sizeof(int)); /* Copy and process model parameters*/ /* ipar will contain n^2 values of gamma, then n^2 values of r, then n^2 values of h */ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { g = model.ipar[ i + j*ntypes]; r = model.ipar[ n2 + i + j*ntypes]; h = model.ipar[2*n2 + i + j*ntypes]; r2 = r * r; h2 = h * h; hard = (g < DOUBLE_EPS); logg = (hard) ? 
0 : log(g); MAT(multistrausshard->gamma, i, j, ntypes) = g; MAT(multistrausshard->rad, i, j, ntypes) = r; MAT(multistrausshard->hc, i, j, ntypes) = h; MAT(multistrausshard->rad2, i, j, ntypes) = r2; MAT(multistrausshard->hc2, i, j, ntypes) = h2; MAT(multistrausshard->rad2hc2, i, j, ntypes) = r2-h2; MAT(multistrausshard->hard, i, j, ntypes) = hard; MAT(multistrausshard->loggamma, i, j, ntypes) = logg; if(r2 > range2) range2 = r2; } } multistrausshard->range2 = range2; /* periodic boundary conditions? */ multistrausshard->period = model.period; multistrausshard->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multistrausshard); } /* conditional intensity evaluator */ double straushmcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2; int *marks; double *x, *y; double u, v, lg; double d2, cifval; double range2; double *period; MultiStraussHard *multistrausshard; DECLARE_CLOSE_D2_VARS; multistrausshard = (MultiStraussHard *) cdata; range2 = multistrausshard->range2; period = multistrausshard->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multistrausshard->ntypes; #ifdef DEBUG Rprintf("initialising pair counts\n"); #endif /* initialise pair counts */ for(m1 = 0; m1 < ntypes; m1++) for(m2 = 0; m2 < ntypes; m2++) MAT(multistrausshard->kount, m1, m2, ntypes) = 0; /* compile pair counts */ #ifdef DEBUG Rprintf("compiling pair counts\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multistrausshard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < 
MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multistrausshard->rad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) { if(d2 < MAT(multistrausshard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } MAT(multistrausshard->kount, mrk, mrkj, ntypes)++; } } } } } #ifdef DEBUG Rprintf("multiplying cif factors\n"); #endif /* multiply cif value by pair potential */ for(m1 = 0; m1 < ntypes; m1++) { for(m2 = 0; m2 < ntypes; m2++) { kount = MAT(multistrausshard->kount, m1, m2, ntypes); if(MAT(multistrausshard->hard, m1, m2, ntypes)) { if(kount > 0) { cifval = 0.0; return(cifval); } } else { lg = MAT(multistrausshard->loggamma, m1, m2, ntypes); cifval *= exp(lg * kount); } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiStraussHardCifns = { &straushminit, &straushmcif, (updafunptr) NULL, YES}; spatstat/src/idw.c0000755000176000001440000000703412252324034013667 0ustar ripleyusers/* idw.c Inverse-distance weighted smoothing $Revision: 1.8 $ $Date: 2013/05/27 02:09:10 $ */ #include #include #include "chunkloop.h" #define MAT(X,I,J,NROW) (X)[(J) + (NROW) * (I)] /* inverse-distance smoothing from data points onto pixel grid */ void Cidw(x, y, v, n, xstart, xstep, nx, ystart, ystep, ny, power, num, den, rat) double *x, *y, *v; /* data points and values */ int *n; double *xstart, *xstep, *ystart, *ystep; /* pixel grid 
*/ int *nx, *ny; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output arrays - assumed initialised 0 */ { int N, i, Nx, Ny, ix, iy; double xg, yg, x0, dx, y0, dy, pon2, d2, w; N = *n; Nx = *nx; Ny = *ny; x0 = *xstart; y0 = *ystart; dx = *xstep; dy = *ystep; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/d2; MAT(num, ix, iy, Ny) += w * v[i]; MAT(den, ix, iy, Ny) += w; } /* compute ratio */ MAT(rat, ix, iy, Ny) = MAT(num, ix, iy, Ny)/MAT(den, ix, iy, Ny); } } } else { /* general case */ for(ix = 0, xg=x0; ix < Nx; ix++, xg+=dx) { if(ix % 256 == 0) R_CheckUserInterrupt(); for(iy = 0, yg=y0; iy < Ny; iy++, yg+=dy) { /* loop over data points, accumulating numerator and denominator */ for(i = 0; i < N; i++) { d2 = (xg - x[i]) * (xg - x[i]) + (yg - y[i]) * (yg - y[i]); w = 1.0/pow(d2, pon2); MAT(num, ix, iy, Ny) += w * v[i]; MAT(den, ix, iy, Ny) += w; } /* compute ratio */ MAT(rat, ix, iy, Ny) = MAT(num, ix, iy, Ny)/MAT(den, ix, iy, Ny); } } } } /* Leave-one-out IDW at data points only */ void idwloo(x, y, v, n, power, num, den, rat) double *x, *y, *v; /* data points and values */ int *n; double *power; /* exponent for IDW */ double *num, *den, *rat; /* output vectors - assumed initialised 0 */ { int N, i, j, maxchunk; double xi, yi, d2, w, pon2; N = *n; pon2 = (*power)/2.0; if(pon2 == 1.0) { /* slightly faster code when power=2 */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; num[i] += w * v[j]; den[i] += w; } } if(i < N-1) { for(j = i+1; j < N; 
j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/d2; num[i] += w * v[j]; den[i] += w; } } /* compute ratio */ rat[i] = num[i]/den[i]; } } } else { /* general case */ OUTERCHUNKLOOP(i, N, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 16384) { xi = x[i]; yi = y[i]; if(i > 0) { for(j = 0; j < i; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); num[i] += w * v[j]; den[i] += w; } } if(i < N-1) { for(j = i+1; j < N; j++) { d2 = (xi - x[j]) * (xi - x[j]) + (yi - y[j]) * (yi - y[j]); w = 1.0/pow(d2, pon2); num[i] += w * v[j]; den[i] += w; } } /* compute ratio */ rat[i] = num[i]/den[i]; } } } } spatstat/src/knngrid.h0000644000176000001440000001272412252324034014544 0ustar ripleyusers #if (1 == 0) /* knngrid.h Code template for C functions k-nearest neighbours (k > 1) of each grid point THE FOLLOWING CODE ASSUMES THAT POINT PATTERN (xp, yp) IS SORTED IN ASCENDING ORDER OF x COORDINATE This code is #included multiple times in knngrid.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.5 $ $Date: 2013/10/22 01:33:12 $ */ #endif #undef PRINTALOT void FNAME(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ kmax, nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; int *kmax; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int Nxcol, Nyrow; int i, j, ijpos; int Npoints, Nk, Nk1; int mleft, mright, mwhich, lastmwhich, unsorted, k, k1; double X0, Y0, Xstep, Ystep; double d2, d2minK, xj, yi, dx, dy, dx2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif Nxcol = *nx; Nyrow = *ny; Npoints = *np; Nk = *kmax; hu = *huge; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; Nk1 = Nk - 1; hu2 = hu * hu; if(Npoints == 0) return; lastmwhich = 0; /* create space to store the nearest neighbour distances and indices for the current grid point */ d2min = (double *) R_alloc((size_t) Nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) Nk, sizeof(int)); #endif /* loop over pixels */ for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) { R_CheckUserInterrupt(); #ifdef PRINTALOT Rprintf("j=%d, xj=%lf\n", j, xj); #endif for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) { #ifdef PRINTALOT Rprintf("\ti=%d, yi = %lf\n", i, yi); #endif /* initialise nn distances and indices */ d2minK = hu2; for(k = 0; k < Nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } if(lastmwhich < Npoints) { /* search forward from previous nearest neighbour */ for(mright = lastmwhich; mright < Npoints; ++mright) { dx = xp[mright] - xj; dx2 = dx * dx; #ifdef PRINTALOT Rprintf("\t\t%d\n", mright); #endif if(dx2 > d2minK) /* note that dx2 >= d2minK could break too early */ break; dy = yp[mright] - yi; d2 = dy * dy + dx2; if (d2 < d2minK) { #ifdef PRINTALOT Rprintf("\t\t\tNeighbour: d2=%lf\n", d2); #endif /* 
overwrite last entry in list of neighbours */ d2min[Nk1] = d2; mwhich = mright; #ifdef WHICH which[Nk1] = mright; #endif /* bubble sort */ unsorted = YES; for(k = Nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[Nk1]; #ifdef PRINTALOT Rprintf("\t\t\tUpdated d2minK=%lf\n", d2minK); for(k = 0; k < Nk; k++) Rprintf("\t\t\t\td2min[%d]=%lf\n", k, d2min[k]); #ifdef WHICH for(k = 0; k < Nk; k++) Rprintf("\t\t\t\twhich[%d]=%d\n", k, which[k]); #endif #endif } } /* end forward search */ } if(lastmwhich > 0) { /* search backward from previous nearest neighbour */ for(mleft = lastmwhich - 1; mleft >= 0; --mleft) { dx = xj - xp[mleft]; dx2 = dx * dx; #ifdef PRINTALOT Rprintf("\t\t%d\n", mleft); #endif if(dx2 > d2minK) /* note that dx2 >= d2minK could break too early */ break; dy = yp[mleft] - yi; d2 = dy * dy + dx2; if (d2 < d2minK) { #ifdef PRINTALOT Rprintf("\t\t\tNeighbour: d2=%lf\n", d2); #endif /* overwrite last entry in list of neighbours */ mwhich = mleft; d2min[Nk1] = d2; #ifdef WHICH which[Nk1] = mleft; #endif /* bubble sort */ unsorted = YES; for(k = Nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[Nk1]; #ifdef PRINTALOT Rprintf("\t\t\tUpdated d2minK=%lf\n", d2minK); for(k = 0; k < Nk; k++) Rprintf("\t\t\t\td2min[%d]=%lf\n", k, d2min[k]); #ifdef WHICH for(k = 0; k < Nk; k++) Rprintf("\t\t\t\twhich[%d]=%d\n", k, which[k]); #endif #endif } } /* end backward search */ } /* remember index of most recently-encountered neighbour */ lastmwhich = mwhich; #ifdef PRINTALOT 
Rprintf("\t\tlastmwhich=%d\n", lastmwhich); #endif /* copy nn distances for grid point (i, j) to output array nnd[ , i, j] */ ijpos = Nk * (i + j * Nyrow); for(k = 0; k < Nk; k++) { #ifdef DIST nnd[ijpos + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[ijpos + k] = which[k] + 1; /* R indexing */ #endif } /* end of loop over points i */ } } } spatstat/src/strauss.c0000755000176000001440000000474312252324034014614 0ustar ripleyusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Strauss process */ /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct Strauss { double gamma; double r; double loggamma; double r2; double *period; int hard; int per; } Strauss; /* initialiser function */ Cdata *straussinit(state, model, algo) State state; Model model; Algor algo; { /* create storage for model parameters */ Strauss *strauss; strauss = (Strauss *) R_alloc(1, sizeof(Strauss)); /* Interpret model parameters*/ strauss->gamma = model.ipar[0]; strauss->r = model.ipar[1]; /* No longer passed as r^2 */ strauss->r2 = strauss->r * strauss->r; strauss->period = model.period; #ifdef MHDEBUG Rprintf("Initialising Strauss gamma=%lf, r=%lf\n", strauss->gamma, strauss->r); #endif /* is the model numerically equivalent to hard core ? */ strauss->hard = (strauss->gamma < DOUBLE_EPS); strauss->loggamma = (strauss->hard) ? 0 : log(strauss->gamma); /* periodic boundary conditions? 
*/ strauss->per = (model.period[0] > 0.0); return((Cdata *) strauss); } /* conditional intensity evaluator */ double strausscif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, ixp1, j; double *x, *y; double u, v; double r2, cifval; Strauss *strauss; DECLARE_CLOSE_VARS; strauss = (Strauss *) cdata; r2 = strauss->r2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); kount = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(strauss->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],strauss->period, r2)) ++kount; } } if(ixp1 < npts) { for(j=ixp1; jperiod, r2)) ++kount; } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE(u,v,x[j],y[j], r2)) ++kount; } } if(ixp1 < npts) { for(j=ixp1; jhard) { if(kount > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp((strauss->loggamma) * kount); return cifval; } Cifns StraussCifns = { &straussinit, &strausscif, (updafunptr) NULL, NO}; spatstat/src/call3d.c0000755000176000001440000002474712252324034014260 0ustar ripleyusers/* $Revision: 1.5 $ $Date: 2010/10/24 10:57:02 $ R interface Pass data between R and internally-defined data structures # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ #include #include "geom3.h" #include "functable.h" #undef DEBUG #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif void g3one(Point *p, int n, Box *b, Ftable *g); void g3three(Point *p, int n, Box *b, Ftable *g); void g3cen(Point *p, int n, Box *b, H4table *count); void k3trans(Point *p, int n, Box *b, Ftable *k); void k3isot(Point *p, int n, Box *b, Ftable *k); void pcf3trans(Point *p, int n, Box *b, Ftable *pcf, double delta); void pcf3isot(Point *p, int n, Box *b, Ftable *pcf, double delta); void phatminus(Point *p, int n, Box *b, double vside, Itable *count); void phatnaive(Point *p, int n, Box *b, double vside, Itable *count); void p3hat4(Point *p, int n, Box *b, double vside, H4table *count); /* ALLOCATION OF SPACE FOR STRUCTURES/ARRAYS We have defined an alloc() and free() function for each type. However, the free() functions currently do nothing, because we use R_alloc to allocate transient space, which is freed automatically by R. */ Ftable * allocFtable(n) /* allocate function table of size n */ int n; { Ftable *x; x = (Ftable *) R_alloc(1, sizeof(Ftable)); x->n = n; x->f = (double *) R_alloc(n, sizeof(double)); x->num = (double *) R_alloc(n, sizeof(double)); x->denom = (double *) R_alloc(n, sizeof(double)); return(x); } void freeFtable(x) Ftable *x; { } Itable * allocItable(n) int n; { Itable *x; x = (Itable *) R_alloc(1, sizeof(Itable)); x->n = n; x->num = (int *) R_alloc(n, sizeof(int)); x->denom = (int *) R_alloc(n, sizeof(int)); return(x); } void freeItable(x) Itable *x; { } H4table * allocH4table(n) int n; { H4table *x; x = (H4table *) R_alloc(1, sizeof(H4table)); x->n = n; x->obs = (int *) R_alloc(n, sizeof(int)); x->nco = (int *) R_alloc(n, sizeof(int)); x->cen = (int *) R_alloc(n, sizeof(int)); x->ncc = (int *) R_alloc(n, sizeof(int)); return(x); } void freeH4table(x) H4table *x; { } Box * allocBox() /* I know this is ridiculous but it's consistent. 
*/ { Box *b; b = (Box *) R_alloc(1, sizeof(Box)); return(b); } void freeBox(x) Box *x; { } Point * allocParray(n) /* allocate array of n Points */ int n; { Point *p; p = (Point *) R_alloc(n, sizeof(Point)); return(p); } void freeParray(x) Point *x; { } /* CREATE AND INITIALISE DATA STORAGE */ Ftable * MakeFtable(t0, t1, n) double *t0, *t1; int *n; { Ftable *tab; int i, nn; nn = *n; tab = allocFtable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->f[i] = 0.0; tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } Itable * MakeItable(t0, t1, n) double *t0, *t1; int *n; { Itable *tab; int i, nn; nn = *n; tab = allocItable(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->num[i] = 0; tab->denom[i] = 0; } return(tab); } H4table * MakeH4table(t0, t1, n) double *t0, *t1; int *n; { H4table *tab; int i, nn; nn = *n; tab = allocH4table(nn); tab->t0 = *t0; tab->t1 = *t1; for(i = 0; i < nn; i++) { tab->obs[i] = 0; tab->nco[i] = 0; tab->cen[i] = 0; tab->ncc[i] = 0; } tab->upperobs = 0; tab->uppercen = 0; return(tab); } /* CONVERSION OF DATA TYPES R -> internal including allocation of internal data types as needed */ Point * RtoPointarray(x,y,z,n) double *x, *y, *z; int *n; { int i, nn; Point *p; nn = *n; p = allocParray(nn); for(i = 0; i < nn; i++) { p[i].x = x[i]; p[i].y = y[i]; p[i].z = z[i]; } return(p); } Box * RtoBox(x0, x1, y0, y1, z0, z1) double *x0, *x1, *y0, *y1, *z0, *z1; { Box *b; b = allocBox(); b->x0 = *x0; b->x1 = *x1; b->y0 = *y0; b->y1 = *y1; b->z0 = *z0; b->z1 = *z1; return(b); } /* CONVERSION OF DATA TYPES internal -> R Note: it can generally be assumed that the R arguments are already allocated vectors of correct length, so we do not allocate them. 
*/ void FtabletoR(tab, t0, t1, n, f, num, denom) /* internal */ Ftable *tab; /* R representation */ double *t0, *t1; int *n; double *f, *num, *denom; { int i; *t0 = tab->t0; *t1 = tab->t1; *n = tab->n; for(i = 0; i < tab->n; i++) { f[i] = tab->f[i]; num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeFtable(tab); } void ItabletoR(tab, t0, t1, m, num, denom) /* internal */ Itable *tab; /* R representation */ double *t0, *t1; int *m; int *num, *denom; { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; for(i = 0; i < tab->n; i++) { num[i] = tab->num[i]; denom[i] = tab->denom[i]; } freeItable(tab); } void H4tabletoR(tab, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) /* internal */ H4table *tab; /* R representation */ double *t0, *t1; int *m; int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { int i; *t0 = tab->t0; *t1 = tab->t1; *m = tab->n; *upperobs = tab->upperobs; *uppercen = tab->uppercen; for(i = 0; i < tab->n; i++) { obs[i] = tab->obs[i]; nco[i] = tab->nco[i]; cen[i] = tab->cen[i]; ncc[i] = tab->ncc[i]; } freeH4table(tab); } /* R CALLING INTERFACE These routines are called from R by > .C("routine-name", ....) 
*/ void RcallK3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: k3trans(p, (int) *n, b, tab); break; case 1: k3isot(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); k3trans(p, (int) *n, b, tab); break; } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch(*method) { case 1: g3one(p, (int) *n, b, tab); break; case 3: g3three(p, (int) *n, b, tab); break; default: Rprintf("Method %d not implemented: defaults to 3\n", *method); g3three(p, (int) *n, b, tab); } FtabletoR(tab, t0, t1, m, f, num, denom); } void RcallG3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; int *m; /* H4table */ int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside RcallG3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); g3cen(p, (int) *n, b, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving RcallG3cen\n") } void RcallF3(x,y,z, n, x0, x1, y0, y1, z0, z1, vside, t0, t1, m, num, denom, method) double *x, *y, *z; /* points */ int *n; double 
*x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *vside; double *t0, *t1; int *m; /* Itable */ int *num, *denom; int *method; { Point *p; Box *b; Itable *count; DEBUGMESSAGE("Inside Rcall_f3\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeItable(t0, t1, m); switch((int) *method) { case 0: phatnaive(p, (int) *n, b, *vside, count); break; case 1: phatminus(p, (int) *n, b, *vside, count); break; default: Rprintf("Method %d not recognised: defaults to 1\n", *method); phatminus(p, (int) *n, b, *vside, count); } ItabletoR(count, t0, t1, m, num, denom); DEBUGMESSAGE("Leaving Rcall_f3\n") } void RcallF3cen(x,y,z, n, x0, x1, y0, y1, z0, z1, vside, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *vside; double *t0, *t1; int *m; /* H4table */ int *obs, *nco, *cen, *ncc; int *upperobs, *uppercen; { Point *p; Box *b; H4table *count; DEBUGMESSAGE("Inside Rcallf3cen\n") p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); count = MakeH4table(t0, t1, m); p3hat4(p, (int) *n, b, *vside, count); H4tabletoR(count, t0, t1, m, obs, nco, cen, ncc, upperobs, uppercen); DEBUGMESSAGE("Leaving Rcallf3cen\n") } void Rcallpcf3(x,y,z, n, x0, x1, y0, y1, z0, z1, t0, t1, m, f, num, denom, method, delta) double *x, *y, *z; /* points */ int *n; double *x0, *x1, /* box */ *y0, *y1, *z0, *z1; double *t0, *t1; /* Ftable */ int *m; double *f, *num, *denom; int *method; double *delta; /* Epanechnikov kernel halfwidth */ { Point *p; Box *b; Ftable *tab; p = RtoPointarray(x, y, z, n); b = RtoBox(x0, x1, y0, y1, z0, z1); tab = MakeFtable(t0, t1, m); switch((int) *method) { case 0: pcf3trans(p, (int) *n, b, tab, (double) *delta); break; case 1: pcf3isot(p, (int) *n, b, tab, (double) *delta); break; default: Rprintf("Method %d not implemented: defaults to 0\n", *method); pcf3trans(p, (int) *n, b, tab, (double) *delta); break; } FtabletoR(tab, t0, t1, m, f, num, 
denom); } spatstat/src/veegraf.c0000644000176000001440000000677112252324034014527 0ustar ripleyusers/* veegraf.c $Revision: 1.2 $ $Date: 2013/05/21 08:11:27 $ Given the edges of a graph, determine all "Vees" i.e. triples (i, j, k) where i ~ j and i ~ k. */ #include #include #include #include "chunkloop.h" #undef DEBUGVEE SEXP graphVees(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge) /* all arguments are integer */ /* Edges should NOT be repeated symmetrically. Indices need not be sorted. */ { int Nv, Ne; int *ie, *je; /* edges */ int *it, *jt, *kt; /* vectors of indices of triples */ int Nt, Ntmax; /* number of triples */ int Nj; int *jj; /* scratch storage */ int i, j, k, m, mj, mk, Nmore, maxchunk; /* output */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); /* That's 3 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGVEE Rprintf("i=%d ---------- \n", i); #endif /* Find Vee triples with apex 'i' */ /* First, find all vertices j connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { jj[Nj] = je[m]; Nj++; } else if(je[m] == i) { jj[Nj] = ie[m]; Nj++; } } /* save triples (i,j,k) */ #ifdef DEBUGVEE Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGVEE Rprintf("i=%d\njj=\n", i); for(mj = 0; 
mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGVEE Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 3+4=7 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triplet indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(7); return(out); } spatstat/src/raster.h0000755000176000001440000000474712252324034014421 0ustar ripleyusers/* raster.h Definition of raster structures & operations requires (for floor()) $Revision: 1.3 $ $Date: 2004/11/15 19:25:11 $ */ typedef struct Raster{ /* array of data */ char *data; /* coerced to appropriate type */ int nrow; /* dimensions of entire array */ int ncol; int length; int rmin; /* position of valid subrectangle */ int rmax; int cmin; int cmax; /* definition of mapping into continuous space */ double x0; /* position of entry (rmin,cmin) */ double y0; double x1; /* position of entry (rmax,cmax) */ double y1; double xstep; /* x increment for each column step */ double ystep; /* y increment for each row step */ /* xstep = (x1 - x0)/(cmax - cmin) = (x1 - x0)/(number of valid columns - 1) CAN BE POSITIVE OR 
NEGATIVE */ /* image of valid subrectangle */ double xmin; /* = min{x0,x1} */ double xmax; double ymin; double ymax; } Raster; /* how to clear the data */ #define Clear(ARRAY,TYPE,VALUE) \ { unsigned int i; TYPE *p; \ for(i = 0, p = (TYPE *) (ARRAY).data; i < (ARRAY).length; i++, p++) \ *p = VALUE; } /* how to index a rectangular array stored sequentially in row-major order */ #define Entry(ARRAY,ROW,COL,TYPE) \ ((TYPE *)((ARRAY).data))[COL + (ROW) * ((ARRAY).ncol)] /* test for indices inside subrectangle */ #define Inside(ARRAY,ROW,COL) \ ( (ROW >= (ARRAY).rmin) && (ROW <= (ARRAY).rmax) && \ (COL >= (ARRAY).cmin) && (COL <= (ARRAY).cmax)) /* how to compute the position in R^2 corresponding to a raster entry */ #define Xpos(ARRAY,COL) \ ((ARRAY).x0 + (ARRAY).xstep * (COL - (ARRAY).cmin)) #define Ypos(ARRAY,ROW) \ ((ARRAY).y0 + (ARRAY).ystep * (ROW - (ARRAY).rmin)) #define Distance(X,Y,XX,YY) sqrt((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceTo(X,Y,ARRAY,ROW,COL)\ Distance(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) #define DistanceSquared(X,Y,XX,YY) ((X - XX)* (X - XX) + (Y - YY) * (Y - YY)) #define DistanceToSquared(X,Y,ARRAY,ROW,COL)\ DistanceSquared(X,Y,Xpos(ARRAY,COL),Ypos(ARRAY,ROW)) /* how to map a point (x,y) in R^2 to a raster entry */ /* (x,y) is guaranteed to lie in the rectangle bounded by the images of the entries (r,c), (r+1,c), (r,c+1), (r+1,c+1) where r = RowIndex(..) and c = ColIndex(..). 
*/ #define RowIndex(ARRAY,Y) \ ((ARRAY).rmin + (int) floor(((Y) - (ARRAY).y0)/(ARRAY).ystep)) #define ColIndex(ARRAY,X) \ ((ARRAY).cmin + (int) floor(((X) - (ARRAY).x0)/(ARRAY).xstep)) spatstat/src/hardcore.c0000755000176000001440000000410712252324034014671 0ustar ripleyusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Hard core process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Hardcore { double h; /* hard core distance */ double h2; double *period; int per; } Hardcore; /* initialiser function */ Cdata *hardcoreinit(state, model, algo) State state; Model model; Algor algo; { Hardcore *hardcore; double h; hardcore = (Hardcore *) R_alloc(1, sizeof(Hardcore)); /* Interpret model parameters*/ hardcore->h = h = model.ipar[0]; hardcore->h2 = h * h; hardcore->period = model.period; /* periodic boundary conditions? */ hardcore->per = (model.period[0] > 0.0); return((Cdata *) hardcore); } /* conditional intensity evaluator */ double hardcorecif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double h2, a; Hardcore *hardcore; hardcore = (Hardcore *) cdata; h2 = hardcore->h2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(hardcore->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(dist2thresh(u,v,x[j],y[j],hardcore->period, h2)) return((double) 0.0); } } if(ixp1 < npts) { for(j=ixp1; jperiod, h2)) return((double) 0.0); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { a = h2 - pow(u - x[j], 2); if(a > 0) { a -= pow(v - y[j], 2); if(a > 0) return((double) 0.0); } } } if(ixp1 < npts) { for(j=ixp1; j 0) { a -= pow(v - y[j], 2); if(a > 0) return((double) 0.0); } } } } return ((double) 1.0); } Cifns HardcoreCifns = { &hardcoreinit, &hardcorecif, 
(updafunptr) NULL, NO}; spatstat/src/denspt.c0000755000176000001440000002036212252324034014400 0ustar ripleyusers#include #include #include "chunkloop.h" #include "pairloop.h" #include "constants.h" /* denspt.c $Revision: 1.12 $ $Date: 2013/09/18 04:16:50 $ Assumes point pattern is sorted in increasing order of x coordinate *denspt* Density estimate at points *smoopt* Smoothed mark values at points */ #define TWOPI M_2PI double sqrt(), exp(); #define STD_DECLARATIONS \ int n, i, j, maxchunk; \ double xi, yi, rmax, r2max, dx, dy, dx2, d2 #define STD_INITIALISE \ n = *nxy; \ rmax = *rmaxi; \ r2max = rmax * rmax /* ----------------- density estimation -------------------- */ void denspt(nxy, x, y, rmaxi, sig, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-d2/twosig2); } , { result[i] = coef * resulti; }) } void wtdenspt(nxy, x, y, rmaxi, sig, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double sigma, twosig2; STD_INITIALISE; sigma = *sig; twosig2 = 2.0 * sigma * sigma; coef = 1.0/(TWOPI * sigma * sigma); if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * exp(-d2/twosig2); }, { result[i] = coef * resulti; } ) } /* ------------- anisotropic versions -------------------- */ void adenspt(nxy, x, y, rmaxi, detsigma, sinv, result) /* 
inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; PAIRLOOP( { resulti = 0.0; }, { resulti += exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } void awtdenspt(nxy, x, y, rmaxi, detsigma, sinv, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *rmaxi; /* maximum distance at which points contribute */ double *detsigma; /* determinant of variance matrix */ double *sinv; /* inverse variance matrix (2x2, flattened) */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of weighted density values */ { STD_DECLARATIONS; double resulti, coef; double detsig, s11, s12, s21, s22; STD_INITIALISE; detsig = *detsigma; coef = 1.0/(TWOPI * sqrt(detsig)); s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; PAIRLOOP( { resulti = 0.0; }, { resulti += weight[j] * \ exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); }, { result[i] = coef * resulti; }) } /* --------------- smoothing --------------------------- */ void smoopt(nxy, x, y, v, self, rmaxi, sig, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sig; /* Gaussian sd */ /* output */ double *result; /* vector of computed smoothed values */ { 
STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ if(countself != 0) { \ numer += 1; \ denom += v[i]; \ } \ result[i] = numer/denom; \ }) } void wtsmoopt(nxy, x, y, v, self, rmaxi, sig, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance */ double *sig; /* Gaussian sd */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of computed smoothed values */ { STD_DECLARATIONS; int countself; double sigma, twosig2; double numer, denom, wij; STD_INITIALISE; sigma = *sig; countself = *self; twosig2 = 2.0 * sigma * sigma; if(n == 0) return; PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-d2/twosig2); \ denom += wij; \ numer += wij * v[j]; \ }, { \ if(countself != 0) { \ numer += weight[i]; \ denom += weight[i] * v[i]; \ } \ result[i] = numer/denom; \ }) } /* ------------- anisotropic versions -------------------- */ void asmoopt(nxy, x, y, v, self, rmaxi, sinv, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; PAIRLOOP({ numer = denom = 0.0; }, { \ wij = exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * 
s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ if(countself != 0) { \ numer += 1; \ denom += v[i]; \ } \ result[i] = numer/denom; \ }) } void awtsmoopt(nxy, x, y, v, self, rmaxi, sinv, weight, result) /* inputs */ int *nxy; /* number of (x,y) points */ double *x, *y; /* (x,y) coordinates */ double *v; /* vector of mark values to be smoothed */ int *self; /* 0 if leave-one-out */ double *rmaxi; /* maximum distance at which points contribute */ double *sinv; /* inverse variance matrix (2x2, flattened) */ double *weight; /* vector of weights */ /* output */ double *result; /* vector of smoothed values */ { STD_DECLARATIONS; int countself; double s11, s12, s21, s22; double numer, denom, wij; STD_INITIALISE; countself = *self; s11 = sinv[0]; s12 = sinv[1]; s21 = sinv[2]; s22 = sinv[3]; if(n == 0) return; PAIRLOOP({ numer = denom = 0.0; }, { \ wij = weight[j] * exp(-(dx * (dx * s11 + dy * s12) \ + dy * (dx * s21 + dy * s22))/2.0); \ denom += wij; \ numer += wij * v[j]; \ }, { \ if(countself != 0) { \ numer += weight[i]; \ denom += weight[i] * v[i]; \ } \ result[i] = numer/denom; \ }) } spatstat/src/exactPdist.c0000755000176000001440000001007612252324034015214 0ustar ripleyusers/* exactPdist.c `Pseudoexact' distance transform of a discrete binary image (the closest counterpart to `exactdist.c') $Revision: 1.12 $ $Date: 2011/05/17 12:27:20 $ */ #include #include "raster.h" void dist_to_bdry(); void shape_raster(); void ps_exact_dt(in, dist, row, col) Raster *in; /* input: binary image */ Raster *dist; /* output: exact distance to nearest point */ Raster *row; /* output: row index of closest point */ Raster *col; /* output: column index of closest point */ /* rasters must have been dimensioned by shape_raster() and must all have identical dimensions and margins */ { int j,k; double d, x, y; int r, c; double dnew; double huge; /* double bdiag; */ /* initialise */ #define UNDEFINED -1 #define Is_Defined(I) (I >= 0) #define Is_Undefined(I) (I < 0) 
Clear(*row,int,UNDEFINED) Clear(*col,int,UNDEFINED) huge = 2.0 * DistanceSquared(dist->xmin,dist->ymin,dist->xmax,dist->ymax); Clear(*dist,double,huge) /* if input pixel is TRUE, set distance to 0 and make pixel point to itself */ for(j = in->rmin; j <= in->rmax; j++) for(k = in->cmin; k <= in->cmax; k++) if(Entry(*in, j, k, int) != 0) { Entry(*dist, j, k, double) = 0.0; Entry(*row, j, k, int) = j; Entry(*col, j, k, int) = k; } /* how to update the distance values */ #define GETVALUES(ROW,COL) \ x = Xpos(*in, COL); \ y = Ypos(*in, ROW); \ d = Entry(*dist,ROW,COL,double); #define COMPARE(ROW,COL,RR,CC) \ r = Entry(*row,RR,CC,int); \ c = Entry(*col,RR,CC,int); \ if(Is_Defined(r) && Is_Defined(c) \ && Entry(*dist,RR,CC,double) < d) { \ dnew = DistanceSquared(x, y, Xpos(*in,c), Ypos(*in,r)); \ if(dnew < d) { \ Entry(*row,ROW,COL,int) = r; \ Entry(*col,ROW,COL,int) = c; \ Entry(*dist,ROW,COL,double) = dnew; \ d = dnew; \ } \ } /* bound on diagonal step distance squared */ /* bdiag = (in->xstep * in->xstep + in->ystep * in->ystep); */ /* forward pass */ for(j = in->rmin; j <= in->rmax; j++) for(k = in->cmin; k <= in->cmax; k++) { GETVALUES(j, k) COMPARE(j,k, j-1,k-1) COMPARE(j,k, j-1, k) COMPARE(j,k, j-1,k+1) COMPARE(j,k, j, k-1) } /* backward pass */ for(j = in->rmax; j >= in->rmin; j--) for(k = in->cmax; k >= in->cmin; k--) { GETVALUES(j, k) COMPARE(j,k, j+1,k+1) COMPARE(j,k, j+1, k) COMPARE(j,k, j+1,k-1) COMPARE(j,k, j, k+1) } /* take square roots of distances^2 */ for(j = in->rmax; j >= in->rmin; j--) for(k = in->cmax; k >= in->cmin; k--) Entry(*dist,j,k,double) = sqrt(Entry(*dist,j,k,double)); } /* R interface */ void ps_exact_dt_R(xmin, ymin, xmax, ymax, nr, nc, mr, mc, inp, distances, rows, cols, boundary) double *xmin, *ymin, *xmax, *ymax; /* x, y dimensions */ int *nr, *nc; /* raster dimensions EXCLUDING margins */ int *mr, *mc; /* margins */ int *inp; /* input: binary image */ double *distances; /* output: distance to nearest point */ int *rows; /* output: row 
of nearest point (start= 0) */ int *cols; /* output: column of nearest point (start = 0) */ double *boundary; /* output: distance to boundary of rectangle */ /* all images must have identical dimensions including a margin of 1 on each side */ { Raster data, dist, row, col, bdist; int mrow, mcol, nrow, ncol; mrow = *mr; mcol = *mc; /* full dimensions */ nrow = *nr + 2 * mrow; ncol = *nc + 2 * mcol; shape_raster( &data, (void *) inp, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &dist, (void *) distances, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &row, (void *) rows, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &col, (void *) cols, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); shape_raster( &bdist, (void *) boundary, *xmin,*ymin,*xmax,*ymax, nrow, ncol, mrow, mcol); ps_exact_dt(&data, &dist, &row, &col); dist_to_bdry(&bdist); } spatstat/src/areaint.c0000755000176000001440000001624412252324034014532 0ustar ripleyusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity function for an area-interaction process: cif = eta^(1-B) where B = (uncovered area)/(pi r^2) */ #define NGRID 16 /* To explore serious bug, #define BADBUG */ #undef BADBUG /* Format for storage of parameters and precomputed/auxiliary data */ typedef struct AreaInt { /* model parameters */ double eta; double r; /* transformations of the parameters */ double r2; double range2; double logeta; int hard; /* periodic distance */ double *period; int per; /* grid counting */ double dx; double xgrid0; int *my; int kdisc; /* scratch space for saving list of neighbours */ int *neighbour; } AreaInt; /* initialiser function */ Cdata *areaintInit(state, model, algo) State state; Model model; Algor algo; { double r, dx, dy, x0; int i, my, kdisc; AreaInt *areaint; /* create storage */ areaint = (AreaInt *) R_alloc(1, sizeof(AreaInt)); /* Interpret model parameters*/ areaint->eta = model.ipar[0]; areaint->r = r = 
model.ipar[1]; #ifdef BADBUG Rprintf("r = %lf\n", r); #endif areaint->r2 = r * r; areaint->range2 = 4 * r * r; /* square of interaction distance */ /* is the model numerically equivalent to hard core ? */ areaint->hard = (areaint->eta == 0.0); areaint->logeta = (areaint->hard) ? log(DOUBLE_XMIN) : log(areaint->eta); #ifdef BADBUG if(areaint->hard) Rprintf("Hard core recognised\n"); #endif /* periodic boundary conditions? */ areaint->period = model.period; areaint->per = (model.period[0] > 0.0); #ifdef BADBUG if(areaint->per) { Rprintf("*** periodic boundary conditions ***\n"); Rprintf("period = %lf, %lf\n", model.period[0], model.period[1]); } #endif /* grid counting */ dx = dy = areaint->dx = (2 * r)/NGRID; #ifdef BADBUG Rprintf("areaint->dx = %lf\n", areaint->dx); #endif areaint->xgrid0 = -r + dx/2; areaint->my = (int *) R_alloc((long) NGRID, sizeof(int)); kdisc = 0; for(i = 0; i < NGRID; i++) { x0 = areaint->xgrid0 + i * dx; my = floor(sqrt(r * r - x0 * x0)/dy); my = (my < 0) ? 0 : my; areaint->my[i] = my; #ifdef BADBUG Rprintf("\tmy[%ld] = %ld\n", i, my); #endif kdisc += 2 * my + 1; } areaint->kdisc = kdisc; #ifdef BADBUG Rprintf("areaint->kdisc = %ld\n", areaint->kdisc); #endif /* allocate space for neighbour indices */ areaint->neighbour = (int *) R_alloc((long) state.npmax, sizeof(int)); return((Cdata *) areaint); } #ifdef BADBUG void fexitc(); #endif /* conditional intensity evaluator */ double areaintCif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *period, *x, *y; double u, v; double r2, dx, dy, a, range2; double xgrid, ygrid, xgrid0, covfrac, cifval; int kount, kdisc, kx, my, ky; int *neighbour; int nn, k; AreaInt *areaint; areaint = (AreaInt *) cdata; r2 = areaint->r2; range2 = areaint->range2; /* square of interaction distance */ dy = dx = areaint->dx; kdisc = areaint->kdisc; /* pointers */ period = areaint->period; neighbour = areaint->neighbour; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = 
state.y; npts = state.npts; if(npts == 0) return ((double) 1.0); if(!areaint->per) { /* .......... Euclidean distance .................... First identify which data points are neighbours of (u,v) */ nn = 0; ixp1 = ix + 1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(ix > 0) { for(j=0; j < ix; j++) { a = range2 - pow(u - x[j], 2); if(a > 0.) { a -= pow(v - y[j], 2); if(a > 0.) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } } if(ixp1 < npts) { for(j=ixp1; j < npts; j++) { a = range2 - pow(u - x[j], 2); if(a > 0.) { a -= pow(v - y[j], 2); if(a > 0.) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } } if(nn == 0) { /* no neighbours; no interaction */ cifval = 1.0; return cifval; } else if(areaint->hard) { /* neighbours forbidden if it's a hard core process */ cifval = 0.0; return cifval; } else { /* scan a grid of points centred at (u,v) */ kount = 0; xgrid0 = u + areaint->xgrid0; for(kx=0; kxmy[kx]; for(ky=(-my); ky<=my; ky++) { ygrid = v + ky * dy; /* Grid point (xgrid,ygrid) is inside disc of radius r centred at (u,v) Loop through all neighbouring data points to determine whether the grid point is covered by another disc */ if(nn > 0) { for(k=0; k < nn; k++) { j = neighbour[k]; a = r2 - pow(xgrid - x[j], 2); if(a > 0) { a -= pow(ygrid - y[j], 2); if(a > 0) { /* point j covers grid point */ ++kount; break; } } } } /* finished consideration of grid point (xgrid, ygrid) */ } } } } else { /* ............. periodic distance ...................... 
First identify which data points are neighbours of (u,v) */ nn = 0; ixp1 = ix + 1; if(ix > 0) { for(j=0; j < ix; j++) { if(dist2thresh(u,v,x[j],y[j],period,range2)) { /* point j is a neighbour of (u,v) */ neighbour[nn] = j; ++nn; } } } if(ixp1 < npts) { for(j=ixp1; jhard) { /* neighbours forbidden if it's a hard core process */ cifval = 0.0; return cifval; } else { /* scan a grid of points centred at (u,v) */ kount = 0; xgrid0 = u + areaint->xgrid0; for(kx=0; kxmy[kx]; for(ky=(-my); ky<=my; ky++) { ygrid = v + ky * dy; /* Grid point (xgrid,ygrid) is inside disc of radius r centred at (u,v) Loop through all neighbouring data points to determine whether the grid point is covered by another disc */ for(k=0; k < nn; k++) { j = neighbour[k]; if(dist2Mthresh(xgrid,ygrid,x[j],y[j],period,r2)) { /* point j covers grid point */ ++kount; break; } } /* finished considering grid point (xgrid,ygrid) */ } } } } /* `kdisc' is the number of grid points in the disc `kount' is the number of COVERED grid points in the disc */ /* Hard core case has been handled. 
*/ /* Usual calculation: covered area fraction */ covfrac = ((double) kount)/((double) kdisc); cifval = exp(areaint->logeta * covfrac); #ifdef BADBUG if(!R_FINITE(cifval)) { Rprintf("Non-finite CIF value\n"); Rprintf("kount=%ld, kdisc=%ld, covfrac=%lf, areaint->logeta=%lf\n", kount, kdisc, covfrac, areaint->logeta); Rprintf("u=%lf, v=%lf\n", u, v); fexitc("Non-finite CIF"); } #endif return cifval; } Cifns AreaIntCifns = { &areaintInit, &areaintCif, (updafunptr) NULL, NO}; spatstat/src/Perfect.cc0000755000176000001440000005742112252324034014644 0ustar ripleyusers// Debug switch // #define DBGS #include #include #include #include #include #include #include #include // #include // FILE *out; // File i/o is deprecated in R implementation #ifdef DBGS #define CHECK(PTR,MESSAGE) if(((void *) PTR) == ((void *) NULL)) error(MESSAGE) #define CLAMP(X, LOW, HIGH, XNAME) \ if((X) > (HIGH)) { \ Rprintf("Value of %s exceeds upper limit %d\n", XNAME, HIGH); \ X = HIGH; \ } else if((X) < (LOW)) { \ Rprintf("Value of %s is below %d\n", XNAME, LOW); \ X = LOW; \ } #else #define CHECK(PTR,MESSAGE) #define CLAMP(X, LOW, HIGH, XNAME) \ if((X) > (HIGH)) X = HIGH; else if((X) < (LOW)) X = LOW; #endif // ......................................... // memory allocation // using R_alloc #define ALLOCATE(TYPE) (TYPE *) R_alloc(1, sizeof(TYPE)) #define FREE(PTR) // Alternative using Calloc and Free // #define ALLOCATE(TYPE) (TYPE *) Calloc(1, sizeof(TYPE)) // #define FREE(PTR) Free(PTR) void R_CheckUserInterrupt(void); struct Point{ long int No; float X; float Y; float R; struct Point *next; }; struct Point2{ long int No; float X; float Y; char InLower[2]; double Beta; double TempBeta; struct Point2 *next; }; struct Point3{ char Case; char XCell; char YCell; struct Point3 *next; }; // const float Pi=3.141593; double slumptal(void){ return(runif((double) 0.0, (double) 1.0)); } long int poisson(double lambda){ return((long int)rpois(lambda)); } // ........................... 
Point patterns .......................... class Point2Pattern { public: long int UpperLiving[2]; long int MaxXCell, MaxYCell, NoP; double XCellDim, YCellDim, Xmin, Xmax, Ymin, Ymax; struct Point2 *headCell[10][10],*dummyCell; char DirX[10], DirY[10]; Point2Pattern(double xmin, double xmax, double ymin, double ymax, long int mxc, long int myc){ long int i,j; UpperLiving[0] = 0; UpperLiving[1] = 0; Xmin = xmin; Xmax = xmax; Ymin = ymin; Ymax = ymax; DirX[1] = 1; DirY[1] = 0; DirX[2] = 1; DirY[2] = -1; DirX[3] = 0; DirY[3] = -1; DirX[4] = -1; DirY[4] = -1; DirX[5] = -1; DirY[5] = 0; DirX[6] = -1; DirY[6] = 1; DirX[7] = 0; DirY[7] = 1; DirX[8] = 1; DirY[8] = 1; NoP = 0; // dummyCell = ALLOCATE(struct Point2); // dummyCell->next = dummyCell; dummyCell->No = 0; MaxXCell = mxc; MaxYCell = myc; if(MaxXCell>9) MaxXCell = 9; if(MaxYCell>9) MaxYCell = 9; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ // headCell[i][j] = ALLOCATE(struct Point2); // headCell[i][j]->next=dummyCell; } } XCellDim = (Xmax-Xmin)/((double)(MaxXCell+1)); YCellDim = (Ymax-Ymin)/((double)(MaxYCell+1)); }; ~Point2Pattern(){} void Print(); void Return(double *X, double *Y, int *num, int maxnum); long int Count(); long int UpperCount(); void Empty(); void Clean(); // void DumpToFile(char FileName[100]); // void ReadFromFile(char FileName[100]); }; void Point2Pattern::Print(){ long int i,j,k; k = 0; struct Point2 *TempCell; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ //Rprintf("%d %d:\n",i,j); TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Print()"); while(TempCell->next != TempCell){ k++; Rprintf("%f %f %ld %ld %ld=%d %ld=%d UL0 %d UL1 %d %f\n", TempCell->X,TempCell->Y,k, TempCell->No, i,int(TempCell->X/XCellDim), j,int(TempCell->Y/YCellDim), TempCell->InLower[0],TempCell->InLower[1], TempCell->Beta); TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Print() loop"); } } } Rprintf("Printed %ld points.\n",k); } void 
Point2Pattern::Return(double *X, double *Y, int *num, int maxnum){ long int i,j,k; k =0; *num = 0; #ifdef DBGS Rprintf("executing Return()\n"); #endif if(UpperLiving[0]<=maxnum){ struct Point2 *TempCell; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ #ifdef DBGS // Rprintf("%d %d:\n",i,j); #endif TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Return()"); while(TempCell->next != TempCell){ X[k] = TempCell->X; Y[k] = TempCell->Y; k++; TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Return() loop"); } } } *num = k; } else { *num = -1; } } long int Point2Pattern::Count(){ long int i,j,k; k = 0; struct Point2 *TempCell; for(i=0;i<=MaxXCell;i++){ for(j=0;j<=MaxYCell;j++){ // Rprintf("%d %d:\n",i,j); TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Count()"); while(TempCell->next != TempCell){ k++; TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Count() loop"); } } } //Rprintf("Printed %d points.\n",k); return(k); } // a quick (over)estimate of the number of points in the pattern, // for storage allocation long int Point2Pattern::UpperCount(){ return(UpperLiving[0]); } void Point2Pattern::Empty(){ struct Point2 *TempCell, *TempCell2; long int i,j; #ifdef DBGS long int k; k=0; Rprintf("executing Empty()\n"); #endif for(i=0; i<=this->MaxXCell; i++){ for(j=0; j<=this->MaxYCell; j++){ TempCell = headCell[i][j]->next; CHECK(TempCell, "internal error: TempCell is null in Empty()"); while(TempCell!=TempCell->next){ #ifdef DBGS // k++; Rprintf("%d %d %d\n",i,j,k); #endif TempCell2 = TempCell->next; FREE(TempCell); TempCell = TempCell2; CHECK(TempCell, "internal error: TempCell is null in Empty() loop"); } headCell[i][j]->next = dummyCell; } } } void Point2Pattern::Clean(){ struct Point2 *TempCell, *TempCell2; long int i,j; #ifdef DBGS Rprintf("executing Clean()\n"); #endif for(i=0; i<=MaxXCell; i++){ for(j=0; j<=MaxYCell; j++){ TempCell 
= headCell[i][j]; CHECK(TempCell, "internal error: TempCell is null in Clean()"); TempCell2 = headCell[i][j]->next; CHECK(TempCell2, "internal error: TempCell2 is null in Clean()"); while(TempCell2!=TempCell2->next){ TempCell2->No = 0; if(TempCell2->InLower[0]==0){ TempCell->next = TempCell2->next; FREE(TempCell2); TempCell2 = TempCell->next; CHECK(TempCell2, "internal error: TempCell2 is null in Clean() loop A"); } else{ TempCell2 = TempCell2->next; TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Clean() loop B"); CHECK(TempCell2, "internal error: TempCell2 is null in Clean() loop B"); } } } } } //void Point2Pattern::DumpToFile(char FileName[100]){ // FILE *out; // long int i,j; // out = fopen(FileName,"w"); // struct Point2 *TempCell; // for(i=0;i<=MaxXCell;i++){ // for(j=0;j<=MaxYCell;j++){ // //Rprintf("%d %d:\n",i,j); // TempCell = headCell[i][j]->next; // while(TempCell->next != TempCell){ // fprintf(out,"%f\t%f\t%ld\n", // TempCell->X,TempCell->Y,TempCell->No); // TempCell = TempCell->next; // } // } //} //fclose(out); //} //void Point2Pattern::ReadFromFile(char FileName[100]){ // FILE *out; //long int k,XCell,YCell; //float f1,xs,ys; //out = fopen(FileName,"r"); //struct Point2 *TempCell; //k=0; //while(feof(out)==0){ // k++; // fscanf(out,"%f%f\n",&xs,&ys); // //Rprintf("%f %f\n",xs,ys); // // // TempCell = ALLOCATE(struct Point2); // // // TempCell->No = k; // TempCell->X = xs; // TempCell->Y = ys; // TempCell->InLower[0] = 1; // TempCell->InLower[1] = 1; // // f1 = (xs-Xmin)/XCellDim; XCell = int(f1); // if(XCell>MaxXCell) XCell = MaxXCell; // f1 = (ys-Ymin)/YCellDim; YCell = int(f1); // if(YCell>MaxYCell) YCell = MaxYCell; // // TempCell->next = headCell[XCell][YCell]->next; // headCell[XCell][YCell]->next = TempCell; // //} //fclose(out); //Rprintf("%ld points loaded.\n",k); // //} // ........................... Point processes .......................... // ...................... 
(stationary, pairwise interaction) ............ class PointProcess { public: double Xmin, Xmax, Ymin, Ymax, TotalBirthRate, InteractionRange; PointProcess(double xmin, double xmax, double ymin, double ymax){ Xmin = xmin; Xmax = xmax; Ymin = ymin; Ymax = ymax; } ~PointProcess(){} virtual void NewEvent(double *x, double *y, char *InWindow)=0; virtual void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP)=0; virtual double Interaction(double dsquared)=0; // virtual void CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ // Rprintf("Define CalcBeta...\n"); // } // virtual void CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ //Rprintf("Define CheckBeta...\n"); //} // virtual double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p) //{ return(0.0);}; // virtual double lnDens(Point2Pattern *p2p); // virtual void Beta(struct Point2 *TempCell){ // TempCell->Beta = 0; // Rprintf("Define Beta...\n");}; }; //double PointProcess::lnDens(Point2Pattern *p2p){ //// double f1; //long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx; //double dy,dx, lnDens,dst2; //struct Point2 *TempCell, *TempCell2; // //dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1)); //dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1)); //rx = int(InteractionRange/dx+1.0); //ry = int(InteractionRange/dy+1.0); // // //Rprintf("1:%f 2:%f 3:%d 4:%d 5:%f 6:%f\n",dx,dy,rx,ry, // // this->InteractionRange,InteractionRange); // //Rprintf("mx:%d my:%d\n",p2p->MaxXCell,p2p->MaxYCell); // // lnDens = 0; // // //Rprintf("lnDens: %f (0)\n",lnDens); // // for(xc = 0; xc <= p2p->MaxXCell; xc++){ // for(yc = 0; yc <= p2p->MaxYCell; yc++){ // //if(xc==1) Rprintf("%d %d\n",xc,yc); // CHECK(p2p->headCell[xc][yc], // "internal error: p2p->headCell[xc][yc] is null in lnDens()"); // TempCell = p2p->headCell[xc][yc]->next; // CHECK(TempCell, "internal error: TempCell is null in lnDens()"); // while(TempCell != TempCell->next){ // lnDens += log(TempCell->Beta); // 
//Rprintf("lnDens: %f (1) %d %d %d %d Beta %f\n",lnDens,xc,yc, // // p2p->MaxXCell,p2p->MaxYCell,TempCell->Beta); // //if(lnDens<(-100000)){Rprintf("%f",lnDens); scanf("%f",&f1);} // if(InteractionRange>0){ // if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell; // if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell; // if((xc-rx)>=0) fx=xc-rx; else fx = 0; // if((yc-ry)>=0) fy=yc-ry; else fy = 0; // for(xco = fx; xco <= tx; xco++){ // for(yco = fy; yco <= ty; yco++){ // //if(xc==1) Rprintf("%d %d %d %d %d %d\n",xco,yco,fx,tx,fy,ty); // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in lnDens() loop"); // TempCell2 = p2p->headCell[xco][yco]->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnDens() loop A"); // while(TempCell2!=TempCell2->next){ // if(TempCell2 != TempCell){ // dst2 = pow(TempCell->X-TempCell2->X,2)+ // pow(TempCell->Y-TempCell2->Y,2); // lnDens += log(Interaction(dst2)); // } // TempCell2 = TempCell2->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnDens() loop B"); // } // } // } // //Rprintf("lnDens: %f\n",lnDens); // } // TempCell = TempCell->next; // CHECK(TempCell, // "internal error: TempCell is null in lnDens() at end"); // } // } // } // return(lnDens); // //} // ........................... Sampler .......................... 
class Sampler{ public: PointProcess *PP; Point2Pattern *P2P; long int GeneratedPoints, LivingPoints, NoP; //long int UpperLiving[2]; Sampler(PointProcess *p){ PP = p;} ~Sampler(){} void Sim(Point2Pattern *p2p, long int *ST, long int *ET); long int BirthDeath(long int TimeStep, struct Point *headLiving, struct Point *headDeleted, struct Point3 *headTransition); // WAS: Sampler::Forward void Forward(long int TS, long int TT, char TX, char TY, struct Point *Proposal, long int *DDD); }; void Sampler::Forward(long int TS, long int TT, char TX, char TY, struct Point *Proposal, long int *DDD){ long int XCell, YCell, DirectionN; double dtmp2,dtmpx,dtmpy, tmpR, TempGamma[2], TempI; struct Point2 *TempCell, *TempCell2; float f1; /* Birth */ if(TT==1){ f1 = (Proposal->X-P2P->Xmin)/P2P->XCellDim; XCell = int(f1); CLAMP(XCell, 0, P2P->MaxXCell, "XCell"); f1 = (Proposal->Y-P2P->Ymin)/P2P->YCellDim; YCell = int(f1); CLAMP(YCell, 0, P2P->MaxYCell, "YCell"); // TempCell = ALLOCATE(struct Point2); // TempCell->No = Proposal->No; TempCell->X = Proposal->X; TempCell->Y = Proposal->Y; tmpR = Proposal->R; TempCell->next = P2P->headCell[XCell][YCell]->next; P2P->headCell[XCell][YCell]->next = TempCell; TempCell->InLower[0]=0; TempCell->InLower[1]=0; TempGamma[0] = 1.0; TempGamma[1] = 1.0; /*same cell*/ TempCell2 = TempCell->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case"); while(TempCell2 != TempCell2->next){ dtmpx = TempCell->X - TempCell2->X; dtmpy = TempCell->Y - TempCell2->Y; dtmp2 = dtmpx*dtmpx+dtmpy*dtmpy; TempI = PP->Interaction(dtmp2); if(TempCell2->InLower[0]==1) TempGamma[0] = TempGamma[0]*TempI; if(TempCell2->InLower[1]==1) TempGamma[1] = TempGamma[1]*TempI; TempCell2=TempCell2->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop"); } /*eight other cells*/ for(DirectionN=1;DirectionN<=8;DirectionN++){ if(((XCell+P2P->DirX[DirectionN])>=0) && ((XCell+P2P->DirX[DirectionN])<=P2P->MaxXCell) && 
((YCell+P2P->DirY[DirectionN])>=0) && ((YCell+P2P->DirY[DirectionN])<=P2P->MaxYCell)){ CHECK(P2P->headCell[XCell+P2P->DirX[DirectionN]][YCell+P2P->DirY[DirectionN]], "internal error: HUGE P2P EXPRESSION is null in Forward() birth case loop A"); TempCell2 = P2P->headCell[XCell+P2P->DirX[DirectionN]] [YCell+P2P->DirY[DirectionN]]->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop B"); while(TempCell2!=TempCell2->next){ dtmpx = TempCell->X - TempCell2->X; dtmpy = TempCell->Y - TempCell2->Y; dtmp2 = dtmpx*dtmpx+dtmpy*dtmpy; TempI = PP->Interaction(dtmp2); if(TempCell2->InLower[0]==1) TempGamma[0] = TempGamma[0]*TempI; if(TempCell2->InLower[1]==1) TempGamma[1] = TempGamma[1]*TempI; TempCell2=TempCell2->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() birth case loop C"); } } } if(tmpR <= TempGamma[1] ){ TempCell->InLower[0]=1; P2P->UpperLiving[0] = P2P->UpperLiving[0] +1; } if(tmpR <= TempGamma[0] ){ TempCell->InLower[1]=1; P2P->UpperLiving[1] = P2P->UpperLiving[1] +1; } } /* Death */ if(TT==0){ TempCell=P2P->headCell[(int)TX][(int)TY]; CHECK(TempCell, "internal error: TempCell is null in Forward() death case"); while(TempCell->next->No != *DDD){ TempCell = TempCell->next; CHECK(TempCell, "internal error: TempCell is null in Forward() death case loop"); if(TempCell->next == TempCell) { Rprintf("internal error: unexpected self-reference. 
Dumping...\n"); P2P->Print(); error("internal error: unexpected self-reference"); break; } }; CHECK(TempCell->next, "internal error: TempCell->next is null in Forward() death case"); if(*DDD!=TempCell->next->No) Rprintf("diagnostic message: multi cell: !!DDD:%ld TempUpper->No:%ld ", *DDD,TempCell->No); if(TempCell->next->InLower[0]==1) P2P->UpperLiving[0] = P2P->UpperLiving[0] -1; if(TempCell->next->InLower[1]==1) P2P->UpperLiving[1] = P2P->UpperLiving[1] -1; TempCell2 = TempCell->next; CHECK(TempCell2, "internal error: TempCell2 is null in Forward() death case B"); TempCell->next = TempCell2->next; FREE(TempCell2); /* Common stuff */ //KillCounter ++; *DDD = *DDD - 1; } } long int Sampler::BirthDeath(long int TimeStep, struct Point *headLiving, struct Point *headDeleted, struct Point3 *headTransition){ long int i,n; float f1,f2,f3,f4; double xtemp,ytemp; char InWindow, Success; struct Point *TempPoint, *TempPoint2; struct Point3 *TempTransition; R_CheckUserInterrupt(); f1 = LivingPoints; f2 = PP->TotalBirthRate; f3 = f2/(f1+f2); f4 = slumptal(); n = 0; Success = 0; //Rprintf("LivingPoints: %d TotalBirthRate %f GeneratedPoints %d\n", // LivingPoints,PP->TotalBirthRate,GeneratedPoints); /* Birth */ while(Success==0){ if(f4NewEvent(&xtemp, &ytemp, &InWindow); //Rprintf("Ping 2 (BD)\n"); if(InWindow==1){ Success = 1; // TempTransition = ALLOCATE(struct Point3); // //Rprintf("Ping 3 (BD)\n"); TempTransition->Case = 0; LivingPoints ++; GeneratedPoints ++; // TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = GeneratedPoints; TempPoint->R = slumptal(); TempPoint->next = headLiving->next; headLiving->next = TempPoint; NoP ++; f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim; TempTransition->XCell = int(f1); f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim; TempTransition->YCell = int(f1); //Rprintf("X %f XCell %d\n",TempPoint->X,TempTransition->XCell); // CLAMP(TempTransition->XCell, 0, P2P->MaxXCell, "TempTransition->XCell"); 
CLAMP(TempTransition->YCell, 0, P2P->MaxYCell, "TempTransition->YCell"); TempTransition->next = headTransition->next; headTransition->next = TempTransition; } } /* Death */ else{ Success = 1; // TempTransition = ALLOCATE(struct Point3); // TempTransition->Case = 1; f1 = LivingPoints; f2 = f1*slumptal()+1.0; n = int(f2); if(n < 1) n = 1; if(n>LivingPoints){ // Rprintf("diagnostic message: random integer n=%ld > %ld = number of living points\n", n,LivingPoints); n=LivingPoints; } TempPoint2 = TempPoint = headLiving; for(i=1; i<=n; i++){ TempPoint2 = TempPoint; TempPoint = TempPoint->next; } TempPoint2->next = TempPoint->next; TempPoint->next = headDeleted->next; headDeleted->next = TempPoint; LivingPoints --; NoP --; TempTransition->next = headTransition->next; headTransition->next = TempTransition; } } return(n); } void Sampler::Sim(Point2Pattern *p2p, long int *ST, long int *ET) { P2P = p2p; long int StartTime, EndTime, TimeStep, D0Time, D0Living; long int XCell, YCell, DDD, i; float f1; /* Initialising linked listed for backward simulation */ struct Point *headDeleted, *headLiving, *dummyDeleted, *dummyLiving; struct Point *TempPoint; // headLiving = ALLOCATE(struct Point); dummyLiving = ALLOCATE(struct Point); // headLiving->next = dummyLiving; dummyLiving->next = dummyLiving; // headDeleted = ALLOCATE(struct Point); dummyDeleted = ALLOCATE(struct Point); // headDeleted->next = dummyDeleted; dummyDeleted->next = dummyDeleted; struct Point2 *TempCell2; struct Point3 *headTransition, *dummyTransition; // headTransition = ALLOCATE(struct Point3); dummyTransition = ALLOCATE(struct Point3); // headTransition->next = dummyTransition; dummyTransition->next = dummyTransition; PP->GeneratePoisson(headLiving, &GeneratedPoints, &LivingPoints, &NoP); StartTime=1; EndTime=1; TimeStep = 0; D0Time = 0; D0Living = GeneratedPoints; long int tmp, D0; do{ tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition); if(tmp>0){ if(tmp>(LivingPoints+1-D0Living)){ D0Living --; } 
} D0Time++; }while(D0Living>0); tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition); StartTime=1; EndTime=D0Time+1; D0 = 0; do{ if(D0==1){ for(TimeStep=StartTime;TimeStep<=EndTime;TimeStep ++){ tmp=BirthDeath(TimeStep, headLiving, headDeleted, headTransition); } } D0 = 1; P2P->Empty(); /* headUpper->next = dummyUpper; dummyUpper->next = dummyUpper; for(XCell=0;XCell<=P2P->MaxXCell;XCell++){ for(YCell=0;YCell<=P2P->MaxYCell;YCell++){ headUpperCell[XCell][YCell]->next=dummyUpper; } } */ P2P->UpperLiving[0] = LivingPoints; P2P->UpperLiving[1] = 0; P2P->NoP = 0; i=0; TempPoint = headLiving->next; CHECK(TempPoint, "internal error: TempPoint is null in Sim()"); while(TempPoint!=TempPoint->next){ i++; // TempCell2 = ALLOCATE(struct Point2); // TempCell2->No = TempPoint->No; TempCell2->X = TempPoint->X; TempCell2->Y = TempPoint->Y; TempCell2->InLower[0] = 1; TempCell2->InLower[1] = 0; f1 = (TempPoint->X-P2P->Xmin)/P2P->XCellDim; XCell = int(floor(f1)); CLAMP(XCell, 0, P2P->MaxXCell, "XCell"); f1 = (TempPoint->Y-P2P->Ymin)/P2P->YCellDim; YCell = int(floor(f1)); CLAMP(YCell, 0, P2P->MaxYCell, "YCell"); TempCell2->next = P2P->headCell[XCell][YCell]->next; P2P->headCell[XCell][YCell]->next = TempCell2; TempPoint = TempPoint->next; CHECK(TempPoint, "internal error: TempPoint is null in Sim() loop"); } //P2P->DumpToFile("temp0.dat"); struct Point3 *TempTransition; struct Point *Proposal; TempTransition = headTransition->next; CHECK(TempTransition, "internal error: TempTransition is null in Sim()"); Proposal = headDeleted->next; DDD = GeneratedPoints; for(TimeStep=EndTime;TimeStep>=1;TimeStep--){ R_CheckUserInterrupt(); Forward(TimeStep,TempTransition->Case, TempTransition->XCell,TempTransition->YCell, Proposal,&DDD); if(TempTransition->Case == 1) Proposal = Proposal ->next; TempTransition = TempTransition->next; CHECK(TempTransition, "internal error: TempTransition is null in Sim() loop"); } /* Doubling strategy used!*/ StartTime = EndTime+1; EndTime=EndTime*2; 
//P2P->DumpToFile("temp.dat"); }while(P2P->UpperLiving[0]!=P2P->UpperLiving[1]); P2P->Clean(); i=0; struct Point *TempPoint2; TempPoint = headLiving; TempPoint2 = headLiving->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() position B"); while(TempPoint!=TempPoint->next){ i++; FREE(TempPoint); TempPoint = TempPoint2; TempPoint2 = TempPoint2->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() loop C"); } FREE(TempPoint); i = 0; TempPoint = headDeleted; TempPoint2 = headDeleted->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() position D"); while(TempPoint!=TempPoint->next){ i++; FREE(TempPoint); TempPoint = TempPoint2; TempPoint2 = TempPoint2->next; CHECK(TempPoint2, "internal error: TempPoint2 is null in Sim() loop D"); } FREE(TempPoint); //Rprintf("%d ",i); struct Point3 *TempTransition,*TempTransition2; i = 0; TempTransition = headTransition; TempTransition2 = headTransition->next; CHECK(TempTransition2, "internal error: TempTransition2 is null in Sim() position E"); while(TempTransition!=TempTransition->next){ i++; FREE(TempTransition); TempTransition = TempTransition2; TempTransition2 = TempTransition2->next; CHECK(TempTransition2, "internal error: TempTransition2 is null in Sim() loop F"); } FREE(TempTransition); //Rprintf("%d ST: %d ET: %d\n",i,StartTime,EndTime); //scanf("%f",&f1); *ST = StartTime; *ET = EndTime; } #include "PerfectStrauss.h" #include "PerfectStraussHard.h" #include "PerfectHardcore.h" #include "PerfectDiggleGratton.h" #include "PerfectDGS.h" spatstat/src/areapair.c0000644000176000001440000000360312252324034014663 0ustar ripleyusers/* areapair.c $Revision: 1.6 $ $Date: 2013/09/18 04:11:42 $ Specialised code for the second order conditional intensity of the area-interaction process */ #include #include #include "yesno.h" /* computes area of b(A, r) \int b(B, r) \setminus \bigcup_i b(X[i], r) */ void delta2area(xa, ya, xb, yb, nother, xother, yother, radius, epsilon, pixcount) double 
*xa, *ya, *xb, *yb; int *nother; double *xother, *yother; double *radius, *epsilon; int *pixcount; { int Ni, Nj, Nk, i, j, k, count, covered; double xA, yA, xB, yB, r, eps, r2; double xmin, xmax, ymin, ymax, xi, yj; double dxA, dyA; double dxB, dyB; double dx, dy; Nk = *nother; xA = *xa; yA = *ya; xB = *xb; yB = *yb; r = *radius; eps = *epsilon; r2 = r * r; /* find intersection of squares centred on A and B */ if(xA < xB) { xmin = xB - r; xmax = xA + r; } else { xmin = xA - r; xmax = xB + r; } if(xmin > xmax) return; if(yA < yB) { ymin = yB - r; ymax = yA + r; } else { ymin = yA - r; ymax = yB + r; } if(ymin > ymax) return; /* set up grid */ Ni = (int) ceil((xmax - xmin)/eps) + 1; Nj = (int) ceil((ymax - ymin)/eps) + 1; count = 0; for(i = 0, xi = xmin; i < Ni; i++, xi += eps) { dxA = xi - xA; for(j = 0, yj = ymin; j < Nj; j++, yj += eps) { dyA = yj - yA; if(dxA * dxA + dyA * dyA <= r2) { /* grid point belongs to b(A, r) */ dxB = xi - xB; dyB = yj - yB; if(dxB * dxB + dyB * dyB <= r2) { /* grid point belongs to b(A,r) \cap b(B,r) */ covered = NO; /* test whether it is covered by another b(X[k], r) */ for(k = 0; k < Nk; k++) { dx = xi - xother[k]; dy = yj - yother[k]; if(dx * dx + dy * dy <= r2) { covered = YES; break; } } if(!covered) { ++count; } } } } } *pixcount = count; } spatstat/src/yesno.h0000644000176000001440000000011612252324034014235 0ustar ripleyusers/* yesno.h */ #ifndef YES #define YES (0 == 0) #define NO (!YES) #endif spatstat/src/dist2dpath.c0000755000176000001440000000052712252324034015152 0ustar ripleyusers#include #include /* given matrix of edge lengths compute matrix of shortest-path distances Uses dist2dpath.h */ #define FNAME Ddist2dpath #define DTYPE double #define FLOATY #include "dist2dpath.h" #undef FNAME #undef DTYPE #undef FLOATY #define FNAME Idist2dpath #define DTYPE int #include "dist2dpath.h" spatstat/src/distseg.c0000755000176000001440000001166212252324034014550 0ustar ripleyusers/* distseg.c Distances from point pattern to line 
segment pattern Distance transform of a line segment pattern nndist2segs: minimum distance from point to any line segment prdist2segs: pairwise distances from each point to each line segment $Revision: 1.9 $ $Date: 2012/03/27 05:38:51 $ Author: Adrian Baddeley */ #include #include #include #include #include "chunkloop.h" void nndist2segs(xp, yp, npoints, x0, y0, x1, y1, nsegments, epsilon, dist2, index) /* input */ double *xp, *yp; /* point/pixel coordinates */ int *npoints; double *x0, *y0, *x1, *y1; /* line segment endpoints */ int *nsegments; double *epsilon; /* tolerance for short segments */ /* output */ double *dist2; /* squared distance from pixel to nearest line segment INITIALISED TO LARGE VALUE */ int *index; /* which line segment is closest */ { int i,j, np, nseg, maxchunk; double dx,dy,leng,co,si; /* parameters of segment */ double xdif0,ydif0,xdif1,ydif1,xpr,ypr; /* vectors */ double dsq0,dsq1,dsq,dsqperp; /* squared distances */ double eps; np = *npoints; nseg = *nsegments; eps = *epsilon; OUTERCHUNKLOOP(j, nseg, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nseg, maxchunk, 16384) { dx = x1[j] - x0[j]; dy = y1[j] - y0[j]; leng = hypot(dx, dy); if(leng > eps) { /* normal case */ co = dx/leng; si = dy/leng; for(i = 0; i < np; i++) { /* vectors from pixel to segment endpoints */ xdif0 = xp[i] - x0[j]; ydif0 = yp[i] - y0[j]; xdif1 = xp[i] - x1[j]; ydif1 = yp[i] - y1[j]; /* squared distances to segment endpoints */ dsq0 = xdif0 * xdif0 + ydif0 * ydif0; dsq1 = xdif1 * xdif1 + ydif1 * ydif1; dsq = (dsq0 < dsq1) ? 
dsq0 : dsq1; /* rotate pixel around 1st endpoint of segment so that line segment lies in x axis */ xpr = xdif0 * co + ydif0 * si; ypr = -xdif0 * si + ydif0 * co; /* perpendicular distance applies only in perpendicular region */ if(xpr >= 0.0 && xpr <= leng) { dsqperp = ypr * ypr; if(dsqperp < dsq) dsq = dsqperp; } if(dist2[i] > dsq) { dist2[i] = dsq; index[i] = j; } } } else { /* short segment - use endpoints only */ for(i = 0; i < np; i++) { /* vectors from pixel to segment endpoints */ xdif0 = xp[i] - x0[j]; ydif0 = yp[i] - y0[j]; xdif1 = xp[i] - x1[j]; ydif1 = yp[i] - y1[j]; /* squared distances to segment endpoints */ dsq0 = xdif0 * xdif0 + ydif0 * ydif0; dsq1 = xdif1 * xdif1 + ydif1 * ydif1; dsq = (dsq0 < dsq1) ? dsq0 : dsq1; if(dist2[i] > dsq) { dist2[i] = dsq; index[i] = j; } } } } } } void prdist2segs(xp, yp, npoints, x0, y0, x1, y1, nsegments, epsilon, dist2) /* input */ double *xp, *yp; /* point/pixel coordinates */ int *npoints; double *x0, *y0, *x1, *y1; /* line segment endpoints */ int *nsegments; double *epsilon; /* tolerance for short segments */ /* output */ double *dist2; /* squared distances from each pixel to each line segment */ { int i,j, np, nseg, maxchunk; double dx,dy,leng,co,si; /* parameters of segment */ double xdif0,ydif0,xdif1,ydif1,xpr,ypr; /* vectors */ double dsq0,dsq1,dsq,dsqperp; /* squared distances */ double eps; np = *npoints; nseg = *nsegments; eps = *epsilon; OUTERCHUNKLOOP(j, nseg, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nseg, maxchunk, 16384) { dx = x1[j] - x0[j]; dy = y1[j] - y0[j]; leng = hypot(dx, dy); if(leng > eps) { /* normal case */ co = dx/leng; si = dy/leng; for(i = 0; i < np; i++) { /* vectors from pixel to segment endpoints */ xdif0 = xp[i] - x0[j]; ydif0 = yp[i] - y0[j]; xdif1 = xp[i] - x1[j]; ydif1 = yp[i] - y1[j]; /* squared distances to segment endpoints */ dsq0 = xdif0 * xdif0 + ydif0 * ydif0; dsq1 = xdif1 * xdif1 + ydif1 * ydif1; dsq = (dsq0 < dsq1) ? 
dsq0 : dsq1; /* rotate pixel around 1st endpoint of segment so that line segment lies in x axis */ xpr = xdif0 * co + ydif0 * si; ypr = -xdif0 * si + ydif0 * co; /* perpendicular distance applies only in perpendicular region */ if(xpr >= 0.0 && xpr <= leng) { dsqperp = ypr * ypr; if(dsqperp < dsq) dsq = dsqperp; } dist2[i + j * np] = dsq; } } else { /* short segment */ for(i = 0; i < np; i++) { /* vectors from pixel to segment endpoints */ xdif0 = xp[i] - x0[j]; ydif0 = yp[i] - y0[j]; xdif1 = xp[i] - x1[j]; ydif1 = yp[i] - y1[j]; /* squared distances to segment endpoints */ dsq0 = xdif0 * xdif0 + ydif0 * ydif0; dsq1 = xdif1 * xdif1 + ydif1 * ydif1; dsq = (dsq0 < dsq1) ? dsq0 : dsq1; dist2[i + j * np] = dsq; } } } } } spatstat/src/diggra.c0000755000176000001440000000637012252324034014343 0ustar ripleyusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Diggle-Gratton process */ /* Conditional intensity function for a pairwise interaction point process with interaction function as given by e(t) = 0 for t < delta = (t-delta)/(rho-delta)^kappa for delta <= t < rho = 1 for t >= rho (See page 767 of Diggle, Gates, and Stibbard, Biometrika vol. 74, 1987, pages 763 -- 770.) */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Diggra { double kappa; double delta; double rho; double delta2; /* delta^2 */ double rho2; /* rho^2 */ double fac; /* 1/(rho-delta) */ double *period; int per; } Diggra; /* initialiser function */ Cdata *diggrainit(state, model, algo) State state; Model model; Algor algo; { Diggra *diggra; diggra = (Diggra *) R_alloc(1, sizeof(Diggra)); /* Interpret model parameters*/ diggra->kappa = model.ipar[0]; diggra->delta = model.ipar[1]; diggra->rho = model.ipar[2]; diggra->period = model.period; /* constants */ diggra->delta2 = pow(diggra->delta, 2); diggra->rho2 = pow(diggra->rho, 2); diggra->fac = 1/(diggra->rho - diggra->delta); /* periodic boundary conditions? 
*/ diggra->per = (model.period[0] > 0.0); return((Cdata *) diggra); } /* conditional intensity evaluator */ double diggracif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairprod, cifval; double rho2, delta, delta2, fac; double *period; DECLARE_CLOSE_D2_VARS; Diggra *diggra; diggra = (Diggra *) cdata; period = diggra->period; rho2 = diggra->rho2; delta = diggra->delta; delta2 = diggra->delta2; fac = diggra->fac; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = pairprod = 1.0; if(npts == 0) return(cifval); ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(diggra->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,rho2,d2)) { if(d2 < delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], rho2, d2)) { if(d2 <= delta2) { cifval = 0.0; return(cifval); } else { pairprod *= fac * (sqrt(d2)-delta); } } } } if(ixp1 < npts) { for(j=ixp1; jkappa); return cifval; } Cifns DiggraCifns = { &diggrainit, &diggracif, (updafunptr) NULL, NO}; spatstat/src/mhv3.h0000644000176000001440000000042712252324034013762 0ustar ripleyusers/* mhv3.h tracking or not */ #undef MH_TRACKING if(tracking) { /* saving transition history */ #define MH_TRACKING YES #include "mhv4.h" #undef MH_TRACKING } else { /* not saving transition history */ #define MH_TRACKING NO #include "mhv4.h" #undef MH_TRACKING } spatstat/src/nngrid.c0000644000176000001440000000356212252324034014364 0ustar ripleyusers/* nngrid.c Nearest Neighbour Distances from a pixel grid to a point pattern Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 Licence: GNU Public Licence >= 2 $Revision: 1.4 $ $Date: 2013/11/03 03:41:23 $ Function body definition is #included from nngrid.h THE FOLLOWING 
FUNCTIONS ASSUME THAT x IS SORTED IN ASCENDING ORDER */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT x IS SORTED IN ASCENDING ORDER */ /* general interface */ void nnGinterface(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ wantdist, wantwhich, /* options */ nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; /* options */ int *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void nnGdw(), nnGd(), nnGw(); int di, wh; di = (*wantdist != 0); wh = (*wantwhich != 0); if(di && wh) { nnGdw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge); } else if(di) { nnGd(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge); } else if(wh) { nnGw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, nnd, nnwhich, huge); } } #undef FNAME #undef DIST #undef WHICH /* nnGdw returns distances and indices */ #define FNAME nnGdw #define DIST #define WHICH #include "nngrid.h" #undef FNAME #undef DIST #undef WHICH /* nnGd returns distances only */ #define FNAME nnGd #define DIST #include "nngrid.h" #undef FNAME #undef DIST #undef WHICH /* nnGw returns indices only */ #define FNAME nnGw #define WHICH #include "nngrid.h" #undef FNAME #undef DIST #undef WHICH spatstat/src/discarea.c0000755000176000001440000001502412252324034014655 0ustar ripleyusers/* disc.c Area of intersection between disc and polygonal window $Revision: 1.6 $ $Date: 2011/12/03 00:15:52 $ */ #undef DEBUG #include #include #define MIN(A,B) (((A) < (B)) ? (A) : (B)) #define MAX(A,B) (((A) > (B)) ? 
(A) : (B)) #ifndef PI #define PI 3.1415926535898 #endif void discareapoly(nc, xc, yc, nr, rmat, nseg, x0, y0, x1, y1, eps, out) /* inputs */ int *nc, *nr, *nseg; double *xc, *yc, *rmat; double *x0, *y0, *x1, *y1; double *eps; /* output */ double *out; { int n, m, i, j, k, nradperpt; double radius, radius2, total, contrib; double xx0, xx1, yy0, yy1, xleft, xright, yleft, yright, xcentre, ycentre; double epsilon; double DiscContrib(); n = *nc; nradperpt = *nr; m = *nseg; epsilon = *eps; for(i = 0; i < n; i++) { xcentre = xc[i]; ycentre = yc[i]; #ifdef DEBUG Rprintf("\ni = %d:\n centre = (%lf, %lf)\n", i, xcentre, ycentre); #endif for(j = 0; j < nradperpt; j++) { radius = rmat[ j * n + i]; radius2 = radius * radius; #ifdef DEBUG Rprintf("radius = %lf\n", radius); #endif total = 0.0; for(k=0; k < m; k++) { #ifdef DEBUG Rprintf("k = %d\n", k); #endif xx0 = x0[k]; yy0 = y0[k]; xx1 = x1[k]; yy1 = y1[k]; #ifdef DEBUG Rprintf("(%lf,%lf) to (%lf,%lf)\n", xx0, yy0, xx1, yy1); #endif /* refer to unit disc at origin */ /* arrange so that xleft < xright */ if(radius <= epsilon) contrib = 0.0; else if(xx0 < xx1) { xleft = (xx0 - xcentre)/radius; xright = (xx1 - xcentre)/radius; yleft = (yy0 - ycentre)/radius; yright = (yy1 - ycentre)/radius; contrib = - radius2 * DiscContrib(xleft,yleft,xright,yright,epsilon); } else { xleft = (xx1 - xcentre)/radius; xright = (xx0 - xcentre)/radius; yleft = (yy1 - ycentre)/radius; yright = (yy0 - ycentre)/radius; contrib = radius2 * DiscContrib(xleft,yleft,xright,yright,epsilon); } #ifdef DEBUG Rprintf("contrib = %lf\n contrib/(pi * r^2)=%lf\n", contrib, contrib/(PI * radius2)); #endif total += contrib; } out[ j * n + i] = total; #ifdef DEBUG Rprintf("total = %lf\ntotal/(pi * r^2) = %lf\n", total, total/(PI * radius2)); #endif } } } /* area of intersection of unit disc with halfplane x <= v */ #ifdef DEBUG #define TRIGBIT(V) trigbit(V) double trigbit(v) double v; { double zero, result; zero = 0.0; if(v < -1.0) return(zero); if(v > 1.0) 
return(PI); result = PI/2 + asin(v) + v * sqrt(1 - v * v); Rprintf("trigbit: v = %lf, asin(v)=%lf, result=%lf\n", v, asin(v), result); return(result); } #else #define TRIGBIT(V) (((V) <= -1.0) ? 0.0 : (((V) >= 1.0) ? PI : \ (PI/2 + asin(V) + (V) * sqrt(1 - (V) * (V))))) #endif /* Find the area of intersection between a disc centre = (0,0), radius = 1 and the trapezium with upper segment (xleft, yleft) to (xright, yright) ASSUMES xleft < xright */ double DiscContrib(xleft, yleft, xright, yright, eps) double xleft, yleft, xright, yright, eps; /* NOTE: unit disc centred at origin */ { double xlo, xhi, zero, slope, intercept, A, B, C, det; double xcut1, xcut2, ycut1, ycut2, xunder1, xunder2, dx, dx2, result; #ifdef DEBUG double increm; Rprintf( "DiscContrib: xleft=%lf, yleft=%lf, xright=%lf, yright=%lf\n", xleft, yleft, xright, yright); #endif zero = 0.0; /* determine relevant range of x coordinates */ xlo = MAX(xleft, (-1.0)); xhi = MIN(xright, 1.0); if(xlo >= xhi - eps) { /* intersection is empty or negligible */ #ifdef DEBUG Rprintf("intersection is empty or negligible\n"); #endif return(zero); } /* find intersection points between the circle and the line containing upper segment */ slope = (yright - yleft)/(xright - xleft); intercept = yleft - slope * xleft; A = 1 + slope * slope; B = 2 * slope * intercept; C = intercept * intercept - 1.0; det = B * B - 4 * A * C; #ifdef DEBUG Rprintf("slope=%lf, intercept=%lf\nA = %lf, B=%lf, C=%lf, det=%lf\n", slope, intercept, A, B, C, det); #endif if(det <= 0.0) { /* no crossing between disc and infinite line */ if(intercept < 0.0) /* segment is below disc; intersection is empty */ return(zero); /* segment is above disc */ result = TRIGBIT(xhi) - TRIGBIT(xlo); return(result); } xcut1 = (- B - sqrt(det))/(2 * A); xcut2 = (- B + sqrt(det))/(2 * A); /* partition [xlo, xhi] into pieces delimited by {xcut1, xcut2} */ if(xcut1 >= xhi || xcut2 <= xlo) { /* segment is outside disc */ if(yleft < 0.0) { #ifdef DEBUG Rprintf("segment is 
beneath disc\n"); #endif result = zero; } else { #ifdef DEBUG Rprintf("segment is above disc\n"); #endif result = TRIGBIT(xhi) - TRIGBIT(xlo); } return(result); } /* possibly three parts */ #ifdef DEBUG Rprintf("up to three pieces\n"); #endif result = zero; ycut1 = intercept + slope * xcut1; ycut2 = intercept + slope * xcut2; if(xcut1 > xlo) { /* part to left of cut */ #ifdef DEBUG Rprintf("left of cut: [%lf, %lf]\n", xlo, xcut1); if(ycut1 < 0.0) Rprintf("below disc - no intersection\n"); else { increm = TRIGBIT(xcut1) - TRIGBIT(xlo); Rprintf("increment = %lf\n", increm); result += increm; } #else if(ycut1 >= 0.0) result += TRIGBIT(xcut1) - TRIGBIT(xlo); #endif } if(xcut2 < xhi) { /* part to right of cut */ #ifdef DEBUG Rprintf("right of cut: [%lf, %lf]\n", xcut2, xhi); if(ycut2 < 0.0) Rprintf("below disc - no intersection\n"); else { increm = TRIGBIT(xhi) - TRIGBIT(xcut2); Rprintf("increment = %lf\n", increm); result += increm; } #else if(ycut2 >= 0.0) result += TRIGBIT(xhi) - TRIGBIT(xcut2); #endif } /* part underneath cut */ xunder1 = MAX(xlo, xcut1); xunder2 = MIN(xhi, xcut2); dx = xunder2 - xunder1; dx2 = xunder2 * xunder2 - xunder1 * xunder1; #ifdef DEBUG Rprintf("underneath cut: [%lf, %lf]\n", xunder1, xunder2); increm = intercept * dx + slope * dx2/2 + (TRIGBIT(xunder2) - TRIGBIT(xunder1))/2; Rprintf("increment = %lf\n", increm); result += increm; #else result += intercept * dx + slope * dx2/2 + (TRIGBIT(xunder2) - TRIGBIT(xunder1))/2; #endif return(result); } #ifdef DEBUG /* interface to low level function, for debugging only */ void RDCtest(xleft, yleft, xright, yright, eps, value) double *xleft, *yleft, *xright, *yright, *eps, *value; { double DiscContrib(); *value = DiscContrib(*xleft, *yleft, *xright, *yright, *eps); } #endif spatstat/src/chunkloop.h0000644000176000001440000000144412252324034015107 0ustar ripleyusers/* chunkloop.h Divide a loop into chunks Convenient for divide-and-recombine, and reducing calls to R_CheckUserInterrupt, etc. 
$Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ */ #define OUTERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \ IVAR = 0; \ ICHUNK = 0; \ while(IVAR < LOOPLENGTH) #define INNERCHUNKLOOP(IVAR, LOOPLENGTH, ICHUNK, CHUNKSIZE) \ ICHUNK += CHUNKSIZE; \ if(ICHUNK > LOOPLENGTH) ICHUNK = LOOPLENGTH; \ for(; IVAR < ICHUNK; IVAR++) #define XOUTERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \ IVAR = ISTART; \ ICHUNK = 0; \ while(IVAR <= IEND) #define XINNERCHUNKLOOP(IVAR, ISTART, IEND, ICHUNK, CHUNKSIZE) \ ICHUNK += CHUNKSIZE; \ if(ICHUNK > IEND) ICHUNK = IEND; \ for(; IVAR <= IEND; IVAR++) #define CHUNKLOOP_H spatstat/src/scan.c0000644000176000001440000000365412252324034014031 0ustar ripleyusers/* scan.c Scan transform $Revision: 1.2 $ $Date: 2012/04/16 12:00:07 $ */ #include #include #include "raster.h" void shape_raster(); void Cscantrans(x, y, npt, R, out) double *x, *y; /* data points */ int npt; double R; /* radius */ Raster *out; /* scan image */ { int i,j,k,l,m; double d2, R2; int rmin, rmax, cmin, cmax, Rrow, Rcol, lmin, lmax, mmin, mmax; /* initialise raster */ Clear(*out,int,0); /* If the list of data points is empty, ... 
exit now */ if(npt == 0) return; R2 = R * R; cmin = out->cmin; cmax = out->cmax; rmin = out->rmin; rmax = out->rmax; /* disc size in rows/columns */ Rrow = (int) ceil(R/(out->ystep)); Rcol = (int) ceil(R/(out->xstep)); if(Rrow < 1) Rrow = 1; if(Rcol < 1) Rcol = 1; /* run through points */ for(i = 0; i < npt; i++) { j = RowIndex(*out,y[i]); k = ColIndex(*out,x[i]); lmin = j - Rrow; if(lmin < rmin) lmin = rmin; lmax = j + Rrow; if(lmax > rmax) lmax = rmax; mmin = k - Rcol; if(mmin < cmin) mmin = cmin; mmax = k + Rcol; if(mmax > cmax) mmax = cmax; for(l = lmin; l <= lmax; l++) { for(m = mmin; m <= mmax; m++) { d2 = DistanceToSquared(x[i],y[i],*out,l,m); if(d2 <= R2) Entry(*out,l,m,int) += 1; } } } } /* R interface */ void scantrans(x, y, n, xmin, ymin, xmax, ymax, nr, nc, R, counts) double *x, *y; /* input data points */ int *n; double *xmin, *ymin, *xmax, *ymax; /* guaranteed bounding box */ int *nr, *nc; /* desired raster dimensions */ double *R; /* radius */ /* output array */ int *counts; /* number of R-close points */ { Raster out; int nrow, ncol, npoints; double r; nrow = *nr; ncol = *nc; npoints = *n; r = *R; shape_raster( &out, (void *) counts, *xmin,*ymin,*xmax,*ymax, nrow, ncol, 0, 0); Cscantrans(x, y, npoints, r, &out); } spatstat/src/PerfectStrauss.h0000644000176000001440000002151612252324034016064 0ustar ripleyusers // ........................... Strauss process .......................... 
// $Revision: 1.3 $ $Date: 2012/03/10 11:52:48 $ class StraussProcess : public PointProcess { public: double beta, gamma, R, Rsquared; StraussProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri); ~StraussProcess(){} void NewEvent(double *x, double *y, char *InWindow); void GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP); double Interaction(double dsquared); // void CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // void CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm); // double lnCondInt(struct Point2 *TempCell, Point2Pattern *p2p); // void Beta(struct Point2 *TempCell); // void CalcBeta(Point2Pattern *p2p); }; StraussProcess::StraussProcess(double xmin, double xmax, double ymin, double ymax, double b, double g, double Ri) : PointProcess(xmin, xmax, ymin, ymax){ beta = b; gamma = g; R = Ri; Rsquared = R * R; InteractionRange = R; TotalBirthRate = beta*(xmax-xmin)*(ymax-ymin); } double StraussProcess::Interaction(double dsquared) { double rtn; rtn = 1; if(dsquared < Rsquared) rtn = gamma; return(rtn); } void StraussProcess::NewEvent(double *x, double *y, char *InWindow) { double Xdim, Ydim; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; *x = slumptal()*Xdim+Xmin; *y = slumptal()*Ydim+Ymin; *InWindow = 1; } void StraussProcess::GeneratePoisson(Point *headPoint, long int *GeneratedPoints, long int *LivingPoints, long int *NoP) { int i; double xtemp, ytemp, L, Xdim, Ydim; struct Point *TempPoint; Xdim = Xmax-Xmin; Ydim = Ymax-Ymin; L = beta*Xdim*Ydim; *GeneratedPoints = poisson(L); *LivingPoints = *GeneratedPoints; for (i=1; i<=*GeneratedPoints ; i++){ //Rprintf("Generating StraussProcess Poisson 3\n"); //scanf("%f",&f1); xtemp = slumptal()*Xdim+Xmin; ytemp = slumptal()*Ydim+Ymin; // //Rprintf("Generating StraussProcess Poisson 3.2\n"); TempPoint = ALLOCATE(struct Point); // TempPoint->X = xtemp; TempPoint->Y = ytemp; TempPoint->No = i; 
TempPoint->R = slumptal(); //Rprintf("Generating StraussProcess Poisson 3.6\n"); TempPoint->next = headPoint->next; headPoint->next = TempPoint; *NoP = *NoP + 1; } } //void StraussProcess::CalcBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ // long int i,j,k; // k=0; // // Rprintf("\ndiagnostic message: Strauss CalcBeta... %ld %ld\n",xsidepomm,ysidepomm); // for(i=0; ibeta; // k++; // } // } //} //void StraussProcess::CheckBeta(long int xsidepomm, long int ysidepomm, // double *betapomm){ // long int i,j,k; // // double d1; // k=0; // // Rprintf("\ndiagnostic message: Strauss CalcBeta... %ld %ld\n",xsidepomm,ysidepomm); // for(i=0; i0.001) && (k==0)){ // Rprintf("%f %f %f %ld %ld\n",fabs(*(betapomm + i*ysidepomm + j)- beta), // *(betapomm + i*ysidepomm + j),beta,i,j); // k++; // // scanf("%lf",&d1); // } // } // } //} //double StraussProcess::lnCondInt(struct Point2 *TempCell, // Point2Pattern *p2p){ // double f1; // long int xco,yco,xc,yc,fx,tx,fy,ty,ry,rx,k; // double dy,dx, lnCI,dst2; // struct Point2 *TempCell2; // // f1 = (TempCell->X-p2p->Xmin)/p2p->XCellDim; xc = int(f1); // CLAMP(xc, 0, p2p->MaxXCell, "xc"); // f1 = (TempCell->Y-p2p->Ymin)/p2p->YCellDim; yc = int(f1); // CLAMP(yc, 0, p2p->MaxYCell, "yc"); // // dx = (Xmax-Xmin)/(double(p2p->MaxXCell+1)); // dy = (Ymax-Ymin)/(double(p2p->MaxYCell+1)); // rx = int(this->InteractionRange/dx+1.0); // ry = int(this->InteractionRange/dy+1.0); // // lnCI = log(TempCell->Beta); // // k = 0; // // if((xc+rx)<=p2p->MaxXCell) tx=xc+rx; else tx = p2p->MaxXCell; // if((yc+ry)<=p2p->MaxYCell) ty=yc+ry; else ty = p2p->MaxYCell; // if((xc-rx)>=0) fx=xc-rx; else fx = 0; // if((yc-ry)>=0) fy=yc-ry; else fy = 0; // // //Rprintf("MCI! 
%d %d %d %d\n",fx,tx,fy,ty); // // for(xco = fx; xco <= tx; xco++){ // for(yco = fy; yco <= ty; yco++){ // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in lnCondInt()"); // TempCell2 = p2p->headCell[xco][yco]->next; // CHECK(TempCell2, "internal error: TempCell2 is null in lnCondInt()"); // while(TempCell2!=TempCell2->next){ // if(TempCell2 != TempCell){ // k++; // dst2 = pow(TempCell->X-TempCell2->X,2)+ // pow(TempCell->Y-TempCell2->Y,2); // lnCI += log(Interaction(dst2)); // } // TempCell2 = TempCell2->next; // CHECK(TempCell2, // "internal error: TempCell2 is null in lnCondInt() loop"); // } // } // } // return(lnCI); //} //void StraussProcess::Beta(struct Point2 *TempCell){ // TempCell->Beta = beta; //} //void StraussProcess::CalcBeta(Point2Pattern *p2p){ // long int xco,yco; // // double dy,dx; // struct Point2 *TempMother; // // for(xco = 0; xco <= p2p->MaxXCell; xco++){ // for(yco = 0; yco <= p2p->MaxYCell; yco++){ // CHECK(p2p->headCell[xco][yco], // "internal error: p2p->headCell[xco][yco] is null in CalcBeta()"); // TempMother = p2p->headCell[xco][yco]->next; // CHECK(TempMother, "internal error: TempMother is null in CalcBeta()"); // while(TempMother!=TempMother->next){ // TempMother->Beta = this->beta; // TempMother = TempMother->next; // CHECK(TempMother, // "internal error: TempMother is null in CalcBeta() loop"); // } // } // } //} // ........................... Interface to R .......................... 
extern "C" { SEXP PerfectStrauss(SEXP beta, SEXP gamma, SEXP r, SEXP xrange, SEXP yrange) { // input parameters double Beta, Gamma, R, Xmin, Xmax, Ymin, Ymax; double *Xrange, *Yrange; // internal int xcells, ycells; PointProcess *TheProcess; long int EndTime, StartTime; // output int noutmax; SEXP xout, yout, nout, out; double *xx, *yy; int *nn; SEXP stout, etout; int *ss, *ee; // protect arguments from garbage collector PROTECT(beta = AS_NUMERIC(beta)); PROTECT(gamma = AS_NUMERIC(gamma)); PROTECT(r = AS_NUMERIC(r)); PROTECT(xrange = AS_NUMERIC(xrange)); PROTECT(yrange = AS_NUMERIC(yrange)); // that's 5 protected objects // extract arguments Beta = *(NUMERIC_POINTER(beta)); Gamma = *(NUMERIC_POINTER(gamma)); R = *(NUMERIC_POINTER(r)); Xrange = NUMERIC_POINTER(xrange); Xmin = Xrange[0]; Xmax = Xrange[1]; Yrange = NUMERIC_POINTER(yrange); Ymin = Yrange[0]; Ymax = Yrange[1]; // compute cell array size xcells = (int) floor((Xmax-Xmin)/ R); if(xcells > 9) xcells = 9; if(xcells < 1) xcells = 1; ycells = (int) floor((Ymax-Ymin)/ R); if(ycells > 9) ycells = 9; if(ycells < 1) ycells = 1; #ifdef DBGS Rprintf("xcells %d ycells %d\n",xcells,ycells); Rprintf("Initialising\n"); #endif // Initialise Strauss point process StraussProcess ExampleProcess(Xmin,Xmax,Ymin,Ymax, Beta, Gamma, R); // Initialise point pattern Point2Pattern ExamplePattern(Xmin,Xmax,Ymin,Ymax, xcells, ycells); // parameters: min x, max x, min y, max y, "cells" in x and y direction // used for speeding up neighbour counting, 9 is max here #ifdef DBGS Rprintf("Initialisation complete\n"); #endif // Synchronise random number generator GetRNGstate(); // Initialise perfect sampler Sampler PerfectSampler(&ExampleProcess); // Perform perfect sampling PerfectSampler.Sim(&ExamplePattern, &StartTime, &EndTime); // Synchronise random number generator PutRNGstate(); // Get upper estimate of number of points noutmax = ExamplePattern.UpperCount() + 1; // Allocate space for output PROTECT(xout = NEW_NUMERIC(noutmax)); 
PROTECT(yout = NEW_NUMERIC(noutmax)); PROTECT(nout = NEW_INTEGER(1)); PROTECT(stout = NEW_INTEGER(1)); PROTECT(etout = NEW_INTEGER(1)); xx = NUMERIC_POINTER(xout); yy = NUMERIC_POINTER(yout); nn = INTEGER_POINTER(nout); ss = INTEGER_POINTER(stout); ee = INTEGER_POINTER(etout); // copy data into output storage ExamplePattern.Return(xx, yy, nn, noutmax); *ss = StartTime; *ee = EndTime; // pack up into output list PROTECT(out = NEW_LIST(5)); SET_VECTOR_ELT(out, 0, xout); SET_VECTOR_ELT(out, 1, yout); SET_VECTOR_ELT(out, 2, nout); SET_VECTOR_ELT(out, 3, stout); SET_VECTOR_ELT(out, 4, etout); // return UNPROTECT(11); // 5 arguments plus xout, yout, nout, stout, etout, out return(out); } } spatstat/src/pcf3.c0000755000176000001440000001201112252324034013726 0ustar ripleyusers#include #include #include #include #include "geom3.h" #include "functable.h" #include "chunkloop.h" #include "constants.h" /* $Revision: 1.7 $ $Date: 2012/03/27 05:01:41 $ pair correlation function of 3D point pattern (Epanechnikov kernel) pcf3trans translation correction pcf3isot isotropic correction */ #define FOURPI (2.0 * M_2PI) void pcf3trans(p, n, b, pcf, delta) Point *p; int n; Box *b; Ftable *pcf; double delta; { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; register double vx, vy, vz, tval; Point *ip, *jp; double dt, vol, lambda, denom; double coef, twocoef, frac, invweight, kernel; double sphesfrac(), sphevol(); /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); lambda = ((double) n )/ vol; denom = lambda * lambda; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; (pcf->num)[l] = 0.0; } /* spacing of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { /* compute pairwise distance */ jp = p + j; dx = jp->x - ip->x; dy = 
jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute (inverse) edge correction weight */ vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx); vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy); vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz); invweight = vx * vy * vz * FOURPI * dist * dist; if(invweight > 0.0) { for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel / invweight; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* multiplied by 2 because we only visited i < j pairs */ twocoef = 2.0 * coef; /* normalise kernel and compute ratio estimate */ for(l = 0; l < pcf->n; l++) { (pcf->num)[l] *= twocoef; (pcf->f)[l] = ((pcf->denom)[l] > 0.0) ? 
(pcf->num)[l] / (pcf->denom)[l] : 0.0; } } void pcf3isot(p, n, b, pcf, delta) Point *p; int n; Box *b; Ftable *pcf; double delta; { register int i, j, l, lmin, lmax, maxchunk; register double dx, dy, dz, dist; Point *ip, *jp; double dt, vol, denom, mass, tval; double coef, frac, kernel; double sphesfrac(), sphevol(); Point vertex; Box half; /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); denom = ((double) (n * n))/vol; for(l = 0; l < pcf->n; l++) { (pcf->denom)[l] = denom; (pcf->num)[l] = 0.0; } /* spacing of argument in result vector */ dt = (pcf->t1 - pcf->t0)/(pcf->n - 1); /* set up for volume correction */ vertex.x = b->x0; vertex.y = b->y0; vertex.z = b->z0; half.x1 = b->x1; half.y1 = b->y1; half.z1 = b->z1; half.x0 = (b->x0 + b->x1)/2.0; half.y0 = (b->y0 + b->y1)/2.0; half.z0 = (b->z0 + b->z1)/2.0; /* compute numerator */ OUTERCHUNKLOOP(i, n, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, n, maxchunk, 8196) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( ((dist - delta) - pcf->t0) / dt ); lmax = floor( ((dist + delta) - pcf->t0) / dt ); if(lmax >= 0 && lmin < pcf->n) { /* kernel centred at 'dist' has nonempty intersection with specified range of t values */ /* compute intersection */ if(lmin < 0) lmin = 0; if(lmax >= pcf->n) lmax = pcf->n - 1; /* compute edge correction weight */ mass = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist)); mass *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol; if(mass > 0.0) { mass /= FOURPI * dist * dist; for(l = lmin; l < pcf->n; l++) { tval = pcf->t0 + l * dt; /* unnormalised Epanechnikov kernel with halfwidth delta */ frac = (dist - tval)/delta; kernel = (1 - frac * frac); if(kernel > 0) (pcf->num)[l] += kernel * mass; } } } } } } /* constant factor in kernel */ coef = 3.0/(4.0 * delta); /* normalise kernel and compute 
ratio estimate */ for(l = 0; l < pcf->n; l++) { (pcf->num)[l] *= coef; (pcf->f)[l] = ((pcf->denom)[l] > 0.0)? (pcf->num)[l] / (pcf->denom)[l] : 0.0; } } spatstat/src/straush.c0000755000176000001440000000573512252324034014603 0ustar ripleyusers#include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Hard core Strauss process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct StraussHard { double gamma; double r; /* interaction distance */ double h; /* hard core distance */ double loggamma; double r2; double h2; double r2h2; /* r^2 - h^2 */ double *period; int hard; int per; } StraussHard; /* initialiser function */ Cdata *straushinit(state, model, algo) State state; Model model; Algor algo; { StraussHard *strausshard; strausshard = (StraussHard *) R_alloc(1, sizeof(StraussHard)); /* Interpret model parameters*/ strausshard->gamma = model.ipar[0]; strausshard->r = model.ipar[1]; /* No longer passed as r^2 */ strausshard->h = model.ipar[2]; /* No longer passed as h^2 */ strausshard->r2 = pow(strausshard->r, 2); strausshard->h2 = pow(strausshard->h, 2); strausshard->r2h2 = strausshard->r2 - strausshard->h2; strausshard->period = model.period; /* is the interaction numerically equivalent to hard core ? */ strausshard->hard = (strausshard->gamma < DOUBLE_EPS); strausshard->loggamma = (strausshard->hard) ? 0 : log(strausshard->gamma); /* periodic boundary conditions? 
*/ strausshard->per = (model.period[0] > 0.0); return((Cdata *) strausshard); } /* conditional intensity evaluator */ double straushcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, kount, ix, ixp1, j; double *x, *y; double u, v; double r2, r2h2, cifval; StraussHard *strausshard; double *period; DECLARE_CLOSE_VARS; strausshard = (StraussHard *) cdata; r2 = strausshard->r2; r2h2 = strausshard->r2h2; period = strausshard->period; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; if(npts == 0) return((double) 1.0); kount = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(strausshard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC(u,v,x[j],y[j],period,r2)) { /* RESIDUE = r2 - distance^2 */ if(RESIDUE > r2h2) return(0.0); ++kount; } } } if(ixp1 < npts) { for(j=ixp1; j r2h2) return(0.0); ++kount; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE(u,v,x[j],y[j],r2)) { if(RESIDUE > r2h2) return(0.0); ++kount; } } } if(ixp1 < npts) { for(j=ixp1; j r2h2) return(0.0); ++kount; } } } } if(strausshard->hard) { if(kount > 0) cifval = 0.0; else cifval = 1.0; } else cifval = exp(strausshard->loggamma*kount); return cifval; } Cifns StraussHardCifns = { &straushinit, &straushcif, (updafunptr) NULL, NO}; spatstat/src/badgey.c0000755000176000001440000003136512252324034014343 0ustar ripleyusers#include #include #include #include "methas.h" #include "dist2.h" /* To get debug output, insert the line: #define DEBUG 1 */ void fexitc(const char *msg); /* Conditional intensity function for a multiscale saturation process. parameter vector: ipar[0] = ndisc ipar[1] = gamma[0] ipar[2] = r[0] ipar[3] = s[0] ... 
*/ typedef struct BadGey { /* model parameters */ int ndisc; double *gamma; double *r; double *s; /* transformations of the parameters */ double *r2; double *loggamma; int *hard; /* periodic distance */ double *period; int per; /* auxiliary counts */ int *aux; /* matrix[ndisc, npmax]: neighbour counts in current state */ int *tee; /* vector[ndisc] : neighbour count at point in question */ double *w; /* vector[ndisc] : sum of changes in counts at other points */ } BadGey; Cdata *badgeyinit(state, model, algo) State state; Model model; Algor algo; { int i, j, k, i0, ndisc, nmatrix; double r, g, d2; BadGey *badgey; /* create storage */ badgey = (BadGey *) R_alloc(1, sizeof(BadGey)); badgey->ndisc = ndisc = model.ipar[0]; /* Allocate space for parameter vectors */ badgey->gamma = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->r = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->s = (double *) R_alloc((size_t) ndisc, sizeof(double)); /* Derived values */ badgey->r2 = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->loggamma = (double *) R_alloc((size_t) ndisc, sizeof(double)); badgey->hard = (int *) R_alloc((size_t) ndisc, sizeof(int)); /* copy and transform parameters */ for(i=0; i < ndisc; i++) { i0 = 3*i + 1; g = badgey->gamma[i] = model.ipar[i0]; r = badgey->r[i] = model.ipar[i0 + 1]; badgey->s[i] = model.ipar[i0 + 2]; badgey->r2[i] = r * r; badgey->hard[i] = (g < DOUBLE_EPS); badgey->loggamma[i] = (g < DOUBLE_EPS) ? 0 : log(g); } /* periodic boundary conditions? 
*/ badgey->period = model.period; badgey->per = (model.period[0] > 0.0); /* Allocate scratch space */ badgey->tee = (int *) R_alloc((size_t) ndisc, sizeof(int)); badgey->w = (double *) R_alloc((size_t) ndisc, sizeof(double)); /* Allocate space for auxiliary counts */ nmatrix = ndisc * state.npmax; badgey->aux = (int *) R_alloc((size_t) nmatrix, sizeof(int)); /* Initialise auxiliary counts */ for(i = 0; i < nmatrix; i++) badgey->aux[i] = 0; for(i = 0; i < state.npts; i++) { for(j = 0; j < state.npts; j++) { if(j == i) continue; d2 = dist2either(state.x[i], state.y[i], state.x[j], state.y[j], badgey->period); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) MAT(badgey->aux, k, i, ndisc) += 1; } } } #ifdef DEBUG Rprintf("Finished initialiser; ndisc=%d\n", ndisc); #endif return((Cdata *) badgey); } #define AUX(I,J) MAT(aux, I, J, ndisc) double badgeycif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int ix, j, k, npts, ndisc, tk; double u, v, d2; double a, dd2, b, f, r2, s, cifval; double *x, *y; int *tee, *aux; double *w; BadGey *badgey; badgey = (BadGey *) cdata; #ifdef DEBUG Rprintf("Entering badgeycif\n"); #endif npts = state.npts; cifval = 1.0; if(npts==0) return cifval; x = state.x; y = state.y; u = prop.u; v = prop.v; ix = prop.ix; ndisc = badgey->ndisc; tee = badgey->tee; aux = badgey->aux; w = badgey->w; /* For disc k, tee[k] = neighbour count at the point in question; w[k] = sum of changes in (saturated) neighbour counts at other points */ if(prop.itype == BIRTH) { /* compute tee[k] and w[k] from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = 0; w[k] = 0.0; } if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { tee[k]++; f = badgey->s[k] - AUX(k,j); if(f > 1) /* j is not saturated after addition of (u,v) */ w[k] += 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w[k] += f; } } } } else { /* Euclidean distance */ 
for(j=0; jr2[k]) { tee[k]++; f = badgey->s[k] - AUX(k,j); if(f > 1) /* j is not saturated after addition of (u,v) */ w[k] += 1; /* addition of (u,v) increases count by 1 */ else if(f > 0) /* j becomes saturated by addition of (u,v) */ w[k] += f; } } } } } else if(prop.itype == DEATH) { /* extract current auxiliary counts for point ix */ /* compute w[k] from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = AUX(k,ix); w[k] = 0.0; } /* compute change in counts for other points */ if(badgey->per) { /* Periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { f = badgey->s[k] - AUX(k,j); if(f > 0) /* j is not saturated */ w[k] += 1; /* deletion of 'ix' decreases count by 1 */ else { f += 1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w[k] += f; } } } } } } else { /* Euclidean distance */ for(j=0; jr2[k]) { f = badgey->s[k] - AUX(k,j); if(f > 0) /* j is not saturated */ w[k] += 1; /* deletion of 'ix' decreases count by 1 */ else { f += 1; if(f > 0) { /* j is not saturated after deletion of 'ix' (s must be fractional) */ w[k] += f; } } } } } } } else if(prop.itype == SHIFT) { /* compute auxiliary counts from scratch */ for(k = 0; k < ndisc; k++) { tee[k] = 0; w[k] = 0.0; } /* Compute the cif at the new point, not the ratio of new/old */ if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { r2 = badgey->r2[k]; if(d2 < r2) { /* shifted point is a neighbour of point j */ tee[k]++; a = AUX(k,j); s = badgey->s[k]; /* Adjust */ dd2 = dist2(x[ix],y[ix], x[j],y[j],badgey->period); if(dd2 < r2) a -= 1; b = a + 1; /* b is the number of neighbours of point j in new state */ if(a < s && s < b) { w[k] += s - a; /* s is fractional and j is saturated */ } else if(s >= b) w[k] += 1; } } } } else { /* Euclidean distance */ for(j=0; jr2[k]; if(d2 < r2) { /* shifted point is a neighbour of point j */ tee[k]++; a = AUX(k,j); s = badgey->s[k]; /* Adjust */ dd2 = pow(x[ix] - x[j], 2) 
+ pow(y[ix] - y[j], 2); if(dd2 < r2) a -= 1; b = a + 1; /* b is the number of neighbours of point j in new state */ if(a < s && s < b) { w[k] += s - a; /* s is fractional and j is saturated */ } else if(s >= b) w[k] += 1; } } } } } #ifdef DEBUG Rprintf("ndisc=%d\n", ndisc); #endif /* compute total change in saturated count */ for(k = 0; k < ndisc; k++) { s = badgey->s[k]; tk = tee[k]; w[k] += ((tk < s) ? tk : s); #ifdef DEBUG Rprintf("s[%d]=%lf, t[%d]=%d, w[%d]=%lf\n", k, s, k, tk, k, w[k]); #endif } /* evaluate cif */ for(k = 0; k < ndisc; k++) { if(badgey->hard[k]) { if(tee[k] > 0) return(0.0); /* else cifval multiplied by 0^0 = 1 */ } else cifval *= exp(badgey->loggamma[k] * w[k]); } return cifval; } void badgeyupd(state, prop, cdata) State state; Propo prop; Cdata *cdata; { /* Declare other variables */ int ix, npts, ndisc, j, k; double u, v, xix, yix, r2, d2, d2old, d2new; double *x, *y; int *aux; BadGey *badgey; badgey = (BadGey *) cdata; aux = badgey->aux; /* 'state' is current state before transition */ x = state.x; y = state.y; npts = state.npts; ndisc = badgey->ndisc; #ifdef DEBUG Rprintf("start update ---- \n"); for(j=0; j < npts; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif if(prop.itype == BIRTH) { #ifdef DEBUG Rprintf("Update for birth ---- \n"); #endif /* Birth */ u = prop.u; v = prop.v; /* initialise auxiliary counters for new point x[npts], y[npts] */ for(k = 0; k < ndisc; k++) AUX(k, npts) = 0; /* update all auxiliary counters */ if(badgey->per) { /* periodic distance */ for(j=0; j < npts; j++) { d2 = dist2(u,v,x[j],y[j],badgey->period); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { AUX(k, j) += 1; AUX(k, npts) += 1; } } } } else { /* Euclidean distance */ for(j=0; j < npts; j++) { d2 = pow(u - x[j], 2) + pow(v - y[j], 2); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { AUX( k, j) += 1; AUX( k, npts) += 1; } } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j <= npts; 
j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } if(prop.itype == DEATH) { /* Death */ ix = prop.ix; u = x[ix]; v = y[ix]; #ifdef DEBUG Rprintf("--- Update for death of point %d = (%lf,%lf) ---- \n", ix, u, v); #endif /* Decrement auxiliary counter for each neighbour of deleted point, and remove entry corresponding to deleted point */ if(badgey->per) { /* periodic distance */ for(j=0; jperiod); for(k = 0; k < ndisc; k++) { if(d2 < badgey->r2[k]) { if(j < ix) AUX(k,j) -= 1; else AUX(k,j-1) = AUX(k,j) - 1; } else if(j >= ix) AUX(k,j-1) = AUX(k,j); } } } else { /* Euclidean distance */ for(j=0; jr2[k]) { #ifdef DEBUG Rprintf("hit for point %d with radius r[%d]\n", j, k); #endif if(j < ix) AUX(k,j) -= 1; else AUX(k,j-1) = AUX(k,j) - 1; } else if(j >= ix) AUX(k,j-1) = AUX(k,j); } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j < npts-1; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } if(prop.itype == SHIFT) { #ifdef DEBUG Rprintf("Update for shift ---- \n"); #endif /* Shift */ u = prop.u; v = prop.v; ix = prop.ix; xix = x[ix]; yix = y[ix]; /* recompute all auxiliary counters for point ix */ for(k = 0; k < ndisc; k++) AUX(k,ix) = 0; if(badgey->per) { for(j=0; jperiod); d2old = dist2(xix,yix,x[j],y[j],badgey->period); for(k = 0; k < ndisc; k++) { r2 = badgey->r2[k]; if(d2old >= r2 && d2new >= r2) continue; if(d2new < r2) { /* increment neighbour count for new point */ AUX(k,ix) += 1; if(d2old >= r2) AUX(k,j) += 1; /* point j gains a new neighbour */ } else if(d2old < r2) AUX(k,j) -= 1; /* point j loses a neighbour */ } } } else { /* Euclidean distance */ for(j=0; jr2[k]; if(d2old >= r2 && d2new >= r2) continue; if(d2new < r2) { #ifdef DEBUG Rprintf("shifted point is close to j=%d\n", j); #endif /* increment neighbour count for new point */ AUX(k,ix) += 1; if(d2old >= r2) { #ifdef DEBUG Rprintf("\t(previous position was not)\n"); #endif 
AUX(k,j) += 1; /* point j gains a new neighbour */ } } else if(d2old < r2) { #ifdef DEBUG Rprintf("previous position was close to j=%d, shifted point is not\n", j); #endif AUX(k,j) -= 1; /* point j loses a neighbour */ } } } } #ifdef DEBUG Rprintf("end update ---- \n"); for(j=0; j < npts; j++) { for(k=0; k < ndisc; k++) Rprintf("aux[%d,%d]=%d\t", k, j, AUX(k,j)); Rprintf("\n"); } #endif return; } fexitc("Unrecognised transition type; bailing out.\n"); } Cifns BadGeyCifns = { &badgeyinit, &badgeycif, &badgeyupd, NO}; spatstat/src/mhsnoop.h0000644000176000001440000000050012252324034014560 0ustar ripleyusers/* Function declarations from mhsnoop.c $Revision: 1.4 $ $Date: 2013/05/27 02:09:10 $ */ #include "mhsnoopdef.h" void initmhsnoop(Snoop *s, SEXP env); void mhsnoop(Snoop *s, int irep, Algor *algo, State *state, Propo *prop, double numer, double denom, int *itype); spatstat/src/sumsymouter.h0000644000176000001440000000343412252324034015522 0ustar ripleyusers/* sumsymouter.h Code template for some functions in linalg.c $Revision: 1.3 $ $Date: 2013/04/18 11:55:24 $ Macros used: FNAME = function name, WEIGHTED = #defined for weighted version */ void FNAME( x, #ifdef WEIGHTED w, #endif p, n, y ) double *x; /* p by n by n array */ #ifdef WEIGHTED double *w; /* n by n matrix (symmetric) */ #endif int *p, *n; double *y; /* output matrix p by p, initialised to zero */ { int N, P; register int i, j, k, m, ijpos, jipos, maxchunk; register double *xij, *xji; #ifdef WEIGHTED register double wij; #endif N = *n; P = *p; OUTERCHUNKLOOP(i, N, maxchunk, 256) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 256) { /* loop over j != i */ if(i > 0) { for(j = 0; j < i; j++) { /* pointers to [i,j] and [j,i] in N*N matrices */ ijpos = i + N * j; jipos = j + N * i; /* pointers to x[, i, j] and x[ , j, i] */ xij = x + ijpos * P; xji = x + jipos * P; /* outer product */ #ifdef WEIGHTED wij = w[ijpos]; #endif for(k = 0; k < P; k++) { for(m = 0; m < P; m++) { #ifdef WEIGHTED y[m + k * 
P] += wij * xij[m] * xji[k]; #else y[m + k * P] += xij[m] * xji[k]; #endif } } } } if(i + 1 < N) { for(j = i+1; j < N; j++) { /* pointers to [i,j] and [j,i] in N*N matrices */ ijpos = i + N * j; jipos = j + N * i; /* pointers to x[, i, j] and x[ , j, i] */ xij = x + ijpos * P; xji = x + jipos * P; /* outer product */ #ifdef WEIGHTED wij = w[ijpos]; #endif for(k = 0; k < P; k++) { for(m = 0; m < P; m++) { #ifdef WEIGHTED y[m + k * P] += wij * xij[m] * xji[k]; #else y[m + k * P] += xij[m] * xji[k]; #endif } } } } /* end of loop over j */ } } } spatstat/src/sphevol.c0000755000176000001440000000746412252324034014573 0ustar ripleyusers#include #include #include "geom3.h" /* $Revision: 1.2 $ $Date: 2013/05/27 02:09:10 $ Routine for calculating ABSOLUTE volume of intersection between sphere and box Arbitrary positions: point is allowed to be inside or outside box. # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #ifdef DEBUG #define DBG(X,Y) Rprintf("%s: %f\n", (X), (Y)); #else #define DBG(X,Y) #endif #include "yesno.h" #define ABS(X) ((X >= 0.0) ? 
(X) : -(X)) static double rcubed, spherevol; double sphevol(point, box, r) Point *point; Box *box; double r; { double sum, p[4], q[4]; double v1(), v2(), v3(); int i, j; rcubed = r * r * r; spherevol = (4.0/3.0) * PI * rcubed; p[1] = box->x0 - point->x; p[2] = box->y0 - point->y; p[3] = box->z0 - point->z; q[1] = box->x1 - point->x; q[2] = box->y1 - point->y; q[3] = box->z1 - point->z; sum = 0; for(i = 1; i <= 3; i++) { sum += v1(p[i], -1, r) + v1(q[i], 1, r); #ifdef DEBUG Rprintf("i = %d, v1 = %f, v1 = %f\n", i, v1(p[i], -1, r), v1(q[i], 1, r)); #endif } DBG("Past v1", sum) for(i = 1; i < 3; i++) for(j = i+1; j <= 3; j++) { sum -= v2(p[i], -1, p[j], -1, r) + v2(p[i], -1, q[j], 1, r) + v2(q[i], 1, p[j], -1, r) + v2(q[i], 1, q[j], 1, r); #ifdef DEBUG Rprintf("i = %d, j = %d, sum = %f\n", i, j, sum); #endif } DBG("Past v2", sum) sum += v3(p[1], -1, p[2], -1, p[3], -1, r) + v3(p[1], -1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(p[1], -1, q[2], 1, p[3], -1, r) + v3(p[1], -1, q[2], 1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, p[2], -1, p[3], -1, r) + v3(q[1], 1, p[2], -1, q[3], 1, r); DBG("sum", sum) sum += v3(q[1], 1, q[2], 1, p[3], -1, r) + v3(q[1], 1, q[2], 1, q[3], 1, r); DBG("Past v3", sum) DBG("sphere volume", spherevol) return(spherevol - sum); } double v1(a,s,r) double a, r; int s; { double value; double u(); short sign; value = 4.0 * rcubed * u(ABS(a)/r, 0.0, 0.0); sign = (a >= 0.0) ? 1 : -1; if(sign == s) return(value); else return(spherevol - value); } double v2(a, sa, b, sb, r) double a, b, r; int sa, sb; { short sign; double u(); sign = (b >= 0.0) ? 1 : -1; if(sign != sb ) return(v1(a, sa, r) - v2(a, sa, ABS(b), 1, r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 1 : -1; if(sign != sa) return(v1(b, sb, r) - v2(ABS(a), 1, b, sb, r)); a = ABS(a); return(2.0 * rcubed * u(a/r, b/r, 0.0)); } double v3(a, sa, b, sb, c, sc, r) double a, b, c, r; int sa, sb, sc; { short sign; double u(); sign = (c >= 0.0) ? 
1 : -1; if(sign != sc) return(v2(a,sa,b,sb,r) - v3(a,sa,b,sb, ABS(c), 1, r)); c = ABS(c); sc = 1; sign = (b >= 0.0) ? 1 : -1; if(sign != sb) return(v2(a,sa,c,sc,r) - v3(a,sa,ABS(b),1,c,sc,r)); b = ABS(b); sb = 1; sign = (a >= 0.0) ? 1 : -1; if(sign != sa) return(v2(b,sb, c, sc, r) - v3(ABS(a),1, b, sb, c, sc, r)); a = ABS(a); return(rcubed * u(a/r, b/r, c/r)); } double u(a, b, c) double a, b, c; { double w(); if(a * a + b * b + c * c >= 1.0) return(0.0); return( (PI/12.0) * (2.0 - 3.0 * (a + b + c) + (a * a * a + b * b * b + c * c * c)) + w(a,b) + w(b,c) + w(a,c) - a * b * c ); } double w(x,y) double x,y; /* Arguments assumed >= 0 */ { double z; z = sqrt(1 - x * x - y * y); return( (x / 2.0 - x * x * x / 6.0) * atan2(y, z) + (y / 2.0 - y * y * y / 6.0) * atan2(x, z) - ( atan2(x * y , z) - x * y * z )/3.0 ); } spatstat/src/loccums.h0000644000176000001440000000400112252324034014542 0ustar ripleyusers/* loccums.h C template for loccum.c data-to-data functions $Revision: 1.5 $ $Date: 2013/09/18 04:28:45 $ macros: FNAME function name NULVAL initial value (empty sum = 0, empty product = 1) INC(A,B) increment operation A += B or A *= B */ void FNAME(n, x, y, v, nr, rmax, ans) /* inputs */ int *n, *nr; double *x, *y, *v; double *rmax; /* output */ double *ans; /* matrix of column vectors of functions for each point */ { int N, Nr, Nans; double Rmax; int i, j, k, kmin, maxchunk, columnstart; double Rmax2, rstep, xi, yi; double dx, dy, dx2, d2, d, contrib; N = *n; Nr = *nr; Rmax = *rmax; if(N == 0) return; rstep = Rmax/(Nr-1); Rmax2 = Rmax * Rmax; Nans = Nr * N; /* initialise products to 1 */ OUTERCHUNKLOOP(k, Nans, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Nans, maxchunk, 8196) { ans[k] = NULVAL; } } OUTERCHUNKLOOP(i, N, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, N, maxchunk, 8196) { xi = x[i]; yi = y[i]; columnstart = Nr * i; /* start position for f_i(.) 
in 'ans' */ /* process backward until |dx| > Rmax */ if(i > 0) { for(j=i-1; j >= 0; j--) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = v[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } /* process forward until |dx| > Rmax */ if(i < N - 1) { for(j=i+1; j < N; j++) { dx = x[j] - xi; dx2 = dx * dx; if(dx2 > Rmax2) break; dy = y[j] - yi; d2 = dx2 + dy * dy; if(d2 <= Rmax2) { d = sqrt(d2); kmin = (int) ceil(d/rstep); if(kmin < Nr) { contrib = v[j]; for(k = kmin; k < Nr; k++) INC(ans[columnstart + k] , contrib); } } } } } } } spatstat/src/methas.h0000755000176000001440000000673012252324034014374 0ustar ripleyusers/* Definitions of types and data structures for Metropolis-Hastings State Current state of point pattern Model Model parameters passed from R Cdata (pointer to) model parameters and precomputed data in C Algor Algorithm parameters (p, q, nrep etc) Propo Proposal in Metropolis-Hastings algorithm History Transition history of MH algorithm Cifns Set of functions for computing the conditional intensity for a point process model. This consists of three functions init(State, Model, Algor) .... initialises auxiliary data eval(State, Propo) ........... evaluates cif update(State,Propo) .......... 
updates auxiliary data */ /* Current state of point pattern */ typedef struct State { double *x; /* vectors of Cartesian coordinates */ double *y; int *marks; /* vector of mark values */ int npts; /* current number of points */ int npmax; /* storage limit */ int ismarked; /* whether the pattern is marked */ } State; /* Parameters of model passed from R */ typedef struct Model { double *beta; /* vector of activity parameters */ double *ipar; /* vector of interaction parameters */ double *period; /* width & height of rectangle, if torus */ int ntypes; /* number of possible marks */ } Model; /* A pointer to Cdata is a pointer to C storage for parameters of model */ typedef void Cdata; /* RMH Algorithm parameters */ typedef struct Algor { double p; /* probability of proposing shift */ double q; /* conditional probability of proposing death */ int fixall; /* if TRUE, only shifts of location are feasible */ int ncond; /* For conditional simulation, the first 'ncond' points are fixed */ int nrep; /* number of iterations */ int nverb; /* print report every 'nverb' iterations */ int nrep0; /* number of iterations already performed in previous blocks - for reporting purposes */ } Algor; /* Metropolis-Hastings proposal */ typedef struct Propo { double u; /* location of point of interest */ double v; int mrk; /* mark of point of interest */ int ix; /* index of point of interest, if already in pattern */ int itype; /* transition type */ } Propo; /* transition codes 'itype' */ #define REJECT 0 #define BIRTH 1 #define DEATH 2 #define SHIFT 3 #define HISTORY_INCLUDES_RATIO /* Record of transition history */ typedef struct History { int nmax; /* length of vectors */ int n; /* number of events recorded */ int *proptype; /* vector: proposal type */ int *accepted; /* vector: 0 for reject, 1 for accept */ #ifdef HISTORY_INCLUDES_RATIO double *numerator; /* vectors: Hastings ratio numerator & denominator */ double *denominator; #endif } History; /* conditional intensity functions */ 
typedef Cdata * (*initfunptr)(State state, Model model, Algor algo); typedef double (*evalfunptr)(Propo prop, State state, Cdata *cdata); typedef void (*updafunptr)(State state, Propo prop, Cdata *cdata); typedef struct Cifns { initfunptr init; evalfunptr eval; updafunptr update; int marked; } Cifns; #define NEED_UPDATE(X) ((X).update != (updafunptr) NULL) #define NULL_CIFNS { (initfunptr) NULL, (evalfunptr) NULL, (updafunptr) NULL, NO} /* miscellaneous macros */ #include "yesno.h" # define MAT(X,I,J,M) (X[(I)+(J)*(M)]) spatstat/src/dist2dpath.h0000644000176000001440000000765512252324034015165 0ustar ripleyusers/* Function body for dist2dpath.c Macros used: FNAME function name DTYPE declaration for distance values ('double' or 'int') FLOATY (DTYPE == 'double') $Revision: 1.3 $ $Date: 2013/05/27 02:09:10 $ */ #undef DEBUG #define MATRIX(X,I,J) (X)[(J) + n * (I)] #define D(I,J) MATRIX(d, I, J) #define DPATH(I,J) MATRIX(dpath, I, J) #define ADJ(I,J) (MATRIX(adj, I, J) != 0) #define INFIN -1 #define FINITE(X) ((X) >= 0) void FNAME(nv, d, adj, dpath, tol, niter, status) int *nv; /* number of vertices */ DTYPE *d; /* matrix of edge lengths */ int *adj; /* 0/1 edge matrix of graph */ DTYPE *tol; /* tolerance threshold (ignored in integer case) */ DTYPE *dpath; /* output - shortest path distance matrix */ int *niter, *status; /* status = 0 for convergence */ { int i, j, k, n, iter, maxiter, changed; DTYPE dij, dik, dkj, dikj; #ifdef FLOATY DTYPE eps, diff, maxdiff; #endif int totaledges, starti, nneighi, increm, pos; int *start, *nneigh, *indx; n = *nv; #ifdef FLOATY eps = *tol; #endif /* initialise and count edges */ *status = -1; totaledges = 0; for(i = 0; i < n; i++) { for(j = 0; j < n; j++) { DPATH(i, j) = (i == j) ? 0 : ((ADJ(i,j)) ? D(i, j) : INFIN); if((i != j) && ADJ(i,j)) ++totaledges; } } maxiter = 2 + ((totaledges > n) ? 
totaledges : n); /* store indices j for each edge (i,j) */ indx = (int *) R_alloc(totaledges, sizeof(int)); nneigh = (int *) R_alloc(n, sizeof(int)); start = (int *) R_alloc(n, sizeof(int)); pos = 0; for(i = 0; i < n; i++) { nneigh[i] = 0; start[i] = pos; #ifdef DEBUG Rprintf("Neighbours of %d:\n", i); #endif for(j = 0; j < n; j++) { if((i != j) && ADJ(i,j) && FINITE(D(i,j))) { #ifdef DEBUG Rprintf("\t%d\n", j); #endif ++(nneigh[i]); if(pos > totaledges) error("internal error: pos exceeded storage"); indx[pos] = j; ++pos; } } } /* run */ for(iter = 0; iter < maxiter; iter++) { changed = 0; #ifdef FLOATY maxdiff = 0; #endif #ifdef DEBUG Rprintf("--------- iteration %d ---------------\n", iter); #endif for(i = 0; i < n; i++) { R_CheckUserInterrupt(); nneighi = nneigh[i]; if(nneighi > 0) { /* run through neighbours k of i */ starti = start[i]; for(increm = 0, pos=starti; increm < nneighi; ++increm, ++pos) { k = indx[pos]; dik = DPATH(i,k); #ifdef DEBUG #ifdef FLOATY Rprintf("i=%d k=%d dik=%lf\n", i, k, dik); #else Rprintf("i=%d k=%d dik=%d\n", i, k, dik); #endif #endif /* now run through all other vertices j */ for(j = 0; j < n; j++) { if(j != i && j != k) { dij = DPATH(i,j); dkj = DPATH(k,j); if(FINITE(dkj)) { dikj = dik + dkj; #ifdef DEBUG #ifdef FLOATY Rprintf("considering %d -> (%d) -> %d,\t dij=%lf, dikj=%lf\n", i, k, j, dij, dikj); #else Rprintf("considering %d -> (%d) -> %d,\t dij=%d, dikj=%d\n", i, k, j, dij, dikj); #endif #endif if(!FINITE(dij) || dikj < dij) { #ifdef DEBUG #ifdef FLOATY Rprintf("updating i=%d j=%d via k=%d from %lf to %lf\n", i, j, k, dij, dikj); #else Rprintf("updating i=%d j=%d via k=%d from %d to %d\n", i, j, k, dij, dikj); #endif #endif DPATH(i,j) = DPATH(j,i) = dikj; changed = 1; #ifdef FLOATY diff = (FINITE(dij)) ? 
dij - dikj : dikj; if(diff > maxdiff) maxdiff = diff; #endif } } } } } } } if(changed == 0) { /* algorithm converged */ #ifdef DEBUG Rprintf("Algorithm converged\n"); #endif *status = 0; break; #ifdef FLOATY } else if(FINITE(maxdiff) && maxdiff < eps) { /* tolerance reached */ #ifdef DEBUG Rprintf("Algorithm terminated with maxdiff=%lf\n", maxdiff); #endif *status = 1; break; #endif } } #ifdef DEBUG Rprintf("Returning after %d iterations on %d vertices\n", iter, n); #endif *niter = iter; } #undef DEBUG #undef MATRIX #undef D #undef DPATH #undef ADJ #undef INFIN #undef FINITE spatstat/src/pairloop.h0000644000176000001440000000327512252324034014736 0ustar ripleyusers/* pairloop.h Generic code template for loop collecting contributions to point x_i from all points x_j such that ||x_i - x_j|| <= r cpp variables used: INITIAL_I code executed at start of 'i' loop CONTRIBUTE_IJ code executed to compute contribution from j to i COMMIT_I code executed to save total contribution to i C variables used: int i, j, n, maxchunk; double xi, yi, dx, dy, dx2, d2, r2max; double *x, *y; $Revision: 1.3 $ $Date: 2013/05/27 02:09:10 $ */ #ifndef CHUNKLOOP_H #include "chunkloop.h" #endif #define PAIRLOOP(INITIAL_I, CONTRIBUTE_IJ, COMMIT_I) \ OUTERCHUNKLOOP(i, n, maxchunk, 65536) { \ R_CheckUserInterrupt(); \ INNERCHUNKLOOP(i, n, maxchunk, 65536) { \ \ xi = x[i]; \ yi = y[i]; \ \ INITIAL_I; \ \ if(i > 0) { \ for(j=i-1; j > 0; j--) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ \ if(i+1 < n) { \ for(j=i+1; j < n; j++) { \ dx = x[j] - xi; \ dx2 = dx * dx; \ if(dx2 > r2max) \ break; \ dy = y[j] - yi; \ d2 = dx2 + dy * dy; \ if(d2 <= r2max) { \ CONTRIBUTE_IJ; \ } \ } \ } \ COMMIT_I; \ } \ } spatstat/src/nnMDdist.c0000755000176000001440000004360112252324034014624 0ustar ripleyusers/* nnMDdist.c Nearest Neighbour Distances in m dimensions $Revision: 1.8 $ $Date: 2013/05/27 02:09:10 $ 
Argument x is an m * n matrix with columns corresponding to points and rows corresponding to coordinates. Spatial dimension m must be > 1 THE FOLLOWING FUNCTIONS ASSUME THAT THE ROWS OF x ARE SORTED IN ASCENDING ORDER OF THE FIRST COLUMN nndMD Nearest neighbour distances nnwMD Nearest neighbours and their distances nnXwMD Nearest neighbour from one list to another nnXxMD Nearest neighbour from one list to another, with overlaps knndMD k-th nearest neighbour distances knnwMD k-th nearest neighbours and their distances */ #undef SPATSTAT_DEBUG #include #include #include #include "chunkloop.h" #include "yesno.h" double sqrt(); void nndMD(n, m, x, nnd, huge) /* inputs */ int *n, *m; double *x, *huge; /* output */ double *nnd; { int npoints, mdimen, i, j, left, right, leftpos, rightpos, maxchunk; double d2, d2min, hu, hu2, xi0, dx0, dxj; double *xi; npoints = *n; mdimen = *m; xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ hu = *huge; hu2 = hu * hu; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif d2min = hu2; for(j = 0; j < mdimen; j++) xi[j] = x[i * mdimen + j]; xi0 = xi[0]; #ifdef SPATSTAT_DEBUG Rprintf("\n ("); for(j = 0; j < mdimen; j++) Rprintf("%lf, ", x[i * mdimen + j]); Rprintf(")\n"); #endif /* search backward */ if(i > 0) { for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L=%d, d2min=%lf\n", left, d2min); #endif dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; leftpos = left * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[leftpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; #ifdef SPATSTAT_DEBUG Rprintf("\tupdating d2min=%lf\n", d2min); #endif } } } /* search forward */ if(i < npoints - 1) { for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R=%d, d2min=%lf\n", right, 
d2min); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2min) break; rightpos = right * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[rightpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; #ifdef SPATSTAT_DEBUG Rprintf("\tupdating d2min=%lf\n", d2min); #endif } } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif nnd[i] = sqrt(d2min); } } } /* nnwMD: same as nndMD, but also returns id of nearest neighbour */ void nnwMD(n, m, x, nnd, nnwhich, huge) /* inputs */ int *n, *m; double *x, *huge; /* output */ double *nnd; int *nnwhich; { int npoints, mdimen, i, j, left, right, leftpos, rightpos, which, maxchunk; double d2, d2min, hu, hu2, xi0, dx0, dxj; double *xi; npoints = *n; mdimen = *m; xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ hu = *huge; hu2 = hu * hu; OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif d2min = hu2; which = -1; for(j = 0; j < mdimen; j++) xi[j] = x[i * mdimen + j]; xi0 = xi[0]; /* search backward */ if(i > 0) { for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L"); #endif dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; leftpos = left * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[leftpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; which = left; } } } /* search forward */ if(i < npoints - 1) { for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R"); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2min) break; rightpos = right * mdimen; for(j = 1; j < mdimen && d2 < d2min; j++) { dxj = xi[j] - x[rightpos + j]; d2 += dxj * dxj; } if (d2 < d2min) { d2min = d2; which = right; } } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif nnd[i] = sqrt(d2min); /* convert index to R convention */ nnwhich[i] = which + 1; } 
} } /* nnXwMD: for TWO point patterns X and Y, find the nearest neighbour (from each point of X to the nearest point of Y) returning both the distance and the identifier Requires both patterns to be sorted in order of increasing z coord */ void nnXwMD(m, n1, x1, n2, x2, nnd, nnwhich, huge) /* inputs */ int *m, *n1, *n2; double *x1, *x2, *huge; /* outputs */ double *nnd; int *nnwhich; { int mdimen, npoints1, npoints2, i, ell, jleft, jright, jwhich, lastjwhich; double d2, d2min, x1i0, dx0, dxell, hu, hu2; double *x1i; int maxchunk; hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; mdimen = *m; if(npoints1 == 0 || npoints2 == 0) return; x1i = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ lastjwhich = 0; OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) { d2min = hu2; jwhich = -1; for(ell = 0; ell < mdimen; ell++) x1i[ell] = x1[i * mdimen + ell]; x1i0 = x1i[0]; /* search backward from previous nearest neighbour */ if(lastjwhich > 0) { for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dx0 = x1i0 - x2[jleft * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jleft * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jleft; } } } /* search forward from previous nearest neighbour */ if(lastjwhich < npoints2) { for(jright = lastjwhich; jright < npoints2; ++jright) { dx0 = x2[jright * mdimen] - x1i0; d2 = dx0 * dx0; if(d2 > d2min) break; for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jright * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jright; } } } nnd[i] = sqrt(d2min); nnwhich[i] = jwhich; lastjwhich = jwhich; } } } /* nnXxMD: similar to nnXwMD but allows X and Y to include common points (which are not to be counted as neighbours) Code numbers id1, id2 are attached to the 
patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. Requires both patterns to be sorted in order of increasing y coord */ void nnXxMD(m, n1, x1, id1, n2, x2, id2, nnd, nnwhich, huge) /* inputs */ int *m, *n1, *n2; double *x1, *x2, *huge; int *id1, *id2; /* outputs */ double *nnd; int *nnwhich; { int mdimen, npoints1, npoints2, i, ell, jleft, jright, jwhich, lastjwhich, id1i; double d2, d2min, x1i0, dx0, dxell, hu, hu2; double *x1i; int maxchunk; hu = *huge; hu2 = hu * hu; npoints1 = *n1; npoints2 = *n2; mdimen = *m; if(npoints1 == 0 || npoints2 == 0) return; x1i = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* dx = (double *) R_alloc((size_t) mdimen, sizeof(double)); */ lastjwhich = 0; OUTERCHUNKLOOP(i, npoints1, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints1, maxchunk, 16384) { d2min = hu2; jwhich = -1; id1i = id1[i]; for(ell = 0; ell < mdimen; ell++) x1i[ell] = x1[i * mdimen + ell]; x1i0 = x1i[0]; /* search backward from previous nearest neighbour */ if(lastjwhich > 0) { for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dx0 = x1i0 - x2[jleft * mdimen]; d2 = dx0 * dx0; if(d2 > d2min) break; /* do not compare identical points */ if(id2[jleft] != id1i) { for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jleft * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jleft; } } } } /* search forward from previous nearest neighbour */ if(lastjwhich < npoints2) { for(jright = lastjwhich; jright < npoints2; ++jright) { dx0 = x2[jright * mdimen] - x1i0; d2 = dx0 * dx0; if(d2 > d2min) break; /* do not compare identical points */ if(id2[jright] != id1i) { for(ell = 1; ell < mdimen && d2 < d2min; ell++) { dxell = x1i[ell] - x2[jright * mdimen + ell]; d2 += dxell * dxell; } if (d2 < d2min) { d2min = d2; jwhich = jright; } } } } nnd[i] = sqrt(d2min); nnwhich[i] = jwhich; lastjwhich = jwhich; } } } /* knndMD nearest neighbours 1:kmax */ void 
knndMD(n, m, kmax, x, nnd, huge) /* inputs */ int *n, *m, *kmax; double *x, *huge; /* output matrix (kmax * npoints) */ double *nnd; { int npoints, mdimen, nk, nk1, i, j, k, k1, left, right, unsorted, maxchunk; double d2, d2minK, xi0, dx0, dxj, hu, hu2, tmp; double *d2min, *xi; hu = *huge; hu2 = hu * hu; npoints = *n; mdimen = *m; nk = *kmax; nk1 = nk - 1; /* create space to store the squared k-th nearest neighbour distances for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); /* scratch space */ xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* loop over points */ OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances */ d2minK = hu2; for(k = 0; k < nk; k++) d2min[k] = hu2; for(j = 0; j < mdimen; j++) xi[j] = x[i* mdimen + j]; xi0 = xi[0]; #ifdef SPATSTAT_DEBUG Rprintf("\n ("); for(j = 0; j < mdimen; j++) Rprintf("%lf, ", xi[j]); Rprintf(")\n"); #endif /* search backward */ for(left = i - 1; left >= 0; --left) { dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2minK) break; #ifdef SPATSTAT_DEBUG Rprintf("L=%d\n", left); Rprintf("\t 0 "); #endif for(j = 1; j < mdimen && d2 < d2minK; j++) { #ifdef SPATSTAT_DEBUG Rprintf("%d ", j); #endif dxj = xi[j] - x[left * mdimen + j]; d2 += dxj * dxj; } #ifdef SPATSTAT_DEBUG Rprintf("\n\t d2=%lf\n", d2); #endif if (d2 < d2minK) { /* overwrite last entry */ #ifdef SPATSTAT_DEBUG Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]); #endif d2min[nk1] = d2; /* bubble sort */ #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); #endif unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; } else { unsorted = NO; } } #ifdef SPATSTAT_DEBUG 
Rprintf("\td2min[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; } } /* search forward */ for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R=%d\n", right); Rprintf("\t 0 "); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2minK) break; for(j = 1; j < mdimen && d2 < d2minK; j++) { #ifdef SPATSTAT_DEBUG Rprintf("%d ", j); #endif dxj = xi[j] - x[right * mdimen + j]; d2 += dxj * dxj; } #ifdef SPATSTAT_DEBUG Rprintf("\n\t d2=%lf\n", d2); #endif if (d2 < d2minK) { #ifdef SPATSTAT_DEBUG Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]); #endif /* overwrite last entry */ d2min[nk1] = d2; /* bubble sort */ #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); #endif unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; } else { unsorted = NO; } } #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { nnd[nk * i + k] = sqrt(d2min[k]); } } } } /* knnwMD nearest neighbours 1:kmax returns distances and indices */ void knnwMD(n, m, kmax, x, nnd, nnwhich, huge) /* inputs */ int *n, *m, *kmax; double *x, *huge; /* output matrix (kmax * npoints) */ double *nnd; int *nnwhich; { int npoints, mdimen, nk, nk1, i, j, k, k1, left, right, unsorted, itmp; double d2, d2minK, xi0, dx0, dxj, hu, hu2, tmp; double *d2min, *xi; int *which; int maxchunk; hu = *huge; hu2 = hu * hu; npoints = *n; mdimen = *m; nk = *kmax; nk1 = nk - 1; /* create space to store the nearest 
neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); which = (int *) R_alloc((size_t) nk, sizeof(int)); /* scratch space */ xi = (double *) R_alloc((size_t) mdimen, sizeof(double)); /* loop over points */ OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances */ d2minK = hu2; for(k = 0; k < nk; k++) { d2min[k] = hu2; which[k] = -1; } for(j = 0; j < mdimen; j++) xi[j] = x[i* mdimen + j]; xi0 = xi[0]; #ifdef SPATSTAT_DEBUG Rprintf("\n ("); for(j = 0; j < mdimen; j++) Rprintf("%lf, ", x[i * mdimen + j]); Rprintf(")\n"); #endif /* search backward */ for(left = i - 1; left >= 0; --left) { #ifdef SPATSTAT_DEBUG Rprintf("L=%d, d2minK=%lf\n", left, d2minK); Rprintf("\t 0 "); #endif dx0 = xi0 - x[left * mdimen]; d2 = dx0 * dx0; if(d2 > d2minK) break; for(j = 1; j < mdimen && d2 < d2minK; j++) { #ifdef SPATSTAT_DEBUG Rprintf("%d ", j); #endif dxj = xi[j] - x[left * mdimen + j]; d2 += dxj * dxj; } if (d2 < d2minK) { #ifdef SPATSTAT_DEBUG Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]); #endif /* overwrite last entry */ d2min[nk1] = d2; which[nk1] = left; /* bubble sort */ #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); Rprintf("\twhich[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]); Rprintf("\n"); #endif unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; } else { unsorted = NO; } } #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); Rprintf("\twhich[] after bubble sort:"); for(k = 0; k < nk; k++) 
Rprintf("%d, ", which[k]); Rprintf("\n"); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; } } /* search forward */ for(right = i + 1; right < npoints; ++right) { #ifdef SPATSTAT_DEBUG Rprintf("R=%d, d2minK=%lf\n", right, d2minK); Rprintf("\t 0 "); #endif dx0 = x[right * mdimen] - xi0; d2 = dx0 * dx0; if(d2 > d2minK) break; for(j = 1; j < mdimen && d2 < d2minK; j++) { #ifdef SPATSTAT_DEBUG Rprintf("%d ", j); #endif dxj = xi[j] - x[right * mdimen + j]; d2 += dxj * dxj; } if (d2 < d2minK) { #ifdef SPATSTAT_DEBUG Rprintf("\td2=%lf overwrites d2min[%d] = %lf\n", d2, nk1, d2min[nk1]); #endif /* overwrite last entry */ d2min[nk1] = d2; which[nk1] = right; /* bubble sort */ #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); Rprintf("\twhich[] before bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]); Rprintf("\n"); #endif unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; } else { unsorted = NO; } } #ifdef SPATSTAT_DEBUG Rprintf("\td2min[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%lf, ", d2min[k]); Rprintf("\n"); Rprintf("\twhich[] after bubble sort:"); for(k = 0; k < nk; k++) Rprintf("%d, ", which[k]); Rprintf("\n"); #endif /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { nnd[nk * i + k] = sqrt(d2min[k]); /* convert index back to R convention */ nnwhich[nk * i + k] = which[k] + 1; } } } } spatstat/src/knn3Ddist.h0000644000176000001440000000730112252324034014744 0ustar ripleyusers/* knn3Ddist.h Code template for k-nearest-neighbour algorithms for 3D point patterns Input is a single point pattern - supports 'nndist' and 'nnwhich' This code 
is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. THE FOLLOWING CODE ASSUMES THAT THE POINT PATTERN IS SORTED IN ASCENDING ORDER OF THE z COORDINATE $Revision: 1.3 $ $Date: 2013/06/29 02:38:19 $ */ void FNAME(n, kmax, x, y, z, nnd, nnwhich, huge) /* inputs */ int *n, *kmax; double *x, *y, *z, *huge; /* output matrices (npoints * kmax) in ROW MAJOR order */ double *nnd; int *nnwhich; { int npoints, nk, nk1, i, j, k, k1, unsorted, maxchunk; double d2, d2minK, xi, yi, zi, dx, dy, dz, dz2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif hu = *huge; hu2 = hu * hu; npoints = *n; nk = *kmax; nk1 = nk - 1; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop over points */ OUTERCHUNKLOOP(i, npoints, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, npoints, maxchunk, 16384) { #ifdef SPATSTAT_DEBUG Rprintf("\ni=%d\n", i); #endif /* initialise nn distances and indices */ d2minK = hu2; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } xi = x[i]; yi = y[i]; zi = z[i]; /* search backward */ if(i > 0) { for(j = i - 1; j >= 0; --j) { #ifdef SPATSTAT_DEBUG Rprintf("L"); #endif dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2minK) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = j; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } 
else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } } /* search forward */ if(i + 1 < npoints) { for(j = i + 1; j < npoints; ++j) { #ifdef SPATSTAT_DEBUG Rprintf("R"); #endif dz = z[j] - zi; dz2 = dz * dz; if(dz2 > d2minK) break; dx = x[j] - xi; dy = y[j] - yi; d2 = dx * dx + dy * dy + dz2; if (d2 < d2minK) { /* overwrite last entry */ d2min[nk1] = d2; #ifdef WHICH which[nk1] = j; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } } #ifdef SPATSTAT_DEBUG Rprintf("\n"); #endif /* calculate nn distances for point i and copy to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH /* convert from C to R indexing */ nnwhich[nk * i + k] = which[k] + 1; #endif } } } } spatstat/src/fexitc.c0000755000176000001440000000045512252324034014366 0ustar ripleyusers# include # include # include void fexitc(const char *msg) { size_t nc = strlen(msg); char buf[256]; if(nc > 255) { warning("invalid character length in fexitc"); nc = 255; } strncpy(buf, msg, nc); buf[nc] = '\0'; error(buf); } spatstat/src/nngrid.h0000644000176000001440000000573312252324034014373 0ustar ripleyusers #if (1 == 0) /* nngrid.h Code template for C functions nearest neighbour of each grid point THE FOLLOWING CODE ASSUMES THAT POINT PATTERN (xp, yp) IS SORTED IN ASCENDING ORDER OF x COORDINATE This code is #included multiple times in nngrid.c Variables used: FNAME function name DIST #defined if function returns distance to nearest neighbour WHICH #defined if function returns id of nearest neighbour Either or both DIST and WHICH may be defined. 
Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.3 $ $Date: 2013/09/29 08:55:27 $ */ #endif void FNAME(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int Nxcol, Nyrow, Npoints; int i, j, ijpos; int mleft, mright, mwhich, lastmwhich; double X0, Y0, Xstep, Ystep; double d2, d2min, xj, yi, dx, dy, dx2, hu, hu2, tmp; Nxcol = *nx; Nyrow = *ny; Npoints = *np; hu = *huge; X0 = *x0; Y0 = *y0; Xstep = *xstep; Ystep = *ystep; hu2 = hu * hu; if(Npoints == 0) return; lastmwhich = 0; /* loop over pixels */ for(j = 0, xj = X0; j < Nxcol; j++, xj += Xstep) { R_CheckUserInterrupt(); for(i = 0, yi = Y0; i < Nyrow; i++, yi += Ystep) { /* reset nn distance and index */ d2min = hu2; mwhich = -1; if(lastmwhich < Npoints) { /* search forward from previous nearest neighbour */ for(mright = lastmwhich; mright < Npoints; ++mright) { dx = xp[mright] - xj; dx2 = dx * dx; if(dx2 > d2min) /* note that dx2 >= d2min could break too early */ break; dy = yp[mright] - yi; d2 = dy * dy + dx2; if (d2 < d2min) { /* save as nearest neighbour */ d2min = d2; mwhich = mright; } } /* end forward search */ } if(lastmwhich > 0) { /* search backward from previous nearest neighbour */ for(mleft = lastmwhich - 1; mleft >= 0; --mleft) { dx = xj - xp[mleft]; dx2 = dx * dx; if(dx2 > d2min) /* note that dx2 >= d2min could break too early */ break; dy = yp[mleft] - yi; d2 = dy * dy + dx2; if (d2 < d2min) { /* save as nearest neighbour */ d2min = d2; mwhich = mleft; } } /* end backward search */ } /* remember index of most recently-encountered neighbour */ lastmwhich = mwhich; /* copy nn distance for grid point (i, j) to output array nnd[i, j] */ ijpos = i + j * Nyrow; #ifdef DIST nnd[ijpos] = sqrt(d2min); #endif 
#ifdef WHICH nnwhich[ijpos] = mwhich + 1; /* R indexing */ #endif /* end of loop over grid points (i, j) */ } } } spatstat/src/Efiksel.c0000755000176000001440000000305112252324034014461 0ustar ripleyusers#include #include #include "chunkloop.h" /* Efiksel.c $Revision: 1.3 $ $Date: 2012/03/28 05:55:29 $ C implementation of 'eval' for Fiksel interaction (non-hardcore part) Assumes point patterns are sorted in increasing order of x coordinate */ double sqrt(), exp(); void Efiksel(nnsource, xsource, ysource, nntarget, xtarget, ytarget, rrmax, kkappa, values) /* inputs */ int *nnsource, *nntarget; double *xsource, *ysource, *xtarget, *ytarget, *rrmax, *kkappa; /* output */ double *values; { int nsource, ntarget, maxchunk, j, i, ileft; double xsourcej, ysourcej, xleft, dx, dy, dx2, d2; double rmax, r2max, kappa, total; nsource = *nnsource; ntarget = *nntarget; rmax = *rrmax; kappa = *kkappa; r2max = rmax * rmax; if(nsource == 0 || ntarget == 0) return; ileft = 0; OUTERCHUNKLOOP(j, nsource, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nsource, maxchunk, 16384) { total = 0; xsourcej = xsource[j]; ysourcej = ysource[j]; /* adjust starting point */ xleft = xsourcej - rmax; while((xtarget[ileft] < xleft) && (ileft+1 < ntarget)) ++ileft; /* process from ileft until dx > rmax */ for(i=ileft; i < ntarget; i++) { /* squared interpoint distance */ dx = xtarget[i] - xsourcej; dx2 = dx * dx; if(dx2 > r2max) break; dy = ytarget[i] - ysourcej; d2 = dx2 + dy * dy; if(d2 <= r2max) total += exp(- kappa * sqrt(d2)); } values[j] = total; } } } spatstat/src/loccum.c0000644000176000001440000000252512252324034014363 0ustar ripleyusers#include #include #include #include "chunkloop.h" /* loccum.c $Revision: 1.1 $ $Date: 2013/05/27 02:09:10 $ Compute local cumulative sums or products of weights locsum: f_i(t) = \sum_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxsum: f_u(t) = \sum_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point 
pattern {x_i} locprod: f_i(t) = \prod_{j: j \neq i, ||x_j - x_i|| \le t} v(x_j) for a data point pattern {x_i} locxprod: f_u(t) = \prod_{||x_i - u|| \le t} v(x_i) for a grid of points {u} and a data point pattern {x_i} Assumes point patterns are sorted in increasing order of x coordinate Uses C code template files : loccums.h, loccumx.h */ /* data-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccums.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccums.h" /* test-grid-to-data */ #undef FNAME #undef NULVAL #undef INC #define FNAME locxsum #define NULVAL 0.0 #define INC(A,B) A += B #include "loccumx.h" #undef FNAME #undef NULVAL #undef INC #define FNAME locxprod #define NULVAL 1.0 #define INC(A,B) A *= B #include "loccumx.h" spatstat/src/k3.c0000755000176000001440000000716312252324034013424 0ustar ripleyusers#include #include #include "geom3.h" #include "functable.h" /* $Revision: 1.1 $ $Date: 2009/11/04 23:54:15 $ K function of 3D point pattern k3trans translation correction k3isot isotropic correction # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. 
# ////////////////////////////////////////////// */ void k3trans(p, n, b, k) Point *p; int n; Box *b; Ftable *k; { register int i, j, l, lmin; register double dx, dy, dz, dist; register double vx, vy, vz; Point *ip, *jp; double dt, vol, lambda, denom, term; double sphesfrac(), sphevol(); /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); lambda = ((double) n )/ vol; denom = lambda * lambda; for(l = 0; l < k->n; l++) { (k->denom)[l] = denom; (k->num)[l] = 0.0; } /* spacing of argument in result vector k */ dt = (k->t1 - k->t0)/(k->n - 1); /* compute numerator */ for( i = 0; i < n; i++) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( (dist - k->t0) / dt ); if(lmin < 0) lmin = 0; vx = b->x1 - b->x0 - (dx > 0 ? dx : -dx); vy = b->y1 - b->y0 - (dy > 0 ? dy : -dy); vz = b->z1 - b->z0 - (dz > 0 ? dz : -dz); if(vx >= 0.0 && vy >= 0.0 && vz >= 0.0) { term = 2.0 /(vx * vy * vz); /* 2 because they're ordered pairs */ for(l = lmin; l < k->n; l++) (k->num)[l] += term; } } } /* compute ratio */ for(l = 0; l < k->n; l++) (k->f)[l] = ((k->denom)[l] > 0.0)? 
(k->num)[l] / (k->denom)[l] : 0.0; } void k3isot(p, n, b, k) Point *p; int n; Box *b; Ftable *k; { register int i, j, l, lmin; register double dx, dy, dz, dist; Point *ip, *jp; double dt, vol, denom, term; double sphesfrac(), sphevol(); Point vertex; Box half; /* compute denominator & initialise numerator*/ vol = (b->x1 - b->x0) * (b->y1 - b->y0) * (b->z1 - b->z0); denom = ((double) (n * n))/vol; for(l = 0; l < k->n; l++) { (k->denom)[l] = denom; (k->num)[l] = 0.0; } /* spacing of argument in result vector k */ dt = (k->t1 - k->t0)/(k->n - 1); /* set up for volume correction */ vertex.x = b->x0; vertex.y = b->y0; vertex.z = b->z0; half.x1 = b->x1; half.y1 = b->y1; half.z1 = b->z1; half.x0 = (b->x0 + b->x1)/2.0; half.y0 = (b->y0 + b->y1)/2.0; half.z0 = (b->z0 + b->z1)/2.0; /* compute numerator */ for( i = 0; i < n; i++) { ip = p + i; for(j = i + 1; j < n; j++) { jp = p + j; dx = jp->x - ip->x; dy = jp->y - ip->y; dz = jp->z - ip->z; dist = sqrt(dx * dx + dy * dy + dz * dz); lmin = ceil( (dist - k->t0) / dt ); if(lmin < 0) lmin = 0; term = (1.0 / sphesfrac(ip, b, dist)) + (1.0 / sphesfrac(jp, b, dist)); term *= 1.0 - 8.0 * sphevol(&vertex, &half, dist) / vol; for(l = lmin; l < k->n; l++) (k->num)[l] += term; } } /* compute ratio */ for(l = 0; l < k->n; l++) (k->f)[l] = ((k->denom)[l] > 0.0)? 
(k->num)[l] / (k->denom)[l] : 0.0; } spatstat/src/g3.c0000755000176000001440000001266612252324034013424 0ustar ripleyusers#include #include #include "geom3.h" #include "functable.h" /* $Revision: 1.3 $ $Date: 2012/05/22 07:17:31 $ G function (nearest neighbour distribution) of 3D point pattern Let b = distance from point p[i] to boundary of box d = distance from p[i] to nearest p[j] method = 1 naive ratio estimator (Ripley 1981) numerator(r) = count(i: b >= r, d <= r) denominator(r) = count(i: b >= r) method = 2 minus sampling estimator numerator(r) = count(i: b >= r, d <= r) denominator(r) = lambda * volume(x: b >= r) where lambda = (no of points)/volume(box) method = 3 Hanisch's G3 numerator(r) = count(i: b >= d, d <= r) denominator(r) = count(i: b >= d) method = 4 Hanisch's G4 numerator(r) = count(i: b >= d, d <= r) denominator(r) = fudge * volume(x: b >= r) fudge = numerator(R)/denominator(R) R = sup{r : denominator(r) > 0 } # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009, 2012. # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ #define MIN(X,Y) (((X) > (Y)) ? 
(Y) : (X))

/*
  nndist3: compute the nearest-neighbour distance for each point p[i]
  of a 3D point pattern of n points in box b.  Returns an R_alloc'd
  vector of length n (freed automatically by R at the end of the call).
  Brute-force O(n^2) scan.
*/
double *
nndist3(p, n, b)
     /* compute nearest neighbour distance for each p[i] */
     Point *p;
     int n;
     Box *b;
{
  register int i, j;
  register double dx, dy, dz, dist2, nearest2, huge2;
  Point *ip, *jp;
  double *nnd;

  nnd = (double *) R_alloc(n, sizeof(double));

  /* huge2: squared distance exceeding any possible interpoint distance */
  dx = b->x1 - b->x0;
  dy = b->y1 - b->y0;
  dz = b->z1 - b->z0;
  huge2 = 2.0 * (dx * dx + dy * dy + dz * dz);

  /* scan each point and find closest */
  for( i = 0; i < n; i++) {
    ip = p + i;
    nearest2 = huge2;
    for(j = 0; j < n; j++)
      if(j != i) {
	jp = p + j;
	dx = jp->x - ip->x;
	dy = jp->y - ip->y;
	dz = jp->z - ip->z;
	dist2 = dx * dx + dy * dy + dz * dz;
	if(dist2 < nearest2)
	  nearest2 = dist2;
      }
    nnd[i] = sqrt(nearest2);
  }
  return(nnd);
}

/*
  border3: for each point p[i], compute its distance to the boundary
  of box b (minimum over the six faces).  Returns an R_alloc'd vector
  of length n.
*/
double *
border3(p, n, b)
     /* compute distances to border */
     Point *p;
     int n;
     Box *b;
{
  register int i;
  register double bord;
  register Point *ip;
  double *bored;

  bored = (double *) R_alloc(n, sizeof(double));

  for( i = 0; i < n; i++) {
    ip = p + i;
    bord = MIN(ip->x - b->x0, b->x1 - ip->x);
    bord = MIN(bord, ip->y - b->y0);
    bord = MIN(bord, b->y1 - ip->y);
    bord = MIN(bord, ip->z - b->z0);
    bord = MIN(bord, b->z1 - ip->z);
    bored[i] = bord;
  }
  return(bored);
}

/*
  g3one: naive ratio estimator of G ("method = 1" in the file header).
  On exit, for each argument r_l = g->t0 + l * dt:
    (g->denom)[l] = count(i: border >= r_l)
    (g->num)[l]   = count(i: border >= r_l, nndist <= r_l)
    (g->f)[l]     = ratio, defaulting to 1.0 when denominator is zero.
*/
void g3one(p, n, b, g)
     Point *p;
     int n;
     Box *b;
     Ftable *g;
{
  register int i, l, lbord, lnnd;
  double dt;
  double *bord, *nnd;

  bord = border3(p, n, b);
  nnd  = nndist3(p, n, b);

  /* initialise */
  for(l = 0; l < g->n; l++)
    (g->num)[l] = (g->denom)[l] = 0.0;

  /* spacing of argument in result vector g */
  dt = (g->t1 - g->t0)/(g->n - 1);

  for(i = 0; i < n; i++) {
    /* last cell whose argument is <= border distance */
    lbord = floor( (bord[i] - g->t0) / dt );
    if(lbord >= g->n)
      lbord = g->n - 1;
    for(l = 0; l <= lbord; l++)
      (g->denom)[l] += 1.0;

    /* first cell whose argument is >= nn distance */
    lnnd = ceil( (nnd[i] - g->t0) / dt );
    if(lnnd < 0)
      lnnd = 0;
    for(l = lnnd; l <= lbord; l++)
      (g->num)[l] += 1.0;
  }

  /* compute ratio */
  for(l = 0; l < g->n; l++)
    (g->f)[l] = ((g->denom)[l] > 0)?
      (g->num)[l] / (g->denom)[l]
      : 1.0;
}

/*
  g3three: Hanisch G3 estimator ("method = 3" in the file header).
  Only points whose nn distance does not exceed their border distance
  contribute; the denominator is the count of such points.
*/
void g3three(p, n, b, g)
     Point *p;
     int n;
     Box *b;
     Ftable *g;
{
  register int i, l, lmin;
  double dt;
  int denom;
  double *bord, *nnd;

  bord = border3(p, n, b);
  nnd  = nndist3(p, n, b);

  /* initialise */
  denom = 0;
  for(l = 0; l < g->n; l++)
    (g->num)[l] = 0.0;

  /* spacing of argument in result vector g */
  dt = (g->t1 - g->t0)/(g->n - 1);

  for(i = 0; i < n; i++) {
    if(nnd[i] <= bord[i]) {
      /* observation is uncensored */
      ++denom;
      lmin = ceil( (nnd[i] - g->t0) / dt );
      if(lmin < 0)
	lmin = 0;
      for(l = lmin; l < g->n; l++)
	(g->num)[l] += 1.0;
    }
  }

  /* compute ratio */
  for(l = 0; l < g->n; l++) {
    (g->denom)[l] = denom;
    (g->f)[l] = (denom > 0)?
      (g->num)[l] / (double) denom
      : 1.0;
  }
}

/*
  g3cen: accumulate censored-data histograms for the Kaplan-Meier style
  estimator.  For each point, obsv = nn distance, cens = border distance;
  the observation is uncensored iff obsv <= cens.  'count' must have been
  initialised by the caller; cells out of range are tallied in
  upperobs/uppercen.
*/
void g3cen(p, n, b, count)
     Point *p;
     int n;
     Box *b;
     H4table *count;
{
  register int i, lcen, lobs;
  register double dt, cens, obsv;
  double *bord, *nnd;

  bord = border3(p, n, b);
  nnd  = nndist3(p, n, b);

  /* spacing of histogram cells */
  dt = (count->t1 - count->t0)/(count->n - 1);

  /* 'count' is assumed to have been initialised */

  for(i = 0; i < n; i++) {
    obsv = nnd[i];
    cens = bord[i];
    lobs = ceil( (obsv - count->t0) / dt );
    lcen = floor( (cens - count->t0) / dt );
    if(obsv <= cens) {
      /* observation is uncensored;
	 increment all four histograms */
      if(lobs >= count->n)
	++(count->upperobs);
      else if(lobs >= 0) {
	(count->obs)[lobs]++;
	(count->nco)[lobs]++;
      }
      if(lcen >= count->n)
	++(count->uppercen);
      else if(lcen >= 0) {
	(count->cen)[lcen]++;
	(count->ncc)[lcen]++;
      }
    } else {
      /* observation is censored;
	 increment only two histograms */
      lobs = MIN(lobs, lcen);
      if(lobs >= count->n)
	++(count->upperobs);
      else if(lobs >= 0)
	(count->obs)[lobs]++;
      if(lcen >= count->n)
	++(count->uppercen);
      else if(lcen >= 0)
	(count->cen)[lcen]++;
    }
  }
}
spatstat/src/trigraf.c0000755000176000001440000005575612252324034014560 0ustar ripleyusers/* trigraf.c

   Form list of all triangles in a planar graph, given list of edges

   $Revision: 1.13 $ $Date: 2012/04/06 09:26:50 $

   Form list of all triangles in a planar graph, given list of edges

   Note: vertex
indices ie, je are indices in R. They are handled without converting to C convention, because we only need to test equality and ordering. (*except in 'trioxgraph'*) Called by .C: ------------- trigraf() Generic C implementation with fixed storage limit usable with Delaunay triangulation trigrafS() Faster version when input data are sorted (again with predetermined storage limit) suited for handling Delaunay triangulation Called by .Call: --------------- trigraph() Version with dynamic storage allocation triograph() Faster version assuming 'iedge' is sorted in increasing order trioxgraph() Even faster version for use with quadrature schemes Diameters: ----------- triDgraph() Also computes diameters of triangles */ #include #include #include #include "chunkloop.h" #undef DEBUGTRI void trigraf(nv, ne, ie, je, ntmax, nt, it, jt, kt, status) /* inputs */ int *nv; /* number of graph vertices */ int *ne; /* number of edges */ int *ie, *je; /* vectors of indices of ends of each edge */ int *ntmax; /* length of storage space for triangles */ /* output */ int *nt; /* number of triangles (<= *ntmax) */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int *status; /* 0 if OK, 1 if overflow */ { int Nv, Ne, Ntmax; int Nt, Nj, m, i, j, k, mj, mk, maxchunk; int *jj; Nv = *nv; Ne = *ne; Ntmax = *ntmax; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); Nt = 0; /* vertex index i ranges from 1 to Nv */ XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ if(Nj > 1) { /* Sort jj in ascending order */ 
for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - exit */ *status = 1; return; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } } } } *nt = Nt; *status = 0; } /* faster version of trigraf() assuming that ie[m] < je[m] ie[] is in ascending order je[] is in ascending order within ie[], that is, je[ie[]=i] is in ascending order for each fixed i */ void trigrafS(nv, ne, ie, je, ntmax, nt, it, jt, kt, status) /* inputs */ int *nv; /* number of graph vertices */ int *ne; /* number of edges */ int *ie, *je; /* vectors of indices of ends of each edge */ int *ntmax; /* length of storage space for triangles */ /* output */ int *nt; /* number of triangles */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int *status; /* 0 if OK, 1 if overflow */ { int Ne, Nt, Ntmax; int m, i, j, k, mj, mk; int firstedge, lastedge; Ne = *ne; Ntmax = *ntmax; /* nv is not used, but retained for harmony with trigraf */ /* Avoid compiler warnings */ Nt = *nv; /* initialise output */ Nt = 0; lastedge = -1; while(lastedge + 1 < Ne) { if(lastedge % 256 == 0) R_CheckUserInterrupt(); /* Consider next vertex i. The edges (i,j) with i < j appear contiguously in the edge list. */ firstedge = lastedge + 1; i = ie[firstedge]; for(m= firstedge+1; m < Ne && ie[m] == i; m++) ; lastedge = m-1; /* Consider each pair j, k of neighbours of i, where i < j < k. Scan entire edge list to determine whether j, k are joined by an edge. 
If so, save triangle (i,j,k) */ if(lastedge > firstedge) { for(mj = firstedge; mj < lastedge; mj++) { j = je[mj]; for(mk = firstedge+1; mk <= lastedge; mk++) { k = je[mk]; /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne && ie[m] < j; m++) ; while(m < Ne && ie[m] == j) { if(je[m] == k) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - exit */ *status = 1; return; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } m++; } } } } } *nt = Nt; *status = 0; } /* ------------------- callable by .Call ------------------------- */ SEXP trigraph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge) /* all arguments are integer */ { int Nv, Ne; int *ie, *je; /* edges */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int Nt, Ntmax; /* number of triangles */ int Nj; int *jj; /* scratch storage */ int i, j, k, m, mj, mk, Nmore, maxchunk; /* output */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); /* That's 3 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < 
Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } /* output indices in R convention */ it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 3+4=7 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 
2, kTout); UNPROTECT(7); return(out); } /* faster version assuming iedge is in increasing order */ SEXP triograph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge) /* all arguments are integer */ { int Nv, Ne; int *ie, *je; /* edges */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int Nt, Ntmax; /* number of triangles */ int Nj; int *jj; /* scratch storage */ int i, j, k, m, mj, mk, maxjk, Nmore, maxchunk; /* output */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); /* That's 3 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) 
{ j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ maxjk = (j > k) ? j : k; for(m = 0; m < Ne; m++) { if(ie[m] > maxjk) break; /* since iedge is in increasing order, the test below will always be FALSE when ie[m] > max(j,k) */ if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 3+4=7 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(7); return(out); } /* Even faster version using information about dummy vertices. Dummy-to-dummy edges are forbidden. For generic purposes use 'friendly' for 'isdata' Edge between j and k is possible iff friendly[j] || friendly[k]. Edges with friendly = FALSE cannot be connected to one another. 
*/ SEXP trioxgraph(SEXP nv, /* number of vertices */ SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge, SEXP friendly) /* indicator vector, length nv */ { /* input */ int Nv, Ne; int *ie, *je; /* edges */ int *friend; /* indicator */ /* output */ int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ int Nt, Ntmax; /* number of triangles */ /* scratch storage */ int Nj; int *jj; int i, j, k, m, mj, mk, maxjk, Nmore, maxchunk; /* output to R */ SEXP iTout, jTout, kTout, out; int *ito, *jto, *kto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); PROTECT(friendly = AS_INTEGER(friendly)); /* That's 4 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); friend = INTEGER_POINTER(friendly); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); /* convert to C indexing convention */ for(m = 0; m < Ne; m++) { ie[m] -= 1; je[m] -= 1; } OUTERCHUNKLOOP(i, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(i, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", 
i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(j != k && (friend[j] || friend[k])) { /* Run through edges to determine whether j, k are neighbours */ maxjk = (j > k) ? j : k; for(m = 0; m < Ne; m++) { if(ie[m] > maxjk) break; /* since iedge is in increasing order, the test below will always be FALSE when ie[m] > max(j,k) */ if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); Ntmax = Nmore; } /* convert back to R indexing */ it[Nt] = i + 1; jt[Nt] = j + 1; kt[Nt] = k + 1; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(out = NEW_LIST(3)); /* that's 4+4=8 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; } /* insert output vectors in output list */ SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); UNPROTECT(8); return(out); } /* also calculates diameter (max edge length) of triangle */ SEXP triDgraph(SEXP nv, /* number of vertices */ 
SEXP iedge, /* vectors of indices of ends of each edge */ SEXP jedge, SEXP edgelength) /* edge lengths */ { int Nv, Ne; int *ie, *je; /* edges */ double *edgelen; int *it, *jt, *kt; /* vectors of indices of vertices of triangles */ double *dt; /* diameters (max edge lengths) of triangles */ int Nt, Ntmax; /* number of triangles */ /* scratch storage */ int Nj; int *jj; double *dd; int i, j, k, m, mj, mk, Nmore, maxchunk; double dij, dik, djk, diam; /* output */ SEXP iTout, jTout, kTout, dTout, out; int *ito, *jto, *kto; double *dto; /* =================== Protect R objects from garbage collector ======= */ PROTECT(nv = AS_INTEGER(nv)); PROTECT(iedge = AS_INTEGER(iedge)); PROTECT(jedge = AS_INTEGER(jedge)); PROTECT(edgelength = AS_NUMERIC(edgelength)); /* That's 4 protected objects */ /* numbers of vertices and edges */ Nv = *(INTEGER_POINTER(nv)); Ne = LENGTH(iedge); /* input arrays */ ie = INTEGER_POINTER(iedge); je = INTEGER_POINTER(jedge); edgelen = NUMERIC_POINTER(edgelength); /* initialise storage (with a guess at max size) */ Ntmax = 3 * Ne; it = (int *) R_alloc(Ntmax, sizeof(int)); jt = (int *) R_alloc(Ntmax, sizeof(int)); kt = (int *) R_alloc(Ntmax, sizeof(int)); dt = (double *) R_alloc(Ntmax, sizeof(double)); Nt = 0; /* initialise scratch storage */ jj = (int *) R_alloc(Ne, sizeof(int)); dd = (double *) R_alloc(Ne, sizeof(double)); XOUTERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { R_CheckUserInterrupt(); XINNERCHUNKLOOP(i, 1, Nv, maxchunk, 8196) { #ifdef DEBUGTRI Rprintf("i=%d ---------- \n", i); #endif /* Find triangles involving vertex 'i' in which 'i' is the lowest-numbered vertex */ /* First, find vertices j > i connected to i */ Nj = 0; for(m = 0; m < Ne; m++) { if(ie[m] == i) { j = je[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } else if(je[m] == i) { j = ie[m]; if(j > i) { jj[Nj] = j; dd[Nj] = edgelen[m]; Nj++; } } } /* Determine which pairs of vertices j, k are joined by an edge; save triangles (i,j,k) */ #ifdef DEBUGTRI Rprintf("Nj = %d\n", 
Nj); #endif if(Nj > 1) { #ifdef DEBUGTRI Rprintf("i=%d\njj=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif /* Sort jj in ascending order */ for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; if(k < j) { /* swap */ jj[mk] = j; jj[mj] = k; dik = dd[mj]; dd[mj] = dd[mk]; dd[mk] = dik; j = k; } } } #ifdef DEBUGTRI Rprintf("sorted=\n", i); for(mj = 0; mj < Nj; mj++) Rprintf("%d ", jj[mj]); Rprintf("\n\n"); #endif for(mj = 0; mj < Nj-1; mj++) { j = jj[mj]; dij = dd[mj]; for(mk = mj+1; mk < Nj; mk++) { k = jj[mk]; dik = dd[mk]; if(j != k) { /* Run through edges to determine whether j, k are neighbours */ for(m = 0; m < Ne; m++) { if((ie[m] == j && je[m] == k) || (ie[m] == k && je[m] == j)) { /* triangle (i, j, k) */ /* determine triangle diameter */ diam = (dij > dik) ? dij : dik; djk = edgelen[m]; if(djk > diam) diam = djk; /* add (i, j, k) to list of triangles */ if(Nt >= Ntmax) { /* overflow - allocate more space */ Nmore = 2 * Ntmax; #ifdef DEBUGTRI Rprintf("Doubling space from %d to %d\n", Ntmax, Nmore); #endif it = (int *) S_realloc((char *) it, Nmore, Ntmax, sizeof(int)); jt = (int *) S_realloc((char *) jt, Nmore, Ntmax, sizeof(int)); kt = (int *) S_realloc((char *) kt, Nmore, Ntmax, sizeof(int)); dt = (double *) S_realloc((char *) dt, Nmore, Ntmax, sizeof(double)); Ntmax = Nmore; } it[Nt] = i; jt[Nt] = j; kt[Nt] = k; dt[Nt] = diam; Nt++; } } } } } } } } /* allocate space for output */ PROTECT(iTout = NEW_INTEGER(Nt)); PROTECT(jTout = NEW_INTEGER(Nt)); PROTECT(kTout = NEW_INTEGER(Nt)); PROTECT(dTout = NEW_NUMERIC(Nt)); PROTECT(out = NEW_LIST(4)); /* that's 4+5=9 protected objects */ ito = INTEGER_POINTER(iTout); jto = INTEGER_POINTER(jTout); kto = INTEGER_POINTER(kTout); dto = NUMERIC_POINTER(dTout); /* copy triangle indices to output vectors */ for(m = 0; m < Nt; m++) { ito[m] = it[m]; jto[m] = jt[m]; kto[m] = kt[m]; dto[m] = dt[m]; } /* insert output vectors in output list */ 
SET_VECTOR_ELT(out, 0, iTout); SET_VECTOR_ELT(out, 1, jTout); SET_VECTOR_ELT(out, 2, kTout); SET_VECTOR_ELT(out, 3, dTout); UNPROTECT(9); return(out); } spatstat/src/poly2im.c0000755000176000001440000002025312252324034014475 0ustar ripleyusers/* poly2im.c Conversion from (x,y) polygon to pixel image poly2imI pixel value = 1{pixel centre is inside polygon} poly2imA pixel value = area of intersection between pixel and polygon $Revision: 1.8 $ $Date: 2013/09/18 04:50:36 $ */ #undef DEBUG #include #include #include #include "chunkloop.h" #define OUT(I,J) out[I + (J) * Ny] void poly2imI(xp, yp, np, nx, ny, out) double *xp, *yp; /* polygon vertices, anticlockwise, CLOSED */ int *np; int *nx, *ny; /* INTEGER raster points from (0,0) to (nx-1, ny-1) */ int *out; /* output matrix [ny, nx], byrow=FALSE, initialised to 0 */ { int Np, Nx, Ny, Np1, maxchunk, mstart, mend; int j, k, m; double x0, y0, x1, y1, xleft, xright, yleft, yright; double dx, dy, y, slope, intercept; int jleft, jright, imax; int sign; Np = *np; Nx = *nx; Ny = *ny; /* Nxy = Nx * Ny; */ Np1 = Np - 1; /* run through polygon edges */ OUTERCHUNKLOOP(k, Np1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, Np1, maxchunk, 8196) { x0 = xp[k]; y0 = yp[k]; x1 = xp[k+1]; y1 = yp[k+1]; if(x0 < x1) { xleft = x0; xright = x1; yleft = y0; yright = y1; sign = -1; } else { xleft = x1; xright = x0; yleft = y1; yright = y0; sign = +1; } /* determine relevant columns of pixels */ jleft = (int) ceil(xleft); jright = (int) floor(xright); if(jleft < Nx && jright >= 0 && jleft <= jright) { if(jleft < 0) { jleft = 0; } if(jright >= Nx) {jright = Nx - 1; } /* equation of edge */ dx = xright - xleft; dy = yright - yleft; slope = dy/dx; intercept = yleft - slope * xleft; /* visit relevant columns */ for(j = jleft; j <= jright; j++) { y = slope * ((double) j) + intercept; imax = (int) floor(y); if(imax >= Ny) imax = Ny-1; if(imax >= 0) { /* increment entries below edge in this column: out[i + j * Ny] += sign for 0 <= i <= 
imax */ mstart = j * Ny; mend = mstart + imax; for(m = mstart; m <= mend; m++) { out[m] += sign; } } } } } } } #define BELOW -1 #define INSIDE 0 #define ABOVE 1 void poly2imA(ncol, nrow, xpoly, ypoly, npoly, out, status) int *ncol, *nrow; /* pixels are unit squares from (0,0) to (ncol,nrow) */ double *xpoly, *ypoly; /* vectors of coordinates of polygon vertices */ int *npoly; double *out; /* double array [nrow, ncol] of pixel areas, byrow=TRUE, initialised to 0 */ int *status; { double *xp, *yp; int nx, ny, np, np1, maxchunk; int i, j, k; double xcur, ycur, xnext, ynext, xleft, yleft, xright, yright; int sgn, jmin, jmax, imin, imax; double x0, y0, x1, y1, slope, yhi, ylo, area, xcut, xcutA, xcutB; int klo, khi; nx = *ncol; ny = *nrow; xp = xpoly; yp = ypoly; np = *npoly; *status = 0; /* initialise output array */ for(i = 0; i < ny; i++) for(j = 0; j < nx; j++) out[j + ny * i] = 0; /* ............ loop over polygon edges ...................*/ np1 = np - 1; OUTERCHUNKLOOP(k, np1, maxchunk, 8196) { R_CheckUserInterrupt(); INNERCHUNKLOOP(k, np1, maxchunk, 8196) { xcur = xp[k]; ycur = yp[k]; xnext = xp[k+1]; ynext = yp[k+1]; #ifdef DEBUG Rprintf("\nEdge %d from (%lf, %lf) to (%lf, %lf) .........\n", k, xcur, ycur, xnext, ynext); #endif if(xcur != xnext) { /* vertical edges are ignored */ if(xcur < xnext) { #ifdef DEBUG Rprintf("negative sign\n"); #endif sgn = -1; xleft = xcur; yleft = ycur; xright = xnext; yright = ynext; } else { #ifdef DEBUG Rprintf("positive sign\n"); #endif sgn = 1; xleft = xnext; yleft = ynext; xright = xcur; yright = ycur; } /* we have now ensured xleft < xright */ slope = (yright - yleft)/(xright - xleft); /* Find relevant columns of pixels */ jmin = floor(xleft); jmin = (jmin < 0) ? 0 : jmin; jmax = ceil(xright); jmax = (jmax > nx - 1) ? nx - 1 : jmax; /* Find relevant rows of pixels */ imin = floor((yleft < yright) ? yleft : yright); imin = (imin < 0) ? 0 : imin; imax = ceil((yleft < yright) ? yright : yleft); imax = (imax > ny - 1) ? 
ny - 1 : imax; #ifdef DEBUG Rprintf( "imin=%d, imax=%d, jmin=%d, jmax=%d\n", imin, imax, jmin, jmax); #endif /* ........... loop over columns of pixels ..............*/ for(j = jmin; j <= jmax; j++) { #ifdef DEBUG Rprintf( "\t j=%d:\n", j); #endif /* Intersect trapezium with column of pixels */ if(xleft <= j+1 && xright >= j) { if(xleft >= j) { /* retain left corner */ #ifdef DEBUG Rprintf( "\tretain left corner\n"); #endif x0 = xleft; y0 = yleft; } else { /* trim left corner */ #ifdef DEBUG Rprintf( "\ttrim left corner\n"); #endif x0 = (double) j; y0 = yleft + slope * (x0 - xleft); } if(xright <= j+1) { /* retain right corner */ #ifdef DEBUG Rprintf( "\tretain right corner\n"); #endif x1 = xright; y1 = yright; } else { /* trim right corner */ #ifdef DEBUG Rprintf( "\ttrim right corner\n"); #endif x1 = (double) (j+1); y1 = yright + slope * (x1 - xright); } /* save min and max y */ if(y0 < y1) { #ifdef DEBUG Rprintf( "slope %lf > 0\n", slope); #endif ylo = y0; yhi = y1; } else { #ifdef DEBUG Rprintf( "slope %lf <= 0\n", slope); #endif ylo = y1; yhi = y0; } /* ............ loop over pixels within column ......... */ /* first part */ if(imin > 0) { for(i = 0; i < imin; i++) { #ifdef DEBUG Rprintf( "\ti=%d:\n", i); #endif /* The trimmed pixel [x0, x1] * [i, i+1] lies below the polygon edge. */ area = (x1 - x0); #ifdef DEBUG Rprintf( "\tIncrementing area by %lf\n", sgn * area); #endif out[j + ny * i] += sgn * area; } } /* second part */ for(i = imin; i <= imax; i++) { #ifdef DEBUG Rprintf( "\ti=%d:\n", i); #endif /* Compute area of intersection between trapezium and trimmed pixel [x0, x1] x [i, i+1] */ klo = (ylo <= i) ? BELOW : (ylo >= (i+1))? ABOVE: INSIDE; khi = (yhi <= i) ? BELOW : (yhi >= (i+1))? 
ABOVE: INSIDE; if(klo == ABOVE) { /* trapezium covers pixel */ #ifdef DEBUG Rprintf( "\t\ttrapezium covers pixel\n"); #endif area = (x1-x0); } else if(khi == BELOW) { #ifdef DEBUG Rprintf( "\t\tpixel avoids trapezium\n"); #endif /* pixel avoids trapezium */ area = 0.0; } else if(klo == INSIDE && khi == INSIDE) { /* polygon edge is inside pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge is inside pixel\n"); #endif area = (x1-x0) * ((ylo + yhi)/2.0 - i); } else if(klo == INSIDE && khi == ABOVE) { /* polygon edge crosses upper edge of pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge crosses upper edge of pixel\n"); #endif xcut = x0 + ((i+1) - y0)/slope; if(slope > 0) area = (xcut - x0) * ((y0 + (i+1))/2 - i) + (x1 - xcut); else area = (x1 - xcut) * ((y1 + (i+1))/2 - i) + (xcut - x0); } else if(klo == BELOW && khi == INSIDE) { /* polygon edge crosses lower edge of pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge crosses lower edge of pixel\n"); #endif xcut = x0 + (i - y0)/slope; if(slope > 0) area = (x1 - xcut) * ((y1 + i)/2 - i); else area = (xcut - x0) * ((y0 + i)/2 - i); } else if(klo == BELOW && khi == ABOVE) { /* polygon edge crosses upper and lower edges of pixel */ #ifdef DEBUG Rprintf( "\t\t polygon edge crosses upper and lower edges of pixel\n"); #endif xcutA = x0 + (i - y0)/slope; xcutB = x0 + ((i+1) - y0)/slope; if(slope > 0) area = (xcutB - xcutA)/2 + (x1 - xcutB); else area = (xcutB - x0) + (xcutA - xcutB)/2; } else { /* control should not pass to here */ *status = 1; return; } /* add contribution to area of pixel */ #ifdef DEBUG Rprintf( "\tIncrementing area by %lf\n", sgn * area); #endif out[j + ny * i] += sgn * area; } /* ............ end of loop over pixels within column ......... */ } } /* ........ end of loop over columns of pixels ...............*/ } } } /* ......... 
end of loop over polygon edges ...................*/ } spatstat/src/fiksel.c0000755000176000001440000000574412252324034014367 0ustar ripleyusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Fiksel process */ /* Conditional intensity function for a pairwise interaction point process with interaction function e(t) = 0 for t < h = exp(a * exp(- kappa * t)) for h <= t < r = 1 for t >= r */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Fiksel { double r; double h; double kappa; double a; double h2; /* h^2 */ double r2; /* r^2 */ double *period; int per; } Fiksel; /* initialiser function */ Cdata *fikselinit(state, model, algo) State state; Model model; Algor algo; { Fiksel *fiksel; fiksel = (Fiksel *) R_alloc(1, sizeof(Fiksel)); /* Interpret model parameters*/ fiksel->r = model.ipar[0]; fiksel->h = model.ipar[1]; fiksel->kappa = model.ipar[2]; fiksel->a = model.ipar[3]; fiksel->period = model.period; /* constants */ fiksel->h2 = pow(fiksel->h, 2); fiksel->r2 = pow(fiksel->r, 2); /* periodic boundary conditions? 
*/ fiksel->per = (model.period[0] > 0.0); return((Cdata *) fiksel); } /* conditional intensity evaluator */ double fikselcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairpotsum, cifval; double kappa, r2, h2; double *period; Fiksel *fiksel; DECLARE_CLOSE_D2_VARS; fiksel = (Fiksel *) cdata; period = fiksel->period; kappa = fiksel->kappa; r2 = fiksel->r2; h2 = fiksel->h2; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairpotsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(fiksel->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,r2,d2)) { if(d2 < h2) { cifval = 0.0; return(cifval); } else { pairpotsum += exp(-kappa * sqrt(d2)); } } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u,v,x[j],y[j],r2,d2)) { if(d2 < h2) { cifval = 0.0; return(cifval); } else { pairpotsum += exp(-kappa * sqrt(d2)); } } } } if(ixp1 < npts) { for(j=ixp1; ja * pairpotsum); return cifval; } Cifns FikselCifns = { &fikselinit, &fikselcif, (updafunptr) NULL, NO}; spatstat/src/mhv4.h0000644000176000001440000000040512252324034013757 0ustar ripleyusers/* mhv4.h visual debugger or not */ #undef MH_SNOOP if(snooper.active) { /* visual debugger */ #define MH_SNOOP YES #include "mhloop.h" #undef MH_SNOOP } else { /* no visual debugger */ #define MH_SNOOP NO #include "mhloop.h" #undef MH_SNOOP } spatstat/src/knn3DdistX.h0000644000176000001440000001226412252324034015100 0ustar ripleyusers #if (1 == 0) /* knn3DdistX.h Code template for C functions supporting nncross for k-nearest neighbours (k > 1) for 3D point patterns THE FOLLOWING CODE ASSUMES THAT LISTS ARE SORTED IN ASCENDING ORDER OF z COORDINATE This code is #included multiple times in nn3Ddist.c Variables used: FNAME function name DIST #defined if function returns distance to 
nearest neighbour WHICH #defined if function returns id of nearest neighbour EXCLUDE #defined if exclusion mechanism is used Either or both DIST and WHICH may be defined. When EXCLUDE is defined, code numbers id1, id2 are attached to the patterns X and Y respectively, such that X[i] and Y[j] are the same point iff id1[i] = id2[j]. Copyright (C) Adrian Baddeley, Jens Oehlschlagel and Rolf Turner 2000-2013 Licence: GPL >= 2 $Revision: 1.1 $ $Date: 2013/06/29 03:04:47 $ */ #endif void FNAME(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, kmax, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; int *id1, *id2; int *kmax; /* output matrices (npoints * kmax) in ROW MAJOR order */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { int npoints1, npoints2, nk, nk1; int maxchunk, i, jleft, jright, jwhich, lastjwhich, unsorted, k, k1; double d2, d2minK, x1i, y1i, z1i, dx, dy, dz, dz2, hu, hu2, tmp; double *d2min; #ifdef WHICH int *which; int itmp; #endif #ifdef EXCLUDE int id1i; #endif npoints1 = *n1; npoints2 = *n2; nk = *kmax; nk1 = nk - 1; hu = *huge; hu2 = hu * hu; if(npoints1 == 0 || npoints2 == 0) return; lastjwhich = 0; /* create space to store the nearest neighbour distances and indices for the current point */ d2min = (double *) R_alloc((size_t) nk, sizeof(double)); #ifdef WHICH which = (int *) R_alloc((size_t) nk, sizeof(int)); #endif /* loop in chunks of 2^16 */ i = 0; maxchunk = 0; while(i < npoints1) { R_CheckUserInterrupt(); maxchunk += 65536; if(maxchunk > npoints1) maxchunk = npoints1; for(; i < maxchunk; i++) { /* initialise nn distances and indices */ d2minK = hu2; jwhich = -1; for(k = 0; k < nk; k++) { d2min[k] = hu2; #ifdef WHICH which[k] = -1; #endif } x1i = x1[i]; y1i = y1[i]; z1i = z1[i]; #ifdef EXCLUDE id1i = id1[i]; #endif if(lastjwhich < npoints2) { /* search forward from previous nearest neighbour */ for(jright = lastjwhich; jright < npoints2; ++jright) { dz = z2[jright] - z1i; 
dz2 = dz * dz; if(dz2 > d2minK) /* note that dz2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jright] != id1i) { #endif dy = y2[jright] - y1i; d2 = dy * dy + dz2; if(d2 < d2minK) { dx = x2[jright] - x1i; d2 = dx * dx + d2; if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ d2min[nk1] = d2; jwhich = jright; #ifdef WHICH which[nk1] = jright; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef EXCLUDE } #endif } /* end forward search */ } if(lastjwhich > 0) { /* search backward from previous nearest neighbour */ for(jleft = lastjwhich - 1; jleft >= 0; --jleft) { dz = z1i - z2[jleft]; dz2 = dz * dz; if(dz2 > d2minK) /* note that dz2 >= d2minK could break too early */ break; #ifdef EXCLUDE /* do not compare identical points */ if(id2[jleft] != id1i) { #endif dy = y2[jleft] - y1i; d2 = dy * dy + dz2; if(d2 < d2minK) { dx = x2[jleft] - x1i; d2 = dx * dx + d2; if (d2 < d2minK) { /* overwrite last entry in list of neighbours */ d2min[nk1] = d2; jwhich = jleft; #ifdef WHICH which[nk1] = jleft; #endif /* bubble sort */ unsorted = YES; for(k = nk1; unsorted && k > 0; k--) { k1 = k - 1; if(d2min[k] < d2min[k1]) { /* swap entries */ tmp = d2min[k1]; d2min[k1] = d2min[k]; d2min[k] = tmp; #ifdef WHICH itmp = which[k1]; which[k1] = which[k]; which[k] = itmp; #endif } else { unsorted = NO; } } /* adjust maximum distance */ d2minK = d2min[nk1]; } } #ifdef EXCLUDE } #endif } /* end backward search */ } /* copy nn distances for point i to output matrix in ROW MAJOR order */ for(k = 0; k < nk; k++) { #ifdef DIST nnd[nk * i + k] = sqrt(d2min[k]); #endif #ifdef WHICH nnwhich[nk * i + k] = which[k] + 1; /* R 
indexing */ #endif } /* save index of last neighbour encountered */ lastjwhich = jwhich; /* end of loop over points i */ } } } spatstat/src/knngrid.c0000644000176000001440000000400612252324034014531 0ustar ripleyusers/* knngrid.c K-th Nearest Neighbour Distances from a pixel grid to a point pattern Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 Licence: GNU Public Licence >= 2 $Revision: 1.6 $ $Date: 2013/11/03 05:06:28 $ Function body definition is #included from knngrid.h THE FOLLOWING FUNCTIONS ASSUME THAT x IS SORTED IN ASCENDING ORDER */ #undef SPATSTAT_DEBUG #include #include #include #include "yesno.h" double sqrt(); /* THE FOLLOWING CODE ASSUMES THAT x IS SORTED IN ASCENDING ORDER */ /* general interface */ void knnGinterface(nx, x0, xstep, ny, y0, ystep, /* pixel grid dimensions */ np, xp, yp, /* data points */ kmax, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *nx, *ny, *np; double *x0, *xstep, *y0, *ystep, *huge; double *xp, *yp; int *kmax; /* options */ int *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void knnGdw(), knnGd(), knnGw(); int di, wh; di = (*wantdist != 0); wh = (*wantwhich != 0); if(di && wh) { knnGdw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge); } else if(di) { knnGd(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge); } else if(wh) { knnGw(nx, x0, xstep, ny, y0, ystep, np, xp, yp, kmax, nnd, nnwhich, huge); } } #undef FNAME #undef DIST #undef WHICH /* knnGdw nearest neighbours 1:kmax returns distances and indices */ #define FNAME knnGdw #define DIST #define WHICH #include "knngrid.h" #undef FNAME #undef DIST #undef WHICH /* knnGd nearest neighbours 1:kmax returns distances only */ #define FNAME knnGd #define DIST #include "knngrid.h" #undef FNAME #undef DIST #undef WHICH /* knnGw nearest neighbours 1:kmax returns indices only */ #define FNAME knnGw #define WHICH #include "knngrid.h" 
#undef FNAME #undef DIST #undef WHICH spatstat/src/linnncross.c0000644000176000001440000000121212252324034015261 0ustar ripleyusers#include /* linnncross.c Shortest-path distances between nearest neighbours in linear network One pattern to another pattern $Revision: 1.1 $ $Date: 2013/10/21 02:01:29 $ linndcross linndxcross */ #define DPATH(I,J) dpath[(I) + Nv * (J)] #define ANSWER(I,J) answer[(I) + Np * (J)] #define EUCLID(X,Y,U,V) sqrt(pow((X)-(U),2)+pow((Y)-(V),2)) /* definition of linndcross */ #define FNAME linndcross #undef EXCLU #define WHICH #include "linnncross.h" #undef FNAME #undef EXCLU #undef WHICH /* definition of linndxcross */ #define FNAME linndxcross #define EXCLU #define WHICH #include "linnncross.h" spatstat/src/mhv2.h0000644000176000001440000000041212252324034013753 0ustar ripleyusers/* mhv2.h single interaction or hybrid */ #undef MH_SINGLE if(Ncif == 1) { /* single interaction */ #define MH_SINGLE YES #include "mhv3.h" #undef MH_SINGLE } else { /* hybrid interaction */ #define MH_SINGLE NO #include "mhv3.h" #undef MH_SINGLE } spatstat/src/sftcr.c0000755000176000001440000000436612252324034014232 0ustar ripleyusers#include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Soft Core process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Softcore { double sigma; double kappa; double nook; /* -1/kappa */ double stok; /* sigma^(2/kappa) */ double *period; int per; } Softcore; /* initialiser function */ Cdata *sftcrinit(state, model, algo) State state; Model model; Algor algo; { Softcore *softcore; softcore = (Softcore *) R_alloc(1, sizeof(Softcore)); /* Interpret model parameters*/ softcore->sigma = model.ipar[0]; softcore->kappa = model.ipar[1]; softcore->period = model.period; /* constants */ softcore->nook = -1/softcore->kappa; softcore->stok = pow(softcore->sigma, 2/softcore->kappa); /* periodic boundary conditions? 
*/ softcore->per = (model.period[0] > 0.0); return((Cdata *) softcore); } /* conditional intensity evaluator */ double sftcrcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, pairsum, cifval, nook, stok; Softcore *softcore; softcore = (Softcore *) cdata; nook = softcore->nook; stok = softcore->stok; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(softcore->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = dist2(u,v,x[j],y[j],softcore->period); pairsum += pow(d2, nook); } } if(ixp1 < npts) { for(j=ixp1; jperiod); pairsum += pow(d2, nook); } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { d2 = pow(u - x[j],2) + pow(v-y[j],2); pairsum += pow(d2, nook); } } if(ixp1 < npts) { for(j=ixp1; j #include #include #include "geom3.h" #include "functable.h" #ifdef DEBUG #define DEBUGMESSAGE(S) Rprintf(S); #else #define DEBUGMESSAGE(S) #endif /* $Revision: 1.3 $ $Date: 2011/11/20 04:19:10 $ 3D distance transform # ///////////////////////////////////////////// # AUTHOR: Adrian Baddeley, CWI, Amsterdam, 1991. # # MODIFIED BY: Adrian Baddeley, Perth 2009 # # This software is distributed free # under the conditions that # (1) it shall not be incorporated # in software that is subsequently sold # (2) the authorship of the software shall # be acknowledged in any publication that # uses results generated by the software # (3) this notice shall remain in place # in each file. # ////////////////////////////////////////////// */ /* step lengths in distance transform */ #define STEP1 41 #define STEP2 58 #define STEP3 71 /* (41,58,71)/41 is a good rational approximation to (1, sqrt(2), sqrt(3)) */ #define MIN(X,Y) (((X) < (Y)) ? (X) : (Y)) #define MAX(X,Y) (((X) > (Y)) ? 
(X) : (Y)) typedef struct IntImage { int *data; int Mx, My, Mz; /* dimensions */ int length; } IntImage; typedef struct BinaryImage { unsigned char *data; int Mx, My, Mz; /* dimensions */ int length; } BinaryImage; #define VALUE(I,X,Y,Z) \ ((I).data)[ (Z) * ((I).Mx) * ((I).My) + (Y) * ((I).Mx) + (X) ] void allocBinImage(b, ok) BinaryImage *b; int *ok; { b->length = b->Mx * b->My * b->Mz; b->data = (unsigned char *) R_alloc(b->length, sizeof(unsigned char)); if(b->data == 0) { Rprintf("Can't allocate memory for %d binary voxels\n", b->length); *ok = 0; } *ok = 1; } void allocIntImage(v, ok) IntImage *v; int *ok; { v->length = v->Mx * v->My * v->Mz; v->data = (int *) R_alloc(v->length, sizeof(int)); if(v->data == 0) { Rprintf("Can't allocate memory for %d integer voxels\n", v->length); *ok = 0; } *ok = 1; } void freeBinImage(b) BinaryImage *b; { } void freeIntImage(v) IntImage *v; { } void cts2bin(p, n, box, vside, b, ok) /* convert a list of points inside a box into a 3D binary image */ Point *p; int n; Box *box; double vside; /* side of a (cubic) voxel */ BinaryImage *b; int *ok; { int i, lx, ly, lz; unsigned char *cp; b->Mx = (int) ceil((box->x1 - box->x0)/vside) + 1; b->My = (int) ceil((box->y1 - box->y0)/vside) + 1; b->Mz = (int) ceil((box->z1 - box->z0)/vside) + 1; allocBinImage(b, ok); if(! (*ok)) return; for(i = b->length, cp = b->data; i ; i--, cp++) *cp = 1; for(i=0;ix0)/vside)-1; ly = (int) ceil((p[i].y - box->y0)/vside)-1; lz = (int) ceil((p[i].z - box->z0)/vside)-1; if( lx >= 0 && lx < b->Mx && ly >= 0 && ly < b->My && lz >= 0 && lz < b->Mz ) VALUE((*b),lx,ly,lz) = 0; } } void distrans3(b, v, ok) /* Distance transform in 3D */ BinaryImage *b; /* input */ IntImage *v; /* output */ int *ok; { register int x, y, z; int infinity, q; /* allocate v same size as b */ v->Mx = b->Mx; v->My = b->My; v->Mz = b->Mz; allocIntImage(v, ok); if(! 
(*ok)) return; /* compute largest possible distance */ infinity = (int) ceil( ((double) STEP3) * sqrt( ((double) b->Mx) * b->Mx + ((double) b->My) * b->My + ((double) b->Mz) * b->Mz)); /* Forward pass: Top to Bottom; Back to Front; Left to Right. */ for(z=0;zMz;z++) { R_CheckUserInterrupt(); for(y=0;yMy;y++) { for(x=0;xMx;x++) { if(VALUE((*b),x,y,z) == 0) VALUE((*v),x,y,z) = 0; else { q = infinity; #define INTERVAL(W, DW, MW) \ ((DW == 0) || (DW == -1 && W > 0) || (DW == 1 && W < MW - 1)) #define BOX(X,Y,Z,DX,DY,DZ) \ (INTERVAL(X,DX,v->Mx) && INTERVAL(Y,DY,v->My) && INTERVAL(Z,DZ,v->Mz)) #define TEST(DX,DY,DZ,DV) \ if(BOX(x,y,z,DX,DY,DZ) && q > VALUE((*v),x+DX,y+DY,z+DZ) + DV) \ q = VALUE((*v),x+DX,y+DY,z+DZ) + DV /* same row */ TEST(-1, 0, 0, STEP1); /* same plane */ TEST(-1,-1, 0, STEP2); TEST( 0,-1, 0, STEP1); TEST( 1,-1, 0, STEP2); /* previous plane */ TEST( 1, 1,-1, STEP3); TEST( 0, 1,-1, STEP2); TEST(-1, 1,-1, STEP3); TEST( 1, 0,-1, STEP2); TEST( 0, 0,-1, STEP1); TEST(-1, 0,-1, STEP2); TEST( 1,-1,-1, STEP3); TEST( 0,-1,-1, STEP2); TEST(-1,-1,-1, STEP3); VALUE((*v),x,y,z) = q; } } } } /* Backward pass: Bottom to Top; Front to Back; Right to Left. 
*/ for(z = b->Mz - 1; z >= 0; z--) { R_CheckUserInterrupt(); for(y = b->My - 1; y >= 0; y--) { for(x = b->Mx - 1; x >= 0; x--) { if((q = VALUE((*v),x,y,z)) != 0) { /* same row */ TEST(1, 0, 0, STEP1); /* same plane */ TEST(-1, 1, 0, STEP2); TEST( 0, 1, 0, STEP1); TEST( 1, 1, 0, STEP2); /* plane below */ TEST( 1, 1, 1, STEP3); TEST( 0, 1, 1, STEP2); TEST(-1, 1, 1, STEP3); TEST( 1, 0, 1, STEP2); TEST( 0, 0, 1, STEP1); TEST(-1, 0, 1, STEP2); TEST( 1,-1, 1, STEP3); TEST( 0,-1, 1, STEP2); TEST(-1,-1, 1, STEP3); VALUE((*v),x,y,z) = q; } } } } } void hist3d(v, vside, count) /* compute histogram of all values in *v using count->n histogram cells ranging from count->t0 to count->t1 and put results in count->num */ IntImage *v; double vside; Itable *count; { register int i, j, k; register int *ip; register double scale, width; /* relationship between distance transform units and physical units */ scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); for(i = 0; i < count->n ; i++) { (count->num)[i] = 0; (count->denom)[i] = v->length; } for(i = v->length, ip = v->data; i; i--, ip++) { k = (int) ceil((*ip * scale - count->t0)/width); k = MAX(k, 0); for(j = k; j < count->n; j++) (count->num)[j]++; } } void hist3dminus(v, vside, count) /* minus sampling */ IntImage *v; double vside; Itable *count; { register int x, y, z, val, border, bx, by, bz, byz, j, kbord, kval; register double scale, width; DEBUGMESSAGE("inside hist3dminus\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeItable */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->My - x); border = MIN(bx, byz); kbord = (int) floor((vside * border - count->t0)/width); kbord = MIN(kbord, count->n - 1); /* denominator counts all voxels with distance to boundary >= r */ if(kbord >= 0) for(j = 0; j <= kbord; 
j++) (count->denom)[j]++; val = VALUE((*v), x, y, z); kval = (int) ceil((val * scale - count->t0)/width); kval = MAX(kval, 0); #ifdef DEBUG /* Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", vside * border, kbord, scale * val, kval); */ #endif /* numerator counts all voxels with distance to boundary >= r and distance to nearest point <= r */ if(kval <= kbord) for(j = kval; j <= kbord; j++) (count->num)[j]++; } } } DEBUGMESSAGE("leaving hist3dminus\n") } void hist3dCen(v, vside, count) /* four censoring-related histograms */ IntImage *v; double vside; H4table *count; { register int x, y, z, val, border, bx, by, bz, byz, kbord, kval; register double scale, width, realborder, realval; DEBUGMESSAGE("inside hist3dCen\n") scale = vside/STEP1; width = (count->t1 - count->t0)/(count->n - 1); /* table is assumed to have been initialised in MakeH4table */ for(z = 0; z < v->Mz; z++) { bz = MIN(z + 1, v->Mz - z); for(y = 0; y < v->My; y++) { by = MIN(y + 1, v->My - y); byz = MIN(by, bz); for(x = 0; x < v->Mx; x++) { bx = MIN(x + 1, v->My - x); border = MIN(bx, byz); realborder = vside * border; kbord = (int) floor((realborder - count->t0)/width); val = VALUE((*v), x, y, z); realval = scale * val; kval = (int) ceil((realval - count->t0)/width); kval = MIN(kval, count->n - 1); #ifdef DEBUG Rprintf("border=%lf\tkbord=%d\tval=%lf\tkval=%d\n", realborder, kbord, realval, kval); #endif if(realval <= realborder) { /* observation is uncensored; increment all four histograms */ if(kval >= count->n) ++(count->upperobs); else if(kval >= 0) { (count->obs)[kval]++; (count->nco)[kval]++; } if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) { (count->cen)[kbord]++; (count->ncc)[kbord]++; } } else { /* observation is censored; increment only two histograms */ kval = MIN(kval, kbord); if(kval >= count->n) ++(count->upperobs); else if(kval >= 0) (count->obs)[kval]++; if(kbord >= count->n) ++(count->uppercen); else if(kbord >= 0) (count->cen)[kbord]++; } } } } 
DEBUGMESSAGE("leaving hist3dCen\n") } /* CALLING ROUTINES */ void phatminus(p, n, box, vside, count) Point *p; int n; Box *box; double vside; Itable *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dminus(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } } void phatnaive(p, n, box, vside, count) Point *p; int n; Box *box; double vside; Itable *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatnaive\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\n into distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3d..."); hist3d(&v, vside, count); DEBUGMESSAGE("out of hist3d\n") freeIntImage(&v); } } void p3hat4(p, n, box, vside, count) Point *p; int n; Box *box; double vside; H4table *count; { BinaryImage b; IntImage v; int ok; DEBUGMESSAGE("in phatminus\ncalling cts2bin...") cts2bin(p, n, box, vside, &b, &ok); DEBUGMESSAGE("out of cts2bin\ninto distrans3...") if(ok) distrans3(&b, &v, &ok); if(ok) { freeBinImage(&b); DEBUGMESSAGE("out of distrans3\ninto hist3dminus...") hist3dCen(&v, vside, count); DEBUGMESSAGE("out of hist3dminus\n") freeIntImage(&v); } } spatstat/src/digber.c0000644000176000001440000000222312252324034014330 0ustar ripleyusers/* digber.c Diggle-Berman function J used in bandwidth selection J(r) = \int_0^(2r) phi(t, r) dK(t) where K is the K-function and phi(t, r) = 2 r^2 * (acos(y) - y sqrt(1 - y^2)) where y = t/(2r). $Revision: 1.7 $ $Date: 2013/08/24 11:13:43 $ */ #include double sqrt(), acos(); /* r is the vector of distance values, starting from 0, with length nr, equally spaced. dK = diff(K) is the vector of increments of the K-function, with length ndK = nr-1. 
values of J are computed only up to max(r)/2 nrmax = floor(nr/2). */ void digberJ(r, dK, nr, nrmax, ndK, J) /* inputs */ int *nr, *nrmax, *ndK; double *r, *dK; /* output */ double *J; { int i, j, Ni, NdK; double ri, twori, tj, y, phiy, integral; Ni = *nrmax; NdK = *ndK; J[0] = 0.0; for(i = 1; i < Ni; i++) { ri = r[i]; twori = 2 * ri; integral = 0.0; for(j = 0; j < NdK; j++) { tj = r[j]; y = tj/twori; if(y >= 1.0) break; phiy = acos(y) - y * sqrt(1 - y * y); integral += phiy * dK[j]; } J[i] = 2 * ri * ri * integral; } } spatstat/src/lennard.c0000755000176000001440000000712112252324034014524 0ustar ripleyusers#include #include #include #include #include "methas.h" #include "dist2.h" /* Conditional intensity computation for Lennard-Jones process */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct Lennard { double sigma; double epsilon; double sigma2; /* sigma^2 */ double foureps; /* 4 * epsilon */ double d2min; /* minimum value of d^2 which yields nonzero intensity */ double d2max; /* maximum value of d^2 which has nontrivial contribution */ double *period; int per; } Lennard; /* MAXEXP is intended to be the largest x such that exp(-x) != 0 although the exact value is not needed */ #define MAXEXP (-log(DOUBLE_XMIN)) #define MINEXP (log(1.001)) /* initialiser function */ Cdata *lennardinit(state, model, algo) State state; Model model; Algor algo; { Lennard *lennard; double sigma2, foureps, minfrac, maxfrac; lennard = (Lennard *) R_alloc(1, sizeof(Lennard)); /* Interpret model parameters*/ lennard->sigma = model.ipar[0]; lennard->epsilon = model.ipar[1]; lennard->period = model.period; /* constants */ lennard->sigma2 = sigma2 = pow(lennard->sigma, 2); lennard->foureps = foureps = 4 * lennard->epsilon; /* thresholds where the interaction becomes trivial */ minfrac = pow(foureps/MAXEXP, (double) 1.0/6.0); if(minfrac > 0.5) minfrac = 0.5; maxfrac = pow(foureps/MINEXP, (double) 1.0/3.0); if(maxfrac < 2.0) maxfrac = 2.0; lennard->d2min = sigma2 * 
minfrac; lennard->d2max = sigma2 * maxfrac; /* periodic boundary conditions? */ lennard->per = (model.period[0] > 0.0); return((Cdata *) lennard); } /* conditional intensity evaluator */ double lennardcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ix, ixp1, j; double *x, *y; double u, v; double d2, ratio6, pairsum, cifval; double sigma2, d2max, d2min; double *period; Lennard *lennard; DECLARE_CLOSE_D2_VARS; lennard = (Lennard *) cdata; sigma2 = lennard->sigma2; d2max = lennard->d2max; d2min = lennard->d2min; period = lennard->period; u = prop.u; v = prop.v; ix = prop.ix; x = state.x; y = state.y; npts = state.npts; cifval = 1.0; if(npts == 0) return(cifval); pairsum = 0; ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(lennard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,d2max,d2)) { if(d2 < d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } if(ixp1 < npts) { for(j=ixp1; j 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], d2max, d2)) { if(d2 < lennard->d2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } if(ixp1 < npts) { for(j=ixp1; jd2min) { cifval = 0.0; return cifval; } ratio6 = pow(sigma2/d2, 3); pairsum += ratio6 * (1.0 - ratio6); } } } } cifval *= exp(lennard->foureps * pairsum); return cifval; } Cifns LennardCifns = { &lennardinit, &lennardcif, (updafunptr) NULL, NO}; spatstat/src/Egeyer.c0000755000176000001440000000434512252324034014326 0ustar ripleyusers#include #include #include "chunkloop.h" /* Egeyer.c $Revision: 1.5 $ $Date: 2013/04/18 06:10:06 $ Part of C implementation of 'eval' for Geyer interaction Calculates change in saturated count (xquad, yquad): quadscheme (xdata, ydata): data tdata: unsaturated pair counts for data pattern quadtodata[j] = i if quad[j] == data[i] (indices start from ZERO) Assumes point patterns 
are sorted in increasing order of x coordinate */ double sqrt(); void Egeyer(nnquad, xquad, yquad, quadtodata, nndata, xdata, ydata, tdata, rrmax, ssat, result) /* inputs */ int *nnquad, *nndata, *quadtodata, *tdata; double *xquad, *yquad, *xdata, *ydata, *rrmax, *ssat; /* output */ double *result; { int nquad, ndata, maxchunk, j, i, ileft, dataindex, isdata; double xquadj, yquadj, rmax, sat, r2max, xleft, dx, dy, dx2, d2; double tbefore, tafter, satbefore, satafter, delta, totalchange; nquad = *nnquad; ndata = *nndata; rmax = *rrmax; sat = *ssat; r2max = rmax * rmax; if(nquad == 0 || ndata == 0) return; ileft = 0; OUTERCHUNKLOOP(j, nquad, maxchunk, 65536) { R_CheckUserInterrupt(); INNERCHUNKLOOP(j, nquad, maxchunk, 65536) { totalchange = 0.0; xquadj = xquad[j]; yquadj = yquad[j]; dataindex = quadtodata[j]; isdata = (dataindex >= 0); /* adjust starting point */ xleft = xquadj - rmax; while((xdata[ileft] < xleft) && (ileft+1 < ndata)) ++ileft; /* process until dx > rmax */ for(i=ileft; i < ndata; i++) { dx = xdata[i] - xquadj; dx2 = dx * dx; if(dx2 > r2max) break; if(i != dataindex) { dy = ydata[i] - yquadj; d2 = dx2 + dy * dy; if(d2 <= r2max) { /* effect of adding dummy point j or negative effect of removing data point */ tbefore = tdata[i]; tafter = tbefore + ((isdata) ? -1 : 1); /* effect on saturated values */ satbefore = (double) ((tbefore < sat)? tbefore : sat); satafter = (double) ((tafter < sat)? tafter : sat); /* sum changes over all i */ delta = satafter - satbefore; totalchange += ((isdata) ? 
-delta : delta); } } } result[j] = totalchange; } } } spatstat/src/multihard.c0000755000176000001440000000735712252324034015105 0ustar ripleyusers#include #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Hardcore process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and precomputed/auxiliary data */ typedef struct MultiHard { int ntypes; double *hc; /* hc[i,j] = hc[j+ntypes*i] for i,j = 0... ntypes-1 */ double *hc2; /* squared radii */ double range2; /* square of interaction range */ double *period; int per; } MultiHard; /* initialiser function */ Cdata *multihardinit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2; double h, h2, range2; MultiHard *multihard; multihard = (MultiHard *) R_alloc(1, sizeof(MultiHard)); multihard->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multihard->hc = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multihard->hc2 = (double *) R_alloc((size_t) n2, sizeof(double)); /* Copy and process model parameters*/ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { h = model.ipar[i + j*ntypes]; h2 = h * h; MAT(multihard->hc, i, j, ntypes) = h; MAT(multihard->hc2, i, j, ntypes) = h2; if(range2 > h2) range2 = h2; } } multihard->range2 = range2; /* periodic boundary conditions? 
*/ multihard->period = model.period; multihard->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multihard); } /* conditional intensity evaluator */ double multihardcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, ix, ixp1, j, mrk, mrkj; int *marks; double *x, *y; double u, v; double d2, range2, cifval; double *period; MultiHard *multihard; DECLARE_CLOSE_D2_VARS; multihard = (MultiHard *) cdata; range2 = multihard->range2; period = multihard->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multihard->ntypes; #ifdef DEBUG Rprintf("scanning data\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multihard->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } if(ixp1 < npts) { for(j=ixp1; jhc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multihard->hc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } if(ixp1 < npts) { for(j=ixp1; jhc2, mrk, mrkj, ntypes)) { cifval = 0.0; return(cifval); } } } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiHardCifns = { &multihardinit, &multihardcif, (updafunptr) NULL, YES}; spatstat/src/nn3Ddist.c0000755000176000001440000001733312252324034014575 0ustar ripleyusers/* nn3Ddist.c Nearest Neighbour Distances in 3D $Revision: 1.11 $ $Date: 2013/11/03 03:42:48 $ THE FOLLOWING FUNCTIONS ASSUME THAT z IS SORTED IN ASCENDING ORDER nnd3D 
Nearest neighbour distances nnw3D Nearest neighbours (id) nndw3D Nearest neighbours (id) and distances nnXdw3D Nearest neighbour from one list to another nnXEdw3D Nearest neighbour from one list to another, with overlaps knnd3D k-th nearest neighbour distances knnw3D k-th nearest neighbours (id) knndw3D k-th nearest neighbours (id) and distances */ #undef SPATSTAT_DEBUG #include #include #include #include "chunkloop.h" #include "yesno.h" double sqrt(); /* .......... Single point pattern ...............................*/ #undef FNAME #undef DIST #undef WHICH /* nnd3D: returns nn distance */ #define FNAME nnd3D #define DIST #include "nn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* nnw3D: returns id of nearest neighbour */ #define FNAME nnw3D #define WHICH #include "nn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* nndw3D: returns nn distance .and. id of nearest neighbour */ #define FNAME nndw3D #define DIST #define WHICH #include "nn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* .......... 
Two point patterns ...............................*/ /* common interface */ void nnX3Dinterface(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2, *id1, *id2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; /* options */ int *exclude, *wantdist, *wantwhich; /* outputs */ double *nnd; int *nnwhich; { void nnXdw3D(), nnXd3D(), nnXw3D(); void nnXEdw3D(), nnXEd3D(), nnXEw3D(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { nnXdw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(di) { nnXd3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(wh) { nnXw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } } else { if(di && wh) { nnXEdw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(di) { nnXEd3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } else if(wh) { nnXEw3D(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, nnd, nnwhich, huge); } } } /* nnXdw3D: for TWO point patterns X and Y, find the nearest neighbour (from each point of X to the nearest point of Y) returning both the distance and the identifier Requires both patterns to be sorted in order of increasing z coord */ #define FNAME nnXdw3D #define DIST #define WHICH #undef EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXd3D: returns distance only */ #define FNAME nnXd3D #define DIST #undef EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXw3D: returns identifier only */ #define FNAME nnXw3D #define WHICH #undef EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* .......... 
Two point patterns with exclusion ........................*/ /* nnXEdw3D: similar to nnXdw3D but allows X and Y to include common points (which are not to be counted as neighbours) Code numbers id1, id2 are attached to the patterns X and Y respectively, such that x1[i], y1[i] and x2[j], y2[j] are the same point iff id1[i] = id2[j]. Requires both patterns to be sorted in order of increasing z coord */ #define FNAME nnXEdw3D #define DIST #define WHICH #define EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXEd3D: returns distances only */ #define FNAME nnXEd3D #define DIST #define EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* nnXEw3D: returns identifiers only */ #define FNAME nnXEw3D #define WHICH #define EXCLUDE #include "nn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* .......... Single point pattern ...............................*/ /* .......... k-th nearest neighbours ...............................*/ /* knnd3D nearest neighbour distances 1:kmax */ #define FNAME knnd3D #define DIST #include "knn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* knnw3D nearest neighbour indices 1:kmax */ #define FNAME knnw3D #define WHICH #include "knn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* knndw3D nearest neighbours 1:kmax returns distances and indices */ #define FNAME knndw3D #define DIST #define WHICH #include "knn3Ddist.h" #undef FNAME #undef DIST #undef WHICH /* .......... Two point patterns ...............................*/ /* .......... 
k-th nearest neighbours ...............................*/ /* general interface */ void knnX3Dinterface(n1, x1, y1, z1, id1, n2, x2, y2, z2, id2, kmax, exclude, wantdist, wantwhich, nnd, nnwhich, huge) /* inputs */ int *n1, *n2; double *x1, *y1, *z1, *x2, *y2, *z2, *huge; int *id1, *id2; int *kmax; /* options */ int *exclude, *wantdist, *wantwhich; /* output matrices (npoints * kmax) in ROW MAJOR order */ double *nnd; int *nnwhich; /* some inputs + outputs are not used in all functions */ { void knnXdw3D(), knnXd3D(), knnXw3D(); void knnXEdw3D(), knnXEd3D(), knnXEw3D(); int ex, di, wh; ex = (*exclude != 0); di = (*wantdist != 0); wh = (*wantwhich != 0); if(!ex) { if(di && wh) { knnXdw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(di) { knnXd3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(wh) { knnXw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } } else { if(di && wh) { knnXEdw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(di) { knnXEd3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } else if(wh) { knnXEw3D(n1,x1,y1,z1,id1,n2,x2,y2,z2,id2,kmax,nnd,nnwhich,huge); } } } #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXdw3D nearest neighbours 1:kmax between two point patterns returns distances and indices */ #define FNAME knnXdw3D #define DIST #define WHICH #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXd3D nearest neighbours 1:kmax between two point patterns returns distances */ #define FNAME knnXd3D #define DIST #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXw3D nearest neighbours 1:kmax between two point patterns returns indices */ #define FNAME knnXw3D #define WHICH #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* .......... Two point patterns with exclusion ..........................*/ /* .......... 
k-th nearest neighbours ...............................*/ /* knnXEdw3D nearest neighbours 1:kmax between two point patterns with exclusion returns distances and indices */ #define FNAME knnXEdw3D #define DIST #define WHICH #define EXCLUDE #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXEd3D nearest neighbours 1:kmax between two point patterns with exclusion returns distances */ #define FNAME knnXEd3D #define DIST #define EXCLUDE #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE /* knnXEw3D nearest neighbours 1:kmax between two point patterns with exclusion returns indices */ #define FNAME knnXEw3D #define WHICH #define EXCLUDE #include "knn3DdistX.h" #undef FNAME #undef DIST #undef WHICH #undef EXCLUDE spatstat/src/areadiff.c0000755000176000001440000001533312252324034014646 0ustar ripleyusers/* areadiff.c Area difference function $Revision: 1.14 $ $Date: 2013/09/18 04:09:24 $ A(x,r) = area of disc b(0,r) not covered by discs b(x_i,r) for x_i in x Area estimated by point-counting on a fine grid For use in area-interaction model and related calculations */ #undef DEBUG #include #include #include #include "chunkloop.h" #include "constants.h" /* Original version areadiff() 1 point u No trimming of discs */ void areadiff(rad,x,y,nn,ngrid,answer) /* inputs */ double *rad; /* radius */ double *x, *y; /* coordinate vectors for point pattern */ int *nn; /* length of vectors x and y */ int *ngrid; /* dimensions of point-counting grid */ /* output */ double *answer; /* computed area */ { double dx, dy, xg, yg, r, r2, a2, b2, xdif, ydif; int i, j, k, m, n, count, covered; r = *rad; r2 = r * r; n = *nn; m = *ngrid; dx = dy = 2 * r / (m-1); count = 0; /* run through grid points */ for(i = 0, xg = -r; i < m; i++, xg += dx) { a2 = r2 - xg *xg; for(j = 0, yg = -r; j < m; j++, yg += dy) { /* test for inside disc */ if(yg * yg < a2) { #ifdef DEBUG Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg); #endif /* run through data 
points seeking one close to (xy, yg) */ covered = 0; if(n > 0) { for(k = 0; k < n; k++) { #ifdef DEBUG Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]); #endif xdif = x[k] - xg; b2 = r2 - xdif * xdif; if(b2 > 0) { ydif = y[k] - yg; if(b2 - ydif * ydif > 0) { #ifdef DEBUG Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", k, k, x[k], y[k]); #endif covered = 1; break; } } } } if(covered == 0) { ++count; #ifdef DEBUG Rprintf("Not covered; incrementing count\n"); #endif } } } } #ifdef DEBUG Rprintf("Count = %d\n", count); #endif /* calculate area */ *answer = ((double) count) * dx * dy; } /* similar function, handles multiple values of 'r' */ void areadifs(rad,nrads,x,y,nxy,ngrid,answer) /* inputs */ double *rad; /* vector of radii */ int *nrads; /* length of 'rads' */ double *x, *y; /* coordinate vectors for point pattern */ int *nxy; /* length of vectors x and y */ int *ngrid; /* dimensions of point-counting grid */ /* output */ double *answer; /* computed areas (vector of length 'nrads') */ { double dx, dy, xg, yg, r, r2, a2, b2, xdif, ydif; int i, j, k, l, m, n, nr, m0, count, covered, maxchunk; n = *nxy; nr = *nrads; m = *ngrid; /* run through radii in chunks of 2^14 */ OUTERCHUNKLOOP(l, nr, maxchunk, 16384) { R_CheckUserInterrupt(); INNERCHUNKLOOP(l, nr, maxchunk, 16384) { r = rad[l]; if(r == 0.0) { answer[l] = 0.0; } else if(n == 0) { answer[l] = M_PI * r * r; } else { r2 = r * r; dx = dy = 2 * r / (m-1); count = 0; /* run through grid points in disc of radius r */ for(i = 0, xg = -r; i < m; i++, xg += dx) { a2 = r2 - xg * xg; m0 = (a2 > 0.0) ? 
floor(sqrt(a2)/dy) : 0; for(j = -m0, yg = -m0 * dy; j <= m0; j++, yg += dy) { #ifdef DEBUG Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg); #endif /* run through data points seeking one close to (xy, yg) */ covered = 0; for(k = 0; k < n; k++) { #ifdef DEBUG Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]); #endif xdif = x[k] - xg; b2 = r2 - xdif * xdif; if(b2 > 0) { ydif = y[k] - yg; if(b2 - ydif * ydif > 0) { #ifdef DEBUG Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", k, k, x[k], y[k]); #endif covered = 1; break; } } } /* end of loop through data points */ if(covered == 0) { ++count; #ifdef DEBUG Rprintf("Not covered; incrementing count\n"); #endif } } } /* end of loop over grid points */ #ifdef DEBUG Rprintf("Count = %d\n", count); #endif /* calculate area for this value of r*/ answer[l] = ((double) count) * dx * dy; } /* end of if(r==0).. else {...} */ } } } /* Modified version multiple test points u discs constrained inside a rectangle */ void areaBdif(rad,nrads,x,y,nxy,ngrid,x0,y0,x1,y1,answer) /* inputs */ double *rad; /* vector of radii */ int *nrads; /* length of 'rads' */ double *x, *y; /* coordinate vectors for point pattern */ int *nxy; /* length of vectors x and y */ int *ngrid; /* dimensions of point-counting grid */ double *x0,*y0,*x1,*y1; /* constraint rectangle */ /* output */ double *answer; /* computed areas (vector of length 'nrads') */ { double dx, dy, xg, yg, r, r2, a, a2, b2, xdif, ydif; double xleft, xright, ylow, yhigh; double xmin, ymin, xmax, ymax; int i, j, k, l, m, n, nr, ileft, iright, mlow, mhigh, count, covered; n = *nxy; nr = *nrads; m = *ngrid; xmin = *x0; ymin = *y0; xmax = *x1; ymax = *y1; /* run through radii */ for(l = 0; l < nr; l++) { r = rad[l]; if(r == 0.0) { answer[l] = 0.0; } else if (n == 0) { answer[l]= M_PI * r * r; } else { r2 = r * r; dx = dy = 2 * r / (m-1); count = 0; /* run through grid points in disc intersected with box */ xleft = (xmin > -r) ? xmin : -r; xright = (xmax < r) ? 
xmax : r; ileft = ceil(xleft/dx); iright = floor(xright/dx); if(ileft <= iright) { for(i = ileft, xg = ileft * dx; i <= iright; i++, xg += dx) { a2 = r2 - xg * xg; a = (a2 > 0) ? sqrt(a2): 0.0; yhigh = (ymax < a) ? ymax: a; ylow = (ymin > -a) ? ymin: -a; mhigh = floor(yhigh/dy); mlow = ceil(ylow/dy); if(mlow <= mhigh) { for(j = mlow, yg = mlow * dy; j <= mhigh; j++, yg += dy) { #ifdef DEBUG Rprintf("\n\n (xg,yg) = (%lf, %lf)\n", xg, yg); #endif /* run through data points seeking one close to (xy, yg) */ covered = 0; for(k = 0; k < n; k++) { #ifdef DEBUG Rprintf("(x[%d],y[%d]) = (%lf,%lf)\n", k, k, x[k], y[k]); #endif xdif = x[k] - xg; b2 = r2 - xdif * xdif; if(b2 > 0) { ydif = y[k] - yg; if(b2 - ydif * ydif > 0) { #ifdef DEBUG Rprintf("(x[%d], y[%d]) = (%lf, %lf) covers!\n", k, k, x[k], y[k]); #endif covered = 1; break; } } } /* end of loop over data points */ if(covered == 0) { ++count; #ifdef DEBUG Rprintf("Not covered; incrementing count\n"); #endif } } } } } /* end of loop over grid points */ #ifdef DEBUG Rprintf("Count = %d\n", count); #endif /* calculate area for this value of r*/ answer[l] = ((double) count) * dx * dy; } /* end of if(r==0).. 
else {...} */ } /* end of loop over r values */ } spatstat/src/getcif.c0000755000176000001440000000323012252324034014337 0ustar ripleyusers#include #include "methas.h" void fexitc(const char *msg); extern Cifns AreaIntCifns, BadGeyCifns, DgsCifns, DiggraCifns, FikselCifns, GeyerCifns, HardcoreCifns, LennardCifns, LookupCifns, SoftcoreCifns, StraussCifns, StraussHardCifns, MultiStraussCifns, MultiStraussHardCifns, MultiHardCifns, TripletsCifns; Cifns NullCifns = NULL_CIFNS; typedef struct CifPair { char *name; Cifns *p; } CifPair; CifPair CifTable[] = { {"areaint", &AreaIntCifns}, {"badgey", &BadGeyCifns}, {"dgs", &DgsCifns}, {"diggra", &DiggraCifns}, {"geyer", &GeyerCifns}, {"fiksel", &FikselCifns}, {"hardcore", &HardcoreCifns}, {"lookup", &LookupCifns}, {"lennard", &LennardCifns}, {"multihard", &MultiHardCifns}, {"sftcr", &SoftcoreCifns}, {"strauss", &StraussCifns}, {"straush", &StraussHardCifns}, {"straussm", &MultiStraussCifns}, {"straushm", &MultiStraussHardCifns}, {"triplets", &TripletsCifns}, {(char *) NULL, (Cifns *) NULL} }; Cifns getcif(cifname) char *cifname; { int i; CifPair cp; for(i = 0; CifTable[i].name; i++) { cp = CifTable[i]; if(strcmp(cifname, cp.name) == 0) return(*(cp.p)); } fexitc("Unrecognised cif name; bailing out.\n"); /* control never passes to here, but compilers don't know that */ return(NullCifns); } /* R interface function, to check directly whether cif is recognised */ void knownCif(cifname, answer) char** cifname; int* answer; { int i; CifPair cp; for(i = 0; CifTable[i].name; i++) { cp = CifTable[i]; if(strcmp(*cifname, cp.name) == 0) { *answer = 1; return; } } *answer = 0; return; } spatstat/src/straussm.c0000755000176000001440000001300312252324034014756 0ustar ripleyusers#include #include #include "methas.h" #include "dist2.h" /* for debugging code, include #define DEBUG 1 */ /* Conditional intensity computation for Multitype Strauss process */ /* NOTE: types (marks) are numbered from 0 to ntypes-1 */ /* Storage of parameters and 
precomputed/auxiliary data */ typedef struct MultiStrauss { int ntypes; double *gamma; /* gamma[i,j] = gamma[i+ntypes*j] for i,j = 0... ntypes-1 */ double *rad; /* rad[i,j] = rad[j+ntypes*i] for i,j = 0... ntypes-1 */ double *rad2; /* squared radii */ double range2; /* square of interaction range */ double *loggamma; /* logs of gamma[i,j] */ double *period; int *hard; /* hard[i,j] = 1 if gamma[i,j] ~~ 0 */ int *kount; /* space for kounting pairs of each type */ int per; } MultiStrauss; /* initialiser function */ Cdata *straussminit(state, model, algo) State state; Model model; Algor algo; { int i, j, ntypes, n2, hard; double g, r, r2, logg, range2; MultiStrauss *multistrauss; multistrauss = (MultiStrauss *) R_alloc(1, sizeof(MultiStrauss)); multistrauss->ntypes = ntypes = model.ntypes; n2 = ntypes * ntypes; #ifdef DEBUG Rprintf("initialising space for %d types\n", ntypes); #endif /* Allocate space for parameters */ multistrauss->gamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->rad = (double *) R_alloc((size_t) n2, sizeof(double)); /* Allocate space for transformed parameters */ multistrauss->rad2 = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->loggamma = (double *) R_alloc((size_t) n2, sizeof(double)); multistrauss->hard = (int *) R_alloc((size_t) n2, sizeof(int)); /* Allocate scratch space for counts of each pair of types */ multistrauss->kount = (int *) R_alloc((size_t) n2, sizeof(int)); /* Copy and process model parameters*/ /* ipar will contain n^2 gamma values followed by n^2 values of r */ range2 = 0.0; for(i = 0; i < ntypes; i++) { for(j = 0; j < ntypes; j++) { g = model.ipar[i + j*ntypes]; r = model.ipar[n2 + i + j*ntypes]; r2 = r * r; hard = (g < DOUBLE_EPS); logg = (hard) ? 
0 : log(g); MAT(multistrauss->gamma, i, j, ntypes) = g; MAT(multistrauss->rad, i, j, ntypes) = r; MAT(multistrauss->hard, i, j, ntypes) = hard; MAT(multistrauss->loggamma, i, j, ntypes) = logg; MAT(multistrauss->rad2, i, j, ntypes) = r2; if(r2 > range2) range2 = r2; } } multistrauss->range2 = range2; /* periodic boundary conditions? */ multistrauss->period = model.period; multistrauss->per = (model.period[0] > 0.0); #ifdef DEBUG Rprintf("end initialiser\n"); #endif return((Cdata *) multistrauss); } /* conditional intensity evaluator */ double straussmcif(prop, state, cdata) Propo prop; State state; Cdata *cdata; { int npts, ntypes, kount, ix, ixp1, j, mrk, mrkj, m1, m2; int *marks; double *x, *y; double u, v, lg; double d2, cifval; double range2; double *period; MultiStrauss *multistrauss; DECLARE_CLOSE_D2_VARS; multistrauss = (MultiStrauss *) cdata; range2 = multistrauss->range2; period = multistrauss->period; u = prop.u; v = prop.v; mrk = prop.mrk; ix = prop.ix; x = state.x; y = state.y; marks = state.marks; npts = state.npts; #ifdef DEBUG Rprintf("computing cif: u=%lf, v=%lf, mrk=%d\n", u, v, mrk); #endif cifval = 1.0; if(npts == 0) return(cifval); ntypes = multistrauss->ntypes; #ifdef DEBUG Rprintf("initialising pair counts\n"); #endif /* initialise pair counts */ for(m1 = 0; m1 < ntypes; m1++) for(m2 = 0; m2 < ntypes; m2++) MAT(multistrauss->kount, m1, m2, ntypes) = 0; /* compile pair counts */ #ifdef DEBUG Rprintf("compiling pair counts\n"); #endif ixp1 = ix+1; /* If ix = NONE = -1, then ixp1 = 0 is correct */ if(multistrauss->per) { /* periodic distance */ if(ix > 0) { for(j=0; j < ix; j++) { if(CLOSE_PERIODIC_D2(u,v,x[j],y[j],period,range2,d2)) { mrkj = marks[j]; if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } } else { /* Euclidean distance */ if(ix > 0) { for(j=0; j < ix; j++) { 
if(CLOSE_D2(u, v, x[j], y[j], range2, d2)) { mrkj = marks[j]; if(d2 < MAT(multistrauss->rad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } if(ixp1 < npts) { for(j=ixp1; jrad2, mrk, mrkj, ntypes)) MAT(multistrauss->kount, mrk, mrkj, ntypes)++; } } } } #ifdef DEBUG Rprintf("multiplying cif factors\n"); #endif /* multiply cif value by pair potential */ for(m1 = 0; m1 < ntypes; m1++) { for(m2 = 0; m2 < ntypes; m2++) { kount = MAT(multistrauss->kount, m1, m2, ntypes); if(MAT(multistrauss->hard, m1, m2, ntypes)) { if(kount > 0) { cifval = 0.0; return(cifval); } } else { lg = MAT(multistrauss->loggamma, m1, m2, ntypes); cifval *= exp(lg * kount); } } } #ifdef DEBUG Rprintf("returning positive cif\n"); #endif return cifval; } Cifns MultiStraussCifns = { &straussminit, &straussmcif, (updafunptr) NULL, YES}; spatstat/NAMESPACE0000644000176000001440000017076012252313136013375 0ustar ripleyusers# spatstat NAMESPACE file import(stats,graphics,grDevices,utils,mgcv,deldir,abind,tensor,polyclip) # Do not edit the following. # It is generated automatically. # .................................................. # Automatically-generated list of entry points # .................................................. 
useDynLib(spatstat, "adenspt", "areaBdif", "areadifs", "asmoopt", "awtdenspt", "awtsmoopt", "Cbiform", "Cclosepaircounts", "Ccountends", "Ccrossdist", "Ccrosspaircounts", "CcrossPdist", "Cidw", "Cmatchxy", "cocoGraph", "cocoImage", "Cpairdist", "CpairPdist", "Cquadform", "crosscount", "Csumouter", "Csumsymouter", "Cwsumouter", "Cwsumsymouter", "Cxypolyselfint", "Cxysegint", "CxysegXint", "D3crossdist", "D3crossPdist", "D3pairdist", "D3pairPdist", "Ddist2dpath", "delta2area", "denspt", "digberJ", "dinfty_R", "discareapoly", "distmapbin", "dppll", "drevcumsum", "dwpure", "Ediggatsti", "Ediggra", "Efiksel", "Egeyer", "exact_dt_R", "Fclosepairs", "Fcrosspairs", "graphVees", "Idist2dpath", "idwloo", "inxyp", "irevcumsum", "KborderD", "KborderI", "knnd3D", "knndMD", "knndsort", "knnGinterface", "knnsort", "knnw3D", "knnwMD", "knnX3Dinterface", "knnXinterface", "KnoneD", "KnoneI", "knownCif", "Kwborder", "Kwnone", "lincrossdist", "linndcross", "linndxcross", "linnndist", "linnnwhich", "linpairdist", "locpcfx", "locprod", "locWpcfx", "locxprod", "nnd3D", "nndist2segs", "nndistsort", "nndMD", "nnGinterface", "nnw3D", "nnwhichsort", "nnwMD", "nnX3Dinterface", "nnXinterface", "paircount", "PerfectDGS", "PerfectDiggleGratton", "PerfectHardcore", "PerfectStrauss", "PerfectStraussHard", "poly2imA", "poly2imI", "prdist2segs", "ps_exact_dt_R", "RcallF3", "RcallF3cen", "RcallG3", "RcallG3cen", "RcallK3", "Rcallpcf3", "ripleybox", "ripleypoly", "scantrans", "seg2pixI", "seg2pixL", "smoopt", "triDgraph", "trigraf", "trigrafS", "triograph", "trioxgraph", "VcloseIJpairs", "Vclosepairs", "Vclosethresh", "VcrossIJpairs", "Vcrosspairs", "wtdenspt", "wtsmoopt", "xmethas", "xypsi", "xysegint", "xysegXint", "xysi", "xysiANY", "xysxi") # .................................................. # Automatically-generated list of documented objects # .................................................. 
export("acedist.noshow") export("acedist.show") export("active.interactions") export("adaptive.density") export("addvar") export("adjustthinrange") export("affine") export("affine.im") export("affine.layered") export("affine.linnet") export("affine.lpp") export("affine.owin") export("affine.ppp") export("affine.psp") export("affinexy") export("affinexypolygon") export("allstats") export("alltypes") export("angles.psp") export("anova.lppm") export("anova.mppm") export("anova.ppm") export("anova.slrm") export("anycrossing.psp") export("append.psp") export("apply23sum") export("applynbd") export("applytolayers") export("areadelta2") export("areaGain") export("areaGain.diri") export("areaGain.grid") export("AreaInter") export("areaLoss") export("areaLoss.diri") export("areaLoss.grid") export("area.owin") export("area.xypolygon") export("as.array.im") export("as.box3") export("as.breakpts") export("as.data.frame.bw.optim") export("as.data.frame.fv") export("as.data.frame.hyperframe") export("as.data.frame.im") export("as.data.frame.ppp") export("as.data.frame.ppx") export("as.data.frame.psp") export("as.function.fv") export("as.function.linfun") export("as.function.rhohat") export("as.fv") export("as.fv.bw.optim") export("as.fv.data.frame") export("as.fv.fasp") export("as.fv.fv") export("as.fv.kppm") export("as.fv.matrix") export("as.fv.minconfit") export("as.hyperframe") export("as.hyperframe.data.frame") export("as.hyperframe.default") export("as.hyperframe.hyperframe") export("as.hyperframe.listof") export("as.hyperframe.ppx") export("as.im") export("as.im.default") export("as.im.distfun") export("as.im.function") export("as.im.im") export("as.im.leverage.ppm") export("as.im.linim") export("as.im.matrix") export("as.im.nnfun") export("as.im.owin") export("as.im.ppp") export("as.im.scan.test") export("as.im.tess") export("as.interact") export("as.interact.fii") export("as.interact.interact") export("as.interact.ppm") export("as.linim") export("as.linim.default") 
export("as.linim.linfun") export("as.linim.linim") export("as.linnet") export("as.linnet.linfun") export("as.linnet.linnet") export("as.linnet.lpp") export("as.linnet.lppm") export("as.list.hyperframe") export("as.listof") export("as.lpp") export("as.mask") export("as.mask.psp") export("as.matrix.im") export("as.matrix.owin") export("as.matrix.ppx") export("as.owin") export("as.owin.data.frame") export("as.owin.default") export("as.owin.distfun") export("as.owin.funxy") export("as.owin.im") export("as.owin.kppm") export("as.owin.layered") export("as.owin.linfun") export("as.owin.linnet") export("as.owin.lpp") export("as.owin.lppm") export("as.owin.nnfun") export("as.owin.owin") export("as.owin.ppm") export("as.owin.ppp") export("as.owin.psp") export("as.owin.quad") export("as.owin.rmhmodel") export("as.owin.tess") export("as.polygonal") export("as.ppm") export("as.ppm.kppm") export("as.ppm.ppm") export("as.ppm.profilepl") export("as.ppp") export("as.ppp.data.frame") export("as.ppp.default") export("as.ppp.influence.ppm") export("as.ppp.lpp") export("as.ppp.matrix") export("as.ppp.ppp") export("as.ppp.psp") export("as.ppp.quad") export("as.psp") export("as.psp.data.frame") export("as.psp.default") export("as.psp.linnet") export("as.psp.lpp") export("as.psp.matrix") export("as.psp.owin") export("as.psp.psp") export("as.rectangle") export("assemble.plot.objects") export("as.tess") export("as.tess.im") export("as.tess.list") export("as.tess.owin") export("as.tess.quadratcount") export("as.tess.quadrattest") export("as.tess.tess") export("as.units") export("AsymmDistance.psp") export("BadGey") export("badprobability") export("bdist.pixels") export("bdist.points") export("bdist.tiles") export("bdrylength.xypolygon") export("bdry.mask") export("beachcolourmap") export("beachcolours") export("beginner") export("bermantest") export("bermantestEngine") export("bermantest.lpp") export("bermantest.lppm") export("bermantest.ppm") export("bermantest.ppp") export("bilinearform") 
export("bind.fv") export("bind.ratfv") export("blankcoefnames") export("blur") export("border") export("bounding.box") export("bounding.box3") export("bounding.box.xy") export("box3") export("boxx") export("break.holes") export("breakpts") export("breakpts.from.r") export("bt.frame") export("bw.diggle") export("bw.frac") export("bw.optim") export("bw.ppl") export("bw.relrisk") export("bw.scott") export("bw.smoothppp") export("bw.stoyan") export("by.im") export("by.ppp") export("cannot.update") export("cartesian") export("cat.factor") export("cauchy.estK") export("cauchy.estpcf") export("cbind.fv") export("cbind.hyperframe") export("cellmiddles") export("censtimeCDFest") export("centroid.owin") export("change.default.expand") export("check.1.integer") export("check.1.real") export("checkfields") export("check.finite") export("check.hist.lengths") export("check.named.list") export("check.named.thing") export("check.named.vector") export("check.nmatrix") export("check.nvector") export("check.range") export("check.testfun") export("chop.tess") export("circumradius") export("clarkevans") export("clarkevansCalc") export("clarkevans.test") export("clear.simplepanel") export("clf.test") export("clickjoin") export("clickpoly") export("clickppp") export("clip.infline") export("clippoly.psp") export("clip.psp") export("cliprect.psp") export("closepaircounts") export("closepairs") export("closethresh") export("closing") export("closing.owin") export("closing.ppp") export("closing.psp") export("clusterset") export("cobble.xy") export("codetime") export("coef.fii") export("coef.kppm") export("coef.lppm") export("coef.mppm") export("coef.ppm") export("coef.slrm") export("coef.summary.fii") export("coef.summary.ppm") export("col2hex") export("collapse.fv") export("colourmap") export("commasep") export("commonGrid") export("compareFit") export("compatible") export("compatible.fasp") export("compatible.fv") export("compatible.im") export("compatible.rat") export("compatible.units") 
export("compileCDF") export("compileK") export("compilepcf") export("complaining") export("complement.owin") export("concatxy") export("Concom") export("conform.imagelist") export("conform.ratfv") export("connected") export("connected.im") export("connected.owin") export("connected.ppp") export("conspire") export("contour.funxy") export("contour.im") export("contour.listof") export("contour.objsurf") export("convexhull") export("convexhull.xy") export("convolve.im") export("coords<-") export("coords") export("coords<-.ppp") export("coords.ppp") export("coords<-.ppx") export("coords.ppx") export("corners") export("countends") export("countingweights") export("crossdist") export("crossdist.default") export("crossdist.lpp") export("crossdist.pp3") export("crossdist.ppp") export("crossdist.ppx") export("crossdist.psp") export("crossing.psp") export("crosspaircounts") export("crosspairquad") export("crosspairs") export("cut.im") export("cut.ppp") export("damaged.ppm") export("datagen.rpoisppOnLines") export("datagen.runifpointOnLines") export("datagen.runifpoisppOnLines") export("data.mppm") export("data.ppm") export("dclf.progress") export("dclf.test") export("default.clipwindow") export("default.dummy") export("default.expand") export("default.ntile") export("default.n.tiling") export("default.rmhcontrol") export("delaunay") export("delaunay.distance") export("deltametric") export("deltasuffstat") export("densitypointsEngine") export("density.ppp") export("density.psp") export("density.splitppp") export("deriv.fv") export("dfbetas.ppm") export("dflt.redraw") export("diagnose.ppm") export("diagnose.ppm.engine") export("diameter") export("diameter.box3") export("diameter.boxx") export("diameter.linnet") export("diameter.owin") export("DiggleGatesStibbard") export("DiggleGratton") export("digital.volume") export("dilated.areas") export("dilate.owin") export("dilation") export("dilation.owin") export("dilation.ppp") export("dilation.psp") export("dim.fasp") 
export("dim.hyperframe") export("dim.im") export("dim.msr") export("dimnames<-.fasp") export("dimnames.fasp") export("dimnames.msr") export("dirichlet") export("dirichlet.weights") export("disc") export("discpartarea") export("discretise") export("dist2dpath") export("distcdf") export("distfun") export("distfun.lpp") export("distfun.owin") export("distfun.ppp") export("distfun.psp") export("distmap") export("distmap.owin") export("distmap.ppp") export("distmap.psp") export("distpl") export("distppl") export("distppll") export("distppllmin") export("distributecbind") export("divisors") export("dknn") export("do.as.im") export("do.call.matched") export("do.istat") export("dotexpr.to.call") export("dropifsingle") export("dummify") export("dummy.ppm") export("duplicated.ppp") export("duplicated.ppx") export("edge.Ripley") export("edges2triangles") export("edges2vees") export("edge.Trans") export("eem") export("effectfun") export("Emark") export("empty.space") export("emptywindow") export("endpoints.psp") export("ensure2vector") export("ensure3Darray") export("envelope") export("envelopeEngine") export("envelope.envelope") export("envelope.kppm") export("envelope.lpp") export("envelope.lppm") export("envelope.matrix") export("envelope.pp3") export("envelope.ppm") export("envelope.ppp") export("envelopeProgressData") export("envelopeTest") export("equalpairs") export("equalpairs.quad") export("equalsfun.quad") export("equals.quad") export("eratosthenes") export("eroded.areas") export("eroded.volumes") export("eroded.volumes.box3") export("eroded.volumes.boxx") export("erodemask") export("erode.owin") export("erosion") export("erosion.owin") export("erosion.ppp") export("erosion.psp") export("evalCovar") export("evalCovariate") export("evalCovar.lppm") export("evalCovar.ppm") export("eval.fasp") export("eval.fv") export("eval.hyper") export("eval.im") export("evalInteraction") export("evalInterEngine") export("eval.linim") export("evalPairPotential") 
export("even.breaks.owin") export("evenly.spaced") export("ewcdf") export("exactdt") export("exactMPLEstrauss") export("exactPdt") export("expand.owin") export("explain.ifnot") export("extractAIC.lppm") export("extractAIC.ppm") export("extractAIC.slrm") export("extractAtomicQtests") export("f3Cengine") export("f3engine") export("F3est") export("fakecallstring") export("[.fasp") export("fasp") export("fave.order") export("Fest") export("fii") export("Fiksel") export("fillNA") export("findbestlegendpos") export("findcbind") export("findCovariate") export("Finhom") export("firstfactor") export("fitin") export("fitin.ppm") export("fitted.kppm") export("fitted.mppm") export("fitted.ppm") export("fitted.slrm") export("flipxy") export("flipxy.im") export("flipxy.layered") export("flipxy.owin") export("flipxypolygon") export("flipxy.ppp") export("flipxy.psp") export("forbid.logi") export("forbidNA") export("FormatFaspFormulae") export("formula<-") export("formula<-.fv") export("formula.fv") export("formula.kppm") export("formula.lppm") export("formula.ppm") export("formula.slrm") export("fryplot") export("frypoints") export("funxy") export("[.fv") export("fv") export("fvlabelmap") export("fvlabels<-") export("fvlabels") export("fvlegend") export("fvnames<-") export("fvnames") export("g3Cengine") export("g3engine") export("G3est") export("Gcom") export("Gcross") export("Gdot") export("Gest") export("getdataname") export("getfields") export("getglmdata") export("getglmfit") export("getglmsubset") export("getlambda.lpp") export("getppmdatasubset") export("getSumFun") export("Geyer") export("geyercounts") export("geyerdelta2") export("Gfox") export("Ginhom") export("GLMpredict") export("Gmulti") export("good.correction.K") export("good.names") export("greatest.common.divisor") export("Gres") export("grid1index") export("gridcenters") export("gridcentres") export("gridindex") export("gridweights") export("grow.rectangle") export("grow.simplepanel") export("gsubdot") 
export("handle.r.b.args") export("handle.rshift.args") export("Hardcore") export("harmonic") export("harmonise.im") export("harmonize.im") export("has.offset") export("has.offset.term") export("Hest") export("hist.im") export("ho.engine") export("hsvim") export("hsvNA") export("Hybrid") export("hybrid.family") export("[<-.hyperframe") export("[.hyperframe") export("$<-.hyperframe") export("$.hyperframe") export("hyperframe") export("identical.formulae") export("identify.ppp") export("identify.psp") export("idorempty") export("idw") export("Iest") export("ifelse0NA") export("ifelse1NA") export("ifelseAB") export("ifelseAX") export("ifelseNegPos") export("ifelseXB") export("ifelseXY") export("illegal.iformula") export("[<-.im") export("[.im") export("im") export("image.im") export("image.listof") export("image.objsurf") export("imcov") export("implemented.for.K") export("impliedcoefficients") export("impliedpresence") export("incircle") export("infline") export("influence.ppm") export("inforder.family") export("inpoint") export("inside.owin") export("inside.range") export("inside.triangle") export("inside.xypolygon") export("instantiate.interact") export("integral.im") export("integral.msr") export("intensity") export("intensity.lpp") export("intensity.ppm") export("intensity.ppp") export("interp.colourmap") export("interp.im") export("intersect.owin") export("intersect.ranges") export("intersect.tess") export("intX.owin") export("intX.xypolygon") export("intY.owin") export("intY.xypolygon") export("iplot") export("iplot.default") export("iplot.layered") export("iplot.ppp") export("ippm") export("is.atomicQtest") export("is.cadlag") export("is.convex") export("is.data") export("is.empty") export("is.empty.default") export("is.empty.owin") export("is.empty.ppp") export("is.empty.psp") export("is.expandable") export("is.expandable.ppm") export("is.expandable.rmhmodel") export("is.fv") export("is.hole.xypolygon") export("is.hybrid") export("is.hybrid.interact") 
export("is.hybrid.ppm") export("is.hyperframe") export("is.im") export("is.infline") export("is.interact") export("is.kppm") export("is.lpp") export("is.lppm") export("is.marked") export("is.marked.default") export("is.marked.lppm") export("is.marked.ppm") export("is.marked.ppp") export("is.marked.psp") export("is.marked.quad") export("is.mask") export("is.mppm") export("is.multitype") export("is.multitype.default") export("is.multitype.lppm") export("is.multitype.ppm") export("is.multitype.ppp") export("is.multitype.quad") export("is.owin") export("is.parseable") export("is.poisson") export("is.poisson.interact") export("is.poisson.kppm") export("is.poisson.lppm") export("is.poisson.mppm") export("is.poisson.ppm") export("is.poisson.rmhmodel") export("is.poisson.slrm") export("is.polygonal") export("is.pp3") export("is.ppm") export("is.ppp") export("is.ppx") export("is.prime") export("is.psp") export("is.rectangle") export("is.slrm") export("is.stationary") export("is.stationary.kppm") export("is.stationary.lppm") export("is.stationary.ppm") export("is.stationary.rmhmodel") export("is.stationary.slrm") export("is.subset.owin") export("istat") export("is.tess") export("Jcross") export("Jdot") export("Jest") export("Jfox") export("Jinhom") export("Jmulti") export("k3engine") export("K3est") export("kaplan.meier") export("Kborder.engine") export("Kcom") export("Kcross") export("Kcross.inhom") export("Kdot") export("Kdot.inhom") export("Kest") export("Kest.fft") export("killinteraction") export("Kinhom") export("Kmeasure") export("Kmodel") export("Kmodel.kppm") export("km.rs") export("km.rs.opt") export("Kmulti") export("Kmulti.inhom") export("Knone.engine") export("Kount") export("Kpcf.kppm") export("kppm") export("kppmComLik") export("kppmMinCon") export("Kres") export("Kscaled") export("ksmooth.ppp") export("kstest") export("kstest.lpp") export("kstest.lppm") export("kstest.mppm") export("ks.test.ppm") export("kstest.ppm") export("kstest.ppp") export("kstest.slrm") 
export("Kwtsum") export("labels.kppm") export("labels.ppm") export("labels.slrm") export("LambertW") export("latest.news") export("[.layered") export("layered") export("layerplotargs<-") export("layerplotargs") export("layout.boxes") export("Lcross") export("Lcross.inhom") export("Ldot") export("Ldot.inhom") export("least.common.multiple") export("lengths.psp") export("LennardJones") export("Lest") export("levelset") export("levels<-.im") export("levels.im") export("leverage") export("leverage.ppm") export("lgcp.estK") export("lgcp.estpcf") export("lhs.of.formula") export("lineardisc") export("linearK") export("linearKcross") export("linearKcross.inhom") export("linearKdot") export("linearKdot.inhom") export("linearKengine") export("linearKinhom") export("linearKmulti") export("linearKmultiEngine") export("linearKmulti.inhom") export("linearmarkconnect") export("linearmarkequal") export("linearpcf") export("linearpcfcross") export("linearpcfcross.inhom") export("linearpcfdot") export("linearpcfdot.inhom") export("linearpcfengine") export("linearpcfinhom") export("linearpcfmulti") export("linearPCFmultiEngine") export("linearpcfmulti.inhom") export("linequad") export("linfun") export("Linhom") export("linim") export("[.linnet") export("linnet") export("[<-.listof") export("listof") export("local2lpp") export("localK") export("localKengine") export("localKinhom") export("localL") export("localLinhom") export("localpcf") export("localpcfengine") export("localpcfinhom") export("[.localpcfmatrix") export("localpcfmatrix") export("logi.dummy") export("logi.engine") export("logLik.lppm") export("logLik.mppm") export("logLik.ppm") export("logLik.slrm") export("lohboot") export("lookup.im") export("[.lpp") export("lpp") export("lppm") export("Lscaled") export("lurking") export("lut") export("mad.progress") export("mad.test") export("majorminorversion") export("make.even.breaks") export("make.parseable") export("makeunits") export("markappend") export("markappendop") 
export("markcbind") export("markconnect") export("markcorr") export("markcorrint") export("markformat") export("markformat.default") export("markformat.ppp") export("markformat.ppx") export("markformat.psp") export("markmean") export("markreplicateop") export("marks<-") export("marks") export("mark.scale.default") export("marks.default") export("marks<-.lpp") export("markspace.integral") export("marks<-.ppp") export("marks.ppp") export("marks<-.ppx") export("marks.ppx") export("marks<-.psp") export("marks.psp") export("marks.quad") export("markstat") export("marksubset") export("marksubsetop") export("marktable") export("markvar") export("markvario") export("mask2df") export("matchingdist") export("matclust.estK") export("matclust.estpcf") export("matcolall") export("matcolany") export("matcolsum") export("matrixsample") export("matrowall") export("matrowany") export("matrowsum") export("maxflow") export("max.im") export("mctest.progress") export("mean.im") export("meanlistfv") export("meanX.owin") export("meanY.owin") export("median.im") export("midpoints.psp") export("mincontrast") export("min.im") export("miplot") export("model.covariates") export("model.depends") export("model.frame.kppm") export("model.frame.lppm") export("model.frame.ppm") export("model.images") export("model.images.kppm") export("model.images.lppm") export("model.images.ppm") export("model.images.slrm") export("model.is.additive") export("model.matrix.kppm") export("model.matrix.lppm") export("model.matrix.ppm") export("model.matrix.slrm") export("model.se.image") export("mpl") export("mpl.engine") export("mpl.get.covariates") export("mpl.prepare") export("mppm") export("[.msr") export("msr") export("MultiHard") export("MultiPair.checkmatrix") export("multiplicity") export("multiplicity.data.frame") export("multiplicity.ppp") export("multiplicity.ppx") export("multiply.only.finite.entries") export("MultiStrauss") export("MultiStraussHard") export("na.handle.im") export("names<-.hyperframe") 
export("names.hyperframe") export("nearest.neighbour") export("nearest.pixel") export("nearest.raster.point") export("nearestsegment") export("nearest.valid.pixel") export("newstyle.coeff.handling") export("niceround") export("nnclean") export("nncleanEngine") export("nnclean.pp3") export("nnclean.ppp") export("nncorr") export("nncross") export("nncross.default") export("nncross.lpp") export("nncross.pp3") export("nncross.ppp") export("nndcumfun") export("nndensity") export("nndensity.ppp") export("nndist") export("nndist.default") export("nndist.lpp") export("nndist.pp3") export("nndist.ppp") export("nndist.ppx") export("nndist.psp") export("nnfun") export("nnfun.lpp") export("nnfun.ppp") export("nnfun.psp") export("nnmap") export("nnmark") export("nnmean") export("nnvario") export("nnwhich") export("nnwhich.default") export("nnwhich.lpp") export("nnwhich.pp3") export("nnwhich.ppp") export("nnwhich.ppx") export("nobjects") export("nobjects.ppp") export("nobjects.ppx") export("nobjects.psp") export("nobs.lppm") export("nobs.ppm") export("no.trend.ppm") export("npfun") export("npoints") export("npoints.pp3") export("npoints.ppp") export("npoints.ppx") export("n.quad") export("nsegments") export("nsegments.psp") export("numalign") export("numeric.columns") export("nzpaste") export("objsurf") export("objsurfEngine") export("objsurf.kppm") export("objsurf.minconfit") export("offsetsinformula") export("onecolumn") export("opening") export("opening.owin") export("opening.ppp") export("opening.psp") export("optimStatus") export("Ord") export("ord.family") export("ordinal") export("OrdThresh") export("outdated.interact") export("overlap.owin") export("overlap.trapezium") export("overlap.xypolygon") export("oversize.quad") export("[.owin") export("owin") export("owinpoly2mask") export("owinpolycheck") export("pairdist") export("pairdist.default") export("pairdist.lpp") export("pairdist.pp3") export("pairdist.ppp") export("pairdist.ppx") export("pairdist.psp") 
export("PairPiece") export("pairsat.family") export("pairs.im") export("pairs.listof") export("Pairwise") export("pairwise.family") export("paletteindex") export("paletteindex") export("param.quad") export("paren") export("parres") export("partialModelMatrix") export("passthrough") export("paste.expr") export("pcf") export("pcf3engine") export("pcf3est") export("pcfcross") export("pcfcross.inhom") export("pcfdot") export("pcfdot.inhom") export("pcf.fasp") export("pcf.fv") export("pcfinhom") export("pcfmodel") export("pcfmodel.kppm") export("pcfmulti.inhom") export("pcf.ppp") export("perimeter") export("periodify") export("periodify.owin") export("periodify.ppp") export("periodify.psp") export("persp.funxy") export("persp.im") export("persp.objsurf") export("pickoption") export("pixellate") export("pixellate.owin") export("pixellate.ppp") export("pixellate.psp") export("pixelquad") export("pknn") export("plot.addvar") export("plot.barplotdata") export("plot.bermantest") export("plot.bw.frac") export("plot.bw.optim") export("plot.colourmap") export("plot.diagppm") export("plot.envelope") export("ploterodeimage") export("ploterodewin") export("plot.fasp") export("plot.fii") export("plot.funxy") export("plot.fv") export("plot.hyperframe") export("plot.im") export("plot.infline") export("plot.influence.ppm") export("plot.kppm") export("plot.kstest") export("plot.layered") export("plot.leverage.ppm") export("plot.linfun") export("plot.linim") export("plot.linnet") export("plot.listof") export("plot.localpcfmatrix") export("plot.lppm") export("plot.minconfit") export("plot.mppm") export("plot.msr") export("plot.objsurf") export("plot.owin") export("plot.parres") export("plot.plotpairsim") export("plot.plotppm") export("plot.pp3") export("plot.ppm") export("plot.ppp") export("plot.pppmatching") export("plot.ppx") export("plot.profilepl") export("plot.psp") export("plot.qqppm") export("plot.quad") export("plot.quadratcount") export("plot.quadrattest") export("plot.rho2hat") 
export("plot.rhohat") export("plot.scan.test") export("plot.slrm") export("plot.splitppp") export("plot.tess") export("pointgrid") export("pointsOnLines") export("Poisson") export("polynom") export("pool") export("pool.envelope") export("pool.fasp") export("pool.quadrattest") export("pool.rat") export("[.pp3") export("pp3") export("ppllengine") export("ppm") export("ppmCovariates") export("ppm.influence") export("[<-.ppp") export("[.ppp") export("ppp") export("pppdist") export("pppdist.mat") export("pppdist.prohorov") export("pppmatching") export("ppsubset") export("[.ppx") export("ppx") export("prange") export("predict.kppm") export("predict.lppm") export("predict.mppm") export("predict.ppm") export("predict.rhohat") export("predict.slrm") export("prefixfv") export("prettyinside") export("primefactors") export("primesbelow") export("print.addvar") export("print.autoexec") export("print.box3") export("print.boxx") export("print.bt.frame") export("print.bw.frac") export("print.bw.optim") export("print.colourmap") export("print.diagppm") export("print.distfun") export("print.envelope") export("print.fasp") export("print.fii") export("print.funxy") export("print.fv") export("print.fvfun") export("print.hyperframe") export("print.im") export("print.infline") export("print.influence.ppm") export("print.interact") export("print.isf") export("print.kppm") export("print.layered") export("print.leverage.ppm") export("print.linfun") export("print.linim") export("print.linnet") export("print.localpcfmatrix") export("print.lpp") export("print.lppm") export("print.lut") export("print.minconfit") export("print.mppm") export("print.msr") export("print.nnfun") export("print.objsurf") export("print.owin") export("print.parres") export("print.plotpairsim") export("print.plotppm") export("print.pp3") export("print.ppm") export("print.ppp") export("print.pppmatching") export("print.ppx") export("print.profilepl") export("print.psp") export("print.qqppm") export("print.quad") 
export("print.quadrattest") export("print.rat") export("print.rho2hat") export("print.rhohat") export("print.rmhcontrol") export("print.rmhexpand") export("print.rmhInfoList") export("print.rmhmodel") export("print.rmhstart") export("print.simplepanel") export("print.slrm") export("print.splitppp") export("print.splitppx") export("printStatus") export("print.summary.fii") export("print.summary.hyperframe") export("print.summary.im") export("print.summary.listof") export("print.summary.logiquad") export("print.summary.lpp") export("print.summary.lut") export("print.summary.mppm") export("print.summary.owin") export("print.summary.pp3") export("print.summary.ppm") export("print.summary.ppp") export("print.summary.psp") export("print.summary.quad") export("print.summary.rmhexpand") export("print.summary.splitppp") export("print.summary.splitppx") export("print.summary.units") export("print.tess") export("print.timed") export("print.units") export("profilepl") export("progressreport") export("project2segment") export("project.ppm") export("prolongseq") export("[.psp") export("psp") export("psst") export("psstA") export("psstG") export("qknn") export("qqplot.ppm") export("[.quad") export("quad") export("quadform") export("quad.mppm") export("quad.ppm") export("quadratcount") export("quadratcount.ppp") export("quadratcount.splitppp") export("quadratresample") export("quadrats") export("quadrat.test") export("quadrat.testEngine") export("quadrat.test.mppm") export("quadrat.test.ppm") export("quadrat.test.ppp") export("quadrat.test.quadratcount") export("quadrat.test.splitppp") export("quadscheme") export("quadscheme.logi") export("quadscheme.replicated") export("quadscheme.spatial") export("quantile.im") export("RandomFieldsSafe") export("range.im") export("rastersample") export("raster.x") export("rasterx.im") export("raster.xy") export("rasterxy.im") export("raster.y") export("rastery.im") export("rat") export("ratfv") export("rbind.hyperframe") export("rCauchy") 
export("rcell") export("rcellnumber") export("rDGS") export("rDiggleGratton") export("reach") export("reach.fii") export("reach.interact") export("reach.ppm") export("reach.rmhmodel") export("rebadge.fv") export("rebound") export("rebound.im") export("rebound.owin") export("rebound.ppp") export("rebound.psp") export("reconcile.fv") export("rectquadrat.breaks") export("rectquadrat.countEngine") export("redraw.simplepanel") export("reduced.sample") export("reduceformula") export("reflect") export("reflect.default") export("reflect.im") export("reflect.layered") export("reincarnate.interact") export("relrisk") export("repair.image.xycoords") export("repair.old.factor.image") export("rescale") export("rescale.im") export("rescale.layered") export("rescale.linnet") export("rescale.lpp") export("rescale.owin") export("rescale.ppp") export("rescale.psp") export("rescale.units") export("rescue.rectangle") export("reset.spatstat.options") export("resid1panel") export("resid1plot") export("resid4plot") export("residuals.mppm") export("residuals.ppm") export("resolve.1.default") export("resolve.2D.kernel") export("resolve.defaults") export("resolveEinfo") export("resolve.vargamma.shape") export("restrict.mask") export("revcumsum") export("reverse.xypolygon") export("rGaussPoisson") export("rgb2hex") export("rgbim") export("rgbNA") export("rHardcore") export("rho2hat") export("rhohat") export("rhohatCalc") export("rhohatEngine") export("rhohat.lpp") export("rhohat.lppm") export("rhohat.ppm") export("rhohat.ppp") export("rhohat.quad") export("rhs.of.formula") export("ripras") export("rjitter") export("rknn") export("rlabel") export("rLGCP") export("rlinegrid") export("rMatClust") export("rMaternI") export("rMaternII") export("rmax.rule") export("rmh") export("rmhcontrol") export("rmhcontrol.default") export("rmhcontrol.list") export("rmhcontrol.rmhcontrol") export("rmh.default") export("rmhEngine") export("rmhexpand") export("RmhExpandRule") export("rmhmodel") 
export("rmhmodel.default") export("rmhmodel.list") export("rmhmodel.ppm") export("rmhmodel.rmhmodel") export("rmh.ppm") export("rmhResolveControl") export("rmhResolveExpansion") export("rmhResolveTypes") export("rmhsnoop") export("rmhSnoopEnv") export("rmhstart") export("rmhstart.default") export("rmhstart.list") export("rmhstart.rmhstart") export("rMosaicField") export("rMosaicSet") export("rmpoint") export("rmpoint.I.allim") export("rmpoispp") export("rNeymanScott") export("rotate") export("rotate.im") export("rotate.layered") export("rotate.linnet") export("rotate.lpp") export("rotate.owin") export("rotate.ppp") export("rotate.psp") export("rotxy") export("rotxypolygon") export("rounding") export("rounding.default") export("rounding.pp3") export("rounding.ppp") export("rounding.ppx") export("round.pp3") export("round.ppp") export("round.ppx") export("row.names<-.hyperframe") export("row.names.hyperframe") export("rpoint") export("rpoint.multi") export("rpoisline") export("rpoislinetess") export("rpoislpp") export("rpoispp") export("rpoispp3") export("rpoisppOnLines") export("rpoisppx") export("rPoissonCluster") export("rshift") export("rshift.ppp") export("rshift.psp") export("rshift.splitppp") export("rSSI") export("rstrat") export("rStrauss") export("rStraussHard") export("rsyst") export("rthin") export("rThomas") export("rtoro") export("runifdisc") export("runiflpp") export("runifpoint") export("runifpoint3") export("runifpointOnLines") export("runifpointx") export("runifpoispp") export("runifpoisppOnLines") export("runifrect") export("run.simplepanel") export("rVarGamma") export("safelookup") export("samecolour") export("samefunction") export("SatPiece") export("Saturated") export("scalardilate") export("scalardilate.default") export("scalardilate.im") export("scalardilate.layered") export("scalardilate.linnet") export("scalardilate.lpp") export("scalardilate.owin") export("scalardilate.ppp") export("scalardilate.psp") export("scaletointerval") 
export("scaletointerval.default") export("scaletointerval.im") export("scanBinomLRTS") export("scanLRTS") export("scanmeasure") export("scanmeasure.im") export("scanmeasure.ppp") export("scanPoisLRTS") export("scanpp") export("scan.test") export("second.moment.calc") export("second.moment.engine") export("selfcrossing.psp") export("sensiblevarname") export("sessionLibs") export("setcov") export("setmarks") export("setminus.owin") export("sewpcf") export("sewsmod") export("sharpen") export("sharpen.ppp") export("shift") export("shift.im") export("shift.layered") export("shift.linnet") export("shift.lpp") export("shift.owin") export("shift.ppp") export("shift.psp") export("shiftxy") export("shiftxypolygon") export("short.deparse") export("shortside") export("shortside.box3") export("shortside.boxx") export("shortside.owin") export("sidelengths") export("sidelengths.box3") export("sidelengths.boxx") export("sidelengths.owin") export("signalStatus") export("simplepanel") export("simplify.owin") export("simplify.xypolygon") export("simulate.kppm") export("simulate.ppm") export("simulate.slrm") export("simulrecipe") export("singlestring") export("slrAssemblePixelData") export("slrm") export("slr.prepare") export("Smooth") export("smooth.fv") export("Smooth.fv") export("smooth.msr") export("Smooth.msr") export("smoothpointsEngine") export("smooth.ppp") export("Smooth.ppp") export("Softcore") export("solutionset") export("sort.im") export("spatialCDFframe") export("spatialCDFtest") export("spatstatClusterModelInfo") export("spatstat.options") export("spatstat.rawdata.location") export("spatstatRmhInfo") export("sp.foundclass") export("sp.foundclasses") export("sphere.volume") export("splitHybridInteraction") export("split.im") export("[<-.splitppp") export("[.splitppp") export("split<-.ppp") export("split.ppp") export("[<-.splitppx") export("[.splitppx") export("split.ppx") export("spokes") export("square") export("stieltjes") export("store.versionstring.spatstat") 
export("stratrand") export("Strauss") export("strausscounts") export("StraussHard") export("str.hyperframe") export("subfits") export("subfits.new") export("subfits.old") export("suffloc") export("suffstat") export("suffstat.generic") export("suffstat.poisson") export("sum.im") export("summarise.trend") export("summary.envelope") export("summary.fii") export("summary.hyperframe") export("summary.im") export("summary.linnet") export("summary.listof") export("summary.logiquad") export("summary.lpp") export("summary.lut") export("summary.mppm") export("summary.owin") export("summary.pp3") export("summary.ppm") export("summary.ppp") export("summary.pppmatching") export("summary.ppx") export("summary.profilepl") export("summary.psp") export("summary.quad") export("summary.rmhexpand") export("summary.splitppp") export("summary.splitppx") export("summary.units") export("sumouter") export("sumsymouter") export("superimpose") export("superimpose.default") export("superimposeMarks") export("superimpose.ppp") export("superimpose.psp") export("superimposePSP") export("sympoly") export("termsinformula") export("terms.kppm") export("terms.lppm") export("terms.ppm") export("terms.slrm") export("[<-.tess") export("[.tess") export("tess") export("test.crossing.psp") export("test.selfcrossing.psp") export("thomas.estK") export("thomas.estpcf") export("tile.areas") export("tilecentroids") export("tilenames<-") export("tilenames") export("tiles") export("timed") export("transect.im") export("trap.extra.arguments") export("trianglediameters") export("trim.mask") export("trim.rectangle") export("triplet.family") export("Triplets") export("Tstat") export("tweak.colourmap") export("tweak.fv.entry") export("union.owin") export("union.quad") export("unique.ppp") export("unique.ppx") export("unitname<-") export("unitname") export("unitname<-.box3") export("unitname.box3") export("unitname<-.boxx") export("unitname.boxx") export("unitname<-.default") export("unitname.default") 
export("unitname<-.im") export("unitname.im") export("unitname<-.kppm") export("unitname.kppm") export("unitname<-.linnet") export("unitname.linnet") export("unitname<-.lpp") export("unitname.lpp") export("unitname<-.minconfit") export("unitname.minconfit") export("unitname<-.owin") export("unitname.owin") export("unitname<-.pp3") export("unitname.pp3") export("unitname<-.ppm") export("unitname.ppm") export("unitname<-.ppp") export("unitname.ppp") export("unitname<-.ppx") export("unitname.ppx") export("unitname<-.psp") export("unitname.psp") export("unitname<-.quad") export("unitname.quad") export("unitname<-.slrm") export("unitname.slrm") export("unit.square") export("unmark") export("unmark.lpp") export("unmark.ppp") export("unmark.ppx") export("unmark.psp") export("unmark.splitppp") export("unnormdensity") export("unparen") export("update.interact") export("update.kppm") export("update.lppm") export("update.ppm") export("update.rmhcontrol") export("update.rmhstart") export("update.slrm") export("validate.mask") export("validate.quad") export("validposint") export("valid.ppm") export("validradius") export("vanilla.fv") export("varblock") export("vargamma.estK") export("vargamma.estpcf") export("variablesinformula") export("vcov.kppm") export("vcov.lppm") export("vcov.mppm") export("vcov.ppm") export("vcov.slrm") export("verbalogic") export("verifyclass") export("verify.xypolygon") export("versionstring.interact") export("versionstring.ppm") export("versionstring.spatstat") export("vertices") export("Vmark") export("volume") export("volume.box3") export("volume.boxx") export("volume.owin") export("warn.ignored.args") export("weighted.var") export("which.max.im") export("whist") export("will.expand") export("windows.mppm") export("with.fv") export("with.hyperframe") export("with.msr") export("w.quad") export("X2testEngine") export("x.quad") export("xtfrm.im") export("xy.grid") export("xypolygon2psp") export("xypolyselfint") export("y.quad") export("zapsmall.im") # 
....... Special cases ........... export("%mapp%") export("%mark%") export("%mrep%") export("%msub%") export("%orifnull%") # ....... End of special cases ... # ......................................... # Automatically generated list of S3 methods # ......................................... S3method("affine", "im") S3method("affine", "layered") S3method("affine", "linnet") S3method("affine", "lpp") S3method("affine", "owin") S3method("affine", "ppp") S3method("affine", "psp") S3method("anova", "lppm") S3method("anova", "mppm") S3method("anova", "ppm") S3method("anova", "slrm") S3method("as.array", "im") S3method("as.data.frame", "bw.optim") S3method("as.data.frame", "fv") S3method("as.data.frame", "hyperframe") S3method("as.data.frame", "im") S3method("as.data.frame", "ppp") S3method("as.data.frame", "ppx") S3method("as.data.frame", "psp") S3method("as.function", "fv") S3method("as.function", "linfun") S3method("as.function", "rhohat") S3method("as.fv", "bw.optim") S3method("as.fv", "data.frame") S3method("as.fv", "fasp") S3method("as.fv", "fv") S3method("as.fv", "kppm") S3method("as.fv", "matrix") S3method("as.fv", "minconfit") S3method("as.hyperframe", "data.frame") S3method("as.hyperframe", "default") S3method("as.hyperframe", "hyperframe") S3method("as.hyperframe", "listof") S3method("as.hyperframe", "ppx") S3method("as.im", "default") S3method("as.im", "distfun") S3method("as.im", "function") S3method("as.im", "im") S3method("as.im", "leverage.ppm") S3method("as.im", "linim") S3method("as.im", "matrix") S3method("as.im", "nnfun") S3method("as.im", "owin") S3method("as.im", "ppp") S3method("as.im", "scan.test") S3method("as.im", "tess") S3method("as.interact", "fii") S3method("as.interact", "interact") S3method("as.interact", "ppm") S3method("as.linim", "default") S3method("as.linim", "linfun") S3method("as.linim", "linim") S3method("as.linnet", "linfun") S3method("as.linnet", "linnet") S3method("as.linnet", "lpp") S3method("as.linnet", "lppm") 
S3method("as.list", "hyperframe") S3method("as.matrix", "im") S3method("as.matrix", "owin") S3method("as.matrix", "ppx") S3method("as.owin", "data.frame") S3method("as.owin", "default") S3method("as.owin", "distfun") S3method("as.owin", "funxy") S3method("as.owin", "im") S3method("as.owin", "kppm") S3method("as.owin", "layered") S3method("as.owin", "linfun") S3method("as.owin", "linnet") S3method("as.owin", "lpp") S3method("as.owin", "lppm") S3method("as.owin", "nnfun") S3method("as.owin", "owin") S3method("as.owin", "ppm") S3method("as.owin", "ppp") S3method("as.owin", "psp") S3method("as.owin", "quad") S3method("as.owin", "rmhmodel") S3method("as.owin", "tess") S3method("as.ppm", "kppm") S3method("as.ppm", "ppm") S3method("as.ppm", "profilepl") S3method("as.ppp", "data.frame") S3method("as.ppp", "default") S3method("as.ppp", "influence.ppm") S3method("as.ppp", "lpp") S3method("as.ppp", "matrix") S3method("as.ppp", "ppp") S3method("as.ppp", "psp") S3method("as.ppp", "quad") S3method("as.psp", "data.frame") S3method("as.psp", "default") S3method("as.psp", "linnet") S3method("as.psp", "lpp") S3method("as.psp", "matrix") S3method("as.psp", "owin") S3method("as.psp", "psp") S3method("as.tess", "im") S3method("as.tess", "list") S3method("as.tess", "owin") S3method("as.tess", "quadratcount") S3method("as.tess", "quadrattest") S3method("as.tess", "tess") S3method("bermantest", "lpp") S3method("bermantest", "lppm") S3method("bermantest", "ppm") S3method("bermantest", "ppp") S3method("by", "im") S3method("by", "ppp") S3method("cbind", "fv") S3method("cbind", "hyperframe") S3method("closing", "owin") S3method("closing", "ppp") S3method("closing", "psp") S3method("coef", "fii") S3method("coef", "kppm") S3method("coef", "lppm") S3method("coef", "mppm") S3method("coef", "ppm") S3method("coef", "slrm") S3method("coef", "summary.fii") S3method("coef", "summary.ppm") S3method("compatible", "fasp") S3method("compatible", "fv") S3method("compatible", "im") S3method("compatible", 
"units") S3method("connected", "im") S3method("connected", "owin") S3method("connected", "ppp") S3method("contour", "funxy") S3method("contour", "im") S3method("contour", "listof") S3method("contour", "objsurf") S3method("coords", "ppp") S3method("coords", "ppx") S3method("crossdist", "default") S3method("crossdist", "lpp") S3method("crossdist", "pp3") S3method("crossdist", "ppp") S3method("crossdist", "ppx") S3method("crossdist", "psp") S3method("cut", "im") S3method("cut", "ppp") S3method("density", "ppp") S3method("density", "psp") S3method("density", "splitppp") S3method("deriv", "fv") S3method("dfbetas", "ppm") S3method("diameter", "box3") S3method("diameter", "boxx") S3method("diameter", "linnet") S3method("diameter", "owin") S3method("dilation", "owin") S3method("dilation", "ppp") S3method("dilation", "psp") S3method("dim", "fasp") S3method("dim", "hyperframe") S3method("dim", "im") S3method("dim", "msr") S3method("dimnames", "fasp") S3method("dimnames", "msr") S3method("distfun", "lpp") S3method("distfun", "owin") S3method("distfun", "ppp") S3method("distfun", "psp") S3method("distmap", "owin") S3method("distmap", "ppp") S3method("distmap", "psp") S3method("duplicated", "ppp") S3method("duplicated", "ppx") S3method("envelope", "envelope") S3method("envelope", "kppm") S3method("envelope", "lpp") S3method("envelope", "lppm") S3method("envelope", "matrix") S3method("envelope", "pp3") S3method("envelope", "ppm") S3method("envelope", "ppp") S3method("eroded.volumes", "box3") S3method("eroded.volumes", "boxx") S3method("erosion", "owin") S3method("erosion", "ppp") S3method("erosion", "psp") S3method("evalCovar", "lppm") S3method("evalCovar", "ppm") S3method("extractAIC", "lppm") S3method("extractAIC", "ppm") S3method("extractAIC", "slrm") S3method("[", "fasp") S3method("fitin", "ppm") S3method("fitted", "kppm") S3method("fitted", "mppm") S3method("fitted", "ppm") S3method("fitted", "slrm") S3method("flipxy", "im") S3method("flipxy", "layered") S3method("flipxy", 
"owin") S3method("flipxy", "ppp") S3method("flipxy", "psp") S3method("formula", "fv") S3method("formula", "kppm") S3method("formula", "lppm") S3method("formula", "ppm") S3method("formula", "slrm") S3method("[", "fv") S3method("hist", "im") S3method("[", "hyperframe") S3method("$", "hyperframe") S3method("identify", "ppp") S3method("identify", "psp") S3method("[", "im") S3method("image", "listof") S3method("image", "objsurf") S3method("influence", "ppm") S3method("intensity", "lpp") S3method("intensity", "ppm") S3method("intensity", "ppp") S3method("iplot", "default") S3method("iplot", "layered") S3method("iplot", "ppp") S3method("is.empty", "default") S3method("is.empty", "owin") S3method("is.empty", "ppp") S3method("is.empty", "psp") S3method("is.hybrid", "interact") S3method("is.hybrid", "ppm") S3method("is.marked", "default") S3method("is.marked", "ppm") S3method("is.marked", "ppp") S3method("is.marked", "psp") S3method("is.marked", "quad") S3method("is.multitype", "default") S3method("is.multitype", "ppm") S3method("is.multitype", "ppp") S3method("is.multitype", "quad") S3method("is.poisson", "interact") S3method("is.poisson", "kppm") S3method("is.poisson", "lppm") S3method("is.poisson", "mppm") S3method("is.poisson", "ppm") S3method("is.poisson", "rmhmodel") S3method("is.poisson", "slrm") S3method("is.stationary", "kppm") S3method("is.stationary", "lppm") S3method("is.stationary", "ppm") S3method("is.stationary", "rmhmodel") S3method("is.stationary", "slrm") S3method("Kmodel", "kppm") S3method("kstest", "lpp") S3method("kstest", "lppm") S3method("kstest", "mppm") S3method("kstest", "ppm") S3method("kstest", "ppp") S3method("kstest", "slrm") S3method("labels", "kppm") S3method("labels", "ppm") S3method("labels", "slrm") S3method("[", "layered") S3method("levels", "im") S3method("leverage", "ppm") S3method("[", "linnet") S3method("[", "localpcfmatrix") S3method("logLik", "lppm") S3method("logLik", "mppm") S3method("logLik", "ppm") S3method("logLik", "slrm") 
S3method("[", "lpp") S3method("marks", "default") S3method("marks", "ppp") S3method("marks", "ppx") S3method("marks", "psp") S3method("marks", "quad") S3method("max", "im") S3method("mean", "im") S3method("median", "im") S3method("min", "im") S3method("model.frame", "kppm") S3method("model.frame", "lppm") S3method("model.frame", "ppm") S3method("model.images", "kppm") S3method("model.images", "lppm") S3method("model.images", "ppm") S3method("model.images", "slrm") S3method("model.matrix", "kppm") S3method("model.matrix", "lppm") S3method("model.matrix", "ppm") S3method("model.matrix", "slrm") S3method("[", "msr") S3method("multiplicity", "data.frame") S3method("multiplicity", "ppp") S3method("multiplicity", "ppx") S3method("names", "hyperframe") S3method("nnclean", "pp3") S3method("nnclean", "ppp") S3method("nncross", "default") S3method("nncross", "lpp") S3method("nncross", "pp3") S3method("nncross", "ppp") S3method("nndensity", "ppp") S3method("nndist", "default") S3method("nndist", "lpp") S3method("nndist", "pp3") S3method("nndist", "ppp") S3method("nndist", "ppx") S3method("nndist", "psp") S3method("nnfun", "lpp") S3method("nnfun", "ppp") S3method("nnfun", "psp") S3method("nnwhich", "default") S3method("nnwhich", "lpp") S3method("nnwhich", "pp3") S3method("nnwhich", "ppp") S3method("nnwhich", "ppx") S3method("nobs", "lppm") S3method("nobs", "ppm") S3method("npoints", "pp3") S3method("npoints", "ppp") S3method("npoints", "ppx") S3method("nsegments", "psp") S3method("objsurf", "kppm") S3method("objsurf", "minconfit") S3method("opening", "owin") S3method("opening", "ppp") S3method("opening", "psp") S3method("[", "owin") S3method("pairdist", "default") S3method("pairdist", "lpp") S3method("pairdist", "pp3") S3method("pairdist", "ppp") S3method("pairdist", "ppx") S3method("pairdist", "psp") S3method("pairs", "im") S3method("pairs", "listof") S3method("pcf", "fasp") S3method("pcf", "fv") S3method("pcfmodel", "kppm") S3method("pcf", "ppp") S3method("periodify", 
"owin") S3method("periodify", "ppp") S3method("periodify", "psp") S3method("persp", "funxy") S3method("persp", "im") S3method("persp", "objsurf") S3method("pixellate", "owin") S3method("pixellate", "ppp") S3method("pixellate", "psp") S3method("plot", "addvar") S3method("plot", "barplotdata") S3method("plot", "bermantest") S3method("plot", "bw.frac") S3method("plot", "bw.optim") S3method("plot", "colourmap") S3method("plot", "diagppm") S3method("plot", "envelope") S3method("plot", "fasp") S3method("plot", "fii") S3method("plot", "funxy") S3method("plot", "fv") S3method("plot", "hyperframe") S3method("plot", "im") S3method("plot", "infline") S3method("plot", "influence.ppm") S3method("plot", "kppm") S3method("plot", "kstest") S3method("plot", "layered") S3method("plot", "leverage.ppm") S3method("plot", "linfun") S3method("plot", "linim") S3method("plot", "linnet") S3method("plot", "listof") S3method("plot", "localpcfmatrix") S3method("plot", "lppm") S3method("plot", "minconfit") S3method("plot", "mppm") S3method("plot", "msr") S3method("plot", "objsurf") S3method("plot", "owin") S3method("plot", "parres") S3method("plot", "plotpairsim") S3method("plot", "plotppm") S3method("plot", "pp3") S3method("plot", "ppm") S3method("plot", "ppp") S3method("plot", "pppmatching") S3method("plot", "ppx") S3method("plot", "profilepl") S3method("plot", "psp") S3method("plot", "qqppm") S3method("plot", "quad") S3method("plot", "quadratcount") S3method("plot", "quadrattest") S3method("plot", "rho2hat") S3method("plot", "rhohat") S3method("plot", "scan.test") S3method("plot", "slrm") S3method("plot", "splitppp") S3method("plot", "tess") S3method("pool", "envelope") S3method("pool", "fasp") S3method("pool", "quadrattest") S3method("pool", "rat") S3method("[", "pp3") S3method("[", "ppp") S3method("[", "ppx") S3method("predict", "kppm") S3method("predict", "lppm") S3method("predict", "mppm") S3method("predict", "ppm") S3method("predict", "rhohat") S3method("predict", "slrm") 
S3method("print", "addvar") S3method("print", "autoexec") S3method("print", "box3") S3method("print", "boxx") S3method("print", "bt.frame") S3method("print", "bw.frac") S3method("print", "bw.optim") S3method("print", "colourmap") S3method("print", "diagppm") S3method("print", "distfun") S3method("print", "envelope") S3method("print", "fasp") S3method("print", "fii") S3method("print", "funxy") S3method("print", "fv") S3method("print", "fvfun") S3method("print", "hyperframe") S3method("print", "im") S3method("print", "infline") S3method("print", "influence.ppm") S3method("print", "interact") S3method("print", "isf") S3method("print", "kppm") S3method("print", "layered") S3method("print", "leverage.ppm") S3method("print", "linfun") S3method("print", "linim") S3method("print", "linnet") S3method("print", "localpcfmatrix") S3method("print", "lpp") S3method("print", "lppm") S3method("print", "lut") S3method("print", "minconfit") S3method("print", "mppm") S3method("print", "msr") S3method("print", "nnfun") S3method("print", "objsurf") S3method("print", "owin") S3method("print", "parres") S3method("print", "plotpairsim") S3method("print", "plotppm") S3method("print", "pp3") S3method("print", "ppm") S3method("print", "ppp") S3method("print", "pppmatching") S3method("print", "ppx") S3method("print", "profilepl") S3method("print", "psp") S3method("print", "qqppm") S3method("print", "quad") S3method("print", "quadrattest") S3method("print", "rat") S3method("print", "rho2hat") S3method("print", "rhohat") S3method("print", "rmhcontrol") S3method("print", "rmhexpand") S3method("print", "rmhInfoList") S3method("print", "rmhmodel") S3method("print", "rmhstart") S3method("print", "simplepanel") S3method("print", "slrm") S3method("print", "splitppp") S3method("print", "splitppx") S3method("print", "summary.fii") S3method("print", "summary.hyperframe") S3method("print", "summary.im") S3method("print", "summary.listof") S3method("print", "summary.logiquad") S3method("print", 
"summary.lpp") S3method("print", "summary.lut") S3method("print", "summary.mppm") S3method("print", "summary.owin") S3method("print", "summary.pp3") S3method("print", "summary.ppm") S3method("print", "summary.ppp") S3method("print", "summary.psp") S3method("print", "summary.quad") S3method("print", "summary.rmhexpand") S3method("print", "summary.splitppp") S3method("print", "summary.splitppx") S3method("print", "summary.units") S3method("print", "tess") S3method("print", "timed") S3method("print", "units") S3method("[", "psp") S3method("[", "quad") S3method("quadratcount", "ppp") S3method("quadratcount", "splitppp") S3method("quadrat.test", "mppm") S3method("quadrat.test", "ppm") S3method("quadrat.test", "ppp") S3method("quadrat.test", "quadratcount") S3method("quadrat.test", "splitppp") S3method("quantile", "im") S3method("range", "im") S3method("rbind", "hyperframe") S3method("reach", "fii") S3method("reach", "interact") S3method("reach", "ppm") S3method("reach", "rmhmodel") S3method("reflect", "default") S3method("reflect", "im") S3method("reflect", "layered") S3method("rescale", "im") S3method("rescale", "layered") S3method("rescale", "linnet") S3method("rescale", "lpp") S3method("rescale", "owin") S3method("rescale", "ppp") S3method("rescale", "psp") S3method("rescale", "units") S3method("residuals", "mppm") S3method("residuals", "ppm") S3method("rhohat", "lpp") S3method("rhohat", "lppm") S3method("rhohat", "ppm") S3method("rhohat", "ppp") S3method("rhohat", "quad") S3method("rmhcontrol", "default") S3method("rmh", "default") S3method("rmhmodel", "default") S3method("rmhmodel", "list") S3method("rmhmodel", "ppm") S3method("rmh", "ppm") S3method("rmhstart", "default") S3method("rotate", "im") S3method("rotate", "layered") S3method("rotate", "linnet") S3method("rotate", "lpp") S3method("rotate", "owin") S3method("rotate", "ppp") S3method("rotate", "psp") S3method("rounding", "default") S3method("rounding", "pp3") S3method("rounding", "ppp") S3method("rounding", 
"ppx") S3method("round", "pp3") S3method("round", "ppp") S3method("round", "ppx") S3method("row.names", "hyperframe") S3method("rshift", "ppp") S3method("rshift", "psp") S3method("rshift", "splitppp") S3method("scalardilate", "default") S3method("scalardilate", "im") S3method("scalardilate", "layered") S3method("scalardilate", "linnet") S3method("scalardilate", "lpp") S3method("scalardilate", "owin") S3method("scalardilate", "ppp") S3method("scalardilate", "psp") S3method("scaletointerval", "default") S3method("scaletointerval", "im") S3method("sharpen", "ppp") S3method("shift", "im") S3method("shift", "layered") S3method("shift", "linnet") S3method("shift", "lpp") S3method("shift", "owin") S3method("shift", "ppp") S3method("shift", "psp") S3method("shortside", "box3") S3method("shortside", "boxx") S3method("shortside", "owin") S3method("sidelengths", "box3") S3method("sidelengths", "boxx") S3method("sidelengths", "owin") S3method("simulate", "kppm") S3method("simulate", "ppm") S3method("simulate", "slrm") S3method("Smooth", "fv") S3method("Smooth", "msr") S3method("Smooth", "ppp") S3method("sort", "im") S3method("split", "im") S3method("[", "splitppp") S3method("split", "ppp") S3method("split", "ppx") S3method("[", "splitppx") S3method("str", "hyperframe") S3method("sum", "im") S3method("summary", "envelope") S3method("summary", "fii") S3method("summary", "hyperframe") S3method("summary", "im") S3method("summary", "linnet") S3method("summary", "listof") S3method("summary", "logiquad") S3method("summary", "lpp") S3method("summary", "lut") S3method("summary", "mppm") S3method("summary", "owin") S3method("summary", "pp3") S3method("summary", "ppm") S3method("summary", "ppp") S3method("summary", "pppmatching") S3method("summary", "ppx") S3method("summary", "profilepl") S3method("summary", "psp") S3method("summary", "quad") S3method("summary", "rmhexpand") S3method("summary", "splitppp") S3method("summary", "splitppx") S3method("summary", "units") 
S3method("superimpose", "default") S3method("superimpose", "ppp") S3method("superimpose", "psp") S3method("terms", "kppm") S3method("terms", "lppm") S3method("terms", "ppm") S3method("terms", "slrm") S3method("[", "tess") S3method("unique", "ppp") S3method("unique", "ppx") S3method("unitname", "box3") S3method("unitname", "boxx") S3method("unitname", "im") S3method("unitname", "kppm") S3method("unitname", "linnet") S3method("unitname", "lpp") S3method("unitname", "minconfit") S3method("unitname", "owin") S3method("unitname", "pp3") S3method("unitname", "ppm") S3method("unitname", "ppp") S3method("unitname", "ppx") S3method("unitname", "psp") S3method("unitname", "quad") S3method("unitname", "slrm") S3method("unmark", "lpp") S3method("unmark", "ppp") S3method("unmark", "ppx") S3method("unmark", "psp") S3method("unmark", "splitppp") S3method("update", "interact") S3method("update", "kppm") S3method("update", "lppm") S3method("update", "ppm") S3method("update", "rmhcontrol") S3method("update", "rmhstart") S3method("update", "slrm") S3method("vcov", "kppm") S3method("vcov", "lppm") S3method("vcov", "mppm") S3method("vcov", "ppm") S3method("vcov", "slrm") S3method("volume", "box3") S3method("volume", "boxx") S3method("volume", "owin") S3method("with", "fv") S3method("with", "hyperframe") S3method("with", "msr") S3method("xtfrm", "im") # ......................................... # Assignment methods # ......................................... 
S3method("coords<-", "ppp") S3method("coords<-", "ppx") S3method("dimnames<-", "fasp") S3method("formula<-", "fv") S3method("[<-", "hyperframe") S3method("$<-", "hyperframe") S3method("[<-", "im") S3method("levels<-", "im") S3method("[<-", "listof") S3method("marks<-", "lpp") S3method("marks<-", "ppp") S3method("marks<-", "ppx") S3method("marks<-", "psp") S3method("names<-", "hyperframe") S3method("[<-", "ppp") S3method("row.names<-", "hyperframe") S3method("[<-", "splitppp") S3method("split<-", "ppp") S3method("[<-", "splitppx") S3method("[<-", "tess") S3method("unitname<-", "box3") S3method("unitname<-", "boxx") S3method("unitname<-", "im") S3method("unitname<-", "kppm") S3method("unitname<-", "linnet") S3method("unitname<-", "lpp") S3method("unitname<-", "minconfit") S3method("unitname<-", "owin") S3method("unitname<-", "pp3") S3method("unitname<-", "ppm") S3method("unitname<-", "ppp") S3method("unitname<-", "ppx") S3method("unitname<-", "psp") S3method("unitname<-", "quad") S3method("unitname<-", "slrm") # ......................................... # End of methods # ......................................... 
spatstat/demo/0000755000176000001440000000000012237642736013105 5ustar ripleyusersspatstat/demo/diagnose.R0000755000176000001440000001217112237642736015026 0ustar ripleyusersif(dev.cur() <= 1) { dd <- getOption("device") if(is.character(dd)) dd <- get(dd) dd() } oldpar <- par(ask = interactive() && (.Device %in% c("X11", "GTK", "windows", "Macintosh"))) par(mfrow=c(1,1)) oldoptions <- options(warn = -1) # ####################################################### # X <- rpoispp(function(x,y) { 1000 * exp(- 4 * x)}, 1000) plot(X, main="Inhomogeneous Poisson pattern") fit.hom <- ppm(X, ~1, Poisson()) fit.inhom <- ppm(X, ~x, Poisson()) diagnose.ppm(fit.inhom, which="marks", type="Pearson", main="Mark plot\nCircles for positive residual mass\nColour for negative residual density") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, which="marks", main=c("Wrong model (homogeneous Poisson)", "raw residuals")) diagnose.ppm(fit.inhom, which="marks", main=c("Right model (inhomogeneous Poisson)", "raw residuals")) par(mfrow=c(1,1)) diagnose.ppm(fit.inhom, which="smooth", main="Smoothed residual field") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, which="smooth", main=c("Wrong model (homogeneous Poisson)", "Smoothed residual field")) diagnose.ppm(fit.inhom, which="smooth", main=c("Right model (inhomogeneous Poisson)", "Smoothed residual field")) par(mfrow=c(1,1)) diagnose.ppm(fit.inhom, which="x") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, which="x", main=c("Wrong model (homogeneous Poisson)", "lurking variable plot for x coordinate")) diagnose.ppm(fit.inhom, which="x", main=c("Right model (inhomogeneous Poisson)", "lurking variable plot for x coordinate")) par(mfrow=c(1,1)) diagnose.ppm(fit.hom, type="Pearson",main="standard diagnostic plots") par(mfrow=c(1,2)) diagnose.ppm(fit.hom, main="Wrong model (homogeneous Poisson)") diagnose.ppm(fit.inhom, main="Right model (inhomogeneous Poisson)") par(mfrow=c(1,1)) # ####################################################### # LEVERAGE/INFLUENCE 
plot(leverage(fit.inhom)) plot(influence(fit.inhom)) plot(dfbetas(fit.inhom)) # ####################################################### # COMPENSATORS CF <- compareFit(listof(hom=fit.hom, inhom=fit.inhom), Kcom, same="iso", different="icom") plot(CF, main="model compensators", legend=FALSE) legend("topleft", legend=c("empirical K function", "compensator of CSR", "compensator of inhomogeneous Poisson"), lty=1:3, col=1:3) # ####################################################### # Q - Q PLOTS # qqplot.ppm(fit.hom, 40) #conclusion: homogeneous Poisson model is not correct title(main="Q-Q plot of smoothed residuals") qqplot.ppm(fit.inhom, 40) # TAKES A WHILE... title(main=c("Right model (inhomogeneous Poisson)", "Q-Q plot of smoothed residuals")) # conclusion: fitted inhomogeneous Poisson model looks OK # ####################################################### # plot(cells) fitPoisson <- ppm(cells, ~1, Poisson()) diagnose.ppm(fitPoisson, main=c("CSR fitted to cells data", "Raw residuals", "No suggestion of departure from CSR")) diagnose.ppm(fitPoisson, type="pearson", main=c("CSR fitted to cells data", "Pearson residuals", "No suggestion of departure from CSR")) # These diagnostic plots do NOT show evidence of departure from uniform Poisson plot(Kcom(fitPoisson), cbind(iso, icom) ~ r) plot(Gcom(fitPoisson), cbind(han, hcom) ~ r) # K compensator DOES show strong evidence of departure from uniform Poisson qqplot.ppm(fitPoisson, 40) title(main=c("CSR fitted to cells data", "Q-Q plot of smoothed raw residuals", "Strong suggestion of departure from CSR")) # Q-Q plot DOES show strong evidence of departure from uniform Poisson. 
# fitStrauss <- ppm(cells, ~1, Strauss(r=0.1)) diagnose.ppm(fitStrauss, main=c("Strauss model fitted to cells data", "Raw residuals")) diagnose.ppm(fitStrauss, type="pearson", main=c("Strauss model fitted to cells data", "Pearson residuals")) plot(Kcom(fitStrauss), cbind(iso, icom) ~ r) plot(Gcom(fitStrauss), cbind(han, hcom) ~ r) # next line takes a LOOONG time ... qqplot.ppm(fitStrauss, 40, type="pearson") title(main=c("Strauss model fitted to cells data", "Q-Q plot of smoothed Pearson residuals", "Suggests adequate fit")) # Conclusion: Strauss model seems OK # ####################################################### # plot(nztrees) fit <- ppm(nztrees, ~1, Poisson()) diagnose.ppm(fit, type="pearson") title(main=c("CSR fitted to NZ trees", "Pearson residuals")) diagnose.ppm(fit, type="pearson", cumulative=FALSE) title(main=c("CSR fitted to NZ trees", "Pearson residuals (non-cumulative)")) lurking(fit, expression(x), type="pearson", cumulative=FALSE, splineargs=list(spar=0.3)) # Sharp peak at right is suspicious qqplot.ppm(fit, 40, type="pearson") title(main=c("CSR fitted to NZ trees", "Q-Q plot of smoothed Pearson residuals")) # Slight suggestion of departure from Poisson at top right of pattern. 
par(oldpar) options(oldoptions) spatstat/demo/data.R0000755000176000001440000000610412252274535014140 0ustar ripleyusersif(dev.cur() <= 1) { dd <- getOption("device") if(is.character(dd)) dd <- get(dd) dd() } oldpar <- par(ask = interactive() && dev.interactive(orNone=TRUE)) oldoptions <- options(warn=-1) plot(amacrine) plot(anemones, markscale=0.5) ants.extra$plotit() plot(bei.extra$elev, main="Beilschmiedia") plot(bei, add=TRUE, pch=16, cex=0.3) plot(betacells) plot(bramblecanes, cols=1:3) plot(split(bramblecanes)) plot(bronzefilter,markscale=1) plot(cells) plot(as.linnet(chicago), main="Chicago Street Crimes",col="green") plot(as.ppp(chicago), add=TRUE, col="red", chars=c(16,2,22,17,24,15,6)) chorley.extra$plotit() plot(clmfires, which.marks="cause", cols=2:5, cex=0.25, main="Castilla-La Mancha forest fires") plot(clmcov200, main="Covariates for forest fires") plot(copper$Points, main="Copper") plot(copper$Lines, add=TRUE) plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), parargs=list(mar=rep(1,4))) plot(demopat) plot(finpines, main="Finnish pines") wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), chars=c(16,3), cex=0.4, cols=2:3) plot(gordon, main="People in Gordon Square", pch=16) plot(gorillas, which.marks=1, chars=c(1,3), cols=2:3, main="Gorilla nest sites") plot(hamster, cols=c(2,4)) plot(heather) plot(humberside) plot(owin(c(0,25),c(0,20)), type="n", main="Hyytiala") plot(hyytiala$window, add=TRUE) a <- plot(hyytiala, add=TRUE, cols=2:5) legend(22, 20, legend=names(a), pch=a, col=2:5) plot(japanesepines) plot(lansing) plot(split(lansing)) plot(longleaf) plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) plot(murchison$greenstone, main="Murchison data", col="lightgreen") plot(murchison$gold, add=TRUE, pch="+",col="blue") plot(murchison$faults, add=TRUE, col="red") plot(nbfires, use.marks=FALSE, pch=".") 
plot(split(nbfires), use.marks=FALSE, chars=".") a <- plot(split(nbfires)$"2000", which.marks="fire.type", main=c("New Brunswick fires 2000", "by fire type"), cols=c("red", "blue", "green", "cyan")) legend("bottomleft", title="Fire type", legend=names(a), pch=a, col=c("red", "blue", "green", "cyan")) plot(nztrees) enable3d <- ("scatterplot3d" %in% row.names(installed.packages())) if(enable3d) { plot(osteo[1:10,], tick.marks=FALSE, xlab="", ylab="", zlab="") } ponderosa.extra$plotit() plot(paracou, cols=2:3, chars=c(16,3)) pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") rm(pyr) plot(redwood) redwoodfull.extra$plotit() plot(residualspaper$Fig1) plot(residualspaper$Fig4a) plot(residualspaper$Fig4b) plot(residualspaper$Fig4c) shapley.extra$plotit(main="Shapley") plot(simdat) plot(spruces, maxsize=min(nndist(spruces))/2) plot(swedishpines) a <- plot(urkiola, cex=0.5, cols=2:3) legend("bottomleft", legend=names(a), pch=a, col=2:3) plot(waka, markscale=0.02, main=c("Waka national park", "tree diameters")) plot(waterstriders) par(oldpar) options(oldoptions) spatstat/demo/00Index0000755000176000001440000000020112237642736014233 0ustar ripleyusersspatstat Demonstration of spatstat library diagnose Demonstration of diagnostic functions in spatstat data Datasets in spatstat spatstat/demo/spatstat.R0000755000176000001440000004637312252323567015106 0ustar ripleyusersif(dev.cur() <= 1) { dd <- getOption("device") if(is.character(dd)) dd <- get(dd) dd() } oldpar <- par(ask = interactive() && dev.interactive(orNone=TRUE)) oldoptions <- options(warn=-1) fanfare <- function(stuff) { plot(c(0,1),c(0,1),type="n",axes=FALSE, xlab="", ylab="") text(0.5,0.5, stuff, cex=2.5) } par(mar=c(1,1,2,1)+0.1) fanfare("Spatstat demonstration") fanfare("I. 
Types of data") plot(swedishpines, main="Point pattern") plot(demopat, cols=c("green", "blue"), main="Multitype point pattern") plot(longleaf, fg="blue", main="Marked point pattern") plot(finpines, main="Point pattern with multivariate marks") a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) plot(a, main="Line segment pattern") marks(a) <- sample(letters[1:4], 20, replace=TRUE) plot(a, main="Multitype line segment pattern") marks(a) <- runif(20) plot(a, main="Marked line segment pattern") plot(owin(), main="Rectangular window") plot(letterR, main="Polygonal window") plot(as.mask(letterR), main="Binary mask window") Z <- as.im(function(x,y){ sqrt((x - 1)^2 + (y-1)^2)}, square(2)) plot(Z, main="Pixel image") X <- runifpoint(42) plot(dirichlet(X), main="Tessellation") enable3d <- ("scatterplot3d" %in% row.names(installed.packages())) if(enable3d) plot(rpoispp3(100), main="Three-dimensional point pattern") plot(simplenet, main="Linear network (linnet)") X <- rpoislpp(20, simplenet) plot(X, main="Point pattern on linear network (lpp)") fanfare("II. 
Graphics") plot(letterR, col="green", border="red", lwd=2, main="Polygonal window with colour fill") plot(letterR, hatch=TRUE, spacing=0.15, angle=30, main="Polygonal window with line shading") plot(amacrine, chars=c(1,16), main="plot(X, chars = c(1,16))") plot(amacrine, cols=c("red","blue"), chars=16, main="plot(X, cols=c(\"red\", \"blue\"))") opa <- par(mfrow=c(1,2)) plot(longleaf, markscale=0.03, main="markscale=0.03") plot(longleaf, markscale=0.09, main="markscale=0.09") par(opa) Z <- as.im(function(x,y) { r <- sqrt(x^2+y^2); r * exp(-r) }, owin(c(-5,5),c(-5,5))) plot(Z, main="pixel image: image plot") plot(Z, main="pixel image: image plot (heat colours)", col=heat.colors(256)) plot(Z, main="pixel image: logarithmic colour map", log=TRUE, col=rainbow(128, end=5/6)) contour(Z, main="pixel image: contour plot", axes=FALSE) plot(Z, main="pixel image: image + contour plot") contour(Z, add=TRUE) persp(Z, colmap=terrain.colors(128), shade=0.3, phi=30,theta=100, main="pixel image: perspective plot") ct <- colourmap(rainbow(20), breaks=seq(-1,1,length=21)) plot(ct, main="Colour map for real numbers") ca <- colourmap(rainbow(8), inputs=letters[1:8]) plot(ca, main="Colour map for discrete values") W <- owin(c(1,5),c(0,4.5)) Lout <- scaletointerval(distmap(rebound.owin(letterR, W))) Lin <- scaletointerval(distmap(complement.owin(letterR, W))) L <- scaletointerval(eval.im(Lin-Lout)) D <- scaletointerval(density(runifpoint(30, W), adjust=0.3)) X <- scaletointerval(as.im(function(x,y){ x }, W=W)) plot(listof(L=L, D=D, X=X), main="Multiple images") pairs(L, D, X, main="Multiple images: pairs plot") plot(rgbim(D,X,L,maxColorValue=1), valuesAreColours=TRUE, main="Three images: RGB display") plot(hsvim(D,L,X), valuesAreColours=TRUE, main="Three images: HSV display") fanfare("III. 
Conversion between types") W <- as.owin(chorley) plot(W, "window W") plot(as.mask(W)) plot(as.mask(W, dimyx=1000)) plot(as.im(W, value=3)) plot(as.im(W, value=3, na.replace=0), ribbon=TRUE) plot(as.im(function(x,y) {x^2 + y}, W=square(1)), main="as.im(function(x,y){x^2+y})") V <- delaunay(runifpoint(12)) plot(V, main="Tessellation V") plot(as.im(V, dimyx=256), main="as.im(V)") plot(as.owin(V)) X <- swedishpines plot(X, "point pattern X") plot(as.im(X), col=c("white","red"), ribbon=FALSE, xlab="", ylab="") plot(as.owin(X), add=TRUE) fanfare("IV. Subsetting and splitting data") plot(X, "point pattern X") subset <- 1:20 plot(X[subset], main="subset operation: X[subset]") subwindow <- owin(poly=list(x=c(0,96,96,40,40),y=c(0,0,100,100,50))) plot(X[subwindow], main="subset operation: X[subwindow]") plot(lansing, "Lansing Woods data") plot(split(lansing), main="split operation: split(X)") plot(longleaf, main="Longleaf Pines data") plot(cut(longleaf, breaks=3), main=c("cut operation", "cut(longleaf, breaks=3)")) Z <- dirichlet(runifpoint(16)) X <- runifpoint(100) plot(Z, main="points cut by tessellation") plot(cut(X, Z), add=TRUE) plot(split(X, Z), main="points split by tessellation") W <- square(1) X <- as.im(function(x,y){sqrt(x^2+y^2)}, W) Y <- dirichlet(runifpoint(12, W)) plot(split(X,Y), main="image split by tessellation") fanfare("V. 
Exploratory data analysis") par(mar=c(3,3,3,2)+0.1) plot(swedishpines, main="Quadrat counts", pch="+") tab <- quadratcount(swedishpines, 4) plot(tab, add=TRUE, lty=2, cex=2, col="blue") par(mar=c(5,3,3,2)+0.1) plot(swedishpines, main="", pch="+") title(main=expression(chi^2 * " test"), cex.main=2) tes <- quadrat.test(swedishpines, 3) tes plot(tes, add=TRUE, col="red", cex=1.5, lty=2, lwd=3) title(sub=paste("p-value =", signif(tes$p.value,3)), cex.sub=1.4) par(mar=c(4,4,3,2)+0.1) tesk <- kstest(nztrees, "x") tesk plot(tesk) mur <- lapply(murchison, rescale, s=1000) mur <- lapply(mur, "unitname<-", value="km") X <- mur$gold D <- distfun(mur$faults) plot(X, main="Murchison gold deposits", cols="blue") plot(mur$faults, add=TRUE, col="red") rh <- rhohat(X,D, dimyx=256) plot(rh, main="Smoothed rate estimate", xlab="Distance to nearest fault (km)", legend=FALSE) plot(predict(rh), main="predict(rhohat(X,D))") Z <- density(cells, 0.07) plot(Z, main="Kernel smoothed intensity of point pattern") plot(cells, add=TRUE) plot(redwood, main="Redwood data") te <- scan.test(redwood, 0.1, method="poisson") plot(te, main=c("Scan Statistic for redwood data", paste("p-value =", signif(te$p.value,3)))) plot(redwood, add=TRUE) te X <- unique(unmark(shapley)) plot(X, "Shapley galaxy concentration", pch=".") plot(nnclean(X, k=17), main="Byers-Raftery nearest neighbour cleaning", chars=c(".", "+"), cols=1:2) Y <- sharpen(X, sigma=0.5, edgecorrect=TRUE) plot(Y, main="Choi-Hall data sharpening", pch=".") owpa <- par(mfrow=c(1,2)) W <- grow.rectangle(as.rectangle(letterR), 1) X <- superimpose(runifpoint(300, letterR), runifpoint(50, W), W=W) plot(W, main="clusterset(X)") plot(clusterset(X, fast=TRUE), add=TRUE, chars=c("o", "+"), cols=1:2) plot(letterR, add=TRUE) plot(W, main="clusterset(X, 'd')") plot(clusterset(X, "d", exact=FALSE), add=TRUE) plot(letterR, add=TRUE) par(owpa) D <- density(a, sigma=0.05) plot(D, main="Kernel smoothed intensity of line segment pattern") plot(a, add=TRUE) X <- 
runifpoint(42) plot(dirichlet(X)) plot(X, add=TRUE) plot(delaunay(X)) plot(X, add=TRUE) parsave <- par(mfrow=c(2,2)) plot(longleaf, main="Longleaf Pines data") plot(nnmark(longleaf), main="Nearest mark") plot(Smooth(longleaf, 10), main="Kernel smoothing of marks") plot(idw(longleaf), main=c("Inverse distance weighted","smoothing of marks")) par(parsave) fryplot(cells, main=c("Fry plot","cells data"), pch="+") miplot(longleaf, main="Morishita Index plot", pch=16, col="blue") plot(swedishpines, main="Swedish Pines data") K <- Kest(swedishpines) plot(K, main="K function for Swedish Pines", legendmath=TRUE) en <- envelope(swedishpines, fun=Kest, nsim=10, correction="translate") plot(en, main="Envelopes of K function based on CSR", shade=c("hi", "lo")) pc <- pcf(swedishpines) plot(pc, main="Pair correlation function") plot(swedishpines, main="nearest neighbours") m <- nnwhich(swedishpines) b <- swedishpines[m] arrows(swedishpines$x, swedishpines$y, b$x, b$y, angle=12, length=0.1, col="red") plot(swedishpines %mark% (nndist(swedishpines)/2), markscale=1, main="Stienen diagram") plot(Gest(swedishpines), main=c("Nearest neighbour distance function G", "Gest(swedishpines)"), legendmath=TRUE) Z <- distmap(swedishpines, dimyx=512) plot(swedishpines$window, main="Distance map") plot(Z, add=TRUE) points(swedishpines) plot(Fest(swedishpines), main=c("Empty space function F", "Fest(swedishpines)"), legendmath=TRUE) W <- rebound.owin(letterR, square(5)) plot(distmap(W), main="Distance map") plot(W, add=TRUE) a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) contour(distmap(a), main="Distance map") plot(a, add=TRUE,col="red") plot(Jest(swedishpines), main=c("J-function", "J(r)=(1-G(r))/(1-F(r))")) X <- swedishpines X <- X[sample(1:npoints(X))] Z <- nnfun(X) plot(as.owin(X), main="Nearest neighbour map") plot(Z, add=TRUE) points(X) plot(allstats(swedishpines)) Fig4b <- residualspaper$Fig4b plot(Fig4b, main="Inhomogeneous point pattern") plot(Kinhom(Fig4b), 
main="Inhomogeneous K-function") plot(pcfinhom(Fig4b, stoyan=0.1), main="Inhomogeneous pair correlation") plot(Ginhom(Fig4b, sigma=0.06), main="Inhomogeneous G-function") plot(Jinhom(Fig4b, sigma=0.06), main="Inhomogeneous J-function") X <- unmark(bronzefilter) plot(X, "Bronze filter data") lam <- predict(ppm(X, ~x)) plot(Kscaled(X, lam), xlim=c(0, 1.5), main="Locally-scaled K function") plot(urkiola) plot(split(urkiola)) plot(density(split(urkiola))) contour(density(split(urkiola)), panel.begin=as.owin(urkiola)) plot(relrisk(urkiola), main="Relative risk (cross-validated)") plot(bramblecanes) br <- rescale(bramblecanes) plot(alltypes(br, "K"), mar.panel=c(4,5,2,2)+0.1) ama <- rescale(amacrine) plot(alltypes(ama, Lcross, envelope=TRUE, nsim=9), . - r ~ r, ylim=c(-25, 5)) ponderosa.extra$plotit(main="Ponderosa Pines") L <- localL(ponderosa) pL <- plot(L, lty=1, col=1, legend=FALSE, main="neighbourhood density functions for Ponderosa Pines") parsave <- par(mfrow=c(1,2)) ponderosa.extra$plotit() par(pty="s") plot(L, iso007 ~ r, main="point B") ponderosa.extra$plotit() L12 <- localL(ponderosa, rvalue=12) P12 <- ponderosa %mark% L12 Z12 <- Smooth(P12, sigma=5, dimyx=128) plot(Z12, col=topo.colors(128), main="smoothed neighbourhood density") contour(Z12, add=TRUE) points(ponderosa, pch=16, cex=0.5) plot(amacrine, main="Amacrine cells data") par(pty="s") mkc <- markcorr(amacrine, correction="translate", method="density", kernel="epanechnikov") plot(mkc, main="Mark correlation function", legend=FALSE) par(parsave) plot(alltypes(amacrine, markconnect), title="Mark connection functions for amacrine cells") parsave <- par(mfrow=c(1,2)) plot(spruces, cex.main=0.75) par(pty="s") plot(markcorr(spruces), main="Mark correlation", legendpos="bottomright") plot(spruces, cex.main=0.75) plot(markvario(spruces), main="Mark variogram", legendpos="topright") par(parsave) plot(as.listof(list("Emark(spruces)"=Emark(spruces), "Vmark(spruces)"=Vmark(spruces))), main="Independence 
diagnostics", ylim.covers=0, legendpos="bottom") if(enable3d) { par3 <- par(mfrow=c(1,2)) X <- rpoispp3(100) plot(X, main="3D point pattern X") plot(K3est(X), main="K-function in 3D") plot(X, main="3D point pattern X") plot(G3est(X), main="G-function in 3D", legendpos="bottomright") par(par3) } par2 <- par(mfrow=c(1,3)) X <- unmark(chicago) plot(as.linnet(X), main="Chicago Street Crimes",col="green") plot(as.ppp(X), add=TRUE, col="red") plot(linearK(X, correction="none"), main="Network K-function") plot(linearK(X, correction="Ang"), main="Corrected K-function") par(par2) fanfare("VI. Model-fitting") plot(japanesepines) fit <- ppm(japanesepines, ~1) print(fit) fit <- ppm(japanesepines, ~polynom(x,y,2)) print(fit) plot(fit, how="image", se=FALSE, main=c("Inhomogeneous Poisson model", "fit by maximum likelihood", "Fitted intensity")) plot(fit, how="image", trend=FALSE, main="Standard error of fitted intensity") plot(leverage(fit)) plot(influence(fit)) plot(mur$gold, main="Murchison gold deposits", cols="blue") plot(mur$faults, add=TRUE, col="red") fit <- ppm(mur$gold, ~D, covariates=list(D=distfun(mur$faults))) plot(parres(fit, "D"), main="Partial residuals from loglinear Poisson model", xlab="Distance to nearest fault (km)", ylab="log intensity of gold", legend=FALSE) legend("bottomleft", legend=c("partial residual", "loglinear fit"), col=c(1,4), lty=c(1,4)) parsave <- par(mfrow=c(1,2)) plot(redwood) fitT <- kppm(redwood, ~1, clusters="Thomas") plot(simulate(fitT)[[1]], main="simulation from fitted Thomas model") oop <- par(pty="s") plot(fitT, main=c("Thomas model","minimum contrast fit")) os <- objsurf(fitT) plot(os, main="Minimum contrast objective function", col=terrain.colors(128)) contour(os, add=TRUE) par(oop) plot(swedishpines) fit <- ppm(swedishpines, ~1, Strauss(r=7)) print(fit) plot(fit, how="image", main=c("Strauss model", "fit by maximum pseudolikelihood", "Conditional intensity plot")) # fitted interaction plot(swedishpines) fit <- ppm(swedishpines, ~1, 
PairPiece(c(3,5,7,9,11,13))) plot(fitin(fit), legend=FALSE, main=c("Pairwise interaction model", "fit by maximum pseudolikelihood")) # simulation plot(swedishpines) Xsim <- rmh(model=fit, start=list(n.start=80), control=list(nrep=100)) plot(Xsim, main="Simulation from fitted Strauss model") # model compensator plot(swedishpines) fit <- ppm(swedishpines, ~1, Strauss(r=7)) plot(Kcom(fit), cbind(iso, icom, pois) ~ r, legend=FALSE, main="model compensators") legend("topleft", legend=c("empirical K function", "Strauss model compensator of K", "Poisson theoretical K"), lty=1:3, col=1:3, inset=0.05) par(parsave) # Multitype data dpat <- rescale(demopat, 8) unitname(dpat) <- c("mile", "miles") dpat plot(dpat, cols=c("red", "blue")) fit <- ppm(dpat, ~marks + polynom(x,y,2), Poisson()) plot(fit, trend=TRUE, se=TRUE) fanfare("VII. Simulation") plot(letterR, main="Poisson random points") lambda <- 10/area.owin(letterR) points(rpoispp(lambda, win=letterR)) points(rpoispp(9 * lambda, win=letterR)) points(rpoispp(90 * lambda, win=letterR)) plot(rpoispp(100)) plot(rpoispp(function(x,y){1000 * exp(-3*x)}, 1000)) plot(rMaternII(200, 0.05)) plot(rSSI(0.05, 200)) plot(rThomas(10, 0.2, 5)) plot(rMatClust(10, 0.05, 4)) plot(rCauchy(30, 0.01, 5)) plot(rVarGamma(30, 2, 0.02, 5)) plot(rGaussPoisson(30, 0.05, 0.5)) if(require(RandomFields) && RandomFieldsSafe()) { param <- c(0, variance=0.2, nugget=0, scale=.1) mu <- 4 plot(rLGCP("exp", mu, param)) X <- rLGCP("exp", mu, param) plot(attr(X, "Lambda"), main="log-Gaussian Cox process") plot(X, add=TRUE, pch=16) } plot(rStrauss(200, 0.3, 0.07)) plot(rDiggleGratton(200,0.03,0.08)) plot(rDGS(300, 0.05)) plot(redwood, main="random thinning - rthin()") points(rthin(redwood, 0.5), col="green", cex=1.4) plot(rcell(nx=15)) plot(rsyst(nx=5)) abline(h=(1:4)/5, lty=2) abline(v=(1:4)/5, lty=2) plot(rstrat(nx=5)) abline(h=(1:4)/5, lty=2) abline(v=(1:4)/5, lty=2) X <- rsyst(nx=10) plot(rjitter(X, 0.02)) Xg <- rmh(list(cif="geyer", par=list(beta=1.25, 
gamma=1.6, r=0.2, sat=4.5), w=c(0,10,0,10)), control=list(nrep=1e4), start=list(n.start=200)) plot(Xg, main=paste("Geyer saturation process\n", "rmh() with cif=\"geyer\"")) L <- as.psp(matrix(runif(20), 5, 4), window=square(1)) plot(L, main="runifpointOnLines(30, L)") plot(runifpointOnLines(30, L), add=TRUE, pch="+") plot(L, main="rpoisppOnLines(3, L)") plot(rpoisppOnLines(3, L), add=TRUE, pch="+") plot(runiflpp(20, simplenet)) plot(rpoislpp(5, simplenet)) plot(rpoisline(10)) plot(rlinegrid(30, 0.1)) spatstat.options(npixel=256) X <- dirichlet(runifpoint(30)) plot(rMosaicSet(X, 0.4), col="green", border=NA) plot(X, add=TRUE) plot(rMosaicField(X, runif)) plot(rMosaicSet(rpoislinetess(3), 0.5), col="green", border=NA, main="Switzer's random set") spatstat.options(npixel=100) fanfare("VIII. Geometry") A <- letterR B <- shift(letterR, c(0.2,0.1)) plot(bounding.box(A,B), main="shift", type="n") plot(A, add=TRUE) plot(B, add=TRUE, border="red") B <- rotate(letterR, 0.2) plot(bounding.box(A,B), main="rotate", type="n") plot(A, add=TRUE) plot(B, add=TRUE, border="red") mat <- matrix(c(1.1, 0, 0.3, 1), 2, 2) B <- affine(letterR, mat=mat, vec=c(0.2,-0.1)) plot(bounding.box(A,B), main="affine", type="n") plot(A, add=TRUE) plot(B, add=TRUE, border="red") par1x2 <- par(mfrow=c(1,2)) L <- rpoisline(10, owin(c(1.5,4.5),c(0.2,3.6))) plot(L, main="Line segment pattern") plot(L$window, main="L[window]", type="n") plot(L[letterR], add=TRUE) plot(letterR, add=TRUE, border="red") par(par1x2) a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) plot(a, main="Self-crossing points") plot(selfcrossing.psp(a), add=TRUE, col="red") a <- as.psp(matrix(runif(20), 5, 4), window=square(1)) b <- rstrat(square(1), 5) plot(a, lwd=3, col="green", main="project points to segments") plot(b, add=TRUE, col="red", pch=16) v <- project2segment(b, a) Xproj <- v$Xproj plot(Xproj, add=TRUE, pch=16) arrows(b$x, b$y, Xproj$x, Xproj$y, angle=10, length=0.15, col="red") plot(a, 
main="pointsOnLines(L)") plot(pointsOnLines(a, np=100), add=TRUE, pch="+") parry <- par(mfrow=c(1,3)) X <- tess(xgrid=seq(2, 4, length=10), ygrid=seq(0, 3.5, length=8)) plot(X) plot(letterR) plot(intersect.tess(X, letterR)) X <- dirichlet(runifpoint(10)) plot(X) L <- infline(0.3,0.5) plot(owin(), main="L") plot(L, col="red", lwd=2) plot(chop.tess(X,L)) par(parry) W <- chorley$window plot(W, main="simplify.owin") WS <- simplify.owin(W, 2) plot(WS, add=TRUE, border="green") nopa <- par(mfrow=c(2,2)) Rbox <- grow.rectangle(as.rectangle(letterR), 0.3) v <- erode.owin(letterR, 0.25) plot(Rbox, type="n", main="erode.owin", cex.main=0.75) plot(letterR, add=TRUE, col="red", cex.main=0.75) plot(v, add=TRUE, col="blue") v <- dilate.owin(letterR, 0.25) plot(Rbox, type="n", main="dilate.owin", cex.main=0.75) plot(v, add=TRUE, col="blue") plot(letterR, add=TRUE, col="red") v <- closing.owin(letterR, 0.3) plot(Rbox, type="n", main="closing.owin", cex.main=0.75) plot(v, add=TRUE, col="blue") plot(letterR, add=TRUE, col="red") v <- opening.owin(letterR, 0.3) plot(Rbox, type="n", main="opening.owin", cex.main=0.75) plot(letterR, add=TRUE, col="red") plot(v, add=TRUE, col="blue") par(nopa) fanfare("IX. Operations on pixel images") Z <- distmap(swedishpines, dimyx=512) plot(Z, main="An image Z") plot(levelset(Z, 4)) plot(cut(Z, 5)) plot(eval.im(sqrt(Z) - 3)) plot(solutionset(abs(Z - 6) <= 1)) nopa <- par(mfrow=c(1,2)) plot(Z) segments(0,0,96,100,lwd=2) plot(transect.im(Z)) par(nopa) d <- distmap(cells, dimyx=256) W <- levelset(d, 0.06) nopa <- par(mfrow=c(1,2)) plot(W) plot(connected(W)) par(nopa) Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR) plot(Z) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=TRUE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) fanfare("X. 
Programming tools") showoffK <- function(Y, current, ..., fullpicture,rad) { plot(fullpicture, main=c("Animation using `applynbd'", "explaining the K function")) points(Y, cex=2) u <- current points(u[1],u[2],pch="+",cex=3) theta <- seq(0,2*pi,length=100) polygon(u[1]+ rad * cos(theta),u[2]+rad*sin(theta)) text(u[1]+rad/3,u[2]+rad/2,Y$n,cex=3) if(runif(1) < 0.2) Sys.sleep(runif(1, max=0.4)) return(Y$n) } par(ask=FALSE) applynbd(redwood, R=0.2, showoffK, fullpicture=redwood, rad=0.2, exclude=TRUE) par(oldpar) options(oldoptions) spatstat/NEWS0000644000176000001440000037730612252313136012662 0ustar ripleyusers CHANGES IN spatstat VERSION 1.35-0 OVERVIEW o We thank Melanie Bell, Leanne Bischof, Ida-Maria Sintorn, Ege Rubak, Martin Hazelton, Oscar Garcia, Rasmus Waagepetersen, Abdollah Jalilian and Jens Oehlschlaegel for contributions. o Support for analysing replicated spatial point patterns. o New vignette on analysing replicated spatial point patterns. o Objective function surface plots. o Estimator of point process intensity using nearest neighbour distances. o Improved estimator of pair correlation function. o Four new datasets. o Simple point-and-click interface functions for general use. o More support for fv objects. o More support for ppx objects. o Extensions to nearest neighbour functions. o Morphological operations accelerated. o Bug fix to pair correlation functions. o Bug fix to k-th nearest neighbour distances o Version nickname: 'Multiple Personality' NEW CLASSES o mppm An object of class 'mppm' represents a Gibbs point process model fitted to several point pattern datasets. The point patterns may be treated as independent replicates of the same point process, or as the responses in an experimental design, so that the model may depend on covariates associated with the design. Methods for this class include print, plot, predict, anova and so on. 
o objsurf An object of class 'objsurf' contains values of the likelihood or objective function in a neighbourhood of the maximum. o simplepanel An object of class 'simplepanel' represents a spatial arrangement of buttons that respond to mouse clicks, supporting a simple, robust graphical interface. NEW FUNCTIONS o mppm Fit a Gibbs model to several point patterns. The point pattern data may be organised as a designed experiment and the model may depend on covariates associated with the design. o anova.mppm Analysis of Deviance for models of class mppm o coef.mppm Extract fitted coefficients from a model of class mppm o fitted.mppm Fitted intensity or conditional intensity for a model of class mppm o kstest.mppm Kolmogorov-Smirnov test of goodness-of-fit for a model of class mppm o logLik.mppm log likelihood or log pseudolikelihood for a model of class mppm o plot.mppm Plot the fitted intensity or conditional intensity of a model of class mppm o predict.mppm Compute the fitted intensity or conditional intensity of a model of class mppm o quadrat.test Quadrat counting test of goodness-of-fit for a model of class mppm o residuals.mppm Point process residuals for a model of class mppm o subfits Extract point process models for each individual point pattern dataset, from a model of class mppm o vcov.mppm Variance-covariance matrix for a model of class mppm o integral.msr Integral of a measure. o objsurf For a model fitted by optimising an objective function, this command computes the objective function in a neighbourhood of the optimal value. o contour.objsurf, image.objsurf, persp.objsurf, plot.objsurf Plot an 'objsurf' object. o fvnames Define groups of columns in a function value table, for use in plot.fv, etc o multiplicity New generic function for which multiplicity.ppp is a method. o unique.ppx, duplicated.ppx, multiplicity.ppx Methods for unique(), duplicated() and multiplicity() for 'ppx' objects. These also work for 'pp3' and 'lpp' objects. 
o closepairs, crosspairs, closepaircounts, crosspaircounts Low-level functions for finding all close pairs of points o nndensity Estimate point process intensity using k-th nearest neighbour distances o simplepanel, run.simplepanel Support for a simple point-and-click interface for general use. NEW DATASETS o pyramidal Diggle-Lange-Benes data on pyramidal neurons in cingulate cortex. 31 point patterns divided into 3 groups. o waterstriders Nummelin-Penttinen waterstriders data. Three independent replicates of a point pattern formed by insects. o simba Simulated data example for mppm. Two groups of point patterns with different interpoint interactions. o demohyper Simulated data example for mppm. Point patterns and pixel image covariates, in two groups with different regression coefficients. SIGNIFICANT USER-VISIBLE CHANGES o plot.hyperframe The argument 'e' now has a different format. Instead of plot(h, plot(XYZ)) one must now type plot(h, quote(plot(XYZ))) This is necessary in order to avoid problems with 'S4 method dispatch'. o pcf.ppp, pcfinhom New argument 'divisor' enables better performance of the estimator of pair correlation function for distances close to zero. o applynbd The arguments N, R and criterion may now be specified together. o markstat The arguments N and R may now be specified together. o ppx New argument 'simplify' allows the result to be converted to an object of class 'ppp' or 'pp3' if appropriate. o as.function.fv Now allows multiple columns to be interpolated o multiplicity.ppp This function is now a method for the generic 'multiplicity'. It has also been accelerated. o nnfun.ppp, distfun.ppp New argument 'k' allows these functions to compute k-th nearest neighbours. o rVarGamma, kppm, vargamma.estK, vargamma.estpcf New argument 'nu.pcf' provides an alternative way to specify the kernel shape in the VarGamma model, instead of the existing argument 'nu.ker'. Function calls that use the ambiguous argument name 'nu' will no longer be accepted. 
o nnmap Image is now clipped to the original window. o dilation, erosion, opening, closing Polygonal computations greatly accelerated. o plot.colourmap Improved appearance and increased options, for discrete colourmaps. o plot.msr Improved appearance. o plot.ppp, plot.owin An `empty' plot can now be generated by setting type="n" o nndist.ppp, nnwhich.ppp, nncross.ppp Column names of the result are now more informative. BUG FIXES o nncross.ppp Results were completely incorrect when k > 1. Spotted by Jens Oehlschlaegel. Bug was introduced in spatstat 1.34-1. Fixed. o rVarGamma Simulations were incorrect; they were generated using the wrong value of the parameter 'nu.ker'. Spotted by Rasmus Waagepetersen and Abdollah Jalilian. Bug was always present. Fixed. o pair correlation functions (pcf.ppp, pcfdot, pcfcross, pcfinhom, ...) The result had a negative bias at the maximum 'r' value, because contributions to the pcf estimate from interpoint distances greater than max(r) were mistakenly omitted. Spotted by Rasmus Waagepetersen and Abdollah Jalilian. Bug was always present. Fixed. o demo(spatstat) This demonstration script had some unwanted side-effects, such as rescaling the coordinates of standard datasets 'bramblecanes', 'amacrine' and 'demopat', which caused the demonstration to crash when it was repeated several times, and caused errors in demo(data). Fixed. o rmh Visual debugger crashed sometimes with message 'XI not found'. Fixed. o predict.ppm Crashed if the model was fitted using 'covfunargs'. Fixed. o bounding.box Crashed if one of the arguments was NULL. Fixed. o multiplicity.ppp Did not handle data frames of marks. Fixed. CHANGES IN spatstat VERSION 1.34-1 OVERVIEW o We thank Kurt Hornik, Ted Rosenbaum, Ege Rubak and Achim Zeileis for contributions. o Important bug fix. SIGNIFICANT USER-VISIBLE CHANGES o as.box3 Now accepts objects of class 'ppx' or 'boxx'.
o crossdist.ppp, crossdist.pp3, crossdist.default New argument 'squared' allows the squared distances to be computed (saving computation time in some applications) BUG FIXES o union.owin, is.subset.owin, dilation.owin Results were sometimes completely wrong for polygons with holes. Spotted by Ted Rosenbaum. Fixed. o psstA, areaLoss Crashed in some cases, with error message 'Number of items to replace is not a multiple of replacement length'. Spotted by Achim Zeileis. Fixed. CHANGES IN spatstat VERSION 1.34-0 OVERVIEW o We thank Andrew Bevan, Ege Rubak, Aruna Jammalamadaka, Greg McSwiggan, Jeff Marcus, Jose M Blanco Moreno, and Brian Ripley for contributions. o spatstat and all its dependencies are now Free Open Source. o spatstat does not require the package 'gpclib' any more. o spatstat now depends on the packages 'tensor', 'abind' and 'polyclip' o polygon clipping is now enabled always. o Substantially more support for point patterns on linear networks. o Faster computations for pairwise interaction models. o Bug fixes in nearest neighbour calculations. o Bug fix in leverage and influence diagnostics. o Version nickname: "Window Cleaner" o spatstat now requires R version 3.0.2 or later NEW FUNCTIONS o as.lpp Convert data to a point pattern on a linear network. o distfun.lpp Distance function for point pattern on a linear network. o eval.linim Evaluate expression involving pixel images on a linear network. 
o linearKcross, linearKdot, linearKcross.inhom, linearKdot.inhom Multitype K functions for point patterns on a linear network o linearmarkconnect, linearmarkequal Mark connection function and mark equality function for multitype point patterns on a linear network o linearpcfcross, linearpcfdot, linearpcfcross.inhom, linearpcfdot.inhom Multitype pair correlation functions for point patterns on a linear network o linfun New class of functions defined on a linear network o nndist.lpp, nnwhich.lpp, nncross.lpp Methods for nndist, nnwhich, nncross for point patterns on a linear network o nnfun.lpp Method for nnfun for point patterns on a linear network o vcov.lppm Variance-covariance matrix for parameter estimates of a fitted point process model on a linear network. o bilinearform Computes a bilinear form o tilenames, tilenames<- Extract or change the names of tiles in a tessellation. SIGNIFICANT USER-VISIBLE CHANGES o package dependencies Previous versions of spatstat used the package 'gpclib' to perform geometrical calculations on polygons. Spatstat now uses the package 'polyclip' for polygon calculations instead. o free open-source licence The restrictive licence conditions of 'gpclib' no longer apply to users of spatstat. Spatstat and all its dependencies are now covered by a free open-source licence. o polygon clipping In previous versions of spatstat, geometrical calculations on polygons could be performed 'exactly' using gpclib or 'approximately' using pixel discretisation. Polygon calculations are now always performed 'exactly'. o intersect.owin, union.owin, setminus.owin If A and B are polygons, the result is a polygon. o erosion, dilation, opening, closing If the original set is a polygon, the result is a polygon. o intersect.tess, dirichlet The tiles of the resulting tessellation are polygons if the input was polygonal. o plot.owin Polygons with holes can now be plotted with filled colours on any device. 
o lppm New arguments 'eps' and 'nd' control the quadrature scheme. o pairwise interaction Gibbs models Many calculations for these models have been accelerated. BUG FIXES o nncross.pp3 Values were completely incorrect in some cases. Usually accompanied by a warning about NA values. (Spotted by Andrew Bevan.) Fixed. o nnmap, nnmark A small proportion of pixels had incorrect values. [These were the pixels lying on the boundary of a Dirichlet cell.] Fixed. o leverage.ppm, influence.ppm, dfbetas.ppm Results were incorrect for non-Poisson processes. Fixed. o distcdf Results were incorrect in some cases when W was a window and V was a point pattern. Fixed. o Kcross, Kdot, pcfcross, pcfdot Results were incorrect in some rare cases. Fixed. o as.fv.kppm Erroneously returned a NULL value. Fixed. o vcov.ppm For point process models fitted with method = 'logi', sometimes crashed with error "object 'fit' not found". (Spotted by Ege Rubak). Fixed. o vcov.ppm For multitype point process models, sometimes crashed with error "argument 'par' is missing". Fixed. o plot.im Crashed if some of the pixel values were infinite. Fixed. o owin owin(poly=..) crashed if there were NA's in the polygon coordinates. Spotted by Jeff Marcus. Fixed. o plot.fv Crashed, giving an incomprehensible error, if the plot formula contained a number with a decimal point. Fixed. o alltypes Crashed if envelopes=TRUE and global=TRUE, with error message 'csr.theo not found'. Spotted by Jose M Blanco Moreno. Fixed. o chop.tess, rMosaicField Format of result was garbled in some cases. Fixed. o vcov.ppm Sometimes gave an irrelevant warning "parallel option not available". Fixed. CHANGES IN spatstat VERSION 1.33-0 OVERVIEW o We thank Kurt Hornik and Brian Ripley for advice. o The package namespace has been modified. o Numerous internal changes. o Likelihood cross-validation for smoothing bandwidth. o More flexible models of intensity in cluster/Cox processes. o New generic function for smoothing. 
o Version nickname: 'Titanic Deckchair' NEW FUNCTIONS o bw.ppl Likelihood cross-validation technique for bandwidth selection in kernel smoothing. o is.lppm, is.kppm, is.slrm Tests whether an object is of class 'lppm', 'kppm' or 'slrm' o Smooth New generic function for spatial smoothing. o Smooth.ppp, Smooth.fv, Smooth.msr Methods for Smooth (identical to smooth.ppp, smooth.fv, smooth.msr respectively) o fitted.kppm Method for 'fitted' for cluster/Cox models SIGNIFICANT USER-VISIBLE CHANGES o namespace The namespace of the spatstat package has been changed. o internal functions Some undocumented internal functions are no longer visible, as they are no longer exported in the namespace. These functions can still be accessed using the form spatstat:::functionname. Functions that are not visible are not guaranteed to exist or to remain the same in future. o methods For some generic functions defined in the spatstat package, it is possible that R may fail to find one of the methods for the generic. This is a temporary problem due to a restriction on the size of the namespace in R 3.0.1. It will be fixed in future versions of R and spatstat. It only applies to methods for a generic which is a spatstat function (such as nndist) and does not apply to methods for generics defined elsewhere (such as density). In the meantime, if this problem should occur, it can be avoided by calling the method explicitly, in the form spatstat:::genericname.classname. o speed The package should run slightly faster overall, due to the improvement of the namespace, and changes to internal code. o envelope New argument 'envir.simul' determines the environment in which to evaluate the expression 'simulate'. o kppm More flexible models of the intensity, and greater control over the intensity fitting procedure, are now possible using the arguments 'covfunargs', 'use.gam', 'nd', 'eps' passed to ppm. Also the argument 'X' may now be a quadrature scheme. 
o distcdf Arguments W and V can now be point patterns. o Kest New option: correction = "good" selects the best edge correction that can be computed in reasonable time. o bw.diggle Accelerated. o predict.ppm Calculation of standard error has been accelerated. o smooth.ppp, smooth.fv, smooth.msr These functions will soon be 'Deprecated' in favour of the methods Smooth.ppp, Smooth.fv, Smooth.msr respectively. o stratrand, overlap.owin, update.slrm, edge.Trans, edge.Ripley These already-existing functions are now documented. BUG FIXES o kppm, matclust.estpcf, pcfmodel The pair correlation function of the Matern Cluster Process was evaluated incorrectly at distances close to 0. This could have affected the fitted parameters in matclust.estpcf() or kppm(clusters="MatClust"). Fixed. o anova.ppm Would cause an error in future versions of R when 'anova.glm' is removed from the namespace. Fixed. CHANGES IN spatstat VERSION 1.32-0 OVERVIEW o We thank Ege Rubak for major contributions. o Thanks also to Patrick Donnelly, Andrew Hardegen, Tom Lawrence, Robin Milne, Gopalan Nair and Sean O'Riordan. o New 'logistic likelihood' method for fitting Gibbs models. o Substantial acceleration of several functions including profile maximum pseudolikelihood and variance calculations for Gibbs models. o Nearest neighbours for point patterns in 3D o Nearest-neighbour interpolation in 2D o New 'progress plots' o Hard core thresholds can be estimated automatically. o More support for colour maps o More support for 'fv' objects o Spatstat now has version nicknames. The current version is "Logistical Nightmare". o Minor improvements and bug fixes. NEW FUNCTIONS o nncross.pp3 Method for 'nncross' for point patterns in 3D o nnmark Mark of nearest neighbour - can be used for interpolation o dclf.progress, mad.progress Progress plots (envelope representations) for the DCLF and MAD tests. o deriv.fv Numerical differentiation for 'fv' objects. 
o interp.colourmap Smooth interpolation of colour map objects - makes it easy to build colour maps with gradual changes in colour o tweak.colourmap Change individual colour values in a colour map object o beachcolourmap Colour scheme appropriate for `altitudes' (signed numerical values) o as.fv Convert various kinds of data to an 'fv' object o quadscheme.logi Generates quadrature schemes for the logistic method of ppm. o beginner Introduction for beginners. SIGNIFICANT USER-VISIBLE CHANGES o ppm New option: method = "logi" Fits a Gibbs model by the newly developed 'logistic likelihood' method which is often faster and more accurate than maximum pseudolikelihood. Code contributed by Ege Rubak. o profilepl Greatly accelerated, especially for area-interaction models. o vcov.ppm Greatly accelerated for higher-order interaction models. o smooth.ppp Now handles bandwidths equal to zero (by invoking 'nnmark') o Hardcore, StraussHard The hard core distance 'hc' can now be omitted; it will be estimated from data. o plot.ppp Now behaves differently if there are multiple columns of marks. Each column of marks is plotted, in a series of separate plots arranged side-by-side. o plot.im Argument 'col' can now be a function o lohboot Now computes confidence intervals for L-functions as well (fun="Lest" or fun="Linhom") o dclf.test, mad.test The argument X can now be an object produced by a previous call to dclf.test or mad.test. o plot.fv Labelling of plots has been improved in some cases. o smooth.fv Further options added. o density.ppp The argument 'weights' can now be a matrix. o smooth.ppp Accelerated, when there are several columns of marks. o density.ppp Accelerated slightly. o simulate.ppm, simulate.kppm The total computation time is also returned. o simulate.kppm Now catches errors (such as 'insufficient memory'). o latest.news, licence.polygons Can now be executed by typing the name of the function without parentheses.
o latest.news The text is now displayed one page at a time. BUG FIXES o Hest, Gfox, Jfox The 'raw' estimate was not computed correctly (or at least it was not the raw estimate described in the help files). Spotted by Tom Lawrence. Fixed. o edges2vees Format of result was incorrect if there were fewer than 3 edges. Fixed. o Jfox The theoretical value (corresponding to independence between X and Y) was erroneously given as 0 instead of 1. Spotted by Patrick Donnelly. Fixed. o ppm, quadscheme, default.dummy If the grid spacing parameter 'eps' was specified, the quadrature scheme was sometimes slightly incorrect (missing a few dummy points near the window boundary). Fixed. o print.timed Matrices were printed incorrectly. Fixed. CHANGES IN spatstat VERSION 1.31-3 OVERVIEW o spatstat now 'Suggests' the package 'tensor' o Code slightly accelerated. o More support for pooling of envelopes. o Bug fixes. NEW FUNCTIONS o nnmap Given a point pattern, finds the k-th nearest point in the pattern from each pixel in a raster. o coef.fii, coef.summary.fii Extract the interaction coefficients of a fitted interpoint interaction o edges2vees Low-level function for finding triples in a graph. SIGNIFICANT USER-VISIBLE CHANGES o predict.ppm New argument 'correction' allows choice of edge correction when calculating the conditional intensity. o pool.envelope New arguments 'savefuns' and 'savepatterns'. o pool.envelope Envelopes generated with VARIANCE=TRUE can now be pooled. o pool.envelope The plot settings of the input data are now respected. o Numerous functions have been slightly accelerated. BUG FIXES o predict.ppm Calculation of the conditional intensity omitted the edge correction if correction='translate' or correction='periodic'. Fixed. o shift.lpp, rotate.lpp, scalardilate.lpp, affine.lpp, shift.linnet, rotate.linnet, scalardilate.linnet, affine.linnet The enclosing window was not correctly transformed. Fixed. 
o rHardcore, rStraussHard, rDiggleGratton, rDGS The return value was invisible. Fixed. o ppm In rare cases the results obtained with forcefit=FALSE and forcefit=TRUE were different, due to numerical rounding effects. Fixed. CHANGES IN spatstat VERSION 1.31-2 OVERVIEW o We thank Robin Corria Anslie, Julian Gilbey, Kiran Marchikanti, Ege Rubak and Thordis Linda Thorarinsdottir for contributions. o spatstat now depends on R 3.0.0 o More support for linear networks o More functionality for nearest neighbours o Bug fix in fitting Geyer model o Performance improvements and bug fixes NEW FUNCTIONS o affine.lpp, shift.lpp, rotate.lpp, rescale.lpp, scalardilate.lpp Geometrical transformations for point patterns on a linear network o affine.linnet, shift.linnet, rotate.linnet, rescale.linnet, scalardilate.linnet Geometrical transformations for linear networks o [.linnet Subset operator for linear networks o timed Records the computation time taken SIGNIFICANT USER-VISIBLE CHANGES o nncross nncross.ppp can now find the k-th nearest neighbours, for any k. o nndist, nnwhich New argument 'by' makes it possible to find nearest neighbours belonging to specified subsets in a point pattern, for example, the nearest neighbour of each type in a multitype point pattern. o [.fv Now handles the argument 'drop'. o with.fv Argument 'drop' replaced by new argument 'fun' (with different interpretation). o [.lpp Subset index may now be a window (class 'owin') o Kest Options correction='border' and correction='none' now run about 4 times faster, thanks to Julian Gilbey. o density.ppp Numerical underflow no longer occurs when sigma is very small and 'at="points"'. A warning is no longer issued. Thanks to Robin Corria Anslie. o crossing.psp New argument 'fatal' allows the user to handle empty intersections o union.owin It is now guaranteed that if A is a subset of B, then union.owin(A,B)=B. o plot.colourmap Now passes arguments to axis() to control the plot. Appearance of plot improved. 
o image.listof Now passes arguments to plot.colourmap() if equal.ribbon=TRUE. o kppm Accelerated (especially for large datasets). o plot.envelope plot.envelope is now equivalent to plot.fv and is essentially redundant. o rThomas, rMatClust, rNeymanScott Improved explanations in help files. o All functions Many functions have been slightly accelerated. BUG FIXES o ppm Results were incorrect for the Geyer saturation model with a non-integer value of the saturation parameter 'sat'. Spotted by Thordis Linda Thorarinsdottir. Bug introduced in spatstat 1.20-0, July 2010. Fixed. o ppm Fitting a stationary Poisson process using a nonzero value of 'rbord', as in "ppm(X, rbord=R)" with R > 0, gave incorrect results. Fixed. o predict.slrm Crashed with message 'longer object length is not a multiple of shorter object length' if the original data window was not a rectangle. Fixed. o iplot Main title was sometimes incorrect. Fixed. o plot.layered Ignored argument 'main' in some cases. Fixed. o plot.listof, image.listof Crashed sometimes with a message 'figure margins too large' when equal.ribbon=TRUE. Fixed. o print.ppx Crashed if the object contained local coordinates. Fixed. o transect.im Crashed if the transect lay partially outside the image domain. Fixed. o rthin Crashed if X was empty. Fixed. o max.im, min.im, range.im Ignored additional arguments after the first argument. Fixed. o update.lppm Updated object did not remember the name of the original dataset. Fixed. o envelope Grey shading disappeared from plots of envelope objects when the envelopes were transformed using eval.fv or eval.fasp. Fixed. CHANGES IN spatstat VERSION 1.31-1 OVERVIEW o We thank Marcelino de la Cruz, Daniel Esser, Jason Goldstick, Abdollah Jalilian, Ege Rubak and Fabrice Vinatier for contributions. o Nonparametric estimation and tests for point patterns in a linear network. o More support for 'layered' objects. o Find clumps in a point pattern. o Connected component interaction model. 
o Improvements to interactive plots. o Visual debugger for Metropolis-Hastings algorithm. o Bug fix in Metropolis-Hastings simulation of Geyer process. o Faster Metropolis-Hastings simulation. o Faster computation of 'envelope', 'fv' and 'fasp' objects. o Improvements and bug fixes. NEW FUNCTIONS o connected.ppp Find clumps in a point pattern. o kstest.lpp, kstest.lppm The spatial Kolmogorov-Smirnov test can now be applied to point patterns on a linear network (class 'lpp') and point processes on a linear network (class 'lppm'). o bermantest.lpp, bermantest.lppm Berman's Z1 and Z2 tests can now be applied to point patterns on a linear network (class 'lpp') and point processes on a linear network (class 'lppm'). o rhohat.lpp, rhohat.lppm Nonparametric estimation of the dependence of a point pattern on a spatial covariate: 'rhohat' now applies to objects of class 'lpp' and 'lppm'. o intensity.lpp Empirical intensity of a point pattern on a linear network. o as.function.rhohat Converts a 'rhohat' object to a function, with extrapolation beyond the endpoints. o [.layered Subset operator for layered objects. o shift, rotate, affine, rescale, reflect, flipxy, scalardilate These geometrical transformations now work for 'layered' objects. o iplot.layered Interactive plotting for 'layered' objects. o as.owin.layered Method for as.owin for layered objects. o [.owin Subset operator for windows, equivalent to intersect.owin. o rcellnumber Generates random integers for the Baddeley-Silverman counterexample. o is.lpp Tests whether an object is a point pattern on a linear network. o is.stationary.lppm, is.poisson.lppm New methods for is.stationary and is.poisson for class 'lppm' o sessionLibs Print library names and version numbers (for use in Sweave scripts) SIGNIFICANT USER-VISIBLE CHANGES o iplot iplot is now generic, with methods for 'ppp', 'layered' and 'default'. iplot methods now support zoom and pan navigation. 
o rmh.default New argument 'snoop' allows the user to activate a visual debugger for the Metropolis-Hastings algorithm. o connected connected() is now generic, with methods for 'im', 'owin' and 'ppp'. o alltypes Now works for lpp objects o rlabel Now works for lpp, pp3, ppx objects o plot.kstest Can now perform P-P and Q-Q plots as well. o plot.fasp New argument 'samey' controls whether all panels have the same y limits. o plot.fasp Changed default value of 'samex'. o Objects of class 'envelope', 'fv' and 'fasp' Reduced computation time and storage required for these objects. o pcfmodel.kppm Improved calculation. o plot.fv Improved collision-avoidance algorithm (for avoiding overlaps between curves and legend) o ppm Improved error handling o envelope All methods for 'envelope' now handle fun=NULL o setminus.owin Better handling of the case where both arguments are rectangles. o rmh Simulation has been further accelerated. o lppm Accelerated. o vcov.ppm Accelerated. o marktable Accelerated. o Triplets() interaction Accelerated. o alltypes Accelerated when envelope=TRUE. BUG FIXES o rmh Simulation of the Geyer saturation process was incorrect. [Bug introduced in previous version, spatstat 1.31-0.] Fixed. o rmh Simulation of the Geyer saturation process was incorrectly initialised, so that the results of a short run (i.e. small value of 'nrep') were incorrect, while long runs were correct. [Bug introduced in spatstat 1.17-0, October 2009.] Fixed. o ppm Objects fitted with use.gam=TRUE caused fatal errors in various functions including print, summary, vcov and model.frame. Spotted by Jason Goldstick. Fixed. o lpp, runiflpp, rpoislpp Empty point patterns caused an error. Fixed. o rmh.default Crashed for hybrid models, with message 'Attempt to apply non-function'. Spotted by Ege Rubak. Fixed. o relrisk Crashed when 'at="points"' for a multitype pattern with more than 2 types. Spotted by Marcelino de la Cruz. Fixed. 
o erosion.owin, dilation.psp, border Ignored the arguments "..." in some cases (namely when the window was polygonal and 'gpclib' was disabled). Fixed. o rsyst, rcell Did not correctly handle the argument 'dx'. Spotted by Fabrice Vinatier. Fixed. o correction="trans" Various functions such as Kest no longer recognised 'correction = "trans"'. Fixed. o istat Crashed with an error message about envelopes. Fixed. o summary.ppm, print.ppm p-values which were exactly equal to zero were reported as NA. Fixed. o [.im Crashed if the intersection consisted of a single row or column of pixels. Fixed. o plot.im Sometimes incorrectly displayed an image consisting of a single row or column of pixels. Fixed. o plot.layered The plot region was determined by the first layer, so that objects in subsequent layers could sometimes fall outside the plot region. Fixed. o transect.im If the arguments 'from' and 'to' were numeric vectors of length 2, the result was garbled. Fixed. o Inhomogeneous K functions and pair correlation functions [Kinhom, pcfinhom, Kcross.inhom, Kdot.inhom, pcfcross.inhom, etc.] These functions reported an error 'lambda is not a vector' if the intensity argument lambda was computed using density(, at="points"). Fixed. o rlabel Did not accept a point pattern with a hyperframe of marks. Fixed. o alltypes Crashed when envelope=TRUE if the summary function 'fun' did not have default values for the marks i and j. Fixed. o Kres, Gres, psst, psstA Ignored the unit of length. Fixed. CHANGES IN spatstat VERSION 1.31-0 OVERVIEW o We thank Frederic Lavancier and Ege Rubak for contributions. o Major bug fix in simulation of area-interaction process. o Metropolis-Hastings simulations accelerated. o Rounding of spatial coordinates o clmfires dataset corrected. o Bug fixes and minor improvements. NEW FUNCTIONS o round.ppp Round the spatial coordinates of a point pattern to a specified number of decimal places. o rounding Determine whether a dataset has been rounded. 
SIGNIFICANT USER-VISIBLE CHANGES o rmh Simulation of the following models has been accelerated: areaint, dgs, diggra, fiksel, geyer, hardcore, lennard, multihard, strauss, straush, straussm, strausshm. o rmh The transition history of the simulation (which is saved if 'track=TRUE') now also contains the value of the Hastings ratio for each proposal. o clmfires The clmfires dataset has been modified to remove errors and inconsistencies. o plot.linim Appearance of the plot has been improved, when style='width'. o summary.ppm Now reports whether the spatial coordinates have been rounded. o dclf.test, mad.test The range of distance values ('rinterval') used in the test is now printed in the test output, and is saved as an attribute. BUG FIXES o rmh Simulation of the Area-Interaction model was completely incorrect. Spotted by Frederic Lavancier. The bug was introduced in spatstat version 1.23-6 or later. Fixed. o dclf.test The test statistic was incorrectly scaled (by a few percent). This did not affect the p-value of the test. Fixed. o ppx If argument 'coord.type' was missing, various errors occurred: a crash may have occurred, or the results may have depended on the storage type of the data. Spotted by Ege Rubak. Fixed. o plot.ppx Crashed for 1-dimensional point patterns. Spotted by Ege Rubak. Fixed. CHANGES IN spatstat VERSION 1.30-0 OVERVIEW o We thank Jorge Mateu, Andrew Bevan, Olivier Flores, Marie-Colette van Lieshout, Nicolas Picard and Ege Rubak for contributions. o The spatstat manual now exceeds 1000 pages. o Hybrids of point process models. o Five new datasets o Second order composite likelihood method for kppm. o Inhomogeneous F, G and J functions. o Delaunay graph distance o Fixed serious bug in 'lppm' for marked patterns. o bug fix in some calculations for Geyer model o Improvements to linear networks code o Pixel images can now be displayed with a logarithmic colour map. 
o spatstat now formally 'Depends' on the R core package 'grDevices' o miscellaneous improvements and bug fixes NEW DATASETS o clmfires Forest fires in Castilla-La Mancha o gordon People sitting on the grass in Gordon Square, London o hyytiala Mixed forest in Hyytiala, Finland (marked by species) o paracou Kimboto trees in Paracou, French Guiana (marked as adult/juvenile) o waka Trees in Waka national park (marked with diameters) NEW FUNCTIONS o Hybrid The hybrid of several point process interactions [Joint research with Jorge Mateu and Andrew Bevan] o is.hybrid Recognise a hybrid interaction or hybrid point process model. o Finhom, Ginhom, Jinhom Inhomogeneous versions of the F, G and J functions [Thanks to Marie-Colette van Lieshout] o delaunay.distance Graph distance in the Delaunay triangulation. o distcdf Cumulative distribution function of the distance between two independent random points in a given window. o bw.frac Bandwidth selection based on window geometry o shortside.owin, sidelengths.owin Side lengths of (enclosing rectangle of) a window SIGNIFICANT USER-VISIBLE CHANGES o ppm Can now fit models with 'hybrid' interactions [Joint research with Jorge Mateu and Andrew Bevan] o kppm Now has the option of fitting models using Guan's (2006) second order composite likelihood. o envelope.lpp Now handles multitype point patterns. o envelope.envelope New argument 'transform' allows the user to apply a transformation to previously-computed summary functions. o runifpointOnLines, rpoisppOnLines, runiflpp, rpoislpp Can now generate multitype point patterns. o rmhmodel, rmh, simulate.ppm Now handle point process models with 'hybrid' interactions. o kppm Accelerated, and more reliable, due to better choice of starting values in the optimisation procedure. o kppm The internal format of kppm objects has changed. o minimum contrast estimation Error messages from the optimising function 'optim' are now trapped and handled. 
o rhohat This command is now generic, with methods for ppp, quad, and ppm. o raster.x, raster.y, raster.xy These functions have a new argument 'drop' o summary.ppm Improved behaviour when the model covariates are a data frame. o progressreport Output improved. o second order summary functions (Kest, Lest, Kinhom, pcf.ppp, Kdot, Kcross, Ldot etc etc) These functions now accept correction="translation" as an alternative to correction = "translate", for consistency. o plot.im New argument 'log' allows colour map to be equally spaced on a log scale. o as.owin.ppm, as.owin.kppm New argument 'from' allows the user to extract the spatial window of the point data (from="points") or the covariate images (from="covariates") o dclf.test, mad.test The rule for handling tied values of the test statistic has been changed. The tied values are now randomly ordered to obtain a randomised integer rank. o with.fv New argument 'enclos' allows evaluation in other environments BUG FIXES o lppm For multitype patterns, the fitted model was completely incorrect due to an error in constructing the quadrature scheme. Fixed. o Geyer For point process models with the 'Geyer' interaction, vcov.ppm() and suffstat() sometimes gave incorrect answers. [Spotted by Ege Rubak.] Fixed. o as.im.im Did not correctly handle factor-valued images if one of the arguments 'dimyx', 'eps', 'xy' was given. Fixed. o envelope.lppm Crashed if the model was multitype. Fixed. o lpp Did not handle empty patterns. Fixed. o density.ppp If 'sigma' was a bandwidth selection function such as bw.scott() which returned a numeric vector of length 2, a warning message was issued, and the smoothing bandwidth was erroneously taken to be the first element of the vector. Fixed. o Fest, Jcross, Jdot, Jmulti If these functions were computed using correction = 'rs', plotting them would sometimes give an error, with the message "no finite x/y limits". Fixed. 
o pcfmodel.kppm For models with clusters="VarGamma" the value of the pcf at distance r=0 was given as NaN. Fixed. o vcov.ppm Result was incorrect in rare cases, due to numerical rounding effects. Fixed. o rLGCP, simulate.kppm For models fitted to point patterns in an irregular window, simulation sometimes failed, with a message that the image 'mu' did not cover the simulation window. (Spotted by George Limitsios.) Fixed. o rLGCP, simulate.kppm Crashed sometimes with an error about unequal x and y steps (from 'GaussRF'). Fixed. CHANGES IN spatstat VERSION 1.29-0 OVERVIEW o We thank Colin Beale, Li Haitao, Frederic Lavancier, Erika Mudrak and Ege Rubak for contributions. o random sequential packing o Allard-Fraley estimator o method for pooling several quadrat tests o better control over dummy points in ppm o more support for data on a linear network o nearest neighbour map o changes to subsetting of images o improvements and bug fixes NEW FUNCTIONS o clusterset Allard-Fraley estimator of high-density features in a point pattern o pool.quadrattest Pool several quadrat tests o nnfun Nearest-neighbour map of a point pattern or a line segment pattern o as.ppm Converts various kinds of objects to ppm o crossdist.lpp Shortest-path distances between pairs of points in a linear network o nobs.lppm Method for 'nobs' for lppm objects. o as.linim Converts various kinds of objects to 'linim' o model.images.slrm Method for model.images for slrm objects o rotate.im Rotate a pixel image SIGNIFICANT USER-VISIBLE CHANGES o "[.im" and "[<-.im" New argument 'j' allows any type of matrix indexing to be used. o "[.im" Default behaviour changed in the case of a rectangular subset. New argument 'rescue' can be set to TRUE to reinstate previous behaviour. o rSSI Performs 'Random Sequential Packing' if n=Inf. o ppm New argument 'eps' determines the spacing between dummy points. (also works for related functions quadscheme, default.dummy, ...) 
o fitted.ppm, predict.ppm Argument 'new.coef' specifies a vector of parameter values to replace the fitted coefficients of the model. o lppm Stepwise model selection using step() now works for lppm objects. o vcov.slrm Can now calculate correlation matrix or Fisher information matrix as well as variance-covariance matrix. o eval.fv Improved behaviour when plotted. o "[.fv" Improved behaviour when plotted. o lohboot When the result is plotted, the confidence limits are now shaded. o lohboot New argument 'global' allows global (simultaneous) confidence bands instead of pointwise confidence intervals. o vcov.ppm Accelerated by 30% in some cases. o quadrat.test.splitppp The result is now a single object of class 'quadrattest' o progressreport Improved output (also affects many functions which print progress reports) o Full redwood data (redwoodfull) Plot function redwoodfull.extra$plotit has been slightly improved. o nncross This function is now generic, with methods for 'ppp' and 'default'. o distfun The internal format of objects of class 'distfun' has been changed. o duplicated.ppp, unique.ppp New argument 'rule' allows behaviour to be consistent with package 'deldir' BUG FIXES o bdist.tiles Values were incorrect in some cases due to numerical error. (Spotted by Erika Mudrak.) Fixed. o vcov.ppm, suffstat These functions sometimes gave incorrect values for marked point process models. Fixed. o simulate.ppm, predict.ppm Did not correctly handle the 'window' argument. (Spotted by Li Haitao). Fixed. o smooth.ppp, markmean If sigma was very small, strange values were produced, due to numerical underflow. (Spotted by Colin Beale). Fixed. o MultiHard, MultiStrauss, MultiStraussHard Crashed if the data point pattern was empty. (Spotted by Ege Rubak). Fixed. o vcov.ppm Crashed sporadically, with multitype interactions. (Spotted by Ege Rubak). Fixed. 
o rStrauss, rHardcore, rStraussHard, rDiggleGratton, rDGS If the simulated pattern was empty, these functions would either crash, or return a pattern containing 1 point. (Spotted by Frederic Lavancier). Fixed. o model.matrix.slrm Crashed if the model was fitted using split pixels. Fixed. o residuals.ppm, diagnose.ppm Did not always correctly handle models that included offset terms. Fixed. o project.ppm When a model was projected by project.ppm or by ppm(project=TRUE), the edge corrections used in the projected models were sometimes different from the edge corrections in the original model, so that the projected and unprojected models were not comparable. Fixed. o plot.listof, plot.splitppp Crashed sometimes due to a scoping problem. Fixed. o dclf.test, mad.test Crashed if any of the function values were infinite or NaN. Fixed. o psstA Default plot did not show the horizontal line at y=0 corresponding to a perfect fit. Fixed. o vcov.ppm names attribute was spelt incorrectly in some cases. Fixed. CHANGES IN spatstat VERSION 1.28-2 OVERVIEW o We thank Thomas Bendtsen, Ya-Mei Chang, Daniel Esser, Robert John-Chandran, Ege Rubak and Yong Song for contributions. o New code for Partial Residual Plots and Added Variable Plots. o maximum profile pseudolikelihood computations vastly accelerated. o New dataset: cells in gastric mucosa o now possible to capture every k-th state of Metropolis-Hastings algorithm. o size of 'ppm' objects reduced. o scope of 'intensity.ppm' extended. o quadrat.test can now perform Monte Carlo tests and one/two-sided tests o improvements to 'plot.fv' o improvement to 'rescale' o some datasets reorganised. o numerous bug fixes NEW DATASET o mucosa Cells in gastric mucosa Kindly contributed by Dr Thomas Bendtsen NEW FUNCTIONS o parres Partial residual plots for spatial point process models. A diagnostic for the form of a covariate effect. o addvar Added variable plots for spatial point process models. 
A diagnostic for the existence of a covariate effect. SIGNIFICANT USER-VISIBLE CHANGES o profilepl Accelerated (typically by a factor of 5). o rmh, rmhcontrol It is now possible to save every k-th iteration of the Metropolis-Hastings algorithm. The arguments 'nsave' and 'nburn' may be given to rmh or to rmhcontrol. They specify that the point pattern will be saved every 'nsave' iterations, after an initial burn-in of 'nburn' iterations. o simulate.ppm New argument 'singlerun' determines whether the simulated patterns are generated using independent runs of the Metropolis-Hastings algorithm or are obtained by performing one long run of the algorithm and saving every k-th iteration. o exactMPLEstrauss New argument 'project' determines whether the parameter gamma is constrained to lie in [0,1]. o intensity.ppm Now works for stationary point process models with the interactions DiggleGratton, DiggleGatesStibbard, Fiksel, PairPiece and Softcore. o plot.fv Improved algorithm for avoiding collisions between graphics and legend. o plot.fv New argument 'log' allows plotting on logarithmic axes. o envelope Can now calculate an estimate of the true significance level of the "wrong" test (which declares the observed summary function to be significant if it lies outside the pointwise critical boundary anywhere). Controlled by new argument 'do.pwrong'. o quadrat.test New argument 'alternative' allows choice of alternative hypothesis and returns one-sided or two-sided p-values as appropriate. o quadrat.test Can now perform Monte Carlo test as well (for use in small samples where the chi^2 approximation is inaccurate) o Softcore Improved numerical stability. New argument 'sigma0' for manual control over rescaling. o rescale If scale argument 's' is missing, then the data are rescaled to native units. For example if the current unit is 0.1 metres, coordinates will be re-expressed in metres. 
o psst Extra argument 'verbose=TRUE' o is.subset.owin Accelerated for polygonal windows o rmh.default 'track' is no longer a formal argument of rmh.default; it is now a parameter of rmhcontrol. However there is no change in usage: the argument 'track' can still be given to rmh.default. o clf.test Has been renamed 'dclf.test' to give proper attribution to Peter Diggle. o betacells This dataset has been restructured. The vector of cell profile areas, formerly given by betacells.extra$area, has now been included as a column of marks in the point pattern 'betacells'. o ants The function ants.extra$plot() has been renamed plotit() for conformity with other datasets. o redwoodfull The function redwoodfull.extra$plot() has been renamed plotit() for conformity with other datasets. o nbfires For conformity with other datasets, there is now an object nbfires.extra BUG FIXES o ripras Expansion factor was incorrect in the rectangular case. Fixed. o Triplets Crashed sometimes with error "dim(X) must have positive length". Fixed. o affine.im Crashed in the case of a diagonal transformation matrix! Spotted by Ege Rubak. Fixed. o envelope.envelope Ignored the argument 'global'. Fixed. o MultiStraussHard The printed output showed the hardcore radii as NULL. Spotted by Ege Rubak. Fixed. o "[.psp" Crashed if the data were generated by rpoisline(). Spotted by Marcelino de la Cruz. Fixed. o plot.linim If style="colour", the main title was always "x". Fixed. o plot.ppx Setting add=TRUE did not prevent the domain being plotted. Fixed. o rmh Crashed if x.start was an empty point pattern. Spotted by Ege Rubak. Fixed. o as.ppp.data.frame Crashed if any points lay outside the window. Spotted by Ege Rubak. Fixed. o Ripley isotropic edge correction Divide-by-zero error in rare cases. Spotted by Daniel Esser. Fixed. o summary functions For many of the summary functions (e.g. Kest, pcf), the result of saving the object to disc was an enormous file. Spotted by Robert John-Chandran. Fixed. 
o pcf.fv Default plot was wrongly coloured. Fixed. CHANGES IN spatstat VERSION 1.28-1 OVERVIEW o We thank Ege Rubak, Gopal Nair, Jens Oehlschlaegel and Mike Zamboni for contributions. o New approximation to the intensity of a fitted Gibbs model. o Minor improvements and bug fixes o spatstat now 'Suggests' the package 'gsl' NEW FUNCTIONS o intensity, intensity.ppp, intensity.ppm Calculate the intensity of a dataset or fitted model. Includes new approximation to the intensity of a fitted Gibbs model o LambertW Lambert's W-function SIGNIFICANT USER-VISIBLE CHANGES o envelope Improved plot labels for envelopes that were generated using the 'transform' argument. o plot.fv Improved algorithm for collision detection. o plot.im Now returns the colour map used. o plot.listof, plot.splitppp Slight change to handling of plot.begin and plot.end o square Now accepts vectors of length 2 o plot.fii Increased resolution of the plot obtained from plot(fitin(ppm(...))) o image.listof If equal.ribbon=TRUE, the colour ribbon will no longer be displayed repeatedly for each panel, but will now be plotted only once, at the right hand side of the plot array. BUG FIXES o vcov.ppm Results were sometimes incorrect for a Gibbs model with non-trivial trend. Spotted by Ege Rubak. Fixed. o nncross In rare cases the results could be slightly incorrect. Spotted by Jens Oehlschlaegel. Fixed. o plot.fv When add=TRUE, the x limits were sometimes truncated. Spotted by Mike Zamboni. Fixed. o plot.im Labels for the tick marks on the colour ribbon were sometimes ridiculous, e.g. "2.00000001". Fixed. CHANGES IN spatstat VERSION 1.28-0 OVERVIEW o We thank Farzaneh Safavimanesh, Andrew Hardegen and Tom Lawrence for contributions. o Improvements to 3D summary functions. o A multidimensional point pattern (ppx) can now have 'local' coordinates as well as spatial and temporal coordinates and marks. o Changed format for point patterns on a linear network (lpp). Changes are backward compatible. 
Many computations run faster. o More support for fitted cluster models (kppm). o split method for multidimensional point patterns (ppx) and point patterns on a linear network (lpp). o Fixed bug causing errors in plot.im o Miscellaneous improvements and bug fixes NEW FUNCTIONS o exactMPLEstrauss Fits the stationary Strauss point process model using an exact maximum pseudolikelihood technique. This is mainly intended for technical investigation of algorithms. o split.ppx Method for 'split' for multidimensional point patterns (class 'ppx'). This also works for point patterns on a linear network (class 'lpp'). o model.images This function is now generic, with methods for classes ppm, kppm, lppm o model.frame, model.matrix These generic functions now have methods for classes kppm, lppm o as.owin.kppm, as.owin.lppm New methods for 'as.owin' for objects of class kppm, lppm o as.linnet.lppm Extracts the linear network in which a point process model was fitted. SIGNIFICANT USER-VISIBLE CHANGES o class 'ppx' An object of class 'ppx' may now include 'local' coordinates as well as 'spatial' and 'temporal' coordinates, and marks. o ppx Arguments have changed. o class 'lpp' The internal format of lpp objects has been extended (but is backward-compatible). Many computations run faster. To convert an object to the new format: X <- lpp(as.ppp(X), as.linnet(X)). o F3est Calculation of theoretical Poisson curve ('theo') has changed, and is now controlled by the argument 'sphere'. o rmh, rmhstart The initial state ('start') can now be missing or null. o im, as.im The pixel coordinates in an image object are now generated more accurately. This avoids a numerical error in plot.im. o eval.fv, eval.fasp Evaluation is now applied only to columns that contain values of the function itself (rather than values of the derivative, hazard rate, etc). This is controlled by the new argument 'dotonly'. o spatstat.options New option 'nvoxel' o quad.ppm Now accepts kppm objects. 
o str This generic function (for inspecting the internal structure of an object) now produces sensible output for objects of class 'hyperframe', 'ppx', 'lpp' o ppx, coords.ppx, coords<-.ppx The arguments to these functions have changed. o lgcp.estK, Kmodel Computation can be greatly accelerated by setting spatstat.options(fastK.lgcp=TRUE). o G3est Computation accelerated. o envelope Computation slightly accelerated. o spatstat.options New option 'fastK.lgcp' BUG FIXES o nndist.psp Caused an error if length(k) > 1. Fixed. o plot.im Sometimes reported an error "useRaster=TRUE can only be used with a regular grid." This was due to numerical rounding effects on the coordinates of a pixel image. Fixed. o plot.fv If a formula was used to specify the plot, the names of variables in the formula were sometimes incorrectly matched to *functions*. Spotted by Farzaneh Safavimanesh. Fixed. o F3est Took a very long time if the containing box was very flat, due to the default value of 'vside'. Fixed. o rmh, rmhmodel An erroneous warning about 'outdated format of rmhmodel object' sometimes occurred. Fixed. o marks<-.ppx Names of result were incorrect. Fixed. o hyperframe class Various minor bug fixes. CHANGES IN spatstat VERSION 1.27-0 OVERVIEW o Variance estimates are now available for all Gibbs point process models. o Cressie-Loosmore-Ford test implemented o plot.fv now avoids collisions between the legend and the graphics. o Extension to predict.ppm o Improvements to envelopes and multitype summary functions. o Line transects of a pixel image. o Changes to defaults in Metropolis-Hastings simulations. o More geometrical operations o Bug fixes. o We thank Aruna Jammalamadaka for contributions. NEW FUNCTIONS o clf.test Perform the Cressie (1991)/ Loosmore and Ford (2006) test of CSR (or another model) o mad.test Perform the Maximum Absolute Deviation test of CSR (or another model). o convolve.im Compute convolution of pixel images. 
o Kmulti.inhom Counterpart of 'Kmulti' for spatially-varying intensity. o rmhexpand Specify a simulation window, or a rule for expanding the simulation window, in Metropolis-Hastings simulation (rmh) o transect.im Extract pixel values along a line transect. o affine.im Apply an affine transformation to a pixel image. o scalardilate Perform scalar dilation of a geometrical object relative to a specified origin. o reflect Reflect a geometrical object through the origin. o "[.lpp", "[.ppx" Subset operators for the classes "lpp" (point pattern on linear network) and "ppx" (multidimensional space-time point pattern). o is.rectangle, is.polygonal, is.mask Determine whether a window w is a rectangle, a domain with polygonal boundaries, or a binary pixel mask. o has.offset Determines whether a fitted model object (of any kind) has an offset. SIGNIFICANT USER-VISIBLE CHANGES o predict.ppm This function can now calculate the conditional intensity of a model relative to any point pattern X (not just the original data pattern). o vcov.ppm This function now handles all Gibbs point process models. o plot.fv Collisions between the legend box and the graphics are now detected and avoided. o rmh.ppm, rmh.default, simulate.ppm, qqplot.ppm, envelope.ppm These functions now have slightly different default behaviour because of changes to the handling of arguments to 'rmhcontrol'. o rmhcontrol The default values of the parameters 'periodic' and 'expand' have changed. o rmhcontrol The parameter 'expand' can now be in any format acceptable to rmhexpand(). o rmh.ppm, rmh.default, simulate.ppm Any 'rmhcontrol' parameter can now be given directly as an argument to rmh.ppm, rmh.default or simulate.ppm. o Kmulti, Gmulti, Jmulti The arguments I, J can now be any kind of subset index or can be functions that yield a subset index. 
o envelope.envelope In envelope(E, fun=NULL) if E does not contain simulated summary functions, but does contain simulated point patterns, then 'fun' now defaults to Kest, instead of flagging an error. o print.ppp, summary.ppp If the point pattern x was generated by Metropolis-Hastings simulation using 'rmh', then print(x) and summary(x) show information about the simulation parameters. o print.ppm Standard errors for the parameter estimates, and confidence intervals for the parameters, can now be printed for all Gibbs models (but are printed only for Poisson models by default). o eval.im Images with incompatible dimensions are now resampled to make them compatible (if harmonize=TRUE). o spatstat.options New option 'print.ppm.SE' controls whether standard errors and confidence intervals are printed for all Gibbs models, for Poisson models only, or are never printed. o inside.owin Now accepts the form list(x,y) for the first argument. o image.listof New argument 'equal.ribbon' allows several images to be plotted with the same colour map. o is.subset.owin Improved accuracy in marginal cases. o expand.owin Functionality extended to handle all types of expansion rule. o default.rmhcontrol, default.expand These functions now work with models of class 'rmhmodel' as well as 'ppm' o print.rmhcontrol Output improved. BUG FIXES o linearK, linearKinhom If any data points were located exactly at a vertex of the linear network, the weights for Ang's correction were incorrect, due to numerical error. This sometimes produced infinite or NA values of the linear K function. Fixed. o predict.ppm In some cases, predict.ppm(type="cif") generated a spurious warning that "number of rows of result is not a multiple of vector length." Fixed. o crossing.psp Results were sometimes incorrect due to numerical rounding error associated with GCC bug #323. Fixed. 
o MultiHard, MultiStrauss, MultiStraussHard If the mark values contained non-alphanumeric characters, the names of the interaction coefficients in coef(ppm(...)) were sometimes garbled. Fixed. o profilepl For edge corrections other than the border correction, an error message about 'rbord' would sometimes occur. Fixed. o is.marked, is.multitype These functions gave the wrong answer for 'lpp' objects. Fixed. o marks<-.lpp, marks<-.ppx Format of result was garbled if new columns of marks were added. Fixed. o reach.rmhmodel Gave the wrong answer for Geyer and BadGey models. Fixed. o envelope.envelope Ignored the argument 'savefuns'. Fixed. o BadGey Sometimes wrongly asserted that the parameter 'sat' was invalid. Occurred only in ppm(project=TRUE). Fixed. CHANGES IN spatstat VERSION 1.26-1 OVERVIEW o Variance-covariance matrix for Gibbs point process models. o Bootstrap confidence bands for pair correlation function and K function. o Bug fix in scan test. o Area-interaction model accelerated. o We thank Jean-Francois Coeurjolly and Ege Rubak for contributions. NEW FUNCTIONS o lohboot Computes bootstrap confidence bands for pair correlation function and K function using Loh's (2008) mark bootstrap. SIGNIFICANT USER-VISIBLE CHANGES o vcov.ppm Now works for all Gibbs point process models, thanks to new code (and theory) from Jean-Francois Coeurjolly and Ege Rubak o AreaInter Computations related to the area-interaction point process (ppm, predict.ppm, residuals.ppm, diagnose.ppm, qqplot.ppm) have been accelerated. BUG FIXES o scan.test Results were sometimes incorrect due to numerical instability (a 'Gibbs phenomenon'). Fixed. CHANGES IN spatstat VERSION 1.26-0 OVERVIEW o We thank Jens Oehlschlaegel for contributions. o Further substantial acceleration of spatstat functions. o Workaround for bug in RandomFields package. o Numerous modifications to internal code. 
NEW FUNCTIONS o RandomFieldsSafe There is a bug in the package 'RandomFields' (version <= 2.0.54) which causes a crash to occur, in the development version of R but not in R 2.15.0. To avoid crashing spatstat, we have written the temporary, undocumented function RandomFieldsSafe() which returns TRUE if it is safe to use the RandomFields package. Examples in the spatstat help files for kppm, lgcp.estK, lgcp.estpcf and rLGCP are only executed if RandomFieldsSafe() returns TRUE. SIGNIFICANT USER-VISIBLE CHANGES o Many functions Many spatstat functions now run faster, and will handle larger datasets, thanks to improvements in the internal code, following suggestions from Jens Oehlschlaegel. o Many functions The response to an 'Interrupt' signal is slightly slower. CHANGES IN spatstat VERSION 1.25-5 OVERVIEW o We thank Ya-Mei Chang, Jens Oehlschlaegel and Yong Song for contributions. o Extended functionality of 'rhohat' to local likelihood smoothing and bivariate smoothing. o Nearest neighbour distance computations accelerated. o spatstat now 'Suggests:' the package 'locfit' NEW FUNCTIONS o rho2hat Bivariate extension of 'rhohat' for estimating spatial residual risk, or intensity as a function of two covariates. SIGNIFICANT USER-VISIBLE CHANGES o rhohat Estimation can now be performed using local likelihood fitting with the 'locfit' package, or using kernel smoothing. o nncross Substantially accelerated. New arguments added to control the return value and the sorting of data. BUG FIXES o plot.msr Crashed if the argument 'box' was given. Fixed. CHANGES IN spatstat VERSION 1.25-4 OVERVIEW o We thank Jonathan Lee and Sergiy Protsiv for contributions. 
o Improvements and bug fixes to K function for very large datasets NEW FUNCTIONS o rStraussHard Perfect simulation for Strauss-hardcore process (with gamma <= 1) SIGNIFICANT USER-VISIBLE CHANGES o plot.im The colour ribbon can now be placed left, right, top or bottom using new argument 'ribside' o profilepl Does not generate warnings when some of the candidate models have zero likelihood - for example when fitting model with a hard core. o Kest Now includes fast algorithm for 'correction="none"' which will handle patterns containing millions of points. BUG FIXES o Kest, Lest Gave incorrect values in very large datasets, due to numerical overflow. `Very large' typically means about 1 million points in a random pattern, or 100,000 points in a tightly clustered pattern. [Overflow cannot occur unless there are at least 46,341 points.] [Spotted by Sergiy Protsiv.] Fixed. o Kest, Lest Ignored 'ratio=TRUE' if the argument 'domain' was given. [Spotted by Jonathan Lee.] Fixed. o rjitter Output was sometimes incorrect. [Spotted by Sergiy Protsiv.] Fixed. CHANGES IN spatstat VERSION 1.25-3 OVERVIEW o We thank Daniel Esser for contributions. o Improved support for fitted point process models. o Bug fixes. NEW FUNCTIONS o simulate.slrm Method for 'simulate' for spatial logistic regression models. o labels.ppm, labels.kppm, labels.slrm Methods for 'labels' for fitted point process models. o commonGrid Determine a common spatial domain and pixel resolution for several pixel images and/or binary masks SIGNIFICANT USER-VISIBLE CHANGES o effectfun Now has argument 'se.fit' allowing calculation of standard errors and confidence intervals. o [.msr Now handles character-valued indices. o print.summary.ppm Output gives a more precise description of the fitting method. o ppm, kppm, slrm Confidence intervals for the fitted trend parameters can now be obtained using 'confint' o predict.slrm New argument 'window' o union.owin Now handles a single argument: union.owin(A) returns A. 
BUG FIXES o selfcrossing.psp y coordinate values were incorrect. [Spotted by Daniel Esser.] Fixed. o as.im.owin Did not handle a binary mask with a 1 x 1 pixel array. Fixed. o predict.slrm Results of predict(object, newdata) were incorrect if the spatial domain of 'newdata' was larger than the original domain. Fixed. o ppm If the model was the uniform Poisson process, the argument 'rbord' was ignored. Fixed. o image subset assignment "[<-.im" Generated an error if the indexing argument 'i' was a point pattern containing zero points. Fixed. o hyperframe subset assignment "[<-.hyperframe" Did not correctly handle the case where a single column of the hyperframe was to be changed. Fixed. o help(bw.relrisk), help(rmh.ppm), help(plot.plotppm) These help files had the side-effect of changing some options in spatstat.options. Fixed. CHANGES IN spatstat VERSION 1.25-2 OVERVIEW o We thank Abdollah Jalilian and Thierry Onkelinx for contributions. o Very Important Bug fixes. o Improved mechanism for handling 'invalid' point processes NEW FUNCTIONS o as.matrix.owin Converts a window to a logical matrix. SIGNIFICANT USER-VISIBLE CHANGES o project.ppm Improved algorithm. Now handles terms in the trend formula as well as the interaction. The projected point process is now obtained by re-fitting the model, and is guaranteed to be the maximum pseudolikelihood fit. o plot.im Now handles many arguments recognised by plot.default such as 'cex.main'. Also handles argument 'box'. New argument 'ribargs' contains parameters controlling the ribbon plot only. o spatstat.options New option 'project.fast' allows a faster shortcut for project.ppm o spatstat.options New options 'rmh.p', 'rmh.q', 'rmh.nrep' determine the default values of the parameters p, q and nrep of the Metropolis-Hastings algorithm. See rmhcontrol o ppm Slightly accelerated. BUG FIXES o nncross, distfun, AreaInter Results of nncross were possibly incorrect when X and Y did not have the same window. 
This bug affected values of 'distfun' and may also have affected ppm objects with interaction 'AreaInter'. [Spotted by Thierry Onkelinx] Bug introduced in spatstat 1.9-4 (June 2006). Fixed. o rCauchy Simulations were incorrect in the sense that the value of 'omega' was inadvertently doubled (i.e. omega was incorrectly replaced by 2 * omega). Bug introduced in spatstat 1.25-0. Fixed. o plot.im White lines were present in the image display, on some graphics devices, due to changes in R 2.14. Fixed. o update.ppm The result of 'update(object, formula)' sometimes contained errors in the internal format. Bug introduced in spatstat 1.25-0. Fixed. o example(AreaInter), example(bw.smoothppp), example(Kest.fft), example(plot.owin), example(predict.ppm), example(simulate.ppm) Executing these examples had the side-effect of changing some of the parameters in spatstat.options. Fixed. CHANGES IN spatstat VERSION 1.25-1 OVERVIEW o We thank Neba Funwi-Gabga and Jorge Mateu for contributions. o New dataset of gorilla nest sites o New functions for perfect simulation o Bug fix for rare crashes in rStrauss o Code for ensuring a fitted point process model is a valid point process NEW DATASET o gorillas Gorilla nest sites in a National Park in Cameroon. Generously contributed by Neba Funwi-Gabga NEW FUNCTIONS o rDiggleGratton, rDGS, rHardcore Perfect simulation for the Diggle-Gratton process, Diggle-Gates-Stibbard process, and Hardcore process. o bw.scott Scott's rule of thumb for bandwidth selection in multidimensional smoothing o valid.ppm Checks whether a fitted point process model is a valid point process o project.ppm Forces a fitted point process model to be a valid point process SIGNIFICANT USER-VISIBLE CHANGES o ppm New argument 'project' determines whether the fitted model is forced to be a valid point process o linnet Substantially accelerated. o rStrauss Slightly accelerated. o summary.lpp Now prints the units of length. 
BUG FIXES o rStrauss Crashed rarely (once every 10 000 realisations) with a memory segmentation fault. Fixed. CHANGES IN spatstat VERSION 1.25-0 OVERVIEW o Leverage and influence for point process models o New cluster models (support for model-fitting and simulation). o Fit irregular parameters in trend of point process model o Third order summary statistic. o Improvements to speed and robustness of code. o spatstat now depends on R 2.14 o We thank Abdollah Jalilian and Rasmus Waagepetersen for contributions. NEW FUNCTIONS o leverage.ppm, influence.ppm, dfbetas.ppm Leverage and influence for point process models o ippm Experimental extension to 'ppm' which fits irregular parameters in trend by Fisher scoring algorithm. o Tstat Third order summary statistic for point patterns based on counting triangles. o rCauchy, rVarGamma simulation of a Neyman-Scott process with Cauchy clusters or Variance Gamma (Bessel) clusters. Contributed by Abdollah Jalilian. o rPoissonCluster simulation of a general Poisson cluster process o model.covariates Identify the covariates involved in a model (lm, glm, ppm etc) o as.im.distfun Converts a 'distfun' to a pixel image. o cauchy.estK, cauchy.estpcf, vargamma.estK, vargamma.estpcf Low-level model-fitting functions for the Neyman-Scott process with Cauchy or Variance-Gamma cluster kernel. Contributed by Abdollah Jalilian. SIGNIFICANT USER-VISIBLE CHANGES o kppm Now accepts clusters="Cauchy" or clusters="VarGamma" for the Neyman-Scott process with Cauchy or Variance-Gamma cluster kernel. Code contributed by Abdollah Jalilian. o rNeymanScott Argument 'rcluster' may now take a different format. o psst Argument 'funcorrection' changed to 'funargs' allowing greater flexibility. o plot.fv, plot.envelope New argument 'limitsonly' allows calculation of a common x,y scale for several plots. o overall speed spatstat is now byte-compiled and runs slightly faster. 
o user interrupt Long calculations in spatstat now respond to the Interrupt/Stop signal. o update.ppm Now runs faster and uses much less memory, when the update only affects the model formula (trend formula). o rNeymanScott, rThomas, rMatClust Accelerated thanks to Rasmus Waagepetersen. o multitype data and models Second order multitype statistics (such as Kcross, pcfcross) and multitype interaction models (such as MultiStrauss) now run faster, by a further 5%. BUG FIXES o distfun Some manipulations involving 'distfun' objects failed if the original data X in distfun(X) did not have a rectangular window. Fixed. CHANGES IN spatstat VERSION 1.24-2 OVERVIEW o Geyer's triplet interaction o more functionality for replicated point patterns o changed default for simulation window in point process simulation o changed default for edge correction in Kcom, Gcom o data in spatstat is now lazy-loaded o bug fixes NEW FUNCTIONS o Triplets Geyer's triplet interaction, for point process models o coef.summary.ppm New method coef.summary.ppm You can now type 'coef(summary(fit))' to extract a table of the fitted coefficients of the point process model 'fit' SIGNIFICANT USER-VISIBLE CHANGES o data in spatstat are now lazy-loaded so you don't have to type data(amacrine), etc. o rmh.default, rmh.ppm, simulate.ppm These now handle the 'triplets' interaction o fryplot Now has arguments 'to' and 'from', allowing selection of a subset of points. o fryplot, frypoints These functions now handle marked point patterns properly. o Kcross, Kdot, Kmulti New argument 'ratio' determines whether the numerator and denominator of the estimate of the multitype K-function will be stored. This enables analysis of replicated point patterns, using 'pool.rat()' to pool the K function estimates. o rmh.ppm, simulate.ppm, default.expand For point process models which have a trend depending only on x and y, the simulation window is now taken to be the same as the original window containing the data (by default). 
That is, `expansion' does not take place, by default. (In previous versions of spatstat the simulation window was larger than the original data window.) o rmh.ppm, simulate.ppm The argument sequence for these functions has changed. New argument 'expand' allows more explicit control over simulation domain. o Kcom, Gcom New argument 'conditional' gives more explicit control over choice of edge correction in compensator. Simplified defaults for edge correction. o Kinhom Improved plot labels. o profilepl Printed output improved. BUG FIXES o Lest The variance approximations (Lotwick-Silverman and Ripley) obtained with var.approx=TRUE, were incorrect for Lest (although they were correct for Kest) due to a coding error. Fixed. o simulate.ppm Ignored the argument 'control' in some cases. Fixed. o pcf and its relatives (pcfinhom, pcfcross.inhom, pcfdot.inhom) Sometimes gave a warning about 'extra arguments ignored'. Fixed. CHANGES IN spatstat VERSION 1.24-1 OVERVIEW o Spatial Scan Test o Functionality for replicated point patterns o Bug fixes NEW FUNCTIONS o scan.test Spatial scan test of clustering o rat New class of 'ratio objects' o pool.rat New method for 'pool'. Combines K function estimates for replicated point patterns (etc) by computing ratio-of-sums o unnormdensity Weighted kernel density with weights that do not sum to 1 and may be negative. o compatible New generic function with methods for 'fv', 'im', 'fasp' and 'units' SIGNIFICANT USER-VISIBLE CHANGES o Kest New argument 'ratio' determines whether the numerator and denominator of the estimate of the K-function will be stored. This enables analysis of replicated point patterns, using 'pool.rat()' to pool the K function estimates. o Lest Now handles theoretical variance estimates (using delta method) if var.approx=TRUE o as.mask Argument 'eps' can now be a 2-vector, specifying x and y resolutions. o default.expand Behaviour changed slightly. 
o plot.listof, plot.splitppp, contour.listof, image.listof The arguments 'panel.begin' and 'panel.end' can now be objects such as windows. BUG FIXES o rgbim, hsvim Did not work on images with non-rectangular domains. Fixed. o scaletointerval Did not handle NA's. Fixed. CHANGES IN spatstat VERSION 1.24-0 OVERVIEW o This version was not released publicly. CHANGES IN spatstat VERSION 1.23-6 OVERVIEW o Spatial covariance functions of windows and pixel images. o Area-interaction models can now be fitted in non-rectangular windows o Bug fix for envelope of inhomogeneous Poisson process o Bug fix for raster conversion o New vignette on 'Getting Started with Spatstat' o Code accelerated. NEW FUNCTIONS o imcov Spatial covariance function of pixel image or spatial cross-covariance function of two pixel images o harmonise.im Make several pixel images compatible by converting them to the same pixel grid o contour.listof, image.listof Methods for contour() and image() for lists of objects o dummify Convert data to numeric values by constructing dummy variables. SIGNIFICANT USER-VISIBLE CHANGES o setcov Can now compute the `cross-covariance' between two regions o AreaInter Point process models with the AreaInter() interaction can now be fitted to point pattern data X in any window. o areaGain, areaLoss These now handle arbitrary windows W. They are now more accurate when r is very small. o Kcom Computation vastly accelerated, for non-rectangular windows. o vignettes New vignette 'Getting Started with the Spatstat Package' o nncorr, nnmean, nnvario These functions now handle data frames of marks. BUG FIXES o envelope.ppm If the model was an inhomogeneous Poisson process, the resulting envelope object was incorrect (the simulations were correct, but the envelopes were calculated assuming the model was CSR). Bug was introduced in spatstat 1.23-5. Fixed. 
o envelope.ppm If the model was an inhomogeneous Poisson process with intensity a function of x and y only, overflow errors sometimes occurred ('insufficient storage' or 'attempting to generate a large number of random points'). Fixed. o as.im.im The result of as.im(X, W) was incorrect if 'W' did not cover 'X'. Fixed. o as.mask The result of as.mask(w, xy) was incorrect if 'xy' did not cover 'w'. Fixed. o plot.fv Legend was incorrectly labelled if 'shade' variables were not included in the plot formula. Fixed. o areaGain, areaLoss Crashed if the radius r was close to zero. Fixed. CHANGES IN spatstat VERSION 1.23-5 OVERVIEW o Bug fix to bandwidth selection. o Functions to pool data from several objects of the same class. o Improvements and bug fixes. o We thank Michael Sumner for contributions. NEW FUNCTIONS o pool Pool data from several objects of the same class o pool.envelope Pool simulated data from several envelope objects and create a new envelope o pool.fasp Pool simulated data from several function arrays and create a new array o envelope.envelope Recalculate an envelope from simulated data using different parameters SIGNIFICANT USER-VISIBLE CHANGES o bw.diggle, bw.relrisk, bw.smoothppp, bw.optim Plot method modified. o model.depends Now also recognises 'offset' terms. BUG FIXES o bw.diggle Bandwidth was too large by a factor of 2. Fixed. o plot.psp Crashed if any marks were NA. Fixed. o pointsOnLines Crashed if any segments had zero length. Ignored argument 'np' in some cases. Fixed. o stieltjes Crashed if M had only a single column of function values. Fixed. CHANGES IN spatstat VERSION 1.23-4 OVERVIEW o Bandwidth selection for density.ppp and smooth.ppp o Layered plots. o Model-handling facilities. o Improvements and bug fixes. NEW FUNCTIONS o bw.diggle Bandwidth selection for density.ppp by mean square error cross-validation. o bw.smoothppp Bandwidth selection for smooth.ppp by least-squares cross-validation. 
o layered, plot.layered A simple mechanism for controlling plots that consist of several successive layers of data. o model.depends Given a fitted model (of any kind), identify which of the covariates is involved in each term of the model. o model.is.additive Determine whether a fitted model (of any kind) is additive, in the sense that each term in the model involves at most one covariate. SIGNIFICANT USER-VISIBLE CHANGES o smooth.ppp Bandwidth 'sigma' is now selected by least-squares cross-validation o bw.relrisk Computation in large datasets accelerated. New arguments 'hmin', 'hmax' control the range of trial values of bandwidth. o Hest, Gfox, Jfox Improved algebraic labels for plot o spatstat.options New parameter 'n.bandwidth' o density.ppp, smooth.ppp Slightly accelerated. o point-in-polygon test Accelerated. BUG FIXES o with.fv Mathematical labels were incorrect in some cases. Fixed. o bw.relrisk Implementation of method="weightedleastsquares" was incorrect and was equivalent to method="leastsquares". Fixed. o smooth.ppp NaN values occurred if the bandwidth was very small. Fixed. CHANGES IN spatstat VERSION 1.23-3 OVERVIEW o Urgent bug fix. BUG FIXES o crossing.psp Crashed occasionally with a message about NA or NaN values. Fixed. o affine.ppp Crashed if the point pattern was empty. Fixed. CHANGES IN spatstat VERSION 1.23-2 OVERVIEW o Bug fixes. o Several functions have been accelerated. o We thank Marcelino de la Cruz and Ben Madin for contributions. NEW FUNCTIONS o sumouter, quadform Evaluate certain quadratic forms. o flipxy Exchange x and y coordinates. SIGNIFICANT USER-VISIBLE CHANGES o vcov.ppm Accelerated. o owin, as.owin Checking the validity of polygons has been accelerated. o crossing.psp, selfcrossing.psp Accelerated. BUG FIXES o split.ppp If drop=TRUE then some of the point patterns had the wrong windows. Spotted by Marcelino de la Cruz. Fixed. o split.ppp Crashed if the tessellation did not cover the point pattern. Fixed. 
o predict.ppm Crashed when type="se" if NA's were present. Spotted by Ben Madin. Fixed. o plot.ppp Incorrectly handled the case where both 'col' and 'cols' were present. Fixed. o polygon geometry The point-in-polygon test gave the wrong answer in some boundary cases. Fixed. CHANGES IN spatstat VERSION 1.23-1 OVERVIEW o Important bug fix to 'localpcf'. o Inverse-distance weighted smoothing. o Inhomogeneous versions of neighbourhood density functions. o Internal repairs and bug fixes. o We thank Mike Kuhn and Ben Madin for contributions. NEW FUNCTIONS o idw Inverse-distance weighted smoothing. o localKinhom, localLinhom, localpcfinhom Inhomogeneous versions of localK, localL, localpcf BUG FIXES o localpcf The columns of the result were in the wrong order. [i.e. pair correlation functions were associated with the wrong points.] Fixed. o delaunay If the union of several Delaunay triangles formed a triangle, this was erroneously included in the result of delaunay(). Fixed. o predict.ppm, plot.ppm Sometimes crashed with a warning about 'subscript out of bounds'. Fixed. o point-in-polygon test Vertices of a polygon were sometimes incorrectly classified as lying outside the polygon. Fixed. o Internal code Numerous tweaks and repairs to satisfy the package checker for the future R version 2.14. CHANGES IN spatstat VERSION 1.23-0 OVERVIEW o point patterns on a linear network: new tools including geometrically-corrected linear K function, pair correlation function, point process models, envelopes o changes to renormalisation of estimates in Kinhom and pcfinhom o new dataset: Chicago street crime o spatstat now 'Suggests:' the package RandomFields o spatstat now has a Namespace o We thank Mike Kuhn, Monia Mahling, Brian Ripley for contributions. NEW DATASET o chicago Street crimes in the University district of Chicago. A point pattern on a linear network. 
NEW FUNCTIONS o envelope.lpp Simulation envelopes for point patterns on a linear network o lineardisc Compute the 'disc' of radius r in a linear network o linearpcf Pair correlation for point pattern on a linear network o linearKinhom, linearpcfinhom Inhomogeneous versions of the K function and pair correlation function for point patterns on a linear network o lppm Fit point process models on a linear network. o anova.lppm Analysis of deviance for point process models on a linear network. o predict.lppm Prediction for point process models on a linear network. o envelope.lppm Simulation envelopes for point process models on a linear network. o linim Pixel image on a linear network o plot.linim Plot a pixel image on a linear network SIGNIFICANT USER-VISIBLE CHANGES o linearK New argument 'correction'. Geometrically-corrected estimation is performed by default (based on forthcoming paper by Ang, Baddeley and Nair) o Kinhom New argument 'normpower' allows different types of renormalisation. o pcfinhom Now performs renormalisation of estimate. Default behaviour changed - estimates are now renormalised by default. BUG FIXES o density.ppp Crashed if argument 'varcov' was given. Fixed. CHANGES IN spatstat VERSION 1.22-4 OVERVIEW o new diagnostics based on score residuals o new dataset o improvements to plotting summary functions o We thank Ege Rubak, Jesper Moller, George Leser, Robert Lamb and Ulf Mehlig for contributions. NEW FUNCTIONS o Gcom, Gres, Kcom, Kres New diagnostics for fitted Gibbs or Poisson point process models based on score residuals. Gcom is the compensator of the G function Gres is the residual of the G function Kcom is the compensator of the K function Kres is the residual of the K function o psst, psstA, psstG New diagnostics for fitted Gibbs or Poisson point process models based on pseudoscore residuals. 
psst is the pseudoscore diagnostic for a general alternative psstA is the pseudoscore diagnostic for an Area-interaction alternative psstG is the pseudoscore diagnostic for a Geyer saturation alternative o compareFit Computes and compares several point process models fitted to the same dataset, using a chosen diagnostic. o as.interact Extracts the interpoint interaction structure (without parameters) from a fitted point process model or similar object. NEW DATASET o flu Spatial point patterns giving the locations of influenza virus proteins on cell membranes. Kindly released by Dr George Leser and Dr Robert Lamb. SIGNIFICANT USER-VISIBLE CHANGES o pixel images and grids The default size of a pixel grid, given by spatstat.options("npixel"), has been changed from 100 to 128. A power of 2 gives faster and more accurate results in many cases. o residuals.ppm New arguments 'coefs' and 'quad' for advanced use (make it possible to compute residuals from a modified version of the fitted model.) o relrisk New argument 'casecontrol' determines whether a bivariate point pattern should be treated as case-control data. o plot.fv Further improvements in mathematical labels. o plot.fv The formula can now include the symbols .x and .y as abbreviation for the function argument and the recommended function value, respectively. o plot.fv New argument 'add' BUG FIXES o multitype summary functions (Kcross, Kdot, Gcross, Gdot, .....) Plotting these functions generated an error if the name of one of the types of points contained spaces, e.g. "Escherichia coli". Fixed. CHANGES IN spatstat VERSION 1.22-3 OVERVIEW o Important bug fix to simulation code o Miscellaneous improvements o spatstat now depends on R 2.13.0 or later o We thank Ege Rubak, Kaspar Stucki, Vadim Shcherbakov, Jesper Moller and Ben Taylor for contributions. NEW FUNCTIONS o is.stationary, is.poisson New generic functions for testing whether a point process model is stationary and/or Poisson. 
Methods for ppm, kppm, slrm etc o raster.xy raster coordinates of a pixel mask o zapsmall.im 'zapsmall' for pixel images SIGNIFICANT USER-VISIBLE CHANGES o density.ppp New argument 'diggle' allows choice of edge correction o rotate.owin, affine.owin These functions now handle binary pixel masks. New argument 'rescue' determines whether rectangles will be preserved BUG FIXES o rmh, simulate.ppm Serious bug - simulation was completely incorrect in the case of a multitype point process with an interaction that does not depend on the marks, such as ppm(betacells, ~marks, Strauss(60)) The calling parameters were garbled. Fixed. o effectfun Crashed if the covariate was a function(x,y). Fixed. o lurking Gave erroneous error messages about 'damaged' models. Fixed. o envelope.ppm Did not recognise when the fitted model was equivalent to CSR. Fixed. o plot.ppx Crashed in some cases. Fixed. CHANGES IN spatstat VERSION 1.22-2 OVERVIEW o Fitting and simulation of log-Gaussian Cox processes with any covariance function o More support for 'kppm' and 'rhohat' objects o K-function for point patterns on a linear network o Metropolis-Hastings algorithm now saves its transition history o Easier control of dummy points in ppm o Convert an 'fv' object to an R function o spatstat now depends on the package 'RandomFields' o We thank Abdollah Jalilian, Shen Guochun, Rasmus Waagepetersen, Ege Rubak and Ang Qi Wei for contributions. NEW FUNCTIONS o linearK Computes the Okabe-Yamada network K-function for a point pattern on a linear network. o pairdist.lpp Shortest-path distances between each pair of points on a linear network. o vcov.kppm Asymptotic variance-covariance matrix for regression parameters in kppm object. 
[Contributed by Abdollah Jalilian and Rasmus Waagepetersen] o rLGCP Simulation of log-Gaussian Cox processes [Contributed by Abdollah Jalilian and Rasmus Waagepetersen] o predict.rhohat Method for 'predict' for objects of class 'rhohat' Computes a pixel image of the predicted intensity. o Kmodel, pcfmodel Generic functions that compute the K-function or pair correlation function of a point process *model*. So far the only methods are for the class 'kppm'. o as.function.fv Converts a function value table (class 'fv') to a function in R o coef.kppm Method for 'coef' for objects of class 'kppm' o unitname, unitname<- These generic functions now have methods for fitted model objects (classes ppm, slrm, kppm, minconfit) and quadrature schemes (quad). o nobs.ppm Method for 'nobs' for class 'ppm'. Returns the number of points in the original data. SIGNIFICANT USER-VISIBLE CHANGES o kppm Can now fit a log-Gaussian Cox process o simulate.kppm Can now simulate a fitted log-Gaussian Cox process o lgcp.estK, lgcp.estpcf These functions previously fitted a log-Gaussian Cox process with exponential covariance. They can now fit a log-Gaussian Cox process with any covariance function implemented by the RandomFields package. o rmh If track=TRUE, the history of transitions of the Metropolis-Hastings algorithm is saved and returned. o ppm New argument 'nd' controls the number of dummy points. o as.fv Now handles objects of class kppm or minconfit. o rhohat If covariate = "x" or "y", the resulting object has the same 'unitname' as the original point pattern data. o rhohat Now has arguments 'eps, 'dimyx' to control pixel resolution. o MultiStrauss, MultiHard, MultiStraussHard Default value of 'types' has been changed to NULL. o data(ants) The auxiliary data 'ants.extra' now includes a function called 'side' determining whether a given location is in the scrub or field region. Can be used as a covariate in ppm, kppm, slrm. 
o print.ppm Now has argument 'what' to allow only selected information to be printed. BUG FIXES o profilepl Crashed in some cases involving multitype interactions. Fixed. o plot.splitppp Behaved incorrectly if 'main' was an expression. Fixed. o effectfun Crashed in trivial cases. Fixed. o kppm, thomas.estpcf, matclust.estpcf, lgcp.estpcf Gave a spurious warning message. Fixed. o step When applied to ppm objects this gave a spurious warning. Fixed. CHANGES IN spatstat VERSION 1.22-1 OVERVIEW o marked line segment patterns can now be plotted o multitype point process models are now 'self-starting' o new functions to manipulate colour images NEW FUNCTIONS o rgbim, hsvim Specify three colour channels. These functions convert three pixel images with numeric values into a single image whose pixel values are strings representing colours. o scaletointerval Generic utility function to rescale data (including spatial data) to a specified interval SIGNIFICANT USER-VISIBLE CHANGES o plot.im Can now plot images whose pixel values are strings representing colours. New argument 'valuesAreColours' o plot.psp Now handles marked line segment patterns and plots the marks as colours. o MultiHard, MultiStrauss, MultiStraussHard The argument 'types' can now be omitted; it will be inferred from the point pattern data. o rhohat Improved mathematical labels (when the result of rhohat is plotted) o plot.fv Minor improvements in graphics BUG FIXES o several minor bug fixes and improvements to satisfy R-devel CHANGES IN spatstat VERSION 1.22-0 OVERVIEW o support for point patterns on a linear network o 'superimpose' is now generic o improved mathematical labels when plotting functions NEW CLASSES o linnet An object of class 'linnet' represents a linear network, i.e. a connected network of line segments, such as a road network. Methods for this class include plot, print, summary etc. 
o lpp An object of class 'lpp' represents a point pattern on a linear network, such as a record of the locations of road accidents on a road network. Methods for this class include plot, print, summary etc. NEW FUNCTIONS o runiflpp Uniformly distributed random points on a linear network o rpoislpp Poisson point process on a linear network o clickjoin Interactive graphics to create a linear network o superimpose The function 'superimpose' is now generic, with methods for ppp, psp and a default method. o as.ppp.psp New method for as.ppp extracts the endpoints and marks from a line segment pattern NEW DATASETS o simplenet Simple example of a linear network SIGNIFICANT USER-VISIBLE CHANGES o superimposePSP This function is now deprecated in favour of 'superimpose' o superimpose Now handles data frames of marks. o plot.fv Argument 'legendmath' now defaults to TRUE. New argument 'legendargs' gives more control over appearance of legend. Increased default spacing between lines in legend. o eval.fv, with.fv Functions computed using eval.fv or with.fv now have better labels when plotted. o summary functions (Kest, Kest.fft, Kcross, Kdot, Kmulti, Kinhom, Kcross.inhom, Kdot.inhom, Kmulti.inhom, Lest, Lcross, Ldot, pcf, pcfcross, pcfdot, pcfinhom, pcfcross.inhom, pcfdot.inhom, Fest, Gest, Gcross, Gdot, Gmulti, Jest, Jcross, Jdot, Jmulti, Iest, localL, localK, markcorr, markvario, markconnect, Emark, Vmark, allstats, alltypes) Improved plot labels. BUG FIXES o superimpose If the marks components of patterns consisted of character vectors (rather than factors or non-factor numeric vectors) an error was triggered. Fixed. o plot.fv The y axis limits did not always cover the range of values if the argument 'shade' was used. Fixed. o plot.rhohat The y axis label was sometimes incorrect. Fixed. o plot.rhohat If argument 'xlim' was used, a warning was generated from 'rug'. Fixed. CHANGES IN spatstat VERSION 1.21-6 OVERVIEW o A line segment pattern can now have a data frame of marks. 
o Various minor extensions and alterations in behaviour NEW FUNCTIONS o nsegments Number of segments in a line segment pattern SIGNIFICANT USER-VISIBLE CHANGES o psp class A line segment pattern (object of class 'psp') can now have a data frame of marks. o density.ppp New argument 'adjust' makes it easy to adjust the smoothing bandwidth o plot.envelope If the upper envelope is NA but the lower envelope is finite, the upper limit is now treated as +Infinity o msr Argument 'continuous' renamed 'density' BUG FIXES o [.psp In X[W] if X is a line segment pattern and W is a polygonal window, marks were sometimes discarded, leading to an error. Fixed. o [.psp In X[W] if X is a line segment pattern and W is a rectangular window, if the marks of X were factor values, they were converted to integers. Fixed. o superimposePSP If the marks were a factor, they were mistakenly converted to integers. Fixed. o is.marked.ppp Did not generate a fatal error when na.action="fatal" as described in the help file. Fixed. CHANGES IN spatstat VERSION 1.21-5 OVERVIEW o Increased numerical stability. o New 'self-starting' feature of interpoint interactions. SIGNIFICANT USER-VISIBLE CHANGES o ppm Interaction objects may now be 'self-starting' i.e. initial parameter estimates can be computed from the point pattern dataset. So far, only the LennardJones() interaction has a self-starting feature. o LennardJones Increased numerical stability. New (optional) scaling argument 'sigma0'. Interpoint distances are automatically rescaled using 'self-starting' feature. o vcov.ppm New argument 'matrix.action' controls what happens when the matrix is ill-conditioned. Changed name of argument 'gamaction' to 'gam.action' o rmhmodel.ppm Default resolution of trend image has been increased. o is.poisson.ppm Accelerated. 
o ppm, kppm, qqplot.ppm Improved robustness to numerical error CHANGES IN spatstat VERSION 1.21-4 OVERVIEW o Urgent bug fix BUG FIXES o print.summary.ppm exited with an error message, if the model had external covariates. Fixed. CHANGES IN spatstat VERSION 1.21-3 OVERVIEW o Point process model covariates may now depend on additional parameters. o New class of signed measures, for residual analysis. o Miscellaneous improvements and bug fixes. NEW FUNCTIONS o clarkevans.test Classical Clark-Evans test of randomness o msr New class 'msr' of signed measures and vector-valued measures supporting residual analysis. o quadrat.test.quadratcount Method for 'quadrat.test' for objects of class 'quadratcount' (allows a chi-squared test to be performed on quadrat counts rather than recomputing from the original data) o tile.areas Computes areas of tiles in a tessellation (efficiently) SIGNIFICANT USER-VISIBLE CHANGES o ppm The spatial trend can now depend on additional parameters. This is done by allowing spatial covariate functions to have additional parameters: function(x, y, ...) where ... is controlled by the new argument 'covfunargs' to ppm o profilepl Can now maximise over trend parameters as well as interaction parameters o residuals.ppm The value returned by residuals.ppm is now an object of class 'msr'. It can be plotted directly. o eval.im When the argument 'envir' is used, eval.im() now recognises functions as well as variables in 'envir' o colourmap The argument 'col' can now be any kind of colour data o persp.im The 'colmap' argument can now be a 'colourmap' object o ppm The print and summary methods for 'ppm' objects now show standard errors for parameter estimates if the model is Poisson. o quadrat.test The print method for 'quadrattest' objects now displays information about the quadrats o lurking Improved format of x axis label o distmap.ppp Internal code is more robust. BUG FIXES o im Did not correctly handle 1 x 1 arrays. Fixed. 
o as.mask, pixellate.ppp Weird things happened if the argument 'eps' was set to a value greater than the size of the window. Fixed. CHANGES IN spatstat VERSION 1.21-2 OVERVIEW o New multitype hardcore interaction. o Nonparametric estimation of covariate effects on point patterns. o Output of 'Kmeasure' has been rescaled. o Numerous improvements and bug fixes. NEW FUNCTIONS o MultiHard multitype hard core interaction for use in ppm() o coords<- Assign new coordinates to the points in a point pattern o rhohat Kernel estimate for the effect of a spatial covariate on point process intensity SIGNIFICANT USER-VISIBLE CHANGES o as.ppp.matrix, as.ppp.data.frame These methods for 'as.ppp' now accept a matrix or data frame with any number of columns (>= 2) and interpret the additional columns as marks. o Kmeasure The interpretation of the output has changed: the pixel values are now density estimates. o rmh.ppm, rmhmodel.ppm These functions now accept a point process model fitted with the 'MultiHard' interaction o rmh.default, rmhmodel.default These functions now accept the option: cif='multihard' defining a multitype hard core interaction. o markcorr Now handles a data frame of marks o varblock Improved estimate in the case of the K function o colourmap, lut New argument 'range' makes it easier to specify a colour map or lookup table o [<-.hyperframe Now handles multiple columns o plot.fv Improved y axis labels o spatstat.options New option 'par.fv' controls default parameters for line plotting o rmhmodel More safety checks on parameter values. o quadratresample New argument 'verbose' o smooth.fv Default value of 'which' has been changed. BUG FIXES o Kest If the argument 'domain' was used, the resulting estimate was not correctly normalised. Fixed. o Kest The Lotwick-Silverman variance approximation was incorrectly calculated. (Spotted by Ian Dryden and Igor Chernayavsky). Fixed. 
o plot.owin, plot.ppp Display of binary masks was garbled if the window was empty or if it was equivalent to a rectangle. Fixed. o plot.bermantest One of the vertical lines for the Z1 test was in the wrong place. Fixed. o marks<-.ppx Crashed in some cases. Fixed. o is.convex An irrelevant warning was issued (for non-convex polygons). Fixed. CHANGES IN spatstat VERSION 1.21-1 OVERVIEW o Confidence intervals for K-function and other statistics o Bug fixes for smoothing and relative risk estimation NEW FUNCTIONS o varblock Variance estimation (and confidence intervals) for summary statistics such as Kest, using subdivision technique o bw.stoyan Bandwidth selection by Stoyan's rule of thumb. o which.max.im Applied to a list of images, this determines which image has the largest value at each pixel. o as.array.im Convert image to array SIGNIFICANT USER-VISIBLE CHANGES o smooth.ppp, markmean, sharpen.ppp, relrisk, bw.relrisk Further acceleration achieved. o Kest Argument 'correction' now explicitly overrides automatic defaults o plot.fv More robust handling of 'shade' BUG FIXES o relrisk Format of relrisk(at="points") was incorrect. Fixed. o bw.relrisk Result was incorrect in the default case method="likelihood" because of previous bug. Fixed. o Jdot, Jcross, Jmulti Return value did not include the hazard function, when correction="km" Fixed. o Jdot, Jcross, Jmulti Format of output was incompatible with format of Jest. Fixed. CHANGES IN spatstat VERSION 1.21-0 OVERVIEW o Implemented Spatial Logistic Regression o Implemented nonparametric estimation of relative risk with bandwidth selection by cross-validation. o Smoothing functions can handle a data frame of marks. o New options in Kinhom; default behaviour has changed. 
NEW FUNCTIONS o slrm Fit a spatial logistic regression model o anova.slrm, coef.slrm, fitted.slrm, logLik.slrm, plot.slrm, predict.slrm Methods for spatial logistic regression models o relrisk Nonparametric estimation of relative risk o bw.relrisk Automatic bandwidth selection by cross-validation o default.rmhcontrol Sets default values of Metropolis-Hastings parameters SIGNIFICANT USER-VISIBLE CHANGES o smooth.ppp, markmean These functions now accept a data frame of marks. o Kinhom Default behaviour has changed. New argument 'renormalise=TRUE' determines scaling of estimator and affects bias and variance in small samples. o residuals.ppm Now also computes the score residuals. o plot.im New argument 'ribscale' o plot.listof, plot.splitppp New arguments panel.begin, panel.end and panel.args o ppp Now checks for NA/NaN/Inf values in the coordinates o envelope.ppm Changed default value of 'control' New argument 'nrep' o qqplot.ppm Changed default value of 'control' BUG FIXES o marks<-.ppp, setmarks, %mark% A matrix of marks was accepted by ppp() but not by these assignment functions. Fixed. o density.ppp, smooth.ppp, sharpen.ppp, markmean Crashed if the bandwidth was extremely small. Fixed. CHANGES IN spatstat VERSION 1.20-5 OVERVIEW o Accelerated computations of kernel smoothing. o Implemented Choi-Hall data sharpening. NEW FUNCTIONS o sharpen.ppp Performs Choi-Hall data sharpening of a point pattern SIGNIFICANT USER-VISIBLE CHANGES o density.ppp, smooth.ppp Computation has been vastly accelerated for density(X, at="points") and smooth.ppp(X, at="points") o Kinhom Accelerated in case where lambda=NULL o Vignette 'shapefiles' updated CHANGES IN spatstat VERSION 1.20-4 OVERVIEW o New functions for inhomogeneous point patterns and local analysis. o Pair correlation function for 3D point patterns o Minor improvements and bug fixes to simulation code and image functions NEW FUNCTIONS o pcf3est Pair correlation function for 3D point patterns. 
o Kscaled, Lscaled Estimator of the template K function (and L-function) for a locally-scaled point process. o localpcf Local version of pair correlation function o identify.psp Method for 'identify' for line segment patterns. o as.im.matrix Converts a matrix to a pixel image SIGNIFICANT USER-VISIBLE CHANGES o rMaternI, rMaternII New argument 'stationary=TRUE' controls whether the simulated process is stationary (inside the simulation window). Default simulation behaviour has changed. o im New arguments 'xrange', 'yrange' o envelope Improvements to robustness of code. BUG FIXES o quadratcount If V was a tessellation created using a factor-valued image, quadratcount(X, tess=V) crashed with the error "Tessellation does not contain all the points of X". Fixed. o [.im If Z was a factor valued image and X was a point pattern then Z[X] was not a factor. Fixed. CHANGES IN spatstat VERSION 1.20-3 OVERVIEW o minor improvements (mostly internal). NEW FUNCTIONS o unmark.ppx Method for 'unmark' for general space-time point patterns SIGNIFICANT USER-VISIBLE CHANGES o plot.ppx Now handles marked patterns, in two-dimensional case o as.psp.psp Default value of argument 'check' set to FALSE CHANGES IN spatstat VERSION 1.20-2 OVERVIEW o Extensions to minimum contrast estimation. o Bug fix in simulation of Lennard-Jones model. o More support for distance functions. o Changes to point process simulations. NEW FUNCTIONS o thomas.estpcf Fit Thomas process model by minimum contrast using the pair correlation function (instead of the K-function). o matclust.estpcf Fit Matern Cluster model by minimum contrast using the pair correlation function (instead of the K-function). o lgcp.estpcf Fit log-Gaussian Cox process model by minimum contrast using the pair correlation function (instead of the K-function). o contour.distfun, persp.distfun Methods for 'contour' and 'persp' for distance functions o default.expand Computes default window for simulation of a fitted point process model. 
SIGNIFICANT USER-VISIBLE CHANGES o kppm Models can now be fitted using either the K-function or the pair correlation function. o ppm The list of covariates can now include windows (objects of class 'owin'). A window will be treated as a logical covariate that equals TRUE inside the window and FALSE outside it. o plot.distfun Pixel resolution can now be controlled. o envelope.ppm, qqplot.ppm The default value of 'control' has changed; simulation results may be slightly different. o rmh Slightly accelerated. BUG FIXES o rmh Simulation of the Lennard-Jones model (cif = 'lennard') was incorrect due to an obscure bug, introduced in spatstat 1.20-1. Fixed. o thomas.estK, matclust.estK, lgcp.estK The value of 'lambda' (if given) was ignored if X was a point pattern. Fixed. CHANGES IN spatstat VERSION 1.20-1 OVERVIEW o Further increases in speed and efficiency of ppm and rmh o New pairwise interaction model NEW FUNCTIONS o DiggleGatesStibbard Diggle-Gates-Stibbard pairwise interaction for use in ppm() SIGNIFICANT USER-VISIBLE CHANGES o ppm has been accelerated by a factor of 10 for the BadGey interaction. o rmh simulation of the Lennard-Jones model (cif='lennard') has been greatly accelerated. o rmh, rmhmodel.ppm Point process models fitted by ppm() using the DiggleGatesStibbard interaction can be simulated automatically using rmh. BUG FIXES o fitin The plot of a fitted Hardcore interaction was incorrect. Fixed. CHANGES IN spatstat VERSION 1.20-0 OVERVIEW o spatstat now contains over 1000 functions. o Substantial increase in speed and efficiency of model-fitting code. o Changes to factor-valued images. 
SIGNIFICANT USER-VISIBLE CHANGES o ppm has been accelerated by a factor of 10, and can handle datasets with 20,000 points, for the following interactions: DiggleGratton, Fiksel, Geyer, Hardcore, Strauss, StraussHard o predict.ppm accelerated by a factor of 3 (when type = "cif") with vastly reduced memory requirements for the following interactions: DiggleGratton, Fiksel, Geyer, Hardcore, Strauss, StraussHard o pixel images (class "im") The internal representation of factor-valued images has changed. Existing objects in the old format should still work. o im The syntax for creating a factor-valued image has changed. Argument 'lev' has been deleted. o ppm Some warnings have been reworded for greater clarity. BUG FIXES o [.im Mishandled some factor-valued images. Fixed. o hist.im Produced slightly erroneous output for some factor-valued images. Fixed. o plot.owin Filled polygons appeared to contain criss-cross lines on some graphics drivers. Fixed. o deltametric Did not handle windows with different enclosing frames (error message: 'dA and dB are incompatible') Fixed. o quadratcount Crashed if the pattern was empty and the window was a rectangle. (Noticed by Sandro Azaele) Fixed. o rNeymanScott Crashed if the parent process realisation was empty. (Noticed by Sandro Azaele) Fixed. CHANGES IN spatstat VERSION 1.19-3 ACKNOWLEDGEMENTS o We thank David Dereudre for contributions. OVERVIEW o Urgent bug fix to Metropolis-Hastings for Lennard-Jones model. o Miscellaneous additions to plotting and colour management. NEW FUNCTIONS o col2hex, rgb2hex, paletteindex, samecolour Functions for converting and comparing colours. o plot.envelope New method for plotting envelopes. By default the area between the upper and lower envelopes is shaded in grey. SIGNIFICANT USER-VISIBLE CHANGES o plot.fasp If the entries in the array are envelopes, they are plotted using plot.envelope (hence the envelope region is shaded grey). 
o plot.fv Now displays mathematical notation for each curve, if legendmath=TRUE. o print.fv Now prints the available range of 'r' values as well as the recommended range of 'r' values. BUG FIXES o rmh Simulation of Lennard-Jones model was incorrect; the simulations were effectively Poisson patterns. (Spotted by David Dereudre.) Fixed. o plot.fv Did not correctly handle formulas that included I( ) Fixed. CHANGES IN spatstat VERSION 1.19-2 ACKNOWLEDGEMENTS o We thank Jorge Mateu, Michael Sumner and Sebastian Luque for contributions. OVERVIEW o More support for fitted point process models and pixel images. o Improved plotting of pixel images and envelopes. o Simulation algorithm for Lennard-Jones process. o Improvements and bug fixes to envelopes. o Bug fixes to Metropolis-Hastings simulation. NEW FUNCTIONS o pairs.im Creates a scatterplot matrix for several pixel images. o model.frame.ppm Method for 'model.frame' for point process models. o sort.im Method for 'sort' for pixel images. SIGNIFICANT USER-VISIBLE CHANGES o plot.fv, plot.fasp New argument 'shade' enables confidence intervals or significance bands to be displayed as filled grey shading. o LennardJones The parametrisation of this interaction function has been changed. o rmh, rmhmodel These functions will now simulate a point process model that was fitted using the LennardJones() interaction. o rmh.default, rmhmodel.default These functions will now simulate a point process model with the Lennard-Jones interaction (cif='lennard'). o ecdf This function now works for pixel images. o dim, row, col These functions now work for pixel images. o order This function now works for pixel images. o [.im and [<-.im The subset index can now be any valid subset index for a matrix. o density.ppp, smooth.ppp The return value now has attributes 'sigma' and 'varcov' reporting the smoothing bandwidth. o plot.im The argument 'col' can now be a 'colourmap' object. 
This makes it possible to specify a fixed mapping between numbers and colours (e.g. so that it is consistent between plots of several different images). o rmh, spatstat.options spatstat.options now recognises the parameter 'expand' which determines the default window expansion factor in rmh. o rmh Improved handling of ppm objects with covariates. o kstest The 'covariate' can now be one of the characters "x" or "y" indicating the Cartesian coordinates. BUG FIXES o model.matrix.ppm For a fitted model that used a large number of quadrature points, model.matrix.ppm sometimes reported an internal error about mismatch between the model matrix and the quadrature scheme. Fixed. o plot.ppx Minor bugs fixed. o rmh In rare cases, the simulated point pattern included multiple points at the origin (0,0). (Bug introduced in spatstat 1.17-0.) Fixed. o rmh, rmhmodel.ppm Crashed when applied to a fitted multitype point process model if the model involved more than one covariate image. (Spotted by Jorge Mateu) Fixed. o density.psp If any segment had zero length, the result contained NaN values. (Spotted by Michael Sumner and Sebastian Luque.) Fixed. o envelope Crashed with fun=Lest or fun=Linhom if the number of points in a simulated pattern exceeded 3000. Fixed. o plot.kstest Main title was corrupted if the covariate was a function. Fixed. CHANGES IN spatstat VERSION 1.19-1 OVERVIEW o New dataset: replicated 3D point patterns. o Improvements to Metropolis-Hastings simulation code. o More support for hyperframes. o Bug fixes. NEW DATASETS o osteo: Osteocyte Lacunae data: replicated 3D point patterns NEW FUNCTIONS o rbind.hyperframe: Method for rbind for hyperframes. o as.data.frame.hyperframe: Converts a hyperframe to a data frame. SIGNIFICANT USER-VISIBLE CHANGES o Fiksel: Fitted point process models (class ppm) with the Fiksel() double exponential interaction can now be simulated by rmh. 
o rmh.default: Point processes with the Fiksel interaction can now be simulated by specifying parameters in rmh.default. o logLik.ppm: New argument 'warn' controls warnings. o profilepl: No longer issues spurious warnings. BUG FIXES o Hardcore, rmh: Simulation of the 'Hardcore' process was incorrect. The hard core radius was erroneously set to zero so that the simulated patterns were Poisson. Fixed. o fitin: A plot of the pairwise interaction function of a fitted model, generated by plot(fitin(model)) where model <- ppm(...), was sometimes incorrect when the model included a hard core. Fixed. CHANGES IN spatstat VERSION 1.19-0 OVERVIEW o Numerous bugs fixed in the implementation of the Huang-Ogata approximate maximum likelihood method. o New interpoint interaction model. NEW FUNCTIONS o Fiksel: new interpoint interaction: Fiksel's double exponential model. SIGNIFICANT USER-VISIBLE CHANGES o runifpoint, rpoispp, envelope These functions now issue a warning if the number of random points to be generated is very large. This traps a common error in simulation experiments. BUG FIXES o predict.ppm, fitted.ppm: Predictions and fitted values were incorrect for objects fitted using ppm(..., method="ho"). Fixed. o logLik, AIC: Values of logLik() and AIC() were incorrect for objects fitted using ppm(..., method="ho"). Fixed. o profilepl: Results were incorrect if the argument 'method="ho"' was used. Fixed. o fitin The result of fitin() was incorrect for objects fitted using ppm(..., method="ho"). Fixed. o rmhcontrol: rmhcontrol(NULL) generated an error. Fixed. CHANGES IN spatstat VERSION 1.18-4 ACKNOWLEDGEMENTS o We thank Michael Sumner for contributions. BUG FIXES o pixellate.psp: segments shorter than one pixel width were measured incorrectly if the 'weights' argument was present. Fixed. 
NEW FUNCTIONS o pairdist.ppx, crossdist.ppx, nndist.ppx, nnwhich.ppx: Methods for pairdist, crossdist, nndist, nnwhich for multidimensional point patterns (class 'ppx') o runifpointx, rpoisppx: Random point patterns in any number of dimensions o boxx: Multidimensional box in any number of dimensions o diameter.boxx, volume.boxx, shortside.boxx, eroded.volumes.boxx: Geometrical computations for multidimensional boxes o sum.im, max.im, min.im: Methods for sum(), min(), max() for pixel images. o as.matrix.ppx: Convert a multidimensional point pattern to a matrix SIGNIFICANT USER-VISIBLE CHANGES o plot.ppp: New argument 'zap' o diameter: This function is now generic, with methods for "owin", "box3" and "boxx" o eroded.volumes: This function is now generic, with methods for "box3" and "boxx" CHANGES IN spatstat VERSION 1.18-3 ACKNOWLEDGEMENTS o We thank Michael Sumner for contributions. BUG FIXES o pixellate.psp: segments shorter than one pixel width were measured incorrectly. Fixed. o fv: 'alim' not handled correctly. Fixed. NEW FUNCTIONS o smooth.fv: Applies spline smoothing to the columns of an fv object. CHANGES IN spatstat VERSION 1.18-2 ACKNOWLEDGEMENTS o We thank Michael Sumner for contributions. NEW FUNCTIONS o Gfox, Jfox: Foxall's G and J functions o as.owin.distfun: New method for as.owin extracts the domain of a distfun object. SIGNIFICANT USER-VISIBLE CHANGES o distfun: objects of class 'distfun', when called as functions, will now accept either two vectors (x,y) or a point pattern x. o Hest: this function can now compute the Hanisch estimator. It now has arguments 'r', 'breaks' and 'correction', like other summary functions. o Hest: new argument 'conditional'. BUG FIXES o pixellate.psp: Values were sometimes incorrect due to coding error. (Spotted by Michael Sumner) Fixed. o kstest: Crashed if the covariate contained NA's. Fixed. o kstest: Crashed if X was a multitype point pattern in which some mark values were unrepresented. Fixed. 
o lurking: Minor bug in handling of NA values. Fixed. o Hest: labels of columns were incorrect. Fixed. CHANGES IN spatstat VERSION 1.18-1 ACKNOWLEDGEMENTS o we thank Andrew Bevan and Ege Rubak for suggestions. NEW FUNCTIONS o Hardcore: Hard core interaction (for use in ppm) o envelope.pp3: simulation envelopes for 3D point patterns o npoints: number of points in a point pattern of any kind SIGNIFICANT USER-VISIBLE CHANGES o rmh.ppm, rmhmodel.ppm: It is now possible to simulate Gibbs point process models that are fitted to multitype point patterns using a non-multitype interaction, e.g. data(amacrine) fit <- ppm(amacrine, ~marks, Strauss(0.1)) rmh(fit, ...) o rmh.ppm, rmhmodel.ppm, rmh.default, rmhmodel.default: Hard core models can be simulated. o rmh.default, rmhmodel.default: The argument 'par' is now required to be a list, in all cases (previously it was sometimes a list and sometimes a vector). o Fest: Calculation has been accelerated in some cases. o summary.pp3 now returns an object of class 'summary.pp3' containing useful summary information. It is plotted by 'plot.summary.pp3'. o F3est, G3est, K3est: these functions now accept 'correction="best"' o union.owin, intersect.owin: these functions now handle any number of windows. o envelope.ppp, envelope.ppm, envelope.kppm: argument lists have changed slightly BUG FIXES o Fest: The result of Fest(X, correction="rs") had a slightly corrupted format, so that envelope(X, Fest, correction="rs") in fact computed the envelopes based on the "km" correction. (Spotted by Ege Rubak). Fixed. o rmh (rmh.ppm, rmhmodel.ppm): rmh sometimes failed for non-stationary point process models, with a message about "missing value where TRUE/FALSE needed". (Spotted by Andrew Bevan). Fixed. o diagnose.ppm, lurking: Calculations were not always correct if the model had conditional intensity equal to zero at some locations. Fixed. o ppm, profilepl: If data points are illegal under the model (i.e. 
if any data points have conditional intensity equal to zero) the log pseudolikelihood should be -Inf but was sometimes returned as a finite value. Thus profilepl did not always work correctly for models with a hard core. Fixed. o F3est, G3est: Debug messages were printed unnecessarily. Fixed. CHANGES IN spatstat VERSION 1.18-0 ACKNOWLEDGEMENTS o we thank Ege Rubak and Tyler Dean Rudolph for suggestions. HEADLINES o A point pattern is now allowed to have a data frame of marks (previously the marks had to be a vector). o Extended capabilities for 'envelope' and 'kstest'. NEW FUNCTIONS o pixellate.psp, as.mask.psp Convert a line segment pattern to a pixel image or binary mask o as.data.frame.im Convert a pixel image to a data frame SIGNIFICANT USER-VISIBLE CHANGES o A point pattern is now allowed to have a data frame of marks (previously the marks had to be a vector). o Many functions in spatstat now handle point patterns with a data frame of marks. These include print.ppp, summary.ppp, plot.ppp, split.ppp. o finpines, nbfires, shapley: The format of these datasets has changed. They are now point patterns with a data frame of marks. o envelope() is now generic, with methods for "ppp", "ppm" and "kppm". o kstest() now handles multitype point patterns and multitype point process models. o nnclean() now returns a point pattern with a data frame of marks. o plot.ppp() has new argument 'which.marks' to select one column from a data frame of marks to be plotted. o plot.ppp() now handles marks that are POSIX times. o complement.owin now handles any object acceptable to as.owin. BUG FIXES o erosion(w) and opening(w) crashed if w was not a window. Fixed. o diameter() and eroded.areas() refused to work if w was not a window. Fixed. CHANGES IN spatstat VERSION 1.17-6 ACKNOWLEDGEMENTS o We thank Simon Byers and Adrian Raftery for generous contributions. OVERVIEW o Nearest neighbour clutter removal algorithm o New documentation for the 'fv' class. 
o Minor improvements and bug fixes. NEW FUNCTIONS o nnclean: Nearest neighbour clutter removal for recognising features in spatial point patterns. Technique of Byers and Raftery (1998) [From original code by Simon Byers and Adrian Raftery, adapted for spatstat.] o marks.ppx, marks<-.ppx: Methods for extracting and changing marks in a multidimensional point pattern o latest.news: print news about the current version of the package SIGNIFICANT USER-VISIBLE CHANGES o news: spatstat now has a NEWS file which can be printed by typing news(package="spatstat"). o areaGain, areaLoss: New algorithms in case exact=TRUE. Syntax slightly modified. o with.hyperframe: - The result now inherits 'names' from the row names of the hyperframe. - New argument 'enclos' controls the environment in which the expression is evaluated. - The algorithm is now smarter at simplifying the result when simplify=TRUE. o update.ppm: Tweaked to improve the ability of ppm objects to be re-fitted in different contexts. ADVANCED USERS ONLY o Documentation for the class 'fv' of function value tables - fv: Creates an object of class 'fv' - cbind.fv, collapse.fv: Combine objects of class 'fv' - bind.fv: Add additional columns of data to an 'fv' object BUG FIXES o "$<-.hyperframe" destroyed the row names of the hyperframe. Fixed. o model.matrix.ppm had minor inconsistencies. Fixed. o ppm: The fitted coefficient vector had incorrect format in the default case of a uniform Poisson process. Fixed. o plot.ppx: Crashed if the argument 'main' was given. Fixed. o envelope.ppp: Crashed if the object returned by 'fun' did not include a column called "theo". Fixed. 
spatstat/data/0000755000176000001440000000000012252324034013053 5ustar ripleyusersspatstat/data/flu.rda0000644000176000001440000023666012252324041014344 0ustar ripleyusers7zXZi"6!X])TW"nRʟ)'dz$&}Tn?{8gQ/ԌP[Vw&X Mv ytvl#gj$=0iWe=>`Nn}dHJoˈy0!ꡩD܏A*<d%T @^􇭤QA#s32Tp2skwa`-ObJY:0Yh#y/p.G1 foѡ) }է&tV .\㎥ 1x+O%_s/4K1@à) v1r5ԘuqtP +<ֿ^ %魌A^?Bnmh"s$ ar܄\$6vx36 79@VAFw`5eJ$H*:㴆݁h77cM|F6-0"8q3r6gx$z,3*;{fVR\KB6}%Orn sY/ive}e6]5_#S]->֩_5K-^!DPc *{w({j^YPQ?ݳN 5 ٢ p^}:wAb_^dn`Tp!/5V#?;Yju6='\mm B]rĩ/p_ƅI0=;NaLa1lpwry0VQ%ytHo%](Pk\i:F لZGr].^8*G;Iu`R)bn{`*]sSUl# ?8?$XdKrrDB|jʅGG5;)y3"x㷒FWI> X{01_W&ts&&Mia/TȆHc)5)=*gGN*܈eSYM 8%nVt|]x~B\ݿUB$ K㗽ȚĈZ_ʘٟP?>f `pYJuVB|ӖZպVjA 8 4)$]?$VFJ갻 6-N*aHLk>,pȎ!̟߂u4ʀn$R Mo\#e1奴DN\4!0 637YXdGP͊awZK7/N wٹ/N yzto}ӵE( CAF)oQ3edi\\[gG6?s&R5O4DЉ5 d16H׀>pU!XEfP^bwuxD4A3[](+#2"nPQy \ fL Ym S E\:<< OpTm 䗝 6C: m Hm*3@פt t:j!n!W@(|D#sP=tb{-aZd%qWn?Lk^({˯0{TУo__/6ݍtŌ5YS%Q<f 脌m=`iAѵIJI/*CRUI($*dmOo>L_?Vtng[P 8Z[XƞRG5ȋ85qC#V>̆dU`v Ƨ\.\S0 a'nMQ'tkO)7v RG)r{:^ZyX.ת~`$ȝ4 I1>uWAc,?У>3+>f B(tlQ)m#3Ky[GMuȚ= x XIh*v"13'hA;JjA|d-jEAqe(9*Yh zA#YW2w*,=8Z P?CƲYlϱ"s}-qe ژT;)բdUqK)Vb5({%ysS]Ln;xܸF`_glYU:UMH|1:k*lg}ϫٕΣNV4 gg ^xLV'SML}sәĖtp;ydպ)r `serUgrRv}(,i]&M:ہ)hEҡ^&ںAyT D1,|rP/՟[*UxX#-6 (k0`>U QV @G3y}7ɱgtTS!_d`ɫV8RcHtCv5(EYVDFw{ (F@FG MJe7(j!ӍwWLQ+u4;{JCkIYvBrV[LDsFZa`%^tM|1c?wJ9Gu. ZHXSJl`-緞bH:JrD>ЕQw(ax1O-%jaޕ ]XߌC^php_#ܵA\WYi?H8d(T1F[3b#,MNV:СnG;rxZu>1v|݂^tixӈh:CҢ`X߯'^B SamJ5s E ˀ0hR 1?<8MX~!AgTatC @=#[+QݖƸ!sdcʹqͦ'q%%:ZȯW 139>eC1qd+]t΋`ҵ0Z+T|;ˡ8,+;JCĦt["z0+f&XVw"923/8K$l;)XHqb}s/(_Cn m||1x.r}8 ,>UKLPrzZLpk˟`tb1Bg"$[/Yv~8K?CUuFD=Sw%AcM,(U }&Eh_puujLFzf/`+maʞcN nR1ݔ s[Dam7bV,S@ulZsޠSC+=k"/\yrVʶE![.&[%tVzB V j6 I&.!WQBDFBlAUŖG1lYc ѻP@qa0.B TH#u:͘_ )H2 ߐTeE~+ wqH6-8.9#:aAR ]O8lʵ-kSl:]^78{tLm糶Lh[stqG7H^@w?sjjY#K< _[]w0g;&T;,74)6E$בW4AnS'.Qv,gIIs5 q{ۯE@di|rNu5JVJ2Àq8}D̺ڻ.QlTa]8&0ߓJmT&*<*V/' LtB}U%; f ]! 
`Fb {,O<7,⊚Ii32E-vjdU{38K^#; 9 ߅RKT)ݎ(ɳQ"={L" =GiZB dCLZj 4W<w/ή>܍8~뿒q X r[EC OhDB`PyBޭ6rz~{&{X#!e8Т,6esb02:0e p|X#e=BW~[ WX;w ބ95Gf/"u 9#9&b]7pJ ڌ`6w0]IV)Ĩ"{^#tĠI eed N:1"ίh`kpTK2Thx; s7c,΢goPd&ggL[WWvRPIe.UgL͇=>>(un}x3r VcS.hm\uݰL&>{S p 4Zo*F7H8Ud8hi 5U3Pq?u/n{1ch ݿ:(#(=|^BUG4<3{l껙5V.1yׅT g|.ao?@`'{29 _Sѽ`Mmg3CwcuzKAyZ7/ %:*L&D[af۫uFN8(d. s(Z aIwsˠؿQ\*ÆQ!*P,X VerloAWs2|OUkOtt|T-z@G&.x`.T&M)^=76D.qi#a*'p/mkwGZ\e i1xkGUyTe; xOVfqb# c ݆Rpy^i #TQnU9E Cpơ{-8:8k lgg%^4D>ba]CjѝgP3 PUB}0r(K#XO00aH eR@ԗVvAJFq8Ɯmlg^/T5Qץ ^^MF֯IMy'oB-%wv 3OE,DGIHw#["s`QWKjG_F0꾀WPyIp"##7U:IxBnk|ukJp:s\[pz/fo4n <\@j\{ot Hh@ ^_R!l0Ҝk9"""X,P%4t9, ƗNu#P5}ԒgO!2 .uQ\wϦ S>03Z@$g ]О_5TbkBAMuK ظ1>, e6h9oOi'2G؛%iWWsxd0xT\Og-Oj\/UH6IKgrk U( NgZmeD?/>̫TZv3rgqEB`~4'UZ+@ƇJ]cQen:Q?zlJMK 7[!k)?;л27_ӥ(MS;!Oxy\kG G޷tzKYxdihrǩI 3ES &=e.?.z_t[Ud[8UbcA鄓,kV6 #(d 7X\XJa$?`;j'M"y ͔.9>ƳEyQBM#'c=P3'究=Yʫޢ *e }-KizhRsQGٱZ2$KrT@UPMRݰ]K ƈ;=0 ] p 83R{Ne[,5>lEB&P~fm04#fJY'7 X{>D@9ˆ3?4xiu?~e2/&H3= -~8;dȠ-nox xp%W+3b~S!AB*kڹ:B?-}/JjKt'tV/S3D:5>ő=7Kx ;LO%ɄN._s?h&11$(nYv!ъLey}`pXhxGfB(e#ɾ"e_5 TVG) u_ܭP<<lE=ʳA[7 ˯hMES:j0)̨^y/H8lɢ^p<wd_{W +a9Tdz:kzQ" -\4 9q=Gbø=Ό*߭WI%W*bo7*9zUKq{K)E7;g=ߚ[9|t+(${ 6Ķ L@V#]W YIgs> Y %n1hH ŗ /5tZI&Nv/;Y׀]BՐCAÁJZ$.ɉi]4 >\zN tsn7|*ǁrɑK/|AoY@>,AS/VG05UcC߯[{RRV蹑 If극Fsz =9JVn.?0j4.ǤϠo`9Ux) TPc,ZVtT,e# ;7VQ.L$~Y7~dϯ'Sf`-J֔Q;ڳj8X?:4μHևSĴyG,afO[T³?-A lԫdsG-ӭ[ swZz~*sS\O/Mф@H.1aXQ(Cp+*SOq.eF_@cnatXPlAuH7TNt[r3v"!LxJ=i$/;7˘&T\y @6-Yg!1SsL$S3Ѿ]=4KG׶TwW.5*VXDm3W{b32 As`gƫ #]xm{ H iY==-PP+%\@sK|-=KiɮՖ?sҶ1!Pf#6b2* ?jy?|M/~15@;|1 Ľ0pLQa mc]Nj ijAFӆ<:lU׆j갎M\F&r̋XuӹȿjĻ]fy'>1w{k^e/Ho& l B+xp8W(҄i¼7tj\{:o 1)bjMk?|ڰ(SS@wf7tv=Y)ĩwdK~pNT+)!CjI,1z]|pH?]2H(DLC\M`{q/ȋؙslݛFn q7grBP%fy<%<erk7xHiY݋Ѵw(ÂqT4O6J<h?J1}H@{"eܝMHwxK.0( P$p9 n șn-ěnVЄ2ՃwLqDXzm{ | .eK?tzJVI-gH`qER8 Z¶Pxz2&ˊ"#smq,R <\Y8OUQ؜(5S<:SMefrԍ; ?.{PW~P!uz Q흚mΎ7x^V4 h h@A?{:S0CֻAM͗FU|n:hO#osF%S(P1bIɃ>,TK dˮ@\(Z#ѩ/Smou5kG<#k<(v*+Uv.̕_Rw?f9=|_#Fmo-GIab:}Xޣ8BcUKIZ4A.Md"\Lᘌy=o&C<цX}ԧ%nRPpp~s yR)-*s|XAVjn$e\2|\T!Nx VSe:QC.W!ivz–"RmĜIxϟ/gHL9wDQ=-P}N~)1ؙVY.kTDNRv 
jyTn䪮ґU4$EpEB4axn B,O* Ma򿒧?ӊA$$ti:%Z;j?XP(3y4K>U:P[Ik|݃=3Ӝ[n_'OPӭ1s*nKO <kM$/6_2>d́,}Xg!*Bse+?gOwgvg8/.]nǩČJB1WQ]V+nenFgSflDW K4 k, vsV޵pVIx[^VӘ[=딃@!a `W_$yڟN=Umi@{%82s]73`($˷S1 lٚ:!W#ݤ܃`)ߚ=g"1 خc8Ӆ9,Vl}TP.fTat%ң qwfKkkj CgՖU&=e:,'[<!mi3lUk~oT@D8 r&Wߟ"?5y;R@[*_<"d^ ƕU/ ѺQb Z?q=^K Dׄ|@QEc/vK]փ"^N=]%ow;>l{_~07rNE&V7)jq3 Ӆf|[>EUz,(+];u$(걽h{Pܶ@@6s1 X}@QH-/Q-L-\֟%ޔ=ٚno3iT%}]`m/Eh9D_L{[-) &/ۂ/{#Xkڹ)ncgX}O%&sgV]H8 _T#ͮRH}BXJor6I[`6luw%oA`1u2_?<.jί`lZ̘'2Ţ46&4mGkio#e/ ab @X/٣jҟ qtΧQMs=4A,WOĠս9Sh EM!7G<UA\h&i'gflIg}wW9D/ qӳ\"H3zGSI15d,'w>km |OC- PUx,}Ȏx>E''t{zewEH~lZ^:Yؠآ1@9dxD qX%;ja5[aQb=/eG"oU- p}8iפq"a݂^KX")߯!l# .KQ˳tшA bIaUQQ&q#BSP4%4ќfQOֳ.kJ;̸J; NJY#!?a2p)j,MB< -KR4d-W;wRSEN.:C)Y$Ma[ahH J0Dv+ru(Sk9Vp(WEf1W9!qAKJ ;穨wŔec?ij@ŕ3YEs*=2SS#pIsg }—j>_'5}ȭ рq#NV 9r2hzJ(煜dWb_H >xYˠ,<`Xxwҵ*v"8ݺZ4m)dsyIt4C5T*V@sZ#*$ zo?LȐ{R* 7ܓ{pv`ŬB_w5&c}֠T^l_}76 _-/ ݔ/d[= f?[@=4Sd:ChQ7삊_\&)< C:8GJkOv]{阆D_-򞀚%2g=A/ U`}]傧(8Ko֢dXP_{!놷f`;ƣh^[_ϊOIl{D"+APḜroiHB'b~Haӆl<] t4ZKVu]'M̪MZ\-H #/=S)幈Pn4t zgz916X  1roȳt ymc;{pU]1&$9؟k'q;6'pZ_P$Q9g0sjhWfSJɑZ6 c[sLI#n _8.cэ֜_ŬI`X 䇴̱5ѱWoOm!}7 67ѿtbX $ó?&@Õ?LjjC}͟FCPZj(cx2'ۥ XjE# Bdhb0dcJyHx NEfwJ{Q#oapIPPϵ{ j0Vڮ͠VCЏ][-9z[1\ ƹ?bTIMx޾g>_۲1O7sCT]٦&WA`߮֩&R>oTPb(.:ιeoNgQcW:/ydGRu޿p v7WG2$)] |ޝ=o)Oʦʖ"b,~tATI4j!Z`.:7_x_]gsgLQZ[TX0*4fs sBze\ _mıݫf`7Q`ࢤjz`Wm&)Mf@HR$͜2ڕ4 pKO-L~81աJ̊}D̶9S&Ґ":%r֝/K!s-'ܻTF$݈f3%Q'm2+&oܧ/cC&:ieŔ|PlW.e4~ؽ_֘ f̊v@YwT`4OLatKSk">]kc8)Wڨ8S Qe*zߥ EVG5g(#H>\_-bsy),s+>h[CcJbkRmDV_u%;*AU7eJ/kSoa7И,Lɼ,j"UZe.x{|:?粆yG jNcc# Xu {'1QKьErjr ".ϒY8{;/?0bw휊dI^W& %L[6L:%{.&bXHp("ͮ yG͎cݰPaQ\jݚV緎HvYvȊoϽP10}9(ǐS\v-a8)Λ & IYm(>XE(›ZBO`sDv+dƧṠ$䊏>)5sW&fxc=CcF+loM[p%#]OT,t.J``3( d%I7+A*h:-@/x~8"cZwܠt >d?0fR_AakUQ:t<669"R&RHht^ ʰb#lXIyA 0eÜ}D zT~(aZ5G=ԠoG1=(XT VG>o)vA -Ţ6 CA]XBaxB1R=`M=hK}/D"s.U,vHť5+M?lQfc`+ c^c_wNmC4N]00/J/Bݓ7TN`~XHFVFjBM %Rl%1c+ep'p) |-[ }G֒}"p6LO+SyʱSk1 E7j+b|t15۱ۢ0r9!x I0ܤ}fxeJ1?{$M` ^X; ߈h8ZG%!Wx4`BF0rZWtOphAp dǬջv`˄ н¼9ۄ™:wF3 REuF}0sQ [o >'Xn U,/:J0hX6'#0g[oYa&\ Mh\{S{I: N7a'ۄnׁ񌒗B~ )OVCQ VIU'{K`4n >k36>uK\Jꚩ֦OL0 Vln^ͮ"MNE@t^%\4}"I:R4%LG9o+$w 
dXki8Zq\ًHg[;_&ϊG5w?Įdvp㊴)!>N1eR:YգB!v2Da( !Ag'S s]F r Q#볍aš!1C9"ϰ;=3/#ƇDv-fG9 *cOgad׳GL-h-k'$K̈́,/Jw.;EM4bDO˫aW1Ӷ\lS6wB)Ip-ZA'@Ǒg$ԂZ7ޕr@Yl$%1'6zBMdd^t$6%/VW /"|F[orh.ٴ9EH:HD5xb7phƘ/abJ~B;]~> (Wa#p?b$&bW"/ܯYRM(>pȾ[b D͋^4F yE+9T"N($4T~qRf;Qݤ,h59팛rxAZUҩN-Aϧ{j%V ,Nk!D&>3r-2\= X8VvĎ^p2I̹ Yt)¬,(Wp`zNW#ۋW,A?I?K]8%7=ٹq[&}v|'o%U+(IkȢOhJRh9<<2``{$dHga6=^yll k)ʂCK%O$X֮oY7tߏ dgOv1F*(!%Ƹx/˦-d+S%2OYGѡf0npYH"vj.\Zq sED.EFp@.gr?LgIW {Ehd*fbXU $H"b„*ls5NԵܾ"ILgAع?RzɍR@tMlD +$uj&܍3Љ,qW[(v2=Qu3P Qa'BQX33ÑUSYt4`DUq,7A<+#˂9щT &&{ȷ 8y@jrSVO\+Ou0ÃwXZG{U*}V\FQn`oHbMCSkQ(Z{+Y6aWF4]\BK2$x`Xk+FusAzظ"2-zt/Zۢߛ,W+GK6 =?و18%;x;*!j?^uUq\Z;5< 1lw^TM|'OM?@d$s[ײHx rY6^e?Z Tl 40?Y*?[\E'A$%-·s{MT!`ݢ9#, jD5@-%un|d@DuJW?*C{$f) f]xA4eUې }h޺/8>&rxW*ȦoXkݲ\5;>e\Uo D"O=*oh0O!M#1 HA kSC]Λ9["S_رU-פx#&$qe#;8W,z VJh{BR6T sN BK7 tl? T`3C{|X]Y+#e8[:9Ӥ~s'- ȡXLE)\uN?:ߥ+uq (/Z.!cPe/xfgÓ ~#'#;⛙(7b,*Uz+AOq QJ@2i0*,vx-9lH* |/Yi$}V :_|Q/׋ %`h$_\KSQ`[H~8O[ծ %qŹpL8MRfambH`=rwe4l콃`xn \sd0I\^ve >6~ 7q8Co=/H\^$dG6HvZHKvDTsy5dYgddlԥZKUoVқ$֝u\Fqt5<3ңFQ']T[VS6B9#7C3hs{53."(YMXq2:݁ςI)u^YxnPP{G4xh觳;0T2LkuTǭcjEژ{{B%`U)y$& tYr'M:W# 4(\<H4?-i=>L0)Rnӕ_l-a@[/qZ`zTF͟]-!=n)@O4IpBcJ-w fVGnmbӭrtMAd}?Rv+C/+-:)eqf$lc /:L6 Xgc`FrULC1A++88$Ie|Z蹅.Y!# 4J9^Pxb),@-tQcЁz.Br@tJ*VMegfb"i" =9)7#`+4ݵ9@+[q"ܮ>YGlk^m־8蛳A!eta䎞[.hoDUIUXf[yJ RoiNfԎ[(ܧDM{ӐUh-/-.z_ |\S0.dgz:o Q6P`R^-t,WIJz Hg/aջy֟R3j v1qԚBcKK: 'x╡yhvw[R\t]@0ijJB%U #Nto}:=fr$C 8~  oHONw/xI9Td~U:vZ r6R-6NVT2&Ʌ/BHոl6~ohQDy x>mF;2}@ jeQs6kb^Lү:hΐ }vTv3E2Cu '^PJCDrM+M5jQUV[FgQVNWlrC0!|!$IqE !8&v ͊dUi S:k꺌]̥19&mP#+']+ 0s(^Ǹ/Y5rF.f v=;b`dwsJE,!PH@ͻkFC H5 (SV| [l>%Q7[^5+0G$̐ ^(;B.hgsy!$`1*:F8[@|:FjE!ѮA2/*%VJoF95Ҿ ||%5ܶ<7n,"QČ'RLZ?,8EShP1>~Dl'׋df/8it5{Y6%E&'%mr(?n]g٦glgkx: ~S x9@k؟5"Si ^> a4Yp>2ʘ:|L']D^ֺX88Ҋi%a: gJ!,pRԛ 8ۍ[+Rި*u56,B&=K\V7}?ڤVCpѷ?W sX¶5$ps͔gsά_$0Ȯ 9S;3~;蕇(%ؿ0?o[>%+il\<'EbTDn@`؋̯S˹J sDZّG"s ~,Et&jLNf \:0 b]m/N9 gx173/g %h탾79pķOEH!#^7D{A9!".t0l~"iO9拣`4b`g4L]7e8qWkqKnݡH oI̮b.IEEZmv 4ɓe1YYE"_~{3Qu&Tqţ3h$Ur$On|&r;ȬiQ oCw2o 
C4jbIj&>vf-W:3+0Rjz#eMq(c.IO%1õwT͛.]g&ߐ)]<T63e>"e||@c0Cy}SRcI:zkK>X_>?xVL?@qL&;YNIWr%Te!ܣ ""Lr{  ^%j;St"*VnzaHQ?]| T@8 {"UJ_4]F@"щ{ơ!;<2TQ[? 0\e?HJ!q w E`,۠1p4sftt8R]ۻ4JP|4f g1@`ۯ7'(3^ X%>icYmMPj-u-'ypkDŽ!3dpPw*r+a[]n թl«K-|mrFN*Hp%yzSkb*b>/XbsO]~ 3j,zjo9؃ŗmHT<`_4PߔDy a9gob9bo}iz}>1;tcW-l)תᱱ6V\s߈]F(Y8iP^s^o5]h& 15LHM;% 0JLB_K}Bli;Gh"a,&DjXgyIa^$;v*u0  'ă_찙š('{!P39Kw/|HFQ==75H{h2e'MƻAQ/ԛw. Nڜ(82Zg!_[ ފi$]9qA~YOߚwi?"=FK*gM(fRYZUL9V 8K@ܘO]$!vD-zLӼ8' UC$Z 1F^sk9BB nj ۤ)^褭X7W^t H~,,\Ācʗ~DkRJǛWT-3 c.ݛ`Ķ9u)t|$T+n4i 1pcr;22u.I3rm|R4߱\8ipn!|\){z,9:{Vś!TtA9;Yf;@BK95w<7qD2.sIX$(4{Xi9_wS _d"i$S9!8P=]3'*A; Ex1C3U)" <2m۹l Px E:+{q[E 7a$#E{zHBo(9<=} 3(qp1|v˕ *խKl2jhsCPQc^ѝpZIKVcʼn>,1eש /ɋlUyOr~(^øRHAfe!rX,&C.&?_ ת*A`]bj/۽JD)ζOoU S wpڔoѵ_BiZplC@jW486 ;#Ǘ״dh2`QDcvhB<ٻ.5S Dw#7a2@:d>vtd 3PZdRB֮V !V&o38W0(1R=/ѢI[:cf$NNR_dHS[Wfi"uX$kॠm>#ws"v(0iipVu)c3_pMP@zF\ BjUӇw\n3I8S,$feP,hfl l'6!Pcvi2:(maGYJl4] V^GZӀۆHo++:yuvQ]~ 1kpO~.cVTR&u\yH_[h(p ܑw0@qe;+@-kXіp-08}YWVo&l? zo ]y]5jAX;/# g} |V-D+0y HNmCx tHzK:WA$4hh }TkaR6AB};}qr^#S&! ٺ>.hwZpA=Y$vDR9j3L}}&Ŋgh;"5؎WN3K$N?B?V}f(Xr(K@Ĭ''duh8]˳@,~U3aR\퉽*`/@ ]=fXHzxȞkc2EoWk!E2Br$BFKˁ:j\$$wZGAjXI$A~~ݰ:~_pHxY—8d\aj0n(YnQʷ!`^| ]:N bK%ge<ȻײKpO~gg:]Ԭ:XpzlqZoDc+oHl{ݜ뾇8|HfG*=2H(^u?]F3AHŗ{@cz*xKM,P5T }6[HFo8ʌ's Popo 6"i/̮%קG)ԇ^Z.nŢNTyRxb.ҽbt}GIHAZCxq4,8cJY27u:l { ksLkeWK~BK jAн)ދT = zRӄx1he_>^=rtsOWԬMQ;}߹ϫ8$wvc ^-EUZʙK!m)c^!%FoФ`״֑)qicub'xPRZzP珩cÌ )S3Yf*xF|7嶁ңӜ36K|#|P5nlpTjs̙r߃߾&e؝vFOXzH._W fM;Q z!$ g/#(f$ѐ"ijW:r~E`#hw=H 1_$SJzHZxfBip6K=}с|JhA~u),!B;[qmG,a;)rfam kjY=faocx }cRׄ_|`"uBì T#Tf֗%l6]qEH#OʵQ7z2OoR'͌+UD@ZKrذӌYԊ3}c" ;iHLx@Ḕ8qQ ]YwbaδnR9now'3 U7Ͻ]hکV . 
_/͊fIBfSI#&Ox_moqǀtkߑ@|,gT綖{fV}ЬF}Xi\ډ1+:g~5gb/J>hF/!dmWB]b#pٓ6ʘb&Epvx{i?7i4Z wn)y!4e߸+W2Q8%Xg[2/NW<ժTm3LhэXY7hxv /ˋ o?n`vU+@@:Gϰ3 d!;i2Cf4'hOSErܹU ʅeD7*>0^ F*bI0SMrd 6W@W^sRj⦄= ]榔]M'L< s#$׌Vsڸ"@/͐T}@KvgU4M3  uZqSPc8 rx=Plpf%vGs8j Q>;M{u1mi?߂:pַlYDTp4LcYt\6Tؙq.r$q`$Bupv|ve̒."y 4d4P38 ^ LqXs 1zב4Ic| -ԌoCd c0݉pI~XvZf=1ԧIzKfjwĒ Bnk#:ib|Hj/\ƵTn't)P  8'i+O[(E/ɀ*N`>bV;(~eۘpެuGT`d kLjy9ncG}ku!Ĝ^o*|=3]\jJ^xȤ s̕CQӌ>Cg,M0SJ) T;VhZ YfOСPJHZs JK% s]/ʼnGI oxDЃF2tPRW/..fQ&x% M-"bLA+/]^z-J 5t[Ux/| %eGؗ{r/6Dx~cNB^X\Mѣ!6 FGã&26'Aݓ /S`ݻ/`6++6+Z<0^oNuc'2jIQQy y]'x x~Q r 98ʐW9a0=~P(ЯԺQя;9N]PK[j.,WL68|PS!Ņ^h4]5׽8ac'] gy+Ilp ruq4vJf4Q 4-E{ 3Ja孪ؚi* Gu\5mTH93ɷ pG+$OJ*KJ"/:jA2 *lqHb:*PL=_0Z Ao‚3;ZI8G3_Zj0lm66W;y>.YKRiFz59}#ݾXb]zד)%}RLz鏔RF@l*QNKJƗbcC;faiZ@(JCmZT8"L 7cs5퇚#T=\ggtAu '<ҵ3H,vħMJB\!O OJכrTw>IAT3E_.X[dWT*UQeyK~MݠlyD¾9/uq_u&y~ޟӯw upmf`tlc}Z!gwGOɾٱ5R@pCH%r]\5"e4 D!Ҁ?R1񅨛$[Y3<@7uV\+'`+&.yJӱnpA-#&P2n)v .DX2JҙպS؟ >ZΪR s28L۵>h *C;JВa۠::."4⣕G'pB+l &Cvkuss_? 16]:o]x0]J~9 {<9r^ d@74WL8n;\LcY6T#FZ&qAbD=Gғ^IX:0q6yIFx=o3X͌S,^uH5Q~`#Fi圀 2Q/b 6Dt dս&6|ԵCZ#&·h /mYTw"PםsĖ80]՚'N0cճ& D J>ٵWAKoCmPT c%)=pJKV8qx-5<&lUKJ*}DZ[e8Pbf%dta⮷4UH&zflŖ^uF}@RXe vb.b(,SM-Jg}K2h"e#W cODr𞻀bR'U`h2XCQ%G\4(լsc-}h>*,ms-#>̓BU5oWҵ[D$XWϮ[>+]Gey1jeЛjL~$X__S _^U1|g M~YO8q{\pʺ6k CЫcW]~ 2((qaw59r.+_{P$>2p.4kD g6ɣ\CO3Ϙ&F2v ㌞_l/,k[Jͦo $']6Kn6W p"| )N!x!@p˴ 7kJ^UB`Dyǭjo}Mg"WrN0yiuJAwVg\šNUH- oX;$Ɍr~U nFKn5q).OA.zڑd R:m?1^ yvN&[6BwZnQr ˁk`iG$ /. eOG+{ȚxM}$Pi嚏ԐR|tTvosZᝫP#_tav;uՎ 5~!0CSޥT3fX<"CIAa?L*v_{/ᾤzN*1.yc>{I~0ցVMr) 7TkSe)`Q JzZVRx VP"sU{WUf;ycAٓq:l,kT)ſpOIL|_=iI*sVm?Rߦ K4!@2  .=מ)<{ڙ4",{.tdHf W_ \]L1=gVəI#yҗ b'=b,SZȟ/FѼ9) g u`x$E ޑ"? x@ѥDs;:DAicg!@$K01&re3d݊йT FijXAl9-{9v.X aA ?C%0e[ҳ"D#V]?qUY~aS*^D0jtk ZA$4\le93Jt#IDVso*@d `w!z=׮ȔPF Z]siJUD Ff? tzgX h6Ҭq̦UQ- meW*}g>yiϯO10S|Y2Cq=:eV7D}a;0~؇@-o.a idYesMMs8 ȸC(xjϼ<8ʎgT&oE9':h۸LjAWnjo[eb!G<LQ/mtu 08mY*[C 30sv-P8MݱAkn4lm#9D%&Y-?1@曕A(#`l+xo.qҕR<&# uahWWpRŶ]ƼA$ZՊ.kr2,eϳOԯUp\tՎ.voЋ~"嚊E6q(T$߁mi#˄xӳ#5wO]rG#ӕ|kPb;ڻB*lEȊ |N1(Ѳ*\LpϽ7֯ڝӇAMhռTж?!N#\K{8$`,%%K)N0mZ~N8s6$P}5le5TY>a2! 
T` 'x6qԒ#o)Gb~c4.swcݥzJaRNCwO_rPVNYQKPpAπZ4+x z'I>DiB>:qO-vT.|EM ! @fbzdmCt[r_[(w,;Ug'P .`CF4PdQxEs˓qC iK\} iTPtO2zMP ee!QeR}6 :~c*&_Md`1(}*۔$<)—7bID֌yp)Zhׅ"뽻 ]],0 -%ʋ;<COzv!o@Q46jDZI%(zU3ؑy#1˩(`1;]iP|MT`[m=NWSN _挲oi9J!!o"o1ുX'A*K{ Z?xOM5b[~iݍ9-V[~ ˞kϩ긬?}y uHI"rWCXXր7sGKG#Ko,B_wK6su;.\j'ce'VkxSS>g &MY'=t̢!Ī{[LǍ>RAb#iZf/HIC(Jkt۫ߣ5vp\_ǰD-WVuQVjqA4; 1z4 ڌ "u8?ݖW%U% @H &S) G 0I⮬Pw(S!֐j(kyrw#Yl"pmÚfy7 <6k.8cI23`Ol4U|˟K$IZBa5&I[m(t$ ]1yEk [*oZ:a^ Ow -d]V&*]Օ%Z),|b(Ljf.Lz'g[0) <:#ٷwLQA^rq 0\*S(Mp1ޞjTROD'&멩N3INl!12iRD$?}ϺF=ͨ4bhwϿ,nkgOԬvL]PBx61&Xε(T!0~Zn97$\fJH\fM˔̗^1YY,Nߑ)8vi_WgFCS` `O bvw+|_Pqe̗xrV@utZk!o|+?dNriuI˴I':!䮶5TC*I協-a:-\(257UO`uM}S1}mC4 4zDD>5ຟ + kmhNL7|"$q zBGД6.Fy'p޷S9Qaj448 NmI2 /ƳȦ*ݘ:' E]zXcBT57I_ V?!S&ބ?gT)֩'(_HÇ|yظ-QKLNV?^JŢ{R| Bvݘ64'd^(KԝDh8"#Zo4ܴ,aO|^;g4[Br<#7Hvc|X9N +Mc܏i_*;:3Yi4 0rc   -R.q'=$JEyU=X>gѭyvb@h}Zr˔sL*gS륅ËX)DzG̷mՁ?KtU<' *nttr>ITS6_`JǢVw<]t>Na7 !wo1JJYwԧI<ʪH/O8s,lຯ@o9&CE;LDNX Ň?m^^=W;l-ZL)_?"@lZn'tQ9/ݢ9jxi- N:}G_,/ ;%>Yf)߃b9" e`vFwqf61 R5藥E}m6Qn(+FC]DL#rҡ }G%{*:KYlb!!'_3fb#З|ڜ9M|ŤNK"P7͹- Z@cK?FqKl#g蛟muլ[9p3d];BdRd]esޤXiE24 T`U^)Vme K{+X,לOI%%?wpړ9t`ꋼvFOpcG B0\L`&d:wrQI umu{=5+[|i(\CP}Fj*dad)W<ǟC+S8\HY\DY(3 Uwrt%3"IY/w62-EҦ)=@W^Pmf"b jG﫟c5:s}z#( Zθ"=+f{Q(66JypAͮ/vgaMH/ I؜4T7Z0Fp™4}-^,MT'G"ໄU5ϲy܏ -ujf'x}Ęro@cd6rvrH D}mӦ1~`LvD{nQ>MN ˅?rܛ 1'-Bq! e(Tšf*gdzvc]¦wْeڭ[J9v&X'm?vjq tyY HX4K ^4\3GߧeaGZ"} n_VϸZ lwgdKG Gл6Ha-d-#3X(w g&83bls#x-LnK5m 6^$#nf+19R]%2 \3̃dk j޿/l}H{gUV@s|ȑ-F3@@B;э%"LmGաš^71 Ee#[ x[^Y_y%ؙx5ֳۤqa'ۢJNKmTPJBf) vE{) ȨSl%S ʊPNZTCv iȬ QJs?V7րg }}A3ׂ7kV :XP{קn;?ʥ}Ḃ bW@#a(voQ5fg 5#M&cՅ50ӚM#ou?-;{/G=:ֱw 83=xe`-I`^WdWLC9q#kB^;-@Z`Zj :r8^ﶞZrU$$ƒ~lQ1 W۱07pAhk߯#8dY~z^=y3ԥmk2{ MW4"!c@@+!+klHE.oyÌ )~\V[gY9Z8{'fSr>Nd|Q 6Nk? 
n(`,ez 8-ྃX4ؘ-63~">C~i> LY&y ^џ< .gp6*Fu&׫qzhgJ0(gu;%3Z3 ϴ)i()kӺF,[Syf׉bf8/ ncd.Vqbh wbY[aG*44Z-0(>,ŭ_qʮ~.-cn>뽅)tH2Q5n%"vdnz)!r4 b/SA~-D(2j"FY<HҪɨQoJNВ9s5O< lc`1ugRYث H5B!GQSY 1!̛,!+ kq|bC$g~sdD4#Cy1BQGq@GåŬ)K"R5 :Yǟ) ex_ȬIb`X|ng ] T'l +١$/#vqbM7$]NB>0M!FdY$UhTxGwpY_&P a#NW}S4<-Ԭ;RxQ%r/:/ شJILd/_[짇$o>1{*.=RtŞEyHKZZxOG=KnJE_|ZZ*ߔeߪJfø|űV$ ۽/w{h ЈiͿ?xKX4r8*XJ:J`:Fr+3O3F#4/ҺxƫCV &qAC;ٛg}8q>۞̯.p es+HM&9Өjv@ ܂q&+5hJRfBU*R~4ħbXJmPVh]7;Lat߰X$PD>Y e]my`5H sl|FLa;.V_A͛"C> kb/cC}^-bLhGW24Yipyv>_SlYކg論"Oy,BaSIYh/mm'3i/%2lodQ/#/:ܰ_i>uv*®;9 ɭ?$QR 2:&` ڿ7bj-kmM4SF^D/"s_}ӶꚏCqOBbS!kVdQ\6@At4VòNs$C|ԖPH3%#]6ȖQ,f?}utě;𬿟R5Fmc!^ׂ&Y|Q.J;°_iGJK}#QKC.B \y1 ^*%b}5]_9֏~Sn򅇺Ǔvw6GXkp} TZO !@RBBBX)>,Ya.%>#j+AM W| Lp8֜b^t6fx;y+K۴J8Z zl:1q`6o4XS Խ0;<ԕE):!3ma 簆]]ɩ#=2>f]Q+KH+;5vLM=k`,G"&Q>'ei`8\U`߅jkzu`c$^pU l9Ha\~a$͝KΐA|$^JK[u;`"-e{"|P\}6EV,&9lT×pI96Y#NBgA f'^ieT:)Ju[{BH NsOt4$<^͡ n[@fb' KRMAf^M9IJ'P;zqd+drѓ*+XO<1.W|+cDѲ*lW;/-7E;oTPF# PRyɔ c2#&)Gd QAPj.{@s܇zLd9FVRjَOBY~:#@܋\gyԹKdGa}@H)lڻt}2TWRRq! ԁK<6qY*e cݺ:yԁ`OD]bq;x2&g\,xU"I SO;| G#Zl@DrRt(xlִG,UVr[ҿmt㐐hi^E#jL>`#tWo֕!$R[v'>-ăW֎z,L@i&;A, vL׈,M *ҡy$ ~8UP~opweXF%=Huz!W&A $|(u-Y'OZa̼I%\Ijy.G~|>i:/֋xA a埤ra`aη@>6) /#*z}Hc1>A{.EJucjkVA09HQ uJ *\jpYg'uO6~b^KQ]§40+e1"i!?`>>v=icNQ &tݑ (7fX9jtяh/pX-qQхb-\`]z3Bռa5G5S.>n+u͌͞E|n&eݔi#W z ݀ytmЙpV{Ǜ֧a7U(1#:c `g< fӃx>I"-’WDV;"ᜣ~"ljaDPmU.d5A cdb-9B@HNo&_IgU2e~G [eOrH٥LPh.EA> ԫE6IP{'hK|L9TP% MP$oGP`Z!~q^ ::xL~ D.Qf_ĉUՆ*uKi FSH@5n)*Tϙ:tHcucV&mx#f<J%&nwO[ 3## wkQ '"|jG,H55KPmu9nٲW6N{I3N_W׻owjysYC ur8F͏ Q(BXN<Vi~V@Rra\<#xZ6F)QKc%q0i7Kw4p#ׯHߏu\ f1ֽ/gө)C 8.HK.%1]{QxZJn+%WW)-ˈwEqU>pxdG_lj3N͕oi["c|\ *4fG٤@̦;r0Fk\Z5*7){ˀt=uʩ9 ᇑ~oڽUoVc8ӎӵΖ4auu^p+5}M,VFEX@B:q1=5VN]bC&~5Rx߀9 z2F^ñڣ7fl?ÎaTĐz n^{7 1UIe+ q3x2ַ1Ä^nOveUr*|ưϠ曬\p$e !˥ѐCV;,Ÿr*5 |̌15eL0"T.xmS?ÆE{fWo]>]C.ӀݫeBIe@5~JmĆP:nʹ@"rE@:+M{7 nc\V$ O"$Ԁ("bdm lCޮ=Rˣp%ob:!3s293;D0YU$F%i]BP$E-ѶĕEM/_'^lOs6Wyb3vE~"^)!ߚ<-6t2ŋEl=47}WNQ k#`5Pm4gg*"Mh$BDaAf+Q BlB́81  *uxv"ҧF ,5[GVNQ~Fnfyԧ6Qj:Z:]LNJ1P]U3PB!(Uh׽v yP =2+/@mkzr&Jqk){m(DP Cq-B)XD:~eWr@|U*3,N.U!û㜪7O~}bA!>bKZxꈘ癫`]KG^:.@؄ 
fatJWܻD,EU"LB[k:{\j|gUIw[l'x^(qYd` [ʅI 3?dJ.+LMD+ʆկ`րSQX+!ѶG_ (뎺~Rs*C_NT`VV0*lfD*d]BZbȩPYhUǽA텽j=參50He+dzmA/|-,%Gy N-)-6Ng)o,5W 3Ǥ}("8B.Ql( PkS/%?;6w;vN1Mtʩ;heA/~~Mwk"W{Ya޾'aKڴa&d]BIf|wW_R]#-*;*`͕$Rn5 񦟠&D@8 ,~E{q9gt54YO"gzPUQ` Z!K@sCbLJ;$?88<&+dE< Thdש PƻF9{!5ɧ#i)xKJR<}f0>`~VJPP$e֙_GӸwD_ f%ʆ7do 1cAi~ 8 & 9<9"@6I6A`(.ɕN4ł?5qFL!쩬Ě93= /e8>|:oEͩhl±"uΟH,PBP?vw< xGXmZ8a1>* .&lo4:k` p@σEZGzyo9dGM59U7)'Lq_|W"arZ+>p'AcwrH+7x 1~V\JڇֺU>5TʝvoC11ޓJCbin3~\Kp%CYxIp{s6_CuoLlTI7r ÿ>vBڂ07 q&0m`l,O$B ##ki&⠫iP>eJf"xtB¤bJu'jř(eaBۣ)t ߹ouyF=i\c haA,Vڃ8s{AɀxBR+SDW\]:%\Qg{hN0zi(rGfWs /ֵ(F8L;;SJ5"dyX2}e @`Ιy2V(4ӶxN~[O~hmٞInqgLy ?N! i WM-秝9Yy6p("t(;t[Q+ravHWRANW^O-E|^֗,Zc@ZNjWtryX\󥛵x@..L{3ʜʼ:gtǒќӈ܆F8Tp.WA-LkfR:mjC:n3YZj诣FttLLlX@vC `EYlZch3>𪉚UiKޯ,Ϳ>FS1D^50y< m+9\9JB}M>?rPpnG6p.+'];MCJ K5zkUy*ntt#ʴ3_s|MQ|c4}E\픍 i<yVD Ƙx*\2fz-ӑ٪h?0"lk~$:2V5v[z7+DTQے.q{t| ^prwM-\Sǹ<GpedB'1j?^>^9Mh!-IyȌqh`gcSA;|NuEP8دh{ܻNc:b e|%L8K ^!Y3s- ۸9%|W2n T#b) RwkVX SSnNYXUeTw((,;d<؉fQbY!3:)yn'TBqerC>}>˚]Z*i-"zj073ØͼR-[e:/)"Qu68;g/~j?~{Wr7[CԔ& 3G]6(ÏhѲĜsbmF]OxKn M +&}^a⍌$\Lmn3Q,%0?Jxwo%D{2;- $#a e V9=jFw1 "$cPPm"lO/=+@ Oك@q<CE|?E4F  X ׂHV3II%V ]9e\?S$Y7_Do:cG\u1aLbl)Jy0qI&%q {[†>֓[-,\:/o&.&! R4nčCPVBO:~Wm$ G`mfd96J"ٱ^v1vdbs5v_X:4wuj9DGIφny l~/]bm)%eɄu@uD, 6 D\[ԇ$.3;N@9N-5ue IL7ekHjDȚ2H9zq^SRDx5{&>7X^mV95ҬՓ$#&)4A=|Z}) ߅nz"i, 8X(! <.j.CDWY,APқYJZCT HRV?;H!KD;6lAmVzyP-ݻAD D'iP:TҜ$BbLFv~K(3CJ.(gM9!zq!Typ$^Wuzo͜\xd}W#9.4>iR1Xx \`N=xԴ%iG/d.G7)#^EJO6"/ˣAaDw<|k<++hHL4}u"41I6H-V0'V].?#4[vJEjR.X$ ߬ߑL{p3B"I /N_\JS-ޛWHc+)&T6\2%":/-^3 5eqE, ^V4 lela)"gV}7*BHPއ8om">~_wSONi) cXޘ-t4qrxB94Q$G]Z nD `S`5'͜ꙝy%#HzQ|^ ʎp Vѕ1q)9lc1؆Yv+]#]jk!ļhBCy7uw޳3&?8 xB[L/ I?1fFrUa $S8 4uSq]oI^= s7 oz9w^+%.$ LOim,~U^5ɈC>qcU-kr1q]둱-i s4ȴí%$w˫AۚD/jvsZB{Z8 2JﺚCZ5~+oocAt Q{ЪJ+1O M`c|/@{;d!/`GNFN?^$9fPw^eoq%UzV9=o&&-%:J'ϒk!޹9\(=gh¦-Hf5 ;_(=}͉Ęŗ;,,8-ɥ)q ȏ;x-\ֆ˴jl.^Xi-;ah qL-dx5/iKLPnk/lSZqH3(;^Vޞק?@]aum!`i„!B^FPA!R/XR}zӝ( E6r#+M_M?_ɠc{詗ͺPǻNZ ^s. 
d+4d# S8dWoL1~_h,?bk{O姢TYz{sRM#c+c^TYUF.o4 D#WihFZHF!fJxCy8f b ;kV!ܤ}z&\F-]m%H%$X}Cmb;#k՞^Uq;%`+z֣<}dxhq0JO,jSd0a{P_)˵g CPH=FUm0+ص y\;Cqڰ0,fyLƊ۟F I\ 3}GF'Ew ,[fյQ#-}<6ջ\Y0 \N6YS@g8ZkR~8(=&/V2! ^:Q1z޿y /5_"SF~E'w !|"|1"ݖ- 6{C{<(jm:xҋHmqS|#Up>LdWң3wPz^Tf_k.h]پ}߿KE)u^EEzECf4[f~||n+a@B\Oz|H"u҄jo r d!m_ Nx\a_J(~ۅnx׊ u}x)_һIIQ65zny-[;嚶{wb^]?kKdVJNx[@)luzpv-9Y/6[nIT &>Oa^2NJȖ+ >xp:l9ƜBf%i {#v?,Ftec %~(4b -XkDIXC·V=a^?ގJUKiF}ĮN9IH[ ilQuՓد*3em=܀LM'y+A_$r}Q3l; O6L) g[GL~/Se*4=`dX `!)<fxgܢ`MCE[$9v*).:`IS 3o\wVg=S/*.@g/q/!V⯝K:7CA"ήmԡ<WKc h@GmV4`շkD`~!fl wBƗG9OGǯ?0L)v\wA[I7WdyGM%0?K2gݻM2l?wJ!=v|pBQiF=@nJqe#wX3nIZBtG Iy fp-}/-BPG˛U`u!<T7lWE<¹%$d3x'S78"ӞI]Պ>4Cqh&4Z#6DNR6&—^Vs K\Es $ESUQ[8gH~s"No7Yjt"PU[K.OѺ~%B4yA/@B6b.'\}k]\xDd074o4!ޭm^~%s\eRo>)Qv>l #f9|{^5w֠1#C~x}8a|1G=N 4~(j2Ken#0Am r9Ja~HpRW RAnJEsy//w'VEd%@i!9uUv5RToLKI.58K VfD(*h|Ψ =Nq[h0)!{JCIB: -6X6K >,bf]$~^dʎc]bZ 0Gb,aw:w_9JZ|3Qfj2): xEogo/TF򇄊Kf}^48[b_oϖ7R*kp/}^Oa(q_UWuK48'| #)b.l.F[D0VXFxwD8$GAt@T3҇+Ibm>1űjnc`3>>N^Im>'m>|riQU1Y·pWDZOL>K =ͮ^*5.T(Ńew iwfJA_z:۵|QR9G%F Yȹ1&ƤMC g&O H7mø؝wcl(*1ª=uR D-\;)P&@ʐbEtsxJq>- R$brG̙ ބ~;@LEY}<>Dx# ëoI=c06`C-\\:z{="e,*PMl5很`>}i=Z Ӻ)/!h6k.lgvOx=00g00g[ ;QlezxԸ]\sRRc(}&*DxĖh՞ޭU<ă6dZBq}7zcA[3^;:5zpۡNRA&D : HQGEE%IBcS%ƼϹ3#:o;{-~AYZqjl d[oՇFi] (qh5i_Q7LbhAWZ NW[mP#!ќQ`:9YjpURt vݙ&t8m,x^){\Ne!@A),ZIcMqUWf4RR[Ï %O$66 ygvzIuq#"qSr u?xj\Jx`Ou{kgŅ3/{M048՞DCa"> to 0VUD-7 yOɏwK a[ؚ%q2,J ,;&l&PH xp/#Q.Znf0Qj`D5]mY{=Sk]j k* 贪SxՑ=N+J53vh/}Iy$|LJ=hkv$[J{J4I %ڷdɶ*Bz2]?3 iMOCP45\qr!. 
HLSR`Bs C`0)Ou`t8C4bnj0 ߡ5 of`WE}~}ej]xB]`Z+kikAQ,{i1xyEPaIYݼkV,B}ɔxsE߷ ВCg~N`"2NݶN$A<3_5r!S<1nh-sa>߀8Ta`Rv)B6 j- w ;>0nEcb#m!l6"z*#{_yXx c獤s%3Ti-RƍGzt&1.klb|ŒH1axLhA멺7e,&5TK7 bl )ycC6(N2l/paYvH2Moeн PKj$59~?2+|Vv?Vg=% f\˰kfe'Zرs)k`Ѕ_nzSf/rOZ_H܁/5a{BRZɅ) GsY ^Z1ʥ+dP!{<^vճא?/igakHS˘fsuB-R*ؿ !q(}PZEavC fEBGC@ހQSS aonzѪW VX_Zҟ%OԠѩ[8ؙ ..u/=D(NwM9`!hѳXb?ɮfpqrwv ,S6C<$T\wi[qLtؐnsщuB|]ȗ;e Lq) -ˈ#= Ya6~Gr9M&R6Zu,:>L[J)ȢiJ.U"+}<++*l|"f'^d;mSO^[F^ n|;,Tڎ{4^?c9MHmSd}v¯FI3ZΓ/4̀e=/jo aSkODM,5&r]w3z5L/kjaaT2qQJXc8YLoVzMG,$纷ښv /PԜfs+5hU{Q~+n幸L块ܜ_ (bOJ4jDWƊHS(M/ H1(O\ly݅pԶ57ÎIɃZ9$`+C: >zuiMb e|K.8_i%)j0!҈GHg1!wb[`N{?0]>NX!bt?[_ I!6 n_<_|ۇ >,l<9x5aQϱd!˝ $<ʕNfFdeS= 0FѸ y蓼dLłJR΁U?xƨ[pt.fa^p=g};ۛuBL l2맹o)E~X]sc{i1W'+0;|sf$#(e.,>2؀ v턋h=2 ڑMM8+?0*6 M*i)o6w˃[ZLhKRd`Wcjy|ks}pM~dſ'G@F>L7p챵d\t|N)CR+w& n+:5qӎi\o݉2Nܦ8 _ASgӶB2<˺!(FBg,)Z=Xjr ޢr5,4]%<>STK㣭Y!)y *II $> -7rգvT|Cey')Ib߆VMbj3 8v}OǙRKs'iQWmKDHVeJ˞!J̀F^B&s;pP hVV DGUl@ʁ%Ɉ} |޾.JFEZi'X|!KN(tqh;LqI2pó2B36% ^ߕaJg/BP7x.Of''nj;aD~Oa%;Qyo[ ʁV7KI/Ղ\Ȅ؎u2ŋJCG) R[+m?]+~ .O%!ؓq.*Vs3ܬ ʼ=Cjg#T(~wu(4[Dwk"l;IĨ#mk-lhP#G|b;npT z0LK؂*T%vb 'V.S3Wtt<& sku` @6Ll^f;0B|)-ԳLl_XP)G'P,Bs['v'gN*;8kGLvAel6[P/,2\U$Q"t;_fMǤYSeoT?c JSfu2 39;5^@}%"3%'DFxn_!y]X{kf#l,W~'_@Ъ4¼%}!Ɇ:7_f֬YN #An6q#. 
c-u)cJoORY̘{P⸾CD9ԢY>8]>cW$"J&Zs$Fxvh/,L"pa.d<}$j!$ür<-/V\I<#TL4Ou;ZU\/ez*JB3˶jZٶPra(4^*(LmXٺʻ gK4(\wPe TžI]2H96zҁ ]K !ǎջ1M Kr(0G9.J *~[ss!M'9=ʕ%m0H 1|*rN@NwE/myh,!ҵIw{VPhp?u~"rYnNzr؜U ʜSۨ\ZWt]% GM-,t5P оŁ#n ۳"u(p;uXڙ>\pÎ(EY/%]ĵ $} q~M-nFP61ץ \HO;89B(7yݼI14ݽ{bm/lDZ=W/C)v7,),w~ ^3rz R|MW=V7@IcڍINb}L_̼̂L+j-aOsr$9bYqyiT+7og$\*RŝGm~}YGZzbΔW!j/q!e{j۫3> )d7ja.9gtzL ]@ԗ$4/sBeE<(F@ZmO''*$Tgg}cav;lf@9 ksNuz~|UAd_ǻW*0x"]N&ӽumAӹEo+rWn׬ݷ$xPB~۔Ȣ|,M(E: ʃ$+Te Kҏ)F2 yu K)~i k=^IM;gXU%9&Bj\&Optc/;"Yl} UN臢$Бs].~&^PZ+]LdNow4mZG+{Oapew6PS'w{TytAXKއ87)Wp+@sZAJrtXg 5c"[!ߖZbGVWy w@Umlvi |> 4 B{f*,@6*<eٚO1?u, [J9zmM(#IY?T*Z,^_ H/=&14R >p?*&JAcEFm+nh+_G7@68YUN5Lv  ェ2mz ,z rz6O!Ao FI?ՅQϴobAel߮x ^7oX_ѬQic&~*bwlV=pErD@'; ݩRkA'l5Q's [$Q"^9uAS'\-E9}7m8Sߍmʘ)s1_dqg(Me5l rh%\`\Q9zSd($UU'l1O7YLt;wnܟvS0[T $V#w9vzs3$\{&TSl(e)&)<~Ghؕ>N@y#4`p܄n~'oUF(Щ\tH ie"q)E/G܃iQȿN.ݲE[7iă:懵^0!c\RR.k ;wGHcÍmgJ (e n*:FONYv,$nِ)|;OCӿz92!pnqЂ!5WʍSe_*o {k eH͞&4]$-8DŢwtA\řRLJ=.|_ӖS.; ݉n;0bdOu>L+YegBgP07;e@'ԍMR $*0oE([4U} 1BFɌ`SjaPUGFq Ɍ pjQAyHeaM#@&nL =a|XA^G/ ȡJNCQ]F' pB\%7 L;g0@?El;aR7!|g2􀨍5#VcC ~l4mr P 3'cZD?gՊQm+72г?6qTYcIV/;˗*cknWlaӄPY6哤,Vs'V m LeDOT<;6tD3 Sd/=}[lDݥ?- p=UhYOP$4KD}$V}k `ށݎ0#ä/ ׼@6'\XI?ъ89Wkfga4ԇgMvu[жU2٤ ƃkuLbUp)%ucf 6( qϘ6fẃeeJUc\ÛwcC,¶eMΝ2%p7,iH|&FP05Ep !H4$6z&/NQx-!(̼XZ6"Z0sԤ[A fx,.qh Bz!Hƴjv虐PRQ2#2y+&4U0ۻT$vqgeprtژgJYb ~v* uCBWmzR)e)X_dsNDT4T&+G6#?nN`0*Y(!qdr֍TȏLW`~^|QTinm@k*fȟ@JfB [L?ѷM񊰀&Dv[HTLwgHu& 'ZM9գ:P|U9]'K9[5ꨁ ̥eqqOUc՘WYX#XV, rAZy0.bMB,`2qI;o&q~x?)p cŘءtTX$wiε y꯾]p `z6#2*> CkQi}8A)AռVcO۽mT'Kf7^TuowT9JK@:Cp.ѳ7Hf/A f7pNLe[GsM#k=N]qp am @CϼD+U=gGޠZhXɿj;e,P {S3CŦr,@YjXlqK!4 )}}Ք5ԡc=R-Toǒ3<;!h LIȕ1VnG4#$]JYp3%n i\20`6"3  bϭ8MyXl9u?(ɴS\ڎeY`6bF1dQy9 @튳' ld#nu)D-bϴ&^vjGC2iସ2t_[>$o~A#L ZEnZlIGE \:/[_ rC1<8"+02G JWznB=gL-2 GTTT\_zY5;KK}nƛxWf$l4K3>/jA_f&:qime5yWG.^I*ja!꼠]7iTQ8o/y5ř' Z$dT2&mbx g<'Gb2"̠A|AD?A)_X}\-̼ZiI9B= TObˠZ7c5`Ѿq糐r:n>}jN?h{p#͸wҔ%3>ɛT.\Dw+sإ4s2ijV,a},309dbە_]6vMfc(#*V=?>fӏUx:75p8+MxQvL Cq 0ޙ .:bPsW#n쑯kLiFd!x=Rl_s5HU)wz7lH3-1g,М@/Ѣiv;+2yo2x +7 Y!9ՌyIQv/sb'DWo@a?xH|N\nF ]5v c^)n5Gv-% 
̊LK"l HhcoTLr&zZ,)D ys sF iZ$⊐:zYz?TDaEREMu_0tXPBiwwM'&%&|beXw~ Akht`l%]'o1#x&S|zYS&vu>kLiʭf"R/IB~X8=r\"˗xP ܁ka}8X 3ֻ\~7ȚTф"p~yo!̔1%АgJ-]Xqdۘ$5H{,[׬mHu?:AҠȂ]]yf8H'40 \MԝYK \"`烍PA2 1OfaA_y|m਒W,b>JMNi¼%y>EUIC+p+c;)mS8y{u[tZI*l\ig1xrol MS>?"t4KscMxr8:啅OgZnk:7׷Q7v% PږF?, xҞ. 3M:ƒbgQ[RP'1VޔlTͳ9^bOTtL&k[Ix^?.wTP%ϱ;>%z!;s_m_t_ J_YA jAӈQva2܏SG6:g5}AR9bm* y4Tͷ=.b~f_p4$5C}m1@[_/5Ĭ΅G#-T3JqoA`#a'PF#Q䃿/!4Z淆=r!m| +E./~oZŪ+,ᛚ Ljg(lΔZ@UPeQszlf@mtg)NG/g =iF{y1ɘla 9W#k 򄄄 `e-͏O2"aB90DNK3) A%q,_pײIZE첐*囑P!6S6sҟ6Cb*`uJNhWҾ)LAݡ¡l~ !xa[ I3:q) Rz{Zj_43htfA+ׁRbP*+nI@M0Ks2itobAF B$3tyY[ -:vۿ8l@YeYp)l<~.98S0^'Wﺪ}j=Z$ܨTN`ӝ9-?]`JHUֲ;yo$+~`=߼%J&c!=o-xlg׼P4rqH\DXMaƓ29 Vl'p>}^3F a1ylEҘ`dyِ֋RU[(*`45gw1lB_ػL}@N9)}]ҳi 7S*4L՚Fj3e]p;6z>Cƀ=XŮlΗ+ J~5rj*-۶)Dfuu> "[+# :V Z?,h -xFο@rn̚]X*u5 b}1=!r6I/0^bgRL-ݬO3:^zQ mǨ^&ʡL*tkLOfP4c̑cgɱbc̣V.sn_;$ȲV;쩉%&: W?T ܠKb^췦Ӊ9 %j,gȤbpl_Qg$PfVo-wܢ <)n\"LԶEuag \.T4ŲR/e޼Z޾Jc ( Mj騏^Z*s½2o^d^%P|Ѣ2&e{⭗v#ݙ{״2HڢIs`ݙm]Z?jP-0_B6A*rmX֣ v['e!qàD2}dۼ;#nԲF@kW cga*\zOX>Iex޲Ff]~Mȩu$:ﮞEF Lz\7V_5Tdm[dGv'kxIKŚ .~!7x˪$&V ҩ?E$| 9 M)Q:X؆tFȂ@Jy etI+N_@WI;'N^{c&#土5`EP_&r7-DY>BhV/SZH= }f[y5;*`HmGX(гMUĜ:/pQ@ɉuYla#?G~zT1ڎR5|*̫ hI9^pk̋sP]  勫IO|Sj{ŞAiN#]|rDqgz}Nɏ C8yS9N.$ Ήoď$p_n7S5S_/͂RVm$8\N/Jhم¸ZjKę3j׻?inhͩ(]>8RҶ ~p/ېxK$xm|}eplni[fJJg2,&2 xThen ,ln*}V?9!_YBeRzwFrnN`~|7" %թX'lLj!x< NWθ-%3z/>xrK9r:Qwvy] us/nո\N3w_c&*E驋-l/Nl< Rchv6 R u}~j` N[&O:My.dg=|e't'uG%d(kZqn[Wz2:p ߙ\Rь?w[qN#wμ*ݪt<9_arm.L4A`Km2s3v'4 ^'u[]y9It>nȅ n hB7NTY$? 
Ţ-4JNb [.O7 xe,$X7=2ޱ0O ^9n4Negh|c2PKc)YH juCFtK19CGy}ޚ/a0BGsH<8#xģ+uII^퐸E^^Onsʸ_'\9~,YsH0N\Z>/7XR^Ӹ AQ-8vt2_,_s SWg܉N̩:හ)=hBF\\͌N ưEv){S[ϧmH,n3|+lchd/H^F꒎"d- 8J?Z/z \^ۃEg>yf1}J{]{2̝/ !h<A>?G_FwmnŊ2} "k2>NH`AৌI>u?aFi)JVtyya?5=j-?@_8LqAZ_ ^zy ($z _1p#͡aO\Aِqp?2 f:(˶ҊđasGW"[ⅷ q3{[n.S-\u'uė(yD2v:,Q&KN&Pv pYkƦ+VcE٤rY)?$B+CP:5Ao'1Oij91.* ftTwQ؍ƻZr4Û6jyMK^;cۛlf3s å+y2>Z'}lA6r>΀2zd ߴPjp@-| OƙԮ{PG̕F7Z%⑈ҡw/E[-cOAOdl'ekG'1X& Ynyv* p2$sc\2'Hۤj{PT--흺-vH1+18p#ˬ{f—g-fA;6K=:e:FJo-ykW*?.SEeNS/)v)Q^Z+򳇷#O ]1- !Ykz.ɦC ¯9Ǡw['J E)@(ɂj}+:Bna2Iu܎Mo9H3ЛYH`G|283bJ&ӣF1%2bν!{xNʐ(TV cg2Y”CmG[Bcl5sc~y7WFK@bzK-}u ~3qb 쪱bBBזW+&|S"`Hj{tƶYHHJ3ff^%ŻP :`#;i%Qq~Im$}l鬭YqBfVQ̄Fb6idœ6uݠͳi$`> }!8=r!_8|E/t:Vnqu`b_uhem|?QXSKyJw7s2;$7(_؜p-[E%MJ>j?s?jE+<Yz1B-EwV60rL1 ae p̽EH˻ ym m/J_5zaE$q܄@&djⷷZ?iJ_N`_ 5Z[@T/`!"?]:mt 0!Ƹ;$M mL Zcn GR7TP\IVX@-`-p*|ǡ?E|=G}:;)t<([vnaLp G61VAo[Ȼm6 @aA`2ULnlyn[@dCgTlsXʋD]>:-HܒHp [m!$]\ ?U7)6l;c![g^J+"a0Ovq,Jp+0rwQ׆f L0ع }#oIqXdMokJWc]lN8Bӎ>e;%[<146d묹({I.~_xIT搘mm˒(պR_.YC&Zc̕ӌx$ aM;ltJ~ v2#?TYaB *'"5XǪg;AZJ?tXs8~ckڬYno.7zӫ:ǩīA~Ѣiv>[jL꽜S$íS0_LFl1*!uUwuA=&;2+޹|M҈BF@P L/M$tw}8,DrRv2M9b0g};q\jꆁ8G(Q?UBe}!a_YšgsLDݒMe";JڨmLO ޓ U 0lF@BOiAlaIvK[1]vUW 7gղܥs P_ku*IM7O*8p`r^$kjNYܛDڷLwP꽀`#1 mMB\9.v9s@}J(q.;r ox]9.a<_U#~Kr.*4ssd~&e#w#|?i)}I;$AUL 3432Q5ab⦾4\F8#k=RP8 Zث>㉬Y,nbKW;ĊQZKaϞW%|'ge< )ra1u/gvQ.vJ`I*8`9PV8ʃ;l[R;1pل+֟!`r#?zIvϸP(3xf U&, .tlhf YazWVO\t&W-6 {1xjzvJ']o- D Jngs8rE) y+%9J!R[7ͧuٟy%Їb\'UMTCs0{o ,݂*b>jK[ }Y/#YX${*.EgH q d{Q?&6yhkTdPZ0X2i*ţ1OLjaK]Ž%*2nkU^dVS9NobyQ\k⤇-XgTQի|=jwu/+<쓶G>mJlr?LɎV)'j$/_pVkgt"EeMR #3yW#-JŠJAh&!l0G`#P;ly8K#- _Cp;ًQ$3$`3]k '["0a3zR{ sk)Mg[;e0_q{-/'0 Q!Gy1Ą48gƻӫG<֊ГpX|LTX4&hL,(; ,'xU-!>0 YZspatstat/data/japanesepines.rda0000644000176000001440000000115512252324046016375 0ustar ripleyusersuT=LQ~wg R[l/#RQ\{{ښFk)!11w rr"6Xo}߆<&{37fحROAj@^E~ÏsYK~N"%ÊG3$1:(#qN,n}:IԠ+5ʋr%~.ˇʜҘk:O6A~lߞݖ~EUʮ o#XGlHQm ϸ-|<8G#|uMz מ7لx.g|! 
2i|>yB.mdW }?vN7GIEk^NX鶸,,QN2UfuU[10GdV4.mvk\Vm^h<!߼j3ۺ,{P}Ym#GwO5I#䠜̶Oq)(WQRrZ?CӫyB@C?wCR~ ;nסΐa8:fZnZC!_~OH_`:FN+ U(COОMxlh);3h./usZүθ/P{f>XwTX6 X܀lehUZj8RrcxQHMGzAuppAg"Arx7"m HM HrwQ*LYu5$=01Haz޹*x=j_ ZHu{ {\iyHf޷0=4RsZoOH~bL&mfR~ʇWHlyH?$͎C?@ʿUu+ n2w"q(Tt!԰/!]~}WS[,Df]HG HD?&{\; 8H? uHkVsxcHge;nm5"Ec @iQq=v?iyg||q4 AڍM6BZl;ZEVfHշ'_RuNz!4~L:q&;%}Z/[COW:Yieǩ_kk7_GzbECcX 9)M5C.KA=51Ҕ dFZ *bzOGkYO5ŏ0:dmݘ{~pxv{!..7wp2d{酳~##ߺ9|Iyyl6>`z2IjR-V/U>AO.=OzOu]d{ϿB&l=]:'ۯ^X; FujG[c־&V> A=X1e|])lC`̇yָ`";_[>D6OmFaI+P噳5ȶ3v3vevf ӯ7*DJCY=z5+mG;2Cdh߁kQmfݼi]Af,L3Wo20#NO?Y6={l&&3 ݆&;Aɇ,(Uolc/ou޷A_Bk'o~,ﹲ: ƗIظQ.B3* _H=v1f1OY' =_L?zv)Gf5=νD.C2cFoo{v_=GkcD2sȜ՗;Rmsi%dɔ[-x=;x>ׄ wL@azgd]vZ$z6fCH1_zH5С3 |Wj\52hUf2<]h`ې̔jbd`(feـF(7 㴫{}BdyxfvN5gڟ=8}6 3?Nr}BXӥ݅P{!>NShh:F~cHC%6)T o[6 f(_qe| C&ts92Mb>kv{қ7]&-[3ɰӾ??L6m7m[mg`#ݚmlrl{$k#{Fvߘ'1۾.{ned~f]ְyU}^l [w a}=ZaGKĞg}>˪ֱ׳meϓ:H|%JV>8\?lj~Xm[ PV`I[/־l$/Vl{;fǧD}^u%A||~~bvpmg[IGv%_;m7e TJ}OKDI|s%Ƨk {+GyP>ѻk +={=mw1{[?"~/wkV/ğH{Ovv'vtcmx'~ALy ?BsfSe#%v <$~1=?_fb(!ؙglWfޮS%}HDl?މ]Y*lؓ׉IG$a >'#& y؋ؾ7:lĞ}$xH?!&^G{{8N-;qܐ|H쓼NMBGH/H3!Dq!?H=%1r?ٚI~^oz%qCpP~u$G伂'7OH蟍#Re'7/X~fK/9o"uWR?@ ikWq;$` qIO=Np%ik {3ɓNϬ7Uy8Y=~FKpzIEm(o2nOI=A8EEN?ȸUױ}q\~d H\O,?b}İrH$?:\>a0ɺMW2Hbߥ->.KbG|x"v&v"q9uOp/O_HIpC\; Ro3+S"K8{??ω<2ؑVBOl|,v {}ȺWR?q)w~ ?3_Kk_Cy${D>H #IK'z";%{D#z%$ 2ox&ORu$^I ~t#~,Hܑ8$~'ԓbNYIYI!&z?&/I~#lM;q|@AKr2ph8Ƿlj~ |M9D,yO/1N2/!y?H &8#zq}r?B<1Or伌؋/8?9OGݪ!DGlN8Hؕay}RqKÈ$"rI/d%Gppѷdz\sb/C<'$Ix\D.Wo$K6+QrEⓌ 7gq@'=v&IF[=.#߈Owd|$~ߐ~=(/`HH}Ə^$%ΐN?"ϥ=Xzk>/<:Û'zeH?߰Ã?^|{6ώǝ$g?}q "QS2yevrAO%ʃ?ܣ^D$I/}ru,Hs$lY.z\g=ϾW,{K⃍3r,7b$.q!q@V!?`qyݗI݋常^$~IܳK~8&v&d_b~b$;ѿK?H$NݶSO &vbC'7߄$~J'~K8q'qK5S&!I{:{~H}rcOؙsb}?$I8FO_$8\ ד;~8ǎK?Ol{l97 2{ %BG''@ }H苭FYS">'#6Mۊ;Nq#~NݒH\v%OeG1G #!IDD'ďg$'I#%#u*2 K[Opm{=?sI'q^z<8%f؟I "J ѿwOy;b?VΟlI]NՏXC ""~Aɓݲ:qH]IKD؟+YW1le D$%%Lrxv]%D[󂪄f#!<%~YOIHxHC&\67:$Ax@p9OI#u,؛ppO'$y=QwIűokq'q&?"&u8)?^%؃WNn-V=Nd]sI '3OCD_^GI}W߉pvKZb\D[:'~\ZBDO'z$~O'@ %8Hg'Ịy7%|b{<ϝ}?F_qc'ľ^X;9I&d\ĿH}IA!7.BV3m: 2O#< wDܷBḄnK3IλybߗI>> 䃈|$G gG+? 
ğO"' XG,#ep28b XG,+ep2b\ XW,+e2xb< XO,'e2b| X_,/e2b| X@,C !e2b X@,C(!e2bB XP,C(!e2Db" XH,C$!e2,2,2,2,2,2,2,2,2,2, Çӏ#Gȥy#~ЏBQD?Ri#T*m6JAFPi#TJPi*Cq4ơ8TJPi\*Kq4.ƥҸTJRi\*K4ƣxTJQi<*G4ƧTJSi|*O4>ƧTJPi*M@ 4&TJPiB*MH 4!&҄TJRiB*MH4&DTJQi"*MD4fIYRiT%fIYRiT%fIQp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e CY¡,Pp(K8%e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,Rp)K%\.e KY¥,R(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e GY£,Q(Kx%<e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S)K%|>e OY§,S(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eD@Y",P(K%eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R)K%B!eDHY",R(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"eDDY",Q(KD%"e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%Òs]jUEH?D~,(ǝȏywMaϳ_xx-;.*c\'+pǙؗ?L~tʔn*{%e|}iY}.+G~4ʖmȏ=}YmOaȏi/uY~ ?ҵ?Kb=$?7]GGs\lW_◭ɗ~|E^&/q{[R2yڗ}^=>K@_j"rؗ_fc8DAu?y|Y|M^'/?c/oC^&[evK^fJ+~Y|يe]Ins|y[\3=-'D@\~yDy7_vtݼjf6!ϊC5?}Kˈʞ ;v ̤9y[i!3=gL K^j%o\?|rgS`.{ *xP0mc}˄pF+3KC1Fi/w5WAdh7Şn;>ӑw΃S n 7óSM}?{c DoQʑW nա!!_J~L}0xǩV h_zA!`ٖVP(yYAitҾ/t/tLP[! 
e_m֍ >z n{[= ŚmԧJsP8ݥ ^orxJfeˇe#>,ۮ.^Gi~'d w-[J&_~En߂!0jdMY2BF^%h+c,$^K&\\bkS jNl ojlns1m oB){g-\Y%RwCb}_n/y4˟Pg ;HOCm|M:aUT;T&CUO |‡wOUyB!'^CZѢݺ!v#JO'|j>+,)a(ipqݒ>ՀO nXXiG' q'f.X^hXiw=CVCjWOўyNҨe|왧+=Z~d{"佘oC+ +:#oyB>2A"54s+'~ABħ//B¨!m][ FadHv󦑐mU b`3B!bJ^U᫘ H [0La$&:~BO>Y{^K3JrJO= ks27=_3.a4<ۤi:]˵\D׻}ыS\|} /+盳{,8*.TY:3Wn`(mHJN>8Wx+[)nȱQQPn'ssO)9(=|X#|҉xjIAHJ*'2v,Qڞϗˍ$*kq*ˮd$ʠ;Nvz cY!R]DJJ ^NWSr,p .w(ck)~`In ч'ǢֹPi9۷˅}s4;1LǮBK c3|ʉ݅r"c$A+8(ɘ#mHF~Rq; n{Bq}}C!co3 ~e_cء%6ܑǧ3 8.ZGE@̅FL|;Z"O2pyG vAgcBNw\E1z(3 @ӥk5)A +2Gwӟϙt@.]}O±b/X3~Zq?g L% _L%=:rGsrhʺn8fƨ@}zkKV1~!T%oIQFL-c)zÞBa.WQO2T:e3Μ<.S'먟S ]XUl 7s~Eoo~WG1~^~rퟸ^a/Y_qW/+?`=]n´WD__~\C_CW̏ VC {G Zvcr8.!?zg~厅i2z/xJ6w w~ &fMH,>8X:;b')gFnY <2|A2^W񓋷^3edJG?U%~i8,ݞW9r+,dƖ(:<@>q\>{?<؆xz  { 8,q#vͮ\^k ϭq^(q)njjFGyzQ<XeM~L8\=xlQj\9YYRBqDպ2r{Cq 1kiͦCuJ_nLf̋E8KVɴWfz _#s,g焾7AY% {cUl˓`U8yc eiܓ%gq^/}*_iXZ~F~Y<<޲uJ/Ŝ{wrnW#&/:%bV^Gp>) ) bB/+M*)2Ů+L\JAn{RG#7uy|)#G 7yc.?7v H$ i"|N /|-~+#t2*Ÿ,7+ 簕1_8?ecU+Y<(QGrq!qUy]fwe៼\ϊMKEL>wxj]__l NĉpIq"~W)|fy3G{c˳{ָ..-=~&wg4ՙJKm+q ηU-p^+9_Uϔ!:wxۭJ(lCܰ߇߼? \Ưv@#@qa~ENP:{|ﷸ*pݒr*̷׹סp\,{e~y33(?M E #R/]}+uӋ'#y+&A}\rw>x;;GMz(X둼I۱x\oƌUi!,Wlgl׽㺿_Q0WjsŜq~y_*kzbN߶3 Ux,{/G\؃|qcQ_.sl`,uMe*WggzKD?J}XCTeyʰnզMdGz\aJAVB NI1u,4n:m S7Ɲpn+8w bKo<;NV$I^(t?TuӮ"bۻkȵJy[I}%ZFL^髶(]m^+aޙ<#wƄ 9Bۑ_nFvG{ncfmָg6٭M"lLmorZk(~"E]Ql.4_vqyV7'w'V%,+GJ%*[JFta!_Hy˪mdn;83ab? 
u^0jWGVʑT$_Qgwuxk HHHK"-q\2xi{f;B>l_͎߼nj2ınZ5 >{U$[c7CF"s`=$Iܶ*쀓utc\v/ZYcԫ=_oGoZU yNiG:WK^~ gtJ<5ƥ^/BLP),7_4o"U&8Q|w eZ GY[ÎbU _n|7"l+Sʐ4kkf-?*q]{GqImTG("á˝:ii˕?UWyPT<.ݮ;f7_̱ĵ`[~Bx^1qQ Oh2\}7YOZDZ]O:mבTnZBwA'X9#|<ϋYOO\ 8d 4.w2!Rp(/ד܈gB?O6?Ųc'[ ޑ1 0qoW3dJ?*9/m":ziܯP߭1K}}AIt8!e;n?)Eae|?7(o,1KjC 8Uz%KUk-xz`ne%/) m2Iœh[Zea`FcsI5|/{0[\&M\=#v0;O0r*/b Wα\uZ̍.L1d NoǤ|Q0(۩g=W]~㟝bIʧfE2~'c*gX G3$6kh\d(WH\{%=Gzy9Ҋ]8Á󐔋>C,s[7]ˍBbidh?FN:[1||_y/ dBqa8_L}Cq=uv>Y[3b svG.z#?kͬ|.w _?Ji6gr1`ꤲy9E<2>s3O^bu{m*Ҭȹꐜq8~g,z*}R);x9/qJop OU󛂎Kj?ϟS1Ǿ۝롓[<2sIߕai}̧0W~s~wpf~KZ]UCW!&N*ċxPb$w ^Ϗ\4m`e)8nb^ .iD9oZ5y;;oZ̛J)?qż6_k|룬Ȅ?q9WۅZ|̨p(9 sZW׊Gq:z2'~8?&ɇ'ڍ҄ĹO03k2ń+w uG̐y磮򑸿iڰ~~tLZ,-ru gN1|sn?>s0~Y~v?suEwѶ Fnȼ/{lU}o0㻹k7k4Ża~y 0-#9OQ_'F?Qɝ1:_c._]罟y/P&"pv-H;/JPI^n?V.=q4}oSye-'DZ>.N܉G9ו\O~Mq}FR鰻=Za?*pST|p%?=eL\y+qlW(L*.w4_s([)Bv8O4`,W^^;ط'q޻Y.uqnb%+~8ſ4xE-܏Xx)Rބ4[llPfO|>.v`QkMq* Szg<\]7wMXA_1c5 gyi> uR]8)Z`?dr }9ҳ /53>ޗOyp挷fx,|16n}Cոy)_.^ݎٔK?γc.x3bӍ^8.T~^rkj?n_V?^y/=~p>vKul 1g5k{7j=[Od9ByۑOk#׭ҟ<|~3'{55'^Oy?E76Y܏u#p#r2O})/sSS\CQG k|,77g~y`||\>0r԰?:qyGYx]%%B<~_~`|>dh3?4zZƅw8U.[|U/ /\שf7zlME=^Y3j+2n-Ki:|u/zbxw<2߲ȴ29r<_~yquj4u%^ vDfsP?o4O87>@lQ#R>l1RN?RR|yEyV9?받A$_r1;5tm7BKFpecfRT9m?掕uX7AڽgBKNcNB.νې?|k^b(r]_*/d~QԴ8z-ܦE=mV8T* ]~H9vYЬodK]j$WW 6CӋNNǡ]'r¬< iK~t̃rc7z_E["]N|cS|qۛ pw:3fɭڛױɷܯ{AIr]ozBgq+n ,8`!ղ3 b͂3M>{!㐬z> YsM}EK"9Cu7 3O.Eݿ]搦jd> 'tw !}QW[8ώt}:I9tJF/_!]K=K#=)n^@zgn @:Vu#U#p{HO}sHāګO{ɧ@[%fP˹}%'_$8_)>Wdq({ H6o6=(Jwvs[d6K ֤ $] e3}A|PsZ[Y]2dtnw65Rɷi:>?id!]T*3gpQA3[3A2hݥЦTU"ߠ$8ruY~0>k4E9OF];@ WhE8 qG#ns'-~ɯ2s`"$%z@P:~bSfc䊃V!' 
,TOGCZg,Y04h>\dͧxG{gjL#J7M {A,ͯ{7yi9:p4,v^%G}Jkڇ5ͶaʱP9=Y3ޔ_iF 5 )}aB8/akYPkCLִU٨ֽە YyePW1M$4g?[6 }I?xHz"O uziA]m |>F+Awk, ޏ<{{EJB26y9~[Hw& }ΧHgʥiދ7Kӑ!HoEufH3+Hc^Iܰxrw)Rl+1>5$I]{}kZ2'蒂fpcPHMnQI12+ 5U, `ׅ~A굷ߏ*MrГ}Cc4A׋)-33e !VoY~5\^2O6RwR:IT )`xNdCcׂkgjBUWTOy}g!xscŊӡ䈉\8~#sFBW0IHFS=Tt?;v{_gH$O h9 3R]f aĸ琲(JqRT_;%N[]/կ谅|ʼךCm.di]T=T(_>pHaM 8<94d}[T[fܙٗPqNcCFY;e )MݷNGozoFO.iG MTs}ԏ Rɝ47)BZI!G:6&~}(217UOd_8|zð]~Ƕe%tIK>drx>2ZI~ 6L7 Y}O]t IUv]9:gB(E/o볽Au%gƜcוCjtyrF(rP+H~hs/ƶZC!?7CJҵZSk (ɼ1 NQ ,=x - _3BrmHoF1EU>_WO6ddwR3= M8O~$Gh?KlVsΞ9{`֛Ǚ읒~$#"עVBzE z#Gۿލ e?>=`3yj'Hk>nD+5!]e_Gӑ5*kνkO{ːN崉EHwHoaQoW@ DžHäG;&#Sk8$֏dUSc ]bŽ }WKCdtl mJ.Y &6vApf4~R5^ӾE/. ˇCrњ~K9 j4vx<ս)z I?į;yԹH wHzH%@ H!WqT4p$h]*{u3(\R{ rM8‡;7$B"}׿s%ng\`W QB;hF#e{5e#>7uHj]ÎAVÿv&C'CUCȾ9ζ(50mT쨝խ;d!-DzVd,N7 )>YkI@H!& z 07YpzT4l XrpׇîB‰KGfWt7yP+6"hdRIkp?8>w).+/)T#-,zMm|^M5ܟ5 ZV` \ 96p1ό?lu;65Aktt~,_-]ZƍʃjJɐSV$\6]Ὓ( ʞ]m~( Qx)ܑT/N}<i~TU|eu)7_x7}RH7f-3vqHȞZwa>[ɉ4l8Ǻ )HR=/ ="׻T$zB L!oɛPu z9v$gfh0t˞56kY:F Y7쪽FF(_.h(s_ G?ɯ@Jv?ێTw\i i433D7wDJsϙ">!S&MK{AT_(*Zrpo6dp}v"(,h(tL t&9ם Dw_{Agi˃Hfṃ',O1ӧ+gƺANӁ{BR֍uƗ=5bBءN2oK 5!nËS\gSKPQVU(s;4޼趗9c- 4$Ӿ*ri Vs_Hwɷڡ}_\@E.0AZ/@:zM!j:e!_، eJ!c.F=W^L4\z|uoߒѧ 2*_ 8|~s)'u1Ꞵ/Vݶ ݣW"Po)^wܹHkDͥ!M+řUůocu1HuM%iznv$ƃOف'! O*@Q;ʆ:je~64T)[}jn:ݶ@FR[Y"Wַ xAe(o:k.HI 5f9em~A-8$YpruTzh|ǰ`׳+CYvG ԰}R,zfS(k?oaU?jFM(r6X1`L{c>(>ٮ 3'ow暡/!Rd΢H(Ҭt|4pˢ䏦@HnZ4|YIp]w f;WBz93!vlM-}>PpǠĂ{PgeuC>SEqQ_:CM3w\Dݺ>ᐖleBc4;fwG_bdP 3Ld #ٺHo!=J#nߴ[g^ e(ӧ>Q1ޚo1*v\ 48ƶ &2z)᚞O2<45U,ڂTee}DwZ79j]8z|`]j ru[On=vƭ~SL /CM_+ YFAkGc _^̣|^A-o\hC9#cR2Ki}z匂byo't/ӷ~Hj #V6~_' Ow

C[!y~sh%h胊1TLm-2fǧاjwC #5,nw\Fɜѯ 6X8 jR>l; 9mS.AlB!gT^||\w o?j{B돘irǯp+(]P%:%t֏a+B63uAR{յnH%`QpY6Rew(O$|I+zZ;06ԕ>2[ %Gk I&dqN=~q =6Z@(Zݑ#UN#5M3Z#N 48{lOҔ;H-6|R9~ӦHL=6fTK8$4z($ƻtI>`}7h^447;.45T>6wUP(෢-%+cOX/҃;۬ U.-H(t(}dzh| K]w_dR_$SWCދ?s[z"5i'׆t[' #z"k#F_:uEQ:'9Ǝkin翑CõY2pᛴ)jm'@ 'yzCwC٧) _+,A`WG z ? 96$.r -nr 972C$|~)[Am,ٸq"hmu{aty:FKNI]fS{@Eb̳5|a7  tAmJ_O3qlW8|l~+;1twK //Kk  9h&$슠8`oA}G@^1#!;d#[Ɵ%} fˆ@-6@Γm6PSn]+Z-W&DҖ'暘T j.H̅#!5ѿY"ͺ[_"]k^l~66iD#Bv3"Ιw;Bާ~8Hاv" v}ɍ]҉T_OO|unNV>ntt>tx3_ZA!sL@G' AC:ܯ0ڶW {5ׅ١BȲ?J.뻤{_/i&q>[ZSӇ'Q G$_~BYƕi/z\nd'a+֥~WDq̳8 y̼$5R?o)Hu^wsy{J Hs 9r)Ź/H~nۨ%L^ wtnu9\%ݿd$mn;2ujfÔuNhHߩDrPx1R{yvHc#yI=+k26~V;m y  7Cslj}-Տ 1/D7fdһj\jЫ}>~ظ|!|?yHs(kە'TgW,|wLeHV; ytOf={ڡ}Hͽ{L}Kbb[R}/L3Ҍ{αF:S뺿rFzFJO91yt2mm_(iڕ؀z_j㹏^?} nL?!Gs4l<,:R+g08+Mw vE//~ 7. 5K!ʯbƞ{Ah+xae[ܙ5ޟ0۪o!?*#uys6OZ:y~8R\ K EVR GSHo7nxByqX/}w7Rֵ`ՔYMR~Hųχ##j k/_&4R@\] DvI?ՐU!Lcg".2y~0+Ȥ5w!c-"raiP cN H?ꪆv3{H'#a.E UE;&^u*ׇmAӸw|uοfZujvD6x[<t)CCd=՗p‡=J :Ӷ>ݻwƱfu.7B٬:O!H#ʶ/:k62 .QPN=3pAQۻnSGvZp ~t;b( =5#I<STޓj9BeLOذҠqމ[[+[cQn &&|,oiEKᩭ{́nm3dِ~^m%y[2q&5_ժ_?3-157V%:e  ٓj[BFFƫPt}#Y ˂KvܫCy6H>;`吽HQE h9{On ThBT;OyolЧ# ;Nc㷛ݠى։k}k]7&/ܘ6@ l'T8P~vv+8"H)]c7T6dpwkȠrkHjo4Y Yk\tɘ֚# JeّÑywF #2"87)uH֨" woDDMd>^y<ڞ/5͟z:rF\z선^B_2~dYW3J-R ; or^ 2z#T,yWk4fv.RwE,~H1ƗJH%+^;A)xi$lU[r$&]?<{,}7FN36^p'nF-*8Z6~} I(.,w :c)Bkq{Mpk點Hʍm;- ;[Y5>N_+7lj!/^ѯB!1V)ÁagKH |3bӤmOh*%eN\ % >)F$sc}.ڏߠuĮQfW gf/v"u]ӤJvϐ7"EWO$?٢9HVqB*Ieߗ+4 mm TOpj>/z+(_K~o e6qj1R4gz8HT0Y#,~em'(33Lb*÷]OC Aڄmjbbk ̈́^}!%c>gBNnK,b7Z4-}=>%tʚ["^iGun݉pCOD6LU*] 7zv2GGzܕ?F|Z6rvyrR21eu jV2hD?4n Hm׵!}>X awo)*SZr^o1 q > A>1LJǔ3W1P?KPU5hpsP(Z`ez2_~;q(mQK>FA!g&Ԕ8IjBȬsޏN{c!jn _O {حc'XMiZ2ɐV=|sBcd;$n,I0"E Z^7;?xӭ89ݮ^Pe@]۠:LP~o\G_ loZtzRmPq9IJԧj$51GUw葈~VП֗8tZ }&BR9侐/|*mMRsO@e΋Sz]HJ^yxW$7<4dj} w 1R'\‘8E3wI}``{T!H%{#sxgH=7VvÂ;!=e~;#mل>Ͽl"^۾G@3nHg\YH39Hiְ uOⷥ3/C2[O /Cy} /@jiե9MP>i!4VuXBȕڰa~Hz':nFtLNC_@!n]?x.;hTw)z:aڋPtmO߽$sHpD 5l 2,z0eTTovT.~8));piHmKw~UCo5!yߴwokRؿyNmWg~o($r>:Vѽs+ %{#Lg3u~ 9187 V h;no\yrU폖QV _as.37>.0M<μC oEJ"5힣f#qHg]蔣^j? 
!5yM}"s''"-aGB/Bs] KtGM= tŵu#+7,򃖴mj5s@ NT Su0#o=JRP? v᪇nBʭ;Cv渖bdS{P|<%<-9:I?5TgustӸ䉔5U )*F?kX "61o\Ǧи]Lk[^)d;58 /,he)8?G-4Cj@}$=w!P);/~.ݞlz6\`R59rV$7iGL[c 4|Ԗjn&7D%^P{ن-P:wȜ tO;BBCшWʉ~%-|wo G1;1H߸dsiPDf{LJ<};{%p߶GF3gUVD G#ݪ/ז~EX olq@>Mwi]>꽆tιho.y;2~eaȨR!x#㾍&]EݵB]y{3Ahse{y-Շ HŁߐ΍I&v[I%-~H全Hd _I0Rco1(Hi`H3sH6wCX熤߿5= ]Cwr'fh9eƏ'_0O1E2eCPx>e O({(dHY?φϷ= C@:r98#7,:8#[HM99V(_CH=2<2z1c!3/XY|#zAJsԩ?Eg+3 GV]n g~ar6x{.HY^˓aiJ'>;m}:Djޅ }A 7%Rs=wMO Vq[@ehS{%:M9pH-`xt$м欛P(06hϚ8S8\]2j*ɉP~Fqh<}}bЙr =);txoAVFBG}Yt3O 1y^MA]҅#NW>AK|1wEZ%BZ\y4k۶c 3W$|CZ/9H+:Ҏk#ҙݯkH3}c j۰VHnM*1Y>߻˳:2 6zq<n ^u0Nlx3bZR4/L9:75 /sMl߃W N\% nAýygPrWi1Tyߗr+Bś~p>xdaP0`Sh3%lɈV$])h$RӣRZ/>}{gйi׺8bXی)F ;zp]2ƒ-#>:s\[jkl9?gTAۀ6|u& 8mG׆ʩFR<"trfZ{tVUf(4 *w#4 MnmDS^Ŏ _oRB͟M޾rl" ҃->A݊_y#RǤP*ɞ 9)to1p 4vVo5 RUC”k~y,3?tOp%4uXZwEզx$CgLbtJs<]{U=R. ߐ: [!i*BG/O6Cשpi54(X> շ|[8z1{,1Y6\0Șw^)P ú6L5 8(1sPThXv3' Tsg P[ | lf$-{M|_i.9!#ycgR8`i#GqHe{qtT5FfTrf)[V6''28&u2 Y`uTTyZ2mA8v~ͧ\؆&4tVҵvu?U{ٌD-;"[/B*{QC6nGgtEH^pf8שVqrS h?0/^g TO`PN\d~梪={_"u5 it@? uil>yٷkDy|<)")Zi/\Sr$4եʳ"5ISWNAJ:dW /xtԻg-VImϥs>1ί[ Fƾ +iGWCfy|{?uR^iTHVL|?.w B~gPǡ]sװ;LBUЎHy~1#T?.iO(7\8_)aZ R.=)lR Dҝ_O@ߪL2C,*!$:v>jIPPo3'PgQ{h[`.8pjd${i@PhH~آCH_ش)I#?W6.ƞ;gTBlk G]*ҫϥ@wՃTeqZ~O|m>.jM+9J2m#fQdisRȨB(О%$0-WzBʚ!tM>l䣃@C4LmYփ̸L=Soפgn >pda]W2NvxSs2_= ?N)r3WPcsi"4~  W5_Οa6YZ#?Û/-U?9[;\m/d&tM73ɬ鉔y<LnϬBZ.-霿M/ ~t"i_v4^G_98vu|{RtR؝e'1=U ޯN_/9Pe~Z~u(4m!TVϦ!uWZf9wgHq˧AֵT/G ]_j_w: ztG3@R]gB<\PFLaKR)sg!u.i:41j]H=s!Հ{܈Ϩ>g{ %χE <ñڻFEcKz ]Oܞ΃6 EFCMކL޼2~/m?m8$FWq^;IY #@G'G~nIAoRPuyH-PqP-m< /:u8 9W*YAury34e$(<px` jy$ut$rQ3 oyOfUxO~_# / HyC}@ћZB@֗W~f=} IbX2x4[=B ={@Y־Ii(^6pN?2+}Gԃz{HmH~m;'Hn,J_,Q/(AGؾ/=ݓ t;BTp1լP^L~e=6/LOtNHE2662sQ/@hd (D@z6=O*HLŒqC 9795H[5IeH7kcGZ*F߷A3i<Fx[n3LSui% zԟ#:Ҳ"6ݧ< "W>s]cMO/ru]dHr$x%}Ӥ">_+]Ӂ^s^Cc97  eU-=7b3kg!OZ]=$Ck$BX0]+HDH3ZOi;":yU1jn{+A49 o%פS,ϡϜ- ߭" ũC_aJ3';#AVN9@\ |2|EPqrmμRwt!Ph.. 
{'F诤lsOAm{*MVNash-;B֯;L\V~ȝob]Lg=|tqd<\s ΐ`"I؄ x*̪6A{ߒ߆ٻLxo ㏦}5,S Pd/kYw-oail:y+ SBoH{~d<+uwty3+bI&T`s_#udC%?mb#ja0nM|67B5ˏʐ=yy rSR idBhX¨n0v*B!%MlUΒUE'W~&Lʋv#Ǭ Xcuv ,{{M <,ۄh84nyC>g_>&LO1$m"M_j rTE -.Tdz&=rRsB^7ۛv"M#1fyFB%8@~MF } 䍗Eߢ}5R|FEFx-LbiryS2CL7,M.<&AJ>ț"ky$X!Ȓfeҍd!唫~񰡢A7市 0Qrt1*pѠ{Nj$%d~3 Փv9נX/i" ~ W]&*jVFg,9NsL!Oڋ UqdФL8!l(߿H֥c@K5PB{ :t W?]a%>g/V굠{ڧ}I B+:=Z 53f.h*šU$=o_nȒl; #ZX'ueX |D/A}6*[)x zVaV Cȵ) FmWw}&y[`*N\aƄ_E}wpڞoş@vItdtP\Wfh5.-٠@!0e^f5턵*23 x2cC de+;ύtT`8PJkOci,'Gt`Lti_N0fvjN5GBuq\hSzBtaA#5IiSy$눬,ȵ O|j{UQY;Y)+))LXL/= Xs,Hg0Ӟⴃ~YD8osr_ȁNzCAOx)3^4E 4w6aO_;CLP7f^yèH͢6̻?LKe }M NlC shtsCO h_^'`}E"Խ%r[-*ܵ>g@MZeHPg"4#n. z*EN7\\2sd:nmZG=,U dvBHN6+Ojd6$j`T`)iw=L0  8EuDiDZ8 ?>4@m_"A^E2|'NPv򼁻"q_32 ;`q]pTyi}? *#fu3{3y\v<2k٭w5u]H9M{֌Vԃ͓N} ZnjF!o(H=ܚ{ &5%m9n#;uW\mD-~޷w<9r-BMlq#_ms3d/]bRHO)'//=ŠϻsQp$Rǭ5*ו`\﫫CϏ@hƋ&"C[Ifw ɝ Y)3Cw2C7'!sPYڌ݄^uv )~H22#ZMI2oڵ s1-3Lv zU"RɓHѲm`>bհ>͇;@YY?PΩ */%`QuVɪ5L98e/'TDZo4^>bK2 H%dzypm@t.͗`tS}h:wP4K<:9͠gdR6Le%uhxS>5\0d1? qu?RNv% %%8-5d (—`^bxF?F鿛(F"O? W(l߶= aDuGϕe^,٭VYן9dL-2ꗄêg3`.rL$t AWm0:\"- #3[Wz`xgb0NqyK/}a9|<%;oR OֲOMcw-ti-ghDY#ˢgȺ0B\y#'2W[2XD}gFİz x<5F =+Flyr .W$kqlQwbz'ns`@}n}8Tjr^{%<{3>G\kN0N}2 u5B/BUyC0|<=RM֕,4#G]sܾ4svYY }ǢBn51}bxJH b,V~o_*k L%?&~|̾ WZ 0>AxkyoTSDXzaI"ͨ"g/t\B<|x H8C̗Ő{噣[LJ)!yBB~.(7;;oM48E;8oR6 N얹ԇ nBQZH=挜K2j<^+g dW<"̏뽑?BR3H{DXkzYD }|ރsͿSFszO' k̋5!QTVݏ4${H#^4azAv55i[vB ?^3X8F/\ ].LBً~v0)12 40-ҿT|ICɒ7V?0ux|_ h)::/֝,^2гn@H%Y'"7)(<"fd!FÆ Tڤӷ0e䪜,ZLT -Z[AUX7i-ĊvZVOc8+0 㼺~Bĺ4 [JrpyG| 2`ݯHJ4D 2>y+PㆬsVS+C܊* zBym f_TINرjPa?Z,ϩ<ՂAe. 
TLt^`dYkFhU:K/xny P?+HEzŔǰw>:,- 4 & +t}fͳt3~ߩ s0U{17,TabPи|6gLuũA̍{G9Xi(ǖNdSmp(5hj3SK/:W(EE4;bNe#WP2o )"|/lΉ N3`|њcW/&Q8Q-Cu7>?v$;Xg0釅ζjq 1,ФQxժga>%L7;X;ImxllLVyY 9e)JA:Er9z *lh#]_I;AB m[OHw,CŠNоu|^gX++Dt`qɖ8Zebp111F-cH= S+gw9 Wwj Lu<|Ʀn̠gid{ ju)z"mus అ)#'mQO>#[%/~,flG]amL2-i\:r^G c4=#K7.Enś eҦ`cB+mv0}B2/l'$cm}(ykCn#?Ugܤ缀 rrv}XFAA&28)~pD%y[yBJd7C.S0ΏIҏ^1p;$d[2`fF`A#'.ܻ$Cbհ%Z{laɅ.W~ Z!Dz/*Hɲkxs >J0soep#fOUXc;7*nˆ{:>ht7͐bo 6ڊܹn>XfM[fCSۑِPvؐn8Êj 2,tVӡ-rjnV(6$VTl-[<1n )ɗx ԩ$ 3 is.5&$|OG,&(V*ڏ 6W[{7ns= OD BCgCR̛#<hƸ1LDT-OyZHnz/ y(+~WwCEc> o#o)|>fIc~!A Hfo9=oCmpegxV$ m>]yNG`ʞXҹ5 I]!9Emd{5*DԘ!4Hr1 dL"'~dz)s֪n<'~|a@qKz8dd:+dN2J;fw??h,XœBzXf*7~~fICf/ڑ>/)A!X R?CNxba8K3ɳoʅ3YC5@VMIg7g`m ̤Ք.݀]:1nǙ[UVaW] 9U<~'!k,; *\fXCM< 57_Rw=dBfG{" M`I=\ nN/>zZbW84Wɯ=Y`X?` 9K C9) Jb2Ul@_? Xzd &8W}.AWllbI$O4`M0561|m/YAOk!P2Ѫl!WVHP͒RR̠([y1WNڕ0plUI?~ҦVbs%MV0iz@KP>} \5ĠtC6V?_?a$,<>6i:W$[Rl2H;maBo"֫=OK@ |0|g, M3kPml9`Bd>C۟lN@b8]-E^m>KdQl + eۯV%T~TWNͅ kCF׳Udz92˩K)@SVJ".mn K?n팁kaxi;AI$>sG ܻsey"3}y?" waj|Ő'ٍOhO)F iiU:Ϝe[ΐD')3ZП=tRO HTfp-&yXhNKG:B>g=*Cab`uݻ{.b3$EmTЅM</R 7S4[; v6=|d+.p)yjRR//7}<س4K_ dyz.^(2}뫆ǕN[̛7|&ݏ$xi*b2/E'xߴWR[M-p hI<ն;nʐ'8=Ig5* p6D{ݵ7O"BGGP"Đrre"bOlţs/!۠B}u"K>_ +ȓw0'/r =,٬fR`2Ix#/>=hvM]/) #?܅*1茜dJD$e ODt '!SM11w9r0I2W3M??`yK|6hٙ2Bi!kh'. %#Z<6umWȫUri,,y%xljd X->Ua0>j -w~P i02GX̬Jj ?#ϧoiLwu"A3?;Y c)E0s4U@%|TJB`n?-a5ey/Dz;L|9 R%ܻn흴tz HF_c&gᴼZC8 mZnMiPnȚN9ٍ߲0]@,|Xt]I;".ZY=8jX=A[A^d&bfߵu)X޷yO`Ah4F>lmdK:v?7>߄:TYǡph :0f`wu-^պ7ip(d6]|Ʃ4|BǷF^iG!>wY˖نfG=mGŧ)*BYm>5Ⱥ"cϚU G1FIbmn_X{؎&,9šD_iQcf'mjK9@Zr1Tg A]7kEcae|%#$LB+ȔzP2D%,ʦjX<wWe۷Ӷ,,@(*V&dztkS_b.Kt2'`nmS@Y~e1Apgɶ@VX[=} ҟ3 HSUd>_y ]d?X* W&H>~Mx꜎Ŕ**0{.&c?LA n772RԻ._RCDnM4M+VNI +7?n9qݏzk&/hUF0?, 0qea zϧ{Czaϱ 2[F \z %`&1MXO3.'#p¿Y_W!MYsŲ롍EEGPpy?0If-~6&1vMɔ⳹|pd 9w*U]ELҤZc] 5ܑwNWT#s`u_\,PShPgr(&P&&羨&ݿ)C#k`BV ;W4̹h=ƿ>(Z@Y4: o)N 3u}4 gvjn'U7@o˞L3z/|m 4^Q>*&|yNM^MU"nxXUF13uHo<Oؤ7@ VG%nמfJkDN:6TIQиw/'%ƯMX? 
otlOyL!˗Flk '_w~"{1 4hc"45(*J Q筩b0\?qS~+n<ֶ0"٬ϞdUjMC鐚^ł|KyG?xw:3$<}jP^*R Af{#ϳk\)f5C{4W5eMz+6hpMJt!*){#Lm@a]dmqEL(y8-K{`j'[((øD& h7 35.8H' ]$ou|MRa;4:#;|!7~ڈUդdg5H{ ="+O#OlҳE  YLfQ{G e!6F9ɧ*l ^ jE3w0aJt1](_,ҩHٙL85\}u?oni 9ҿLX-&ja'2UqJ'Z}[}wfIsX2Є^=ziU(wߺo*:&0Ŵ9;aӵ0Ix6u] DsǷBOM]0|y"\/_x*~tyq 29On@ M;q%@/ LWל9NDoǾd%X8^u@BGrȨm2"z,ȡUdBg@I-**kHg5IUe G0u/tL{&|qURQ|]F6)j*aj/_ ae6oF[¤uV)r\*.r"OQը-`twV&ړ`=gcx3]ĨzfvYm ,HWÆ5\£{ET~]8+`BvBp+2!l!,\fPbf,AkvbKAƙ sG풰5Vb ǵzwg]c蚽F>Q%x͍r$[u;ӏV BYA}ڛJ%O?n5]ȰmNg*G2'L1H L"u6-eWÊ09f|L3}~גwO|)Alu04p_)/u`RI]<,Hh\gȃ?$l80=RԅZi-0,Ks3?[`^5X]W:uRO^ᆌg7eS*չ`nlg?Ty'C>K3W0||UG`/N)|6XB_)]B'zw/?G{|fYk01;KC}OQ)om}\:/\M@ZvȺv6 ;c}2<*HBk,` XMs1`*Όk}NxRF j?>z!*fHP{|OڬiʁaaX#Zh#"'$l}{0p2l4~'8ػ/Lk'tge?@xxRb0pRze~/?N=HjP}z ѷ_l!0'7\@o SS}$t[A-P˄Hz;ⲐuD 7z'oA߅ß*3vZU_al.[h/ ^OZn[gs$5ZQEA@~=$ӿzY2Oۄ חڐ`VU ng-3!s }YMr=0`f; e3[dӄ bOe8cYK"+*2Uk}l,G#O<ꔐs#s+:/AK;H1-,1 !*F0vfwqG=_nd=Q.kT0RղƎ< 01< A;n !W*,Exi{fa!h'?S -3zPz0b;d.XՓi XztiBv3/_Y2W*Jё=֓1V]mdS&Y;v ȁ )jVc)d_^}ׯ'*<{Ahrs6[`}%NV YU=M" Kff#wG/tۊCR4zX{uqS+HN~G@n z~if]c} Y3ɺśg테9 ٍwd4Z\ۈ? 
,V H#CֳH~.4E5?A_WnoU@u[uNB~jy퍼%P@T=</[߻$xfmBd1SP ,JA'ʿC{V Ѳ2á;mQ$A{V!1Ǎ̀jl |g3}*D.<s=~X ZE׆GWdV6$2%B "iQ]*=5/]V|!0or,43#'0ݭtސi34WJlG4^zl_vjwȮ;2@''܌8o`/e{X6tUI*X,`ŧ!fJPe` S,͌BBm!]lnp087Tmu"8i昔ڌ,s'URS'R˿W D-(B1Z7{p lK,F~fwy%PC}'!Ƕmʷ9Og&RM*:ף=Ll=HN>-瘅KMŐd٢V=ӟ?EGߢB`fm &iW a 4_6>'^¿ώp$Aj@=1M|r7#t gs`Xܜtf]飶d kPS.#" ®] .t8E^y8a6tz<۳ۡhNs3mJ^|$a'$VY@YPW_Ke[z 룰Q,dt-G2PF dZZNY/E2}􅻃=~h/NxCkC6w ᫇.CnkӑC9_]}f}//綽G2_[~nAΉכ^v~ (=AgFjB 2"v4r_i[B#ȑ޸dːU⒄?/K%"iWHaގddPPib_wX bnvNKAJ92;hV ;iHǚ>II13?EM[B̠vY䬰'9\%cmkaincNlD︉ n_a>T1"W9r!Ɏ7?+őJ|H:2WvB߃bCΠܫ#kgd=dҝAFZ)8x͕ǰh6PI#._@A=^Pu(ޖq&9:?eq} ρ%/'>#]H}S#?2/2wGG+m~r1[~mYLX ryGDJkgR>ZGJ7NȻpf2`nGVo|+׵sVȬ:S ~;Ioa#`jV,G Le5XMA΃TȻ-\HwEUY=-Z@X#gџ fqcv;@_ 0ugX*,?IxON!Lz$ߐ>判.0 *,ǩ0=Y#LleY&kʐĠ!uBS,b+ax@*pm9Wɏ c Np{|}Ґ^;dzKH`RN텍Q1X ,x {o:tq&0V޻G1whFUk2;jPrZ$BĥՄ7L]Bus$26ԑ~ &ǭ`E="3.Q-irH`VQX'^3|י{lw;AShA+uqt9 #n^~!TY󰤳ݞIw׏!YhчqYCヌmj!% ^.y&kx5)E |[9=vA>FI O_ V?Ԑ~V!8{RKd,jܝ,:YV] /蚍t"]@KӇo,k/~üBo3L 7y_s 'vjV K*'m\s^ρ9/iP͍+'M+>^ 3?FHgh^?ku<,a87p-LI>)+c#Y{IZ< :7|X9жg{\MTCq;2#{% \=%OũЃFn5bUrO-t n~{x卟 JȊMXLT;,HqL y$ 2ߺVև;M^}yt_3 \Jl%xU?y=W9* zhJ/ %A_OUMaT}kX\!)|2׊fr۟G"_I.SȷmteFʳϝZǰd\HBVj_~!3r{F*2.%gŨ b ˻+cLzn(M{%<+nmu^YoGKRmf6UV A)0=v_M/sbZׇ@h\*#QICEKO!F RyӏFيO541nLCH]e5ld96H!'ΝbvG^bxa! yĎ+z夜 %yԮIZfYz(o!-+ߡ׼, TIC۟ P$aD87!|>X}[#~<.Lwlե~c{ﲗ/^_BV3@ǡšq@U2nƝ ;ٷC˪nd}8ַ*ݿWrHгvs8L ez*+F^8eSM yZ 4(Px- 2+g!CP"$];!Xv9pf6k:COE/ *d_,_}4}LON=@cC3`V=3R^tr6q[5ҥ^gjLՑSjd3rN+HE2غY/$_|X,;MHt@rڑmOyIdr9)EG qzX u>=}(0áPc&aO!*ߨC%A$J2eɋJ`шtQah}n#vEBIg˺>h2|S; }d(; st[Vy^t3 5zczR]= 9UV1V)u.$8'! 
n[S2a!/16OXkg&ApVj$m|Tc}?:G !Gx" lO +q0=ch[+& ,o@Xo 󬝃i:rV#,uNG¾1)vܤvH] w>c"WU"K|t䙻thgRzKEÑ|2ŗi-GfcHx00YFR?3&ZW>TSPf7^!%_\" Bs-ЮX;_{+yBmL3YOh<[)L"F .e2҅iƈ!/['K㦪.05]0E/ slw"UTZ3.%Cf:Pr%Mi159:MXϋ~F_Ӄl-z)Lw`~|o9L*Àcmj`ѮBYz.Dx^ȁv@ϛ( vwh.{k~q ~avw͂ga׳ÒNHgy&r*2Pg8n"%^ԉ-YOOhN~@S w< ldx/y굠:OTˣj9@כCFA܄PG= IqYO*-$"6ן+6˛ctDkߙ fvwyS(.cy!WU)2:"ԉZdc5Y~Sqc2G,Sl큌k.;"s䷛v7PM䬎iN!ۛa>o]"Kַ#ʍu@U||1nVz&\ `6(^]w wp){Y 2bNOڢ]?"YvpXWL}+t!o4$1DTj3ȡ#r)08:swy)fy:/ܔ {B\pjS\cQE%`"߭T́fXzU( Ku@(f[x+,"`t tw 2IcGF;"IYOH :߭.=8Ż*y@{ȕz#϶vKLۄmY``[j˲ym(A.O}Pzd(z^UȐU m{.,ֹا@,D.w\F .E%Ո_!Hd*I6pxeyv"*Lno&*YX>XC7H';uVn t/?.#>nd'=wZ<'xB!pJ oSC_Od㶡ίٛdWa7176e !2ݢSC6O=ն+eܥB&w<0jy"0wp}{֪=' jAMDTAa"-q4ՁL;X(cLD(/,v0q Y0<CoSݡVdKBTx/6̳X̀( ^ tNo!}gݘ99,, _a>*Lgr<%A^录uԎ<0#zҚK#I!#uR#|¬S2hCsK?tVHwqPO^],C+wHAj]EfX-wz3 $aK뾵zh4+dfI8 9/oe mrֺɻhPfϝ ρ  Z~gKNvX^RC]I5 6= ns#'x&7J4_ A#-`+4fx̠Ca_rUODVFEN6#Evmd+r>[__ wIU附ԫ~0½'ָƶi h|"Z%'!wsWv?DpYE}fZxeA7+gJh[md<>s&,46!t<8)?zċő(侵%l-d7^!ܣHwg \t- 409к.DyBA@,P깂&E+D_~HZp(D!J?ñ71)Cr t+=Md5~QC ? 1 ,)}K{`idn0n.u^F,+Py4R]ɁE[ _ 9O$&x?tH5@ƞIZ' g#mJNXÝ-+K4xE_^Fֳ~8;흋QrK\R>!)y26}ҷsa&;(ܻ}Ȫ<\G^F"_-ŒLCT)q^7a1zV2+HK*جKB#ȵeOpޣK ?qPդs0( 3F҆"#{dx {<_;A8Z{3 &0|Q| 0-؅tP]0^/N^%,u"z 0;jA5CP* .na?G U>j L9_,JER\F2I`zn+L2j `8D羢2 7`aѳ6zKr" fB@v ?*@p;#sk8M@M*tՑ_65z)}Ob m]\!otN+v:#gx͹Ndw>=$ki F#>[w6ddj3 ῍TZ$ *ڮ;~bKk}*Fa`LuA t]Pbe ]bmRk8g kq1+pe"L. 4y`.[>- $7J!q,g7[pA!*t?Z_Z=,6?ق=;%[kl}ӗ*uX8D*5i6wDRbY\,Yݝ\”xD& d(Cj@hQ!{'4\AxM0GjXp+Q)H>8T~ h|F.)Oo~-D>=sSQkRx!שTYK9_dv|9uEz:*d;F麔Wc+/2"ÓغOsa,nl׳q14d.J!GquAud-?2Lo[WpTmL @jhcVΖ~EW⎬Sg3$Hq>hSá cաzFd3WE7ÐïcIubPIaEeouK!;|mKMwOy&s `Xs0}F.&0+aoOD[uh^JJ1O!Xk^wEs[|lĜ{{jc`6;RNX+c$fK+1wTƁGqOi d 4L ~@Bsfk<,0== SlFZC5/PyƧ?59 }i<Rxӳ'ACb;4 +˽?ԡdx'D,R=fm'L0ɂ|k ߅m*~Lq1gRf#{X4qB?9"=[D;zfzXlJ0xyy7g" ۔ lc!P^y£Gj$h`$u;rX(?T&8i_dEw7=bb%e;afs,fVظ1!}YbnNzJ▪pФHKJ1hڸI0^L7&AױC O/~SZda1}kyMvPF֘(Xd6WtI,mvl7!F۾Y2LCAO97RǏꢟe5~ULo2YDk1E@ZfwL=o 1_i 㹩!0T!0;3Wz|ZU"dHUCF]C==iʨ܅D! 
stH 8H_Y< ތouY]E^#} D&4\Ղe:,֌+o ʼK%^%`-yGs%YH!Q}uBKTW^7,e?g*3.H?`v FYT޽7@87G;"bXRG3^iuDtiXId.X=  j.H2Kge q0wZ;&c3 Z˷Gghׯr.11d#0lLDXYFiB{ ?zn~'d_;[ O'[Ѡ;Ez,'rէ}e*4 %'Q^Òk\b`st͝cϵC>Dw™s$d?Ֆ, >l"o0$ÿ!([H&= KNtĦEK~ j{ 4nI͕ ~MPRKxKGQr߮<4rXB§T:PΎ!i{-_$3S Yt T}#R&d!vzRyݓƈ'BW[NE>cȬl; Mu#mrδcϰGmcazT ?,' uMNjyL DήH QIA򄓰W,gbE@ qfH| 3$;Mg=LϏf;^Zf$0,lv+foIxxJC9'߇Гt;&h6ߖw$|ZZ03)wbR]A0&&ٴU,{7e$I\x ︳9 myXS:Rz's2?J=F'JPoT%1(Bwm$S ݺJb.{@6V5DK!Cv0T%:s⇃]!r_}w,!͏mV]So٧L ˆzI#>;94x_GaBL)Aa=w zGVte]FP̛xdٌ6h\ٷ>mSCW.g^ _!ݑU;(Ḍ(5xG,!7a>rdIAȺ;a[!Od6h\ևfCZmz1I7zE^P`ͩ=,IN$nʢM p6WW4S\10D^4 Yn?Py<ϑkO!K+;_apijQxJvMmPnw#̿$m㱉Hn^ MG%72anG3BR֪-2 @~d)Ma۞ +EW\#-_OH2]<÷}7ݼ* c{!8Y:}R^qVy.084 ZR]oyƟ nׅDÂ$(ݮK *=mFA7i3Z$=\ v17Ɠx?>$y~3~QHIӣ='>Hj`8Eu7 ӉY4b84o.M[Y$2tWt T8/d+-yít{([܌l_ yŽ}*k82^]BFхz+gyCf9lt7ׁҰ2W {4u5 0;[M3}iS:9P0QǼ3mN;g.0CL͟a}\&kQ^k~Cqs nɃ2ڦhP{ra%S]2ԍ7%>ЂkѫC'mS:~8O,ݽ-/guX\ndro}aTKF6^t\ZYRBb]Sk3(bF5Ϩ ~dY}f*{+ ۻ=Mr4-`ӷ#nyWѡF=,@O|e̉#w,#`)Ð rdhFvq^"SΡ\vH7np.="ZFL,FF~'?F47A"PfKqcX_Y!!yX?1VY¿F]Vi{R䒔#DՋ#&%nj:2o!Kd:`I𸗷3ytڟ=62ý7$;! LX &)!{؃6edVxSNxW$lx G| =6l0* nfF4Ca=ۑZ*\W+μz[A,wq9Bȧ @kѣm: [Ox! # D.od54ɕoGS1F :ɊlS mWzE=uTغrM嘩BfZΨx%XR4-P폚#Z/Đ}[`ևo=FmaH=Vi$OFuQE$e3M;7ð_MZjVAcK49ش\ )mjfgYyS5q (a"k?? Г[ڜыR}DFvL 򑋦u` eЎJ(6`TkToJNE]'_얅_|Wa$os\aiyQH'VPݧR TPX3dϔ^qbX8% ЊK Xat 0(װTsK|3+\O>OnAҳ yj07}/1"gVr7"q^AU>"Fu/䕋LFkAH$ QsW.{#ktǃolp2 YMGOc|vl,y*T`R }., t* Ԑ=6YjqI;3xpA2 jUo*;)0rõ2I.;'HdXtuw|L&Z^ƐnS M\2<Ϫ#]Go$Ġ)]Cdi f8o\B?rjpqX%g:Lݯ/AӖ9{͂Ev2ǧX>ԅ~S@$(S .uФV(8حd*~I eG1*^V`K|76mʹ"X I[ekaN+E#s Ǖ3{6#G27Ta=lQT²`wF+w S6Cn֋$gκM4IlO]B&/HKdS4T׍XDRAgbyy7<}w_#/MဉRB[fi(#Ǔ&`pdnY^J#xO4Ha&yMvھOuTzpRR M?᷁Hr9`\r"T֑XQ[(I0NL]j/5@]BbMq~NǝS7)S-%{?6cS?ʦKpu:L`m)ѪKgiй!m'Abϛ́OjA`tk,԰-MUcW p. 
Fޑ ObӉvq99$"J'ȥWHɑ98?g!:d\HR&`Wëz`ᆮ[>Bc6D[awz '?y>BľLmH1wj:׊*nR ͕%x^r E y->H,y\%&f`Ρ W}`w, MmLuYg{COW}Z|xϦArq|(#~|T|#sU'%XӕFqT6qx8)u%a~o5ݩf2wb$m$(!N>V̫=%9ɯy6%9]WKd>^=>Ú@F\4['ꇃlsa-'rG$Z*ui Ȕ.dMx!ofAH-zk{Od#r}(lH^QӫGf %GX*AX:<: kS)ʼ=%d0?"q/QY(\\tMdM_'!e%zR-!Ow6i}lnEgtQ㱽0+ɇ*h= Ÿc|ƀD?$8Ȉ$6h r9#LT~/׺ 7=~BZf=`fK4$沄H٦| 7HD ]{^"hc1ە= 2DήY3Uz=RB&?~կFm9OBo='G7!dGx; ^n1YtGj~ ^W2 dž۝Of;;IYAǯ[DqjeU}@2UTBk=4)cT9s'gms^xjƊ*x8cDI?lړ(?n"/~+)zt @^[qeAkaqP%idؼzccJ9FE}sӶSW! ]Ś"C̏qȾ34?e,@&c+L:zaO {c76ƚ7?}<4߿ 9ӠGprX)R42K6=[~&#j׭e@{q }ڍ_RT"ー͏?0u.Y't*V؎H-$]DC~p;SlIЯ sٞGwΔtwdb-WAKdVInEBVo*6BƺCe ycuf$Y:&A޻ ܖM=c?=)J-dVD?tLà9Z,K-A:sM1h{(Fi}FaTd5XdX!Gdj܀r[9nnI=B3vS39m7Z!椣n: +~ >@mnO6{ /\sZl{'Gh4sa$)F9v$'"5M9=X(ZŶ!1ȫ$+ۆg[Ԅs_"[ҿedMODe5d2Z#""N{brgLfenn\D 52]YoOz٭F9 9l z>voЂ_Yfs w4mX, yk^9̞ qշSwzEW^T iGZ7/(h&'ʦ ĝ-!!G>,ɄU 0;PIVWŰxj9VL?T`o}@;g{<*IP[ϡNUU8ǂ@b{UQau7Qsd -xƆa8j7yW n |7CnYSˋȶs[(TdNj=ș&t}uK |cH?]+tbq0 g{AȝG4\P;c1 AD`N,k_Ǒ2YIBB_'pC¥-ȵ]\| rv )yueY].7 wޏsrD]en$,*XyOWe]C.Ef{#e ɾ٩KȪ*1띏!oj66K!0xBph5::E䀨 $Yr7>L]Xz@̇0Å:T9_}.3!C}6R0sT3͏AwyP9t%uiP똿,yZ:5dh->NV$oϤ!Ygp}¶ t>VH$P dFW/DgۑD*oߔԡ/ })d_}d}:$4%FE )K]s3, 3[2K[>Az-<]+~s%9F>vsy^yanl2|7X>Tv!?+{ߐ!g:|!-yja0@XNdv+0FV%ɨdz? ~^ ԬK/쀾X3 znPTB5W#D3Vx v (leiG/ip}`W42OaGdc;>qKna9#c,a46àZDq+geu@gΘ?zG"0!3|d%*<*a7aރ9OL-i>wy b+Hxp_e>U}׌+ڏ'e8-vo;/cUE꡵5lzvx&42vzeҘi5b*/Ο'z`.oF:M>$ m,Gl'~\ENߓYkY{z5C. v:m]3Ƞ[~u_-,~LbH p+ d5CVnh&z+fzl߀7+>!A$i:q"F_-ȓDpp 85#%V]ZXv0<YDwNeYz2_X;Lf;JL(߹$_E9QUo"m*ɧeH,ɂkaICtm7U?LFCT%!j>)Q7>"'-bPAY4x&t`[_bx )jApSH=,졶>Ul<34ۑSW&_Jfw~P[+|_}^DYǗ}>|RdX?r f>(- !+t(Iy@ n{,dSKbӰm7 dHoz Yۦ? ޲GaQBYD hM4U/wnCܩ7bMbi|e̘xt?T6"0,O89:-wCY4(*) ͆@xW훁Hӟx M4Ԉ1)5'AWu3'^@n!k6g5f& $?&q8A^Xz\;(̀Q5y)gbޛ4x: >"dUb_/9$T6fC|8uz'lLr`U`  %??^邬O2SꗤQ-Ry,6Q@>Q`ۜWvHl3'WUW٥6C[Z}2NpY kL9w\GL;YM}Do>$^zCVQm$if5gʻ\ۡ|=nFdQnHnSndjv6u&͞OMU(W; Cq$6 =Ξ=( -+4ŨA 4#iib~2 k'ۉ{=ak lz=`曂cBxNǰMpScbayƸ-0oİ!$LM/ضvkڨXDOE<+s׉-F^zϭ+jn `' z$x?3۷Z~C𯊄O9eκx\+d;2!˅S,9x7ds`UBxMթVdua> K3b`iuNX"OQcH O1{A 2t&!GaE >Ԥ0KGT5 yz[Y3Ӿ[ z]>F-י576XdV \~G=e`&X%!\;B G*s5;BÃ3ȷ7s#S:Fa9 8,b. 
p+BǕ/lW9L̐m7CkP{υh*PR6B s 0+x-CL` 'co3AGZH ?6!8䍑kQ@w.6S7y[*X3oi2ƊF~OvdWc{2MDV5\3 ["_g* iZiY\vV /t90} Mhk #ǐIG/֭Y߮ɸmqrE&$_N5`1m\&p/ O>ܥ@ar#P\(N?gW~&8u"*̓JŠM, P!6;`p5Z,b93 DvZaI^-Dӆب'lsne|$]ÐWObrM։sC6͛n s'zaXyVð6{#j{'4*0'|Rztf^_o"vm@F/,̟`de#o5z)W&Er^ 2]́l%t=!FfMD{lKi64ΎAJe?Uۖg{B'ˉ3!Do!zFmb{%c k"sbgl>= )9gOlέL`}» 10|-S! (kR]VԵpxԷ- fQw ê;~o`y9ӝ,3c{~ wRAR3ME`f]m VibdE_CJG^C/-n4Omni@S¤jZ܋n9٭.⵭xaH"|@ʑ^;O"Ĝ4%A]c+-N)F\*ﰁAД|4? |W"Eu! {3Lݖl !6a-r5d~\?#R UߊI * Xwc!84T= Ya&bm$%:XJ]wzkDT db?JWI' q{"%T]ȓzoZ5sW% riل\AmCTJ,;<9LSw"sˣd| IV:aŅuQ vRRa]xi3LlheՂ~ʤ+V_/wALj9( ;O{:Wڨvn5L5@'^1)k NާzޝJh]E)X.tL.^ƀp?\ gjd0BTQjPbG l'YB9/R`T%a1ja-8 &BeOm5ECCm `?k0!:|`j=%oI8U,xE>ܩߔ}Rпž0E^i 0 CNjn"r7ɓ&^.H^y-X!$A5ܶjTAU/g2%1$137c c_/2&hjT}~Y CMH6|SGLdqN@rKOHYdTs,GΙ&c Uv+w gH]k,}fVU < )2 ,Sٗs #aԷK,0tϘ:̾xPt*t\|*2Dz4ڎw01;!2{ݯ v{ )l$e߲Jh ϥ`he"§w<jk:#5dw(O4,e 50#[BC:nZ Ifƕ52e6KJ7r[Pig0AO757_UBz~ӐBqX/=BG/>sBH:rbJK[β^}$j%K',,Y-aԪFVF4̙'-nIX!dL,a7i>&d|J2YRl7X[BIkb}pӃܶ uٮ`QwJdspL|/?#A~p$d?bRDO 8m}? v߸[:ts%I դK$XmMEdrmsIYlq ;h#~Dƫg^ZΝ5|EnO{*t0L~deg0S~\_Gh}wV F^ ~4:0&wDo4&k=#+>aUfNRo CnFPyuI'=] L[ =c:A3g3G׸!rJ~{}1`t'PBNA\dLؿj \G瞹ҥ49L`Ovɏ'EmsIZK3򉜛SEUDArn; .>1G+HFR4w]? +ƿd2,cs2"kz0)ȍB0"ygK t\mRЃ:r4p>|[9i~*ntHЙ$|/T\4<>Q^PD5ԡY1d8ni?q \)_AW:>5k5@惡u{Ⱥ4sAqgf652o7~A0_'L0%ulj;;  ѻs !D}ub;nג8??s?j4fVJa|^7C֔?ϱ Ղ#4Om!=i0SFy[ʺ!k:?;wEY6A[c~yݖ4ydF !M]Ӑ720P kVVW/aiG.*!ԫCUM&M } [,tIq<E\)l5AG)g AdaaHG#vP 2a6md'${^uaoj)uTeoF݆[79wW`,1AC2\|}#s SLWb)~rq $պ(ǶH{rc2ib5?"^'QzT3/0 6C̾>v^V44; e)K<)n0L}. s>ӥ#I)7,%,ŌCn';q^sY?_pǡC0g9&)~@@qۂ^d[IFݺ+|8G^mw1ׯ=GBpwY0YXvI+:HpIf=|U/s2K] ֧ɓ"D_g$P>^E:7w^rXi_P?B3 fV/@][G7w1JIhu;\!~9Ie e%.f&6հckD/{Y7]Zî_r"iwk͝m|no6Cjk_{O7"{ޅyYY*o>@ie`@zO O?%Ipq;hK^DM>|9E#/:QKd~Y2pDSKŐk";rUougj5TBם^h#bg$Ŵ2G[^#W1XL=S7TvLC޳-P/v~ zy6ލ a+ 齾os32+:t8nVid+λӋ,l!ɓmLX_FürDE&NƳZ!q "O N˰_l<Rl!;hy#` l3j0[jz&/uaʧozנ[MP(Vm^O)$*\AA%024Q ]BwqĵïbPW( z"ʄSդ)Xd;QR S.'MCscuXM}wKLs.~mx`P~Bh?@-aW5ܜצ. 
K *Z|{|P$r-hMNŸnfn n oޒEJ@6i띜H^s}~27P 8j1P2p𩾂[Q+ྙ7βq>&~j%.i֫CqqE떸'4 ufBOEސ=f Sdn\XXvXYJ|OBDƧ0)[-:5u2 +OЅ*'NwSg -$k% ך'k9t9&G@?%L-oM< uz3r!M}rj&h%a +aWJ>`rZmhg֢<៤n:kݾfw[{V6?8+|L Sazw)<_ vۗ*wOg;lly(^[ @Kʮ:dnq&%VoX`;ltJqݾmo|^ +Ѯ|Yh61ɞ}Oc rV #r ?">H:~wnpwEm7d<ݰಱ̘xQ637uҿAZՎb0x!sIK96++^!9Tx܋oDl{0Iȫ+/$z5#߬U'ļYA_,uY2!Qd ]ZyOeO LWݢGvґA]o}B<ɛ)o]QFd]?~3I|vֆdi|͋;xM$ruz&S{/,9Џ-x7ˬ/ ^Ҍgpw;S<\J1 %a2\W a|`~$jP(C&KP~o-*4*RȡØq%[>$mц~"_BP(tKgEC+*f~ o;`qzoh;/hf>4&:Blhe2L۬SOP{%qL $Ieqd^;"\ezo۞KJo3R$(ۑr֯g轟3' Iq'I9Q4FɹY2}Hp?*IL" )H Dg!C8ro$R-ݭD r޼U[kOG:g &uZYW !S{SFCXqE`Cw[*3Lpj+RG?7zHgP_0M[b:ٛSaFR5 JLD_zyx& U{ll`u5J$23"-^A6٭dX7b{La&tzl9t^.$iO>"e /_xB]J; s'X\SXe90b#b{ݝ盏C`=ttݲ h&?ϿݩY,7:Z05t@ Ѝ4,b~? 9z&5`.7^/ )_~;u7 KQv|%9̭C]?)"O#U#&˼@NhW0!"׬^#aG`z+;3}oTaףA˝Q0LN"lWb;&!4*YYɏN/SGԌ1'8}(|/$opruk!?w%#īdS`k*. z0vVB0N/K=!Q8#Sַ?I`Vv8+gTX"ϰ|J%!N6$_Y9`+wFò}0s$}+u!:gE.䀾.>V\I ߐN!K==y3dFAT 4w'73;aO9,2x@d~az'zP&)\8Mڂ3 ~dݱIdax1}.[Mt*ѿ|>"޺?/}-9">BZAF8#:E NJuSz] 2.Z2 pfL'aS /g'B_ pZ[cJl7쵡0u Nk{p_W$Pkv r<&%'>A,o/6 jzezu0dM,>S历L>뜿?<|>&=/ ezV9 F[["glF϶e5ٞ:&#.^B{$K$Ln݌gy4dg PdDgEjv#=\mGja-ra6 gCaCTVoP,%-kDGx/p>.1 %ah嚋vϫ VZ7 Îo{#4ϩSȮ_Y< 0)EV'Okcb.ow~;`#Xx1c=4y3[W=5Š붍*) &cRDD`NJ2Z B[;~!Sޘ=w1{v-{斺n]=wcп7W$A?5Up54ZtXB=?n$A\lD =r Q]HTK0~U"D޼9GWruPEZf& `-&}qeI)MbKtB ی| ]Wd'??'f7I䱕2OP%˾i@" f OSfS&CǥK(Ȕt<:"eǹ/Ѳ%VګLčDJ̭҅.8ff6D$C,ýgnYm~$ <)tS GK ٥ 5sqvp?:]򱷬YZ'ǡHg5ݛtT2ʴ#|`\AaP/q; K yȰס1gYNq}%l/& 3mF%LF^F 5eĈ]K NSȦߤȪ,Rԫ 6̽6Xb)&CZ{|US_k4;G}C-EoF5<r|DyBh;){ /\>TF("՚CC%xr)v7 Kv^lyNV~hu?a5]U44AA4=0W kA_&fȘ׽\8|8 ! 
rW5t#&VΌ鐅;ݾB_,Bt\P(ٯ-E^qM%tgr$xYjm C1Yf &8a!\צC s`B'-> \]PYk&K +<b'\B#U V[_<dž${ ;^"evqQ` 1Qw-JNXѠHįC#t}7QY0cQ,!(_2}96BVz9~2[C"<ݺydl]J ]y<~ewꆅjhވ|NϿ -9,^HPZ`LA/JO )lM>w]LAF13 _>m`ֹIX1sDoazNĈ#+C}ZU}# -<300_vUv9٣ՙWW o ]L]{OۊA30W+{Zaka$]5.\Aal"Ǔ9ȶ㭸ܸ ͽsёSZd8O?VqW{kTn< WVmJޥmGŎ?ca|]2«pCQISӤȒO1dl:$u{Xie L9$k n+c(̣GӠ@㘜 1vb0@w4Ո-uCY{fPzl*ms7Aȼ-jyd9k{5!SI5fFʳZHr#Z[)gwO%TXCo"㽑'h`}ٚHH>%$rbtanMÑFt'): 4v^!n/}ȨV1Y0xrڳ,\&Eg?GjD$qYBj0"2(4ixB|߽x'Elz_ tU[FpX1O Pp3R;| MRB$IU=2$ W6M,"I̸82j,70FJ%"9.ño}7LG9|3~H5{r&xQ)?Ol Bf_)ڶ.$| t>xc" ƈmBN7 q;BS{$Vi011 sHڶ:~c}h Rxj6|*.ժ|@$M?v+óLY|- \Je5RymS]l8 zʉ{⑆/J;B}Vçߏ\ ΂3{ctX+vBpPREݐ>绅$$,6 Yh-,uQuV5 x'Lxϯ` T.(l.}.R`M~4\)Ma?˶C3|H[~ȿOnχ뺃$;A>ZN~!r~*m9$e0UcɜoKb*[H60^0$r ba$au=t/Yp8rB;׻ub>TX;Z~2Յ?Â4rFkFE߳- ǛIIf&]'YO{}XDݬq!:.:z}X(3 wvi :K䠂}!b/R[$= s,W%] 0v03\Uza蜈޷ddOYLi!M{݁VxDm6A_A5KKvP͕YSz2OAVjpُ]I +SJIOkx2I@FN۰ЛuU3Li\SKٲ].r,<ؑfi?d,o!#]-Hf_#rtYQoc%[z%pHGYS-@W_{2xxa,J I婏|wAO099^ k&߾gVl`g|>a[[xNC՗RNyX 3&TP<[F̋Mu [^>{侳Hꋒ60M-ld 7| ɫys!n>]r8 ie?MWr1 #KtpjA>Խ~ ZcaTg'N!oOɗufx%H w/,NԶ$כir<{aZC ۸'/iԔCƕ3oO#kV;)|:,hOr$_,<+s9vP[b *9#^@jNB:Ws[X*Wk^bB8~ܿ$K>IÔXqVߪ2݋>rBk/M|Kd@1H/wRQ km0^\zcU#zfN`d)U ~Q$ {!oAlAQ0Y"'nK;ȺPG?؏L C FSy֧Hz^W.˅?x_ S/]gܭ9q_R=VlL_])֋:0pWg3o_d1$h~Yڴ|oIwï?Thҏ 2I৴_w݂ȱ7t;{x-Ř+Ǿ2<މL'_!KY7gY[DQo>F {)+#}NOZd5Q1t%|`=EX|%7.IG(dg" cY8\ZOX* "$ Ŕ0-DoY@Xgτ#gƊPRnUDf1,`6eU'~VSo #ŁvBwL^JB$ ܓx7&7|z#JJ>3&gwNEK-TDҁS0G mMY.{8j}2OTS %{{ۋ^p' bV7Ci*wTNdl_e҃9-yz0Hak~{KRW.rvw> ?3ξփwI0]_30#f`Iq5$^Xsg32mJ\BV)Sκn5e:֒|UF'w86l_~weطY5!*t RܼOKw9 ƫǴky --\ k<uk߷^~)9;Uw$YR5_GdYdX_%Bol[D¬Fi2tahsUм b(P̙qKPe`B01;4{ވd\g6̶X%)#CZOa4C앸ok2a"<0W^y$N??W~Iq*LuZC'cC?Y$A[v0Βqd)p? sfNsR4s+ O'zѰ]znvl7IR0/R~ ,?;\0C7< Pqđ~ {BԚj x`>-Zxv2np|C+!ۗ;+Y%Ch>RHO|`L`G=KuЦ >s7gGn"kZ v$ ~i[CusGxQz6pJPPI?+0u*f'du(1`E\~/QS ??yFlk|0OHs*wA$IUT7Rݠ@m}y(_e1z>KݐJGBU@-:N07f>LKMx1=g{?a4ve~2h#Sy|fB$usX1Pz2&Z!~DȮחPxӊRn7.@KsېoX+>O}=ҒwV&}]WaZLbq~BpGxzZ5+p$Ë=bH@[cw1͹`Ye612jn㔵CtQdaSWXu_*>0>O)?^%z2vqA,bv Bb+Sa*c +L%OA߭t@?j07mwPzZGہ~euZ'z?LC-$ݘץuS>wDڦ^'04%Gf߿x.. 
Wd-fk$"Ǧ 7څ5#meE \lF_v!;CBc2]9{Vw|xf]ʿiPӑOPK\&ev3 }W n&]r{){d$ =Td~Y)/dhVLAnՔrV,~ӏyY|aOĸrR1j^sx#DQI?\6qGCK`Mig3՘8\}:4#iГ*aF/"\F:g6 nwhMp>׳HИ;CGGmn\ KGfHZ< .!6Kۛa/xLY ]gTBD􈩚ӁBPn%>[kDC}7kjnȁm6ÔnaśE]Țz휥Cyٛ^&œ`v[MXaTY \e$NdqsnmC۟ Bm*rW'_P!_Ju3+܇@,_hwy|)A֭V9N"v;_ Խ4l']<ߕFըz;9t:< Mw(!F7$g}*n[I5_!_UXC|Q_%0s\5.L#|^J̽DnV#l%Ed? d3W1hQOueT]2E }%W)zO-~jdM1BgÇ;3$)ߤy:305|zs CDio.y. M>&Pa*yKh uW`@fu01j6%Oeq+ Q3L&Vv9/N/ =pqk\~S/D*C+e$X9F F^Az <݈D>Y|R_#DFb#isEO"Z4Qd]4}{.4O k܋NHyZ2ٷx8o[v;  > S.zRĩ&dXxɪ >H"ԋ~Ad}eJR!d2 hA}9amP8aDp{ZEK5Z~. CE4n{jD;s.UyF=e+AΌW7W_$TC;s鼐scde:2Lnƅ6j[H2c,y6Cq4p<%t>-/ɑ[Pdݾ%z?^h2ʙ `k<rpiLlnY)t<&r.B19쮯uPdh?r .:;尔JPioŖP8[iJУ+Ъ9R!; RGg]Ghwz,2ȦdXIj[0 sKw5 V0 9ֳ?^^%9Bw^DfՃ%&_>e S',L_,@Ey3>}6_T-܊Mr<hRn>OY夏DѾAɈ=?N)t{1Dь F9ȯq."\r'R{xxE ΪPNRdI?8R֩(?9{R3f'2za 2LL@v"=( AR^muM[_yjȄ-Y]#zET#]\]m2a8E,u[&oS:o2ƫk~hp_db1|~r?#lz!Cl(dúC=դ$"gω72JjB7N!?+CDݖ}fA.ܾ`K뷨*0UfoJPzs2 ~vL%ĎQaNE8T~= ̴s"^bB3g8Wî!dyƟҊiu?mJQT&A34dbeȼyb32փYgJI0歍0粤ZBɛ-iz'л ٥¡TGXC;78#do ݗ_k!"n6ȶFbnzCoNG!IȤ<.DP陋 ѝ'v?/M L6\Mڷa0CaF U@˾Iχs[`BiJ k ,Z3pV"c-4}<`l+7y#{\q=dѱ@yU_Ou@2?mȏl'=E7ElԐq9] J҂:f=nVTc5n|CI7ǃ`pn j_#AZ8O, Ko_Lܦ/SHVtKŠ KI»M2E}ozd$[I/ZzG ,N A$}e|F tüɞJ +)Gwk 6.9|{̦il;d/Fd{]fVc~T|٨k\NvٷC:zfϡqzG6|SRW!vTTD >[IVIbX{4v<1I \[j: H .wvRUI ߭ۡbg4 sSڟ5CԤa{)M5^ AW :! 6'輑4P0h؇:ɋdmr),6ɫ١ y[m#SwB+-u&NDj }\ !nWS: rj־`y-ˣ0TXpw͏zMuP$ \I('[ZG~o)Ȑ/c0Dɑz/ I'fJ ohUW6*]e*|LlbPi*\ߣRI he&."zbY6敵aFX>_λ`f<:9395 kAa۬aj:F1Cp[o )dɐcmbmB#?2jj!|o=)2 "G!Ba rlע!e <2hARѧWw&aN|/L]!``&Rl"1\cMNy$#7y!|dV T,Ln$r*}s17F d=S_}{7A׃L9ml9r3I85dƣJ~%=%=aa}XKNVo'IP!ixu7ELCޗH aH׶ً*\DV]sPD0^A}>M\$CdqTLCV[.@)ZQ`)N=ߛW8B:[p.-F-쟓;*Aeݞ B Bx>ޞ0iU9&nU1~#2TFRMz>p~ 50;>oyǎ!]dM孬R)Gl݅L ᚖ'9^qq-"\dg>'O8 Bzٞ~w6%7ϴnU`tJ0+Xhp*m܄UȢrOcE\[G/e"K˟9τ};"Iw\ٝOMg.6hl#UlM-eaخ=tUGP Njxn}z Qti@  俅W5z~^U8,z?g^XP%k] BQ>p٘*d9d9y fw 5`h/H]@E̡֫Ee#~gw' [85 3U7Ow+`G' =7mx1dRǥ |eoG*SniTfX: ,½90iwY 8ܼ&ˡuzD=9J<ײ: t޻o]g V7"c<%Gd? ٬ !HyT o_aCw5p? 
9",ޤyDһ}o7}'ٿDʳ1ν6IAto W0~\ if]$<#M _uhla_ZM,ߦ$,s]8a)łP C.zҎcK2pwR}W|Խ c/t,nv 9}?j68_d^OC_1<_7 vO"KTcooId a||"W>6?CGw^:1!9WG6{ϛɌWVJϼAhJ,[AwK?E$OԷ:_@x E0,nK5TDI·p=^F#:,SN@R˛c\\$dOxw5Ob F ё[(@+^B!$oLyDmGљCqxV IJC[I*$KQEQ-[$I/Ed} /Ugs9r{@w)x6o lOFߩx}?; }qsW\wvZIi3s%m-D v ˿4m8L/n("x%(ʊ8 IC5w@#HI@ X,`2) /k "5(O_JM۱ q|(m(FR]FVd>FtmI4ij늮r$ zD_<څX'O. jn@}wvhLX#a͇zS.W)Ľ(*㞻x|wz0ޞV*n/4|DnC{|?~2D` qY3X\?VyǝǾK)WÃoIZᛗ!G0ќ_g5#Y7`7f:gjo_3 $%Q0:NW5/r=fb,B@ĦR@W1Ygp ` ">;?X^!}Gc/N_F{%Xva_&W!ڈ3AQM@I]c6b/|x b3yFeJ :(>X%Y$1ޜ,M.ټm[eaO*Uw1'2,޼{7&G?05pp&\u/s H?CR";lW;c\H}@{yk4NV%Xʡ]atCz N1#r<"цO"Ļ:q_p: Y5;Jܡ.+ږѱ N ~^#*ZeE/:ҕ#,jC?鞄:C==ʃs^\pkY]$ë1 ͧ>I%1-{)o%)..uK. }#0e_|>5C{,x(g dMXk5 6e \58V#},oZTgfo',hӗPƛ雈-\{q =kC [K߿60Vl u~ Ȯ }rO֑W-1fc _r\ l^a͋HP•ls"Z,ͯ)ʥI0VK pc~oSFci0#hnl ]ޭ.5wbC`m۰xx̻f}ц!\wP oKl1[8o_1jD1NS~m@7x* ?*X gHa&a3{=,:is}4֩X c~-|Fx`|pN54q'ןǻ&08q( M¯x±eުUnxG ~K>~s4,f%q+Jΰt_y&lYw\Ƀ^2-J2Ͻ9WXGgWBKd0|mL6yi{-LmW{W햺.bߝ{$j p쩻+~\IQ?r 2E$ )|4'oi/j+_< Q`GA}%[͝!/,yK Ry<ڸ47d8C,$OoB|$wRW!LD1}qeZq#V+Z8+yGTM4\nё͏swQa8W,UJx>ۿi0z6,VnӉM~EJ/_d́ &y⬴< #(r!r _Ēr1rߔuR.[1)F'4_qO#Xx@r"*{/E` WbXۛ kpEXkOҭ֥0st&ZoJK۽ΙY XCN ;ooQnm'`A=˩5A&diYK^2XkQˀ" hUbnvQD u"(I9U~;Z9'Ī;:똞<)q\DBJHkY[){靱[\J́8;N#vw6gnNs5%7L nOr}?}*N&8<QD ,u %aM`yVls1֝cH}sgEYhiMD<1tҜ}fNudl핾WaHRWC^g4@t rUkV5qO%0R{a_Z@pL]@<&,'#v?d[8􋾝KɤWr Xqqxnp2 8‰b'.9Tν/,C,vfj;wס؞E=E^ <FIa]kķdoK)WF ـh"Hjyi9X9צ s7CDa%b/aprNJ`[ar+ OQ`IWD*l*oSڀo4'[I%56Xf_/arHk4+"\aNAtC pY 5ъhDE*ֳ%'{IgVߝ-]sUar,OvECT1Iܳ13if yy7%Ş=7 3' =ԯ8.l#|a$$K[mR ?{b"Zm GKDcU镸fw<ʬ* :7 ^){4Eui|%xq1pFl[j"R\(Q?+1XL?/alozZ=[A## G"`I~F V~W '(;Xspڛ q5:ؐ7\̈́?¿kPy<3|:wqdiᙁ`}g>0,:7 nOYjI#!llKfrś=C0BPg/~R ݕΛk:'I~')4r<@_s.P(b<4v!ctzѽ^o:TզDXɟNCxøV܄  @ ƞY~֋ohu 9ϫyvAXw@mCf5Cl^B\d\G\@2qx5xሂe8ncmCOzEqD\ [ tDL` iy`aM"yAДs-}x<ܹ*`?yмGA<0 ] lfQDQD{ІXx!V4('Zda(\q4 qc9!=|wNf@G's&]!>Fa~P8#2T 8޷K hY-rΨX?^r="1y]}Vz}NUgFUw.۸wO&.ݎF 6&D/~_͐Kaک,5İ$(?Z Uj^vR`[n|&+b:XcξmDZ}A=`d ʀfaL*ݝ>Tv}PnHgӪ kD}X:s0:R`chaaADOBz4X=&z;7o4ؒ 8f`F?o8MYZis',w6S|Gur+|)9#8tVx ]/AGГP(?4rAK%Tٻ7bߔqU+{2Я ^U,H‚6M䀱B #М[/$VEx ̟ 1Xh Oɍ 
_h!ׇXxtCU>ĽqƒfEDO:!&Z\UX(^lU(4s1JSr: oe\@G۹X$Ie9΄),kW}O,kP0azsb/Vo.VW>y'yQtb |! eRAt^{dϑ4,t5)$E^ր e|2,?7Nl\kXJ6i|?jh vwqS_˨"0 4nOmZӝhXۀϼV"~}udzoeݠ*/p^\3kU*6'>SRC?"!wSbf͟gA0I*zu]Y[R֞n7DZY~k>֧u}`1Ȣ`FK;慲W*|6qp*i||@KC_8jPG+or̞:DK F[#FKoͅQPKHL[= MGƹ0lߕ87XU+&_)1hE0<]@*YȬ^P (Iv{$#a5wyu4V㷭S>"0{˵3b+}#aJ,ҔIsT("UV=/c{kϐÚhD|6g_=sִNnqK0’h3vݻyh5Őnq%SUӱ NnŽyktKAn|#žrۡr`=ĭ~hm"bqOI7e*o>N.XB@{aר0og9_#{J vkCmlT0jK`}Er̛<0"+e8?:eSɊ#UndaȌxgӁ(.C.'- } ["j>h%`:gwp SGqJ_AҢ.^O3m?T3lCᎸ0d*(+4JްALۇiG`>S LmKAaL,plb7kO> ;w?UOG"GrRXP {tt/ |9ո܀vHe{x=Lе#c҆}j_W4l ".6^EdY7EbIGWw\3TD֞]DIfED}"/߲c~o@%roAF.˼[Q}uy)Hq:tec%AINc-FXw^Ϋzp,[+v!&IoN|?mK{[qmTHa` 1~3خbI>?w1DCi㩘midD"8+fCQWpmn׃?t+(@wGd^¼eK|{feb{fRDž4LzdN UX~]샍 M ~X~Ր%>@ЀSgeGµ}d7Qq2@c5,d#k1>H?f 9};=8xtG 崒 2X Y*Eϝ="*K2. f?~#-˨tc? O ^ 'ӗF'CJ>>**T&t ޽k K*7aѝq&&q7g[7`2'TǏAk'zmH9s:haM(0yJUnP I'ubyX qqW$a,Q^P$tzt?_:[i~EW6n a~3])sw' Z|m?"d3C,AZT dA>kv7O|xn#d.CrY$ 0W~)G>1Ov#K۰_:= 7t]/~qX/ˀWo&n9kr}urD?+fLmNU}v2,w؛P6TO_WaGOy $X)|2- kǤӍ 9n{2է> n`!Aa q3k,u?lc3AVqCHLkyllNSay*}0ή"~[;"pnDdy*]quܽ;i@?(_֚AI/7y7c% Űzc/WɎӹGT^{+xĽyBӷ8Xf}UUZs4zOȣ YSҬ̨%5Ȁ$ihW]8.*8qY+>.xʍ|D%3h׉nLYke?z+I_MWWjC5>M-#b_BD0K>ߌk|vfV3<*\\1cefXx穙=desG)0ԶW׊nMy?;\ݼ$r*OCz8\ͦwY76X>~u?Uo=V[:.ъYc H~A⠯^CVHTHHz_ \faNTUVklads\?*ܰ2>^zOGHV rZ\1LN X~vL {n؜23hBaSOrݏ۔'.D1+^x~~\SC2jqlRD4"d %Ery7?<+% vcfJK RW}PpЙ_l'`q,{R|.L- 3mD\%! 
Ny\{f0c䔠}0 S22w_ dt&-Е+tIu ;uq3cɰvafU;o )G]xg&'*D ?,p9E +N0}muYA ˍ)#\L!ضO1g|]HE-R;nV <u .n0_XG81/7>2)y?$D?[ P: =^mTH<8"]hs73@' `{l;!<" 7a/TS^uӓזsqGlX~Z4ӀY\w7= A0f~LE A[2Pmtr28wÿ́$B^c5lӡk{cI lF1Y2c:w1* "L2ˈw 1}W;kWeӰTq6yv0 Eao/bHiuy* մ⼱n [LRz1Sv9Yw7jY¯\>_b͊Nv*~Y+(-Jp˿zu0п= [ƝopicTZh$+͏AdZ!0vn <9~w_]Hj3;&RḈ)4vX$bS0^ݿ⢥!0ԏ|qF yvWF<ם/#Rg;cGǔ5*^ʡ 1[7H~:AE~qlL^Sؤ@WQ6)&tB'k{3lmA0 ?uXH!6XEʁq䩓̏i02Sy\_*e*oOSFPҺ7z0Mtr',3[tMOGc==& .U1iks.5f8RMٓƘ`_W2ԓ=h@cMlv ?]/ k?B8aiy@ =COzC uhq.U hq u߮a9!~*7BňSM'%~{ւO^An{i78)Vc4YWhW*kc8^ď)e^F?'8%NZ@|+,,5J0Y #CAk4N̺H|Ң~TXYxi ]b,hU*,bZ=a{N1r#$=,e]5rH z΅8x#Q`@# IEՎKZk)f4 hŰQf~FJA,Z9шRйIJ+zg<"LoosrP郠ז0o9#q t<&Rw:my\(*ܟĻ +W\AŠnV{޹8w}A!2go@x(Q'bL|?H9 r*p 9Ekx]yb[w #8H'>Ӿhl.bU9$6߇0 KC)JĶ&82=Dg!CchԬ b)~џqp,FևaU^3W/ʭAH 3G`G>$_oRuV*6lTx9Rmfǒa*qxJDÞnLiW 1>'UiQ}|?$cML]+sc$E;lP 3_ a}MZ!1,eؿCL4A1?0mc$d_}yY*p[5uP 2b7'C5:6j@}lצ>So7DzWܲy=X˂6C-w|LJ[GzGaIrN4f\`I<3̥X6 V$pr|"r%bJE\ܹu=KN4 5G|رkj3E\MMd5Įz G3 R`K-":EwS_#>lu-ux 3rr72'c-ޓZXQ* CiAYdK ^@) FyMcݯ2j,p`QQcix6,w{(X?&OVcez],{&~GM٭X,_ .b;_b=0PSn7A8OWDŽr0q#y+CS}YA-&HΎ&CW^SN1ˏ*!R`8^/lBJ>ۀϿ { :#Caó]ITaQgY L"枡w^G?7UYXI~s`eA;5{B?Lz 6p6^%u(!hP@ۮo!>0n&!k'oD$3 W&GL-PGFm.sh iKJ&i SE6Mġ_UgRqU-44|= k*lc0ΉWo7 "P⳿Dȋ &bg.g ؊ =z "McXhr{MF9@~鷡k{ y$^oxcf=f̅ϺЁo)b +s Q`쬍.YJ-G8h`{[|o F̧z *NSz\)aCds@?"u ?ئ,'L@&.!Czw"KnlbvU6= ™hbq :knb*zE,\ c ajCADx“a֭`a[$2.<ʋsEL7'1lB-_U1Ooނ\jX}YweKgMo:b}ס7Y:1*.wvUȁbИ ş޸8]a@}VQl X"Ig+0gƽ)()41H#-۫m\"*%VVoAdIUK>cDzL$&Cg 統^yQAf0"\; "Pqpa.˶?V`׹,l 0Uo,/=+jteȃgb`)RfhPB|WQн: <3 gV-)X?mk6B`+oOL!>n\NlOf.?0ԡ ?Qg]ʟo=ZO7*LwabӑD"apS]A9>E ÿDN6EdݿGFC -Y#`2e,G󤖹 3׍:;oށg"fɕX/gV"KjGWԅtIwOǾ 63ϧÚI@]63̻Y/:柨> ڧ%fB,k MhS>}莘V:*3݊] 1iUf!X~ZP0y"> 9XKB3Z /?\mcX/:z-;d`˳h>,>F_t * ЯM #8w?BG=f!f$ô/7IrgU8G}!;+p,,M´Qr S~YB7\ؕFpZ2:q;g{ԯ$`/S]^Zغsa|ˬ$**`*bfM ,9\j<[~b6?=g}EV퍾|+CqrRB,0._H=,(>,]t;Uliio J[n*bh#dA)9‹д "3U*A14{) Rj=3*"sDzY0PJ; ڒ%Bg8q0ڏJ1ԛ[yn=sɾ!5 uzN>!èUXH8̳I)Wf}@DVrұcەiN[5ZX?/WvF~VIC~#E*4zs |* .S]䵩1^gm%"~ֈ(M:F3$!gˆw~ox(_'>+QxK9f7'j aS czؒr+6[MPal}~(;`?mcI[4".TȉDsu'\1fwSqo=TQxZ= yO"QxMіdX #RyTC6)TX$쟚7>ݽu1냯ylk 
o_c!Qk`hb1vq;6"~>ч(kqU)=v{Zqj}; :nʻQ_9 0Nj_~ҙKǡxQN~RUφ+4¬ԫ~EXl~k3!i롈(8*䀈_d[IJx? b9? >A,a 峚RaxXwFMGVd{vZrQ˰m 1=V 8p}./{]B'2ކ"߭qL:x"\`X'Z`XS+ Pw+0u2ۘHlїHJbDm-+/&QR^] ch=07 sW"1ud~>'а<9._AZeg#9\iiTo/;g=gEo)9a)#Oqմ==R bIu.bmE"Qe'n)_2q vżMx5i?X:TK .Rs7=6B/y6Cwd}#e7"*HrХ@ l쯼;|޽6b +d׀'ŝRpQvםMB0fj );3>M9f K$d_n11GK8I4EN{sv.0ܢ,sLzRz^⋽=?_#UsD rwƙ`^F[MM5?@nsbn<8>Wr"L _e"8~0B\mQ DyqSp8\ޟb16cjfEh GzbA@u xsx*wt/>X ^$ _]5>o;y5X`uvG"Т{ p]\q<4^ʀz-*8h?4ܢ j;ABWMD@X_FL::xtw>2Q."Oe3hXšvHxߤA?O͞6xw6|M~w  rJc@tK} "|ۋp!T6b |P2qKJJO!#$D<GTeǟֈS܎??WD?q`D(2!F).CNٚ'5ZQV@K45gOCL;b.#Bf}WXaUã|:[asM<wR4t0Rk5k`DpD,30"VG8CǶv²1J5Lw/ V"QxՏ l:?o ' wtd>XH;"9fRoED2.mHfuRo#0|jKDL!HVa>./1j zRe绱ˡ'ɾ\2l^J,F%~fEQMd3 Zƹ4&qlﱞG9ү#=(n /w: Dn`f*;L<q8p̏ݿ &g-D9iU4T^0qTya'$4Ma6#%uv(L w+L)6HLB`ޫ`pkm!2̹uH09casP.nj bpy`zgE_)<,U^uΈ\Wo w?VlMs?!B9 ol d8xo]p6湹[ ko胁bKv]T>&Y0+DfeoV$ND2.ؚLctsK c 1KvyqJY,lnNB7B!mQ$p3HO̓kO#Mnh`?7Ga񱄸*X ewTa.${ 9,Z*Mo6M~ B~бZ(TϏKOAWm0up /T꠲@[[xB!kC0=Lgk_"on3t~|E$#߱4?74x/UBB<!mףLV7X"ךFYxvR[~y7vBlȖ|u&,>|&Ro.h]PYA&~P.[HCx:j`b$r{χ霝ݓgp0o <CcTh[nt7ELJhv7R%4Blj+6%P\AV0uae H-V< /˾$tqmV&;"{`))9^065g 3D~5ʡdv"w-]A^C >>g6ٟ/Rse9SIDn¤u6"kes XF˼S0e'J %~H]|JBȠnzn<, *SY*bZ }RGGxQ9V[=Fڐ/sUaZ5 Z&Øß{"w0Ğ˞jލ#I| wNyƭBvchYHnXmY7b"p"h|{5r5&mȟ9ߙ .0rs1h|k3'򯿌I*}7+m}&:0D"y~("HՕ;:Gdz'7΃3ܼ+.ܥ'a@o$Gv 2}"a{a%jHfeR^m`~@ G` ǼC6X,ushuDa帚xy 8#^y-@iD1|q쨷NF7 U);D46Yoֳ[ec*N x@`7RzK~p޸x?mDmP(fgMϹ& kiN*"s'v(FD%VnJ}a]7ϠaԑS.ls&*6oMWqPydclKɰrWTS9fYd>h|+$"^ g͟^ R2[m볼CPdf2_nnÄgt4'74;4.a, syFn$EdBS4I";6tߍy+&_`EkDز9m /TPu;.d Vힼx}::j9_Yq+lD"n/lq'#q@~^[Vi,@8 B"}:˦zp˫g+职X9P7vEWh=g/Ju3z_`;4G]N2働 qUJybZ ea0wkö`B5Agϝ5A#ȮKnVBHÕ]Th= }i2ĻtWߝn!d =﷘!³oaAV oqe.ÜSCH?6sᾶmo"ҥ25bo<Кj*;'%!Д<t{xn5P ;UazP]eJ+iCo_a\;w2,X%|,O졿>/ٌHxl^6O92YJAzk^⟱W¬f!)ĸ7ܯut:wm T ʂal@7~|6wl2@-;>Eɉ g{[oX@bw߱(K ^sEHr⢍?y&KcUF,t\|?0+ &7sAlMVuGY_l>'>Rr\]U[n~M0 75Ҁ9?ЦyPeJ[(co¯.g͠enjS 0ѡ;Y?l˛w3`CM)1ҍqiࡇu0ffb. 
+EC穷f=v6vLƩ̀0U]#AY<{G,J=Li 3\؎ \SIDLm{'6au;=|:y WQa,N|?C (s܊z۫(s&&=ol$Ÿem@;cs"YQDn[T8}xq:ϙzog-fP$,33lQZ 4O ,]v־qeS@* s#5X5qGe buU4>sBx_s`30F"\rF,ɟ8h2zq)8Qp(bb_چH%u&^uζSl~>{Z^c,'wXnTQnd Z$)6K>0o_Ohbi&n>ZqP SڅNl,a7l͎M~A&Cd7 "p!Yf 4Лcf /z-cXmM`mB/e#z"d>n8{HCC]w/!^ D#6<08:+ rI1ADdޠ=DLj*aMin$,rt`hzɴ፠$zAiwRco^k10&彎w~ `ym!"}.wuC\b6c v&4E_WD\|R\^ļ%2 Np>̀='b{X~1҈u T>6{x'j^Vž:m+*B^>Rk33K=E7j"q xclݧ674Xa|0cŁOOsY1Ӕ,֠8ͮTb=8õ V@C7*aCJq h^/Xk 3?@+Dy8Z 6$XZakW#;ϫYW{¯R{& ϟE̟ۊ"koW~E$w ]:HRgF#װt,{C훆?pu²ԀlfnHR! ~Ds"$ ^Ґo7'u'Zam/]o׀quErWĭ1(ǣG'/Bon?h`}We_]"M0p?u">块 (9x`Od(ώ0';@w k@!qIlI5]VyT "_^خl8Wȅ ֣fb[ z4` w{PՈ0Ks▻ T^ҬaI3y0`8q^-CIW۴XoƳ|U\,H-&~ tGs}0͈<ՔV4>QG?9): %4 ^\V4o¸*I$7~ y·++Oa( J:-o)7CDW@Ƚv$Y;Ycncw*2^b(+OE\ )2]g-D"3/[se kzyX.] s퀭3ð<'wᇖ)BlR1x|e&E)Ug=x9qWXƵ_!bwD׺8,?AI|LCW_xxnv$I6|~ k`E~Z1gH"?I)MO*t{#b`QJty|l5|ɂ /:SGv9䇂@v3oD>u\ $=zCWZ3`"V)vS $ ^?̃-lQH7Ēi,ӤȰ%tuw6,-L +66YnIf }1ܣsOae|ZZ7A#IhBU*?8A(DI^TCF<#C͋\[2Pp83`vVjX8(n萛&'ce_i< sZ+avԃ}#+X_{8!v*1`q9m%by0^6xϕ&0ӈ#Z|vlr߼"LEl&|8FQ|VJ1b"_8n[kj(zCwT[bDYJ)m}qNpL/?L"9gJ62{nj|K6`LOVnM1kŽa{B?9ad/ +|]۶z#em}T}^X i' 0=>w11c6N(X?zCՙO0aGrXjNM'얺F>T S'ƝktbU|StWq,T[uԴ}ʺ,8J`ZW OL-[>,wu$h L7d*{3A&4IЀ$0%,{^}) ki2و0SO {4{g]~Þ }BŒȧ ʛ)(3{'e"e" [ .0EV6c$9C֪a;}J#!q̝Wa٫BNٛ}|^0z^fۅ4?] }\tIdbO+`X4 ,9qz$u V] _üv >Xb]gmqS~*L9<^m vāQ)VsjOl׺@Z"0уޙ: k$Nܳߟՠ'?2ksG0 )ȼqFntw AI8ƝqyOb|`vy Vw}2qA{Y መZnA9번X2QH?xd|"O\2Ӯ2M7āH U. 
%@X1\Sd]aE:l !NzEU)Ȝ[8b<0cMZT، y`v8O⢉-TXسC2".'"rR"N#]c*-c}9DaF~j5Y⬤H.\!1_(qNkw/1iƵeNes<`&|pp4B1|I8] t``p#Jթ'뜭X?K5"fOl &l-^q{DXi/2 O-uSCoڣLaL;Crr&%"t=h# `}IX}  <泏O^1g7o 1rJ`4%M_ir׶#Bʈl<`vfbV_rFb+})êX?Xߋi#`)d7הo{Dl"<6>o1,UGU|u0=fG OF_R\wրaE?[3OaՋ0V tUppjn q|- 2u^ׅ Sܠ P4W'j?n0 ;G׀ϝ)T KJzGTcWu1=iط ޗL˨"-J s1)rն콛di/H[}I9c-bh١=’Lg>+v: ğOg-N_D 韗]ѓzqT7mAc"iAL/u`]u=,`skB,z [-1Ű& ba؀t`B9TLwРTh vTw*Løaȏ'6C@qϑ.PeQA&ܥ a#鍿",0EZŚuʟs6 3GeR>/6 _ɨ~4 z.z-2BqYuHe ,/ tj >sx/C ;ZjkuA7C[1OաKn"|<)%|#OoMTc4dz^3-0k,Y6?gE״6}%<¼3K !W(Fs|oS7#xE=IVd [[̈[ b('"6E[ΫFa{)p`8\πÍ [.#ee{蟅6\o2`֦m0w,aADq>l$bQ-;m,\i =ЃI)֝r_WMr}D]WzhqN_[G9 [T3o) s'Qs0[ ;sJb0?wq{~C~yk|br*iWdB' \j^DxӹQ21YWK؜/:А6oPOI h츖.~sy?g.nΏ =>$#!̓W~gcGax5}GA,9Vo<Ķ;Z"0bvGle45j`S.XW<fFw֔a"Y1p֯r `yk,׶{\ڣ\~|khՅHY/GӬJ#N!D1c]@|_4n[f:9ErRTD*B@ۗ8PS +#a]I= 7xVr9XN{VSl#?füɘ]@ ,ݍqÏW30ь`龦ΣO~z57U8~quEJT$$)QTe';YW{^qϽy}s`9bq CF>v" Wy _ƥEâM=X^$Vld􀩢RgR~yuE (ir 1)Zߔ3/t"Cƃ?\~ z'E`Rw28,'޾ukߔGR`n_ߍrRGvCLĨ3 UFOcRфWumnڳ%`nDĎ:.=5E8\%k*xr2F`yBˡ#s7ao"Dj}:l(`dfUE[t]'.He l Im> :V20dxbɬ0c>GAZ!6I<̥8\oI+yUA٫UCF0: ͊xKdh>ByX=qCt覗a9σc0?g`eX>x6şj:ɰgO:D=Gv/SnDәF`bWX1tx4kD!h.A)p6z[ Q0<Q^h ^*_˟ $ 3/:S.GA@n TJ ?.yWoJ +^]A'Ҷawl^*Jvt8 _f_C 3mr3r3gv-xmc/ b(@i"%T_1ݜ('_)!btŸ_/?),QP-au ڞeQL kGa1kv/I3Nξ\od%z.Q!#w V3>}3]E98H 4`1%Wj@`kjsb DY3~8 `q&FXHf~ݙpӚK#GM`Yu` -|@r0ysb'L#*rA qM?7a,M]+ EN>̼N۹g9cw@T;n| Q1&֭aH,ҩt20!H{Eo}>O}IfmGeaWeh ɤF fcDЧkgOEl!#оS ݗ1v %Hћ2<]|O`q̯0ifȁU=5-]e{@T z.ߟ'-D a|@xA /^`#|W\_Tw7cax GhL^Y 6 פwbuzzڿaCT3vN!U(lrH8wL߻hZ;/_/ʃ\0%M:A;1~9HD&] ]^DRu7D}RcU@pEj4\2z}zn3aVpn@% eS~/aŮVI|,u<-!q}tY޺Bh:2u;m!:0*-LfÚ]X;z Ə|RA=]]sQ-WCc)b8rI[1L~h#%0^bqQ|Cղ=mbQ :?G[0=5zP|SyL`Ӂ$DGYVf WR=`v2G5,_s$(^krcn#P", }\EV 8/ 鄳:l)ta{,U8 pO{ EоK:&^)P q{ &uq%o3uA7sg= }F1r!uR4W~>F1o!ϩ /#΅DV^,v~eӚ\0sd?nY @ML 2 N.Ev=!0>f,{7iҁt9N/xgI-$X]aHmOVOXr^G5ŅăZha("B] ',X01k5dal #!6P{ɧt ʆi99Xu&^ptȀ*kd(A?| l[ӣ݈ͬ^be.ȅ1ޠ5CnUmZ2  yB@xv fB7`{w> ېjqVYR(4 ;/˺Τ:UrysBQXnɬh#r4btغnt#MkӍ"BtߟD_|DDqiKXFk Ӕ˪$?ҟiok}{fW8W/C2Ïw1}e fJLHqOj4'":P; ~#pFrfLJ)ؓs: Xbyě7`t%"Aъ8h^SD0q`&^0(g&(:Z<wº3çͰݯX(}U^+5Eu&ջvmG btx{dn; 
|':WM˞y`K^l#߷OaCۼ :az3@tsY FR>D"a{!JG]7/_"wΉ!y,' VsaP /9]7Q0/,gѪz9O`4zCt=t2}w\xx {ony{.X\4qP:лOi*^a|7 G"DM>|+DM:[Gl݁4Ky &ssEm]< 3L^r KA˭ϥR >ӌn:GXy/ 2PvB1y .{ ߿O>_pn*0,W {1_#l5&i71#q{AԤ6Mba5P~] 𗂉{w3a, 9v -yO :PR›w0$wY_0)teٰb !zF;zڈTG&__cFwz• ?F CefWS QV}ǮvHj~뾓 %`Z@ZkȠۧtOP;0oKj\x+VlW% 2_#yYp_ßFl`74+FHz 5s\v&;,8C1މ3Qt 5 %,ga*~ҋtULFN]I7AM+ؼ1Ȟp86)nQKGi]6l=sAKj1^> @%u9uyLT`bL;Ƥӵ,MyPHb;.CX'hۻ"ӥZ7ȰNqq%Q=WEEV&fE@\1̈}8Ɛ ֦u "ܾ vD)8Nl < D-/KMc~>ě\|out@'dcyxܙ&cXkMya UC)F?*>'w``p^6/WMJw# , چ" 2 ) b6o0,WU& aB``ػ-X,4R~O2? ƥ85"ȡ֥} ~e3ы@L-_5A)C"'_Xbb{8w>iAAxD;զBh@xEyL֒%pFWL1v1L$޼kւ0~{|K LpvF:E׺#ϛO*dW}1~vO!V E! S>iWZE!zYA~Jzw` 4{&;)#;%" tb 98"9UYſ܌rХLYTJ$@ͩT踜igs"d3"]F2ӦsEigz2ו8+G4ێ҆~.Pzo6b{$dH류sCJڕRaC֔?I%0W9t턾A0sޒ%MaCԴ 3bf}QuF5+g]6{g8+[Eџ4>h8WQYn&jxe H%|e0~^;.?Wⷔh]y6T9V0 IGa{rӭ݊b0+Jk5Z0)tR$,&oCB`Z$Qa6I ]]uJ~nQ9X(rB;5'`Dnje3}vKRZ֏Nan,n{stPDd;::V6pPΎӂ3ue`)S?5~8 󻿴NS-#+J0^ +?*j`RRI2lڤ(= wRk;: m-D5AeA!W ZND^N*MfvСwCP3Fo Gͻ?6`1QqLXk?&AcgC5$}Pa e)EwNsїދ NWPl-DsĔ&DSv~ TܜH)h{4/[grG vk9OKF}`qH 1>v^a8|aBڻi_j3J)EIYX즩śaU%z66Pta`і8l+nuѳi1>7+&͠BkA9W`׿FKp(퇜tf)+ۼѺYZ7IvBT-'p͈&ߚ ѽ?>0U!ɿ_ +e20Nx^~M5~v|OaG`fx{ 4q&cy4r5ƨM"k?D48I(H`:l[|Qyπw%Q3U#Z.ߚ~Ĕ7AT\Mբ}'t~ߘCJq6V19i/-r@R{&li4K[`]_ cH0) 8@!3 6es ڳ/as`W';?q!j.[@fY ѯD뫠S[o{j. 
#L&`yL`>17DxtWY)ȯuhnJBη qPyxLSd*`Zo߳ ?iO֨0c*@i7I:POv@lqlpl7FVuD<֔Jd`냍[]0Tm+cR!_Bx/`,r4VT|B#|5tDQKt1ɝsZAwͶ#h{G1jSX7XrW>;l$5V\n|0WQ}fTp>dY- Θ|sCt4/5g՜#iDf)?RH-"eO{Rks][V{^D P>|=?Y I[Yk!h/~=b@}P vJ s!@ D]r-w5rCLC/D !T;cXnE#j.p!ӻ #qMA7X{!FUnDA[zk؊64CcfW]F-OUc58ߖjگFu{v=f0vRJG4iOÖ>Z] &9"n"jHr輅_=Ilg4_7N)c} tA}otCv21Z#Yha,}Ų@NbX0`ED?$[1)0!W7]K<[́q)DƐ1ԾȻ䵰eDm%i l͞ӳEb|qA fO3 K~V@ %je,Wt|\MW I}ưtI0|2[ Kh{i-{5TJwLS>nN >9/⼰}g }|恛:las,*WvJ`Esмu~G 6hǮfa )TSݢH@>#u vfE#pǷpI/ w]nJJ[D5;g>Q+D+02q=rn`a#j?u$ufkA{>8k#W/%SEAb`xh:X*ۨ4` Go:LJεSܷ+eabe^d(A:#L`P,1S Z'Q~iܵG?`1>hG FB v<̘G#wv8K*xWaB֋cyscG a՟^ϛyǎ=~5 >xSye:8t+LD 22~n$玀PR0tr tl l;e+u*L{ٚ3jW[Wa7x76b6`鞃wY=pA2 )|CnsN)^ 8䨿ScU |oh]\v;`$ 9ʠ[ DMSaRtu IAKw ًty8"4RFh]`A'LƯsݡU1dmB4Oa>WCJ45a>yƒFS]`/6C 8u}qٍҴd߈3ߘ6[Qgߨ3: ffrGF'Oʪ+^IJ3&ҎypGӝGVnuͲwfG#R@e.eC5|ϙ8}Qܳυ|.j@ZlGVT"RafϹP v75ZvnZ+ia/6L1 v !}c*,N_2٥M06XrH* k߯.ecDa,~nHNFJ$ t i !DVd<ɰA癮&özk$:28Ŕ E90zh^x=*_C;y?`"H(u9}zd#צā5Cv.fnr'eS~۰12;S6n5B\ U&l`WpL-&=} C7Wi㠑 H :l>K!W`fYa~O/;-(]g[T !g=OfA~]? V gV i@t^zn!*HnXbl%y>!&YCfD 7Ht s@6`Q1%whkAP S1Zrn004X7 5E'ZNwJ43w/0{a‚>H>_q3uȃ[;? 
Je+7`iK}hK2S(k /<!9;gɗ$_ttuf ҕ4 lD}wX%T#^Wag>vgj}Az(dkؚKXtۅ ORb~qdߏ[WA`R,~#꾼.N{!0O0:>C 1؜`7 nLMp݁x5Yw/: ӳ@(N*0wwEG,7<[~x$oƝ@4yet ZP&J\' +~ gSo77a쇨\<Q釽 >'a-^:,V6ւיafS}jjBh_fo畐l2d.CI}Y#u\Z+,x`RRUSAT{1DZi b+_``i^i?25\UuCmn^L,JC!qFoB+* ou MrJm;|6kTElZ5h)Jaj$D044D,T\#zE-D{_&V_k k`R,I^sqc#8K`aN^g?Xk<:]DXZwTE iWn#`yD(L[&%k|nIݼ8b!X"F{ }]1w6D#΁OfJ&"}O_rlW?=+oV`{= ~+Qy5Ͷ0>2L|d7[Ŭv~9IS=uZ\abS5/} rU]<,OL?x?_~((Q(5~膱 ]@%C@߼<2 `=5ʖTX0X!#7y2>BdOXݿ#;4*â>ٵu6ARD^S`5Dk)Z۾&HѢjr5׻%M?,ݲ@'}X+:}(<柺ʻ @oKԛ?Ȣ=%qq 'Hr C.<Xޡ8OdKePgF ޛ5꘎/=;y6^a/5yz u+8I[>0N0 VC}ٙ!0J$Kw#e^ǍK5(yHt:'~ KC0qE*,8$gcwd~z~2b6H%h<OF _eibs>USh65~Q-p|=G`Q,B9ɼߠ)]07b(M ᱆kau>;GQQKq09]nf9l=82 b:^8 SL6@T Va)UWwdTl-^C8Ffkf-ϘRho L1T)AϵODӼ߇]aM.~ylO"}`pdQerlu#+2q_~ 1>Dc<8/\vџ#~*AÞFuXMW>Se&=dƌyʫ`AI-"dͪ-galo 3=:x?K)d kѧTaeA+_bN3 1|OLG,STyvtO 2gԋS9K~ #2;3 ,LcUC/Y+_Ya:6fqP,SඥjUJc&e5;{& NB-9qvX/x l}ׅ9zV(?7Q4 HAފ8ej ;s u*>w M 3':,ᄆ`4ze[P@ʿa4O.Zo]p+IAaPrbڊzPF`WYVmH]r1QLئ8Ir>}q|.l-{K@ `6ꀾz{-PJAAtvM|CB2bȽm55nKa+}~wS~ Wr%z v`: C3/b(yXyb}}`LR3byRR*,loqATJcg<  *N0o>ᇵo|S-T} +!ň0dO h :?jYw$۫I#ܤO0kʪ#nWE><1kf'M&gÚYj6.[a`x %Nh2⹀Ν>q szqm$>EP (OcnA<VeѹvWH&&ʅ@UAԈtwm.OGvv kYz B_C^~gaVi9O/<.0P>SA_zGtsrՈN_@vJ`Tw, o:c=@Mwt])+KB?j T ;VˈJ`ҥOo@:X-mq"3Y8v[kĪ@yLGn?Si{B ἨQR &LCl`al4\LXp/ů^%0&7 -}xaep;/h~;7mb eL5G*+-D=U+ ~62kn$%A[Fv`K5^%0x>+/,erF#mowvLdUs a %p22v;c#Q0{^)iy(ڞ6dݵ39;MF'ն kZA8Ʒ OacVyhNr8YeӇE ӯ@2?V˸a [}O3Xn&Ӳ[G%ل }k 1 清oEE>0W2AuRZJsϢ ݀8cM/trG1 qV`Z%I- _z3`췩_JW/=OShm C/ԡ7L6,W5w׺jX꡾FTnzof#ַ<NjK\ gshmjz B[a~mgqXN_^̄&yx$Ҕf`]v(L]qLґDm 1qgF";7"I5͘'X~}$37c5a5W3J8񅡿 [ h>+,"t%N%] lPtD%H hiQ#r ƣs `1ZVV$Ձ .o9DG<\2}.x1ZTFd߷7!O7F1[^VETrmt{7~vB[h5530ǹ4U.Wh$tRdzXO)I^vw&"*%*u=L>(/`=\5/0@4ሶyV& o&y E#,{s^杅7r7}RY );UcRCw|աq>gbozcuS rҬu`(Ce&{ǜԏ"D` )77Z ƃ~TD_{fpNvXk} Yxsg0!U6)Q 3b"(o|zMyF8AuSIaq'w/S< H<( ֳo4O}7%6='诌O#wF fm=DpܛFlȼ,~GtUoi!6QҤL=󐻤R+^4$,:``C9YOQ ;gbϺ!q8LP_#@lG%g0= ҩ: ABK]/s1w&`f"Dr nN=}3+1DwmտrQ}v;^ 6޿0%CuOj0b-sG7tFu[lƓ]aNtJ&97ӡ~jJ^A\v[qCE<GF}??FM!6I-q 1tdBb3_՛c3:8؋h &Xn=!(קv*8ړq fpHU&ʩu)Di.(7E ݏEɈI7fU*"PdRZ!y)8pvZݕ! 
| Wfђ)rF [f2/g4 2uKCX9c~iOyb<>?py Xd1ᐴ,>gbvV?l< 'y͌( ϗ6e)&>JrD}N7.!.s6< 1+ڣ,@:gU|1;] Ka}X5Zc=VRǽǟ=`C˼'1bD jѣBcNVt(ngrC{IS؊g0Uk6X0瘷vJSj ԕO|Zҡt_yS)Ih]($[m.t?Q\3H?Z|7OUdK1;/A1t*:GTBv:r5QV'ݖ#o0!Pyp^Lq\p+w*Q$"d?{yZs#4)+DidFvV/ >o_ s#/S_D3һ跎ip"fiBWD24Q Q( oM4Xrr,VprxVۗSPeA{1E(7~3Ԃ`m FKSe/}`:<ֿ~.oH0{HPSdaҢ34|} ښR}7;@x'LW9+ (s<0/`PG,4 Kn84]/vDaܬ)y؀&.[fâbŰu 0]bGo+baK[Rb!>Ow*z3 ߌi @SǏ/O^ʹ0w)W-3NFJ}?WUڍͧ7>4^qfPWtx`ƶ&9t+JrC倖R^7l=7<`#{@Lx\ԇ9!>\07}' Zo<8"|x7jA kXH1$߉W'srvLOdKҮ8K\ LT8QSɲͣ}DNزḋ#D:i`/qp;WBH.a ODuZh 4.!caw >"&J>&sȦSQoK@4^S3GS0~C)muCC?(S/o SalCAM7` \Zyח`ϼC? #/y:!;M]Aܵ]v68K17>`/]t>,~Q :S`:@ Oi֥ZBti$y^@ܟX-UaM9%ej{!^ت~;KKUjQC0{pZ RFVCeB`1ZͤDV;\| H'2cİY(1ː iRK.C08-i6J%h6l^8.M, #ԙU/``K;̱9D}Ia$=Q?(gi(X?{-?>>cuSqj@}f}1h ?W ?TB<j >,@f_q™h7iv SA,b bm8]OѾn~)@cH!-oW ^֊z?QxeB  { jٵCOUɇCGy,~s@jW,gFu!]۸R j()! κ(Do(%]<χx ˒1QX~#0As8 @x%s6|{h?XqddabYAY"Fwv-ӽ8UļLaZsA_b}[N?Xԫ'lu>؞ ~|qc]xZ[ $>bFTsW%" 4_+j}nQ>>(V#'ǫȗ7`qaߤsCs< V\L*̃LG'{6?qlDPD{'D]^[_[F.28 G3#5IJXO3z. e[r[1R,S@j[EW `B' oW$xu4z?B"뫃dXl]-5&a53م5O0uI]cGH{Z[W`wnu,F4=4Cyv6PpE)WZ3/WXᇟAOam.(M2UDma+ժPfAtxMJDw\x\Y?X `zWEu$1/~Ey}D ec "ε]:*UFV^Ks~kl}&fR`·c0*;AN©DCGޟ# X*cO2W7CBo,PJ8}qޯ4Oa]c= ~*qZ-lxThp/cBr23Uî9`C չw Xwcض.8]-ANޥ*誈 2ou0Mhs㾸ݙZXdgae(c(/~>ѝAK &!}O"£Б. sEfDLWMݧ|r{oѐqBbuI 0[-м;u|a j 5r]sb3~'ש'NEAe.Z?h YU~f%{8{̨LGeɝaĖ_E'Ye`9E%? Dk0+f8ࣔ )|ڿ$`>0VF`=AvnGlME}2MD+O@=:~:1Mu!+i/uFoy4X6}6L'rn~U=Db"OE }`I[ E|"-)([H~DۣkN契\->D5E_ "cwLJ*Dƚ6?m)xg;1!1kU7 ]C8Xâ6k~~Y7%y8`|i86Lv)PU%.'y= 1OXޅF따,vb ANّ k+ƈܽVrc/O;˗mmr&Dp0+$,MOeau5QRXD샀|u___6yNa|3(BO>G@t¸Huc06Xz5!/˲1Go+FaÍk㿋}F#yX=(g-!j o#)\Fg(|Dl.}wÝ0ж13.,_tGw4aZP"w;qC`U7ȹfg~a:@{1$j(Qit〽D{]) ?I]Ad4GOg8ᓜ4q,IƸv^a!'pTVГ`R;WF_}.N<Ŋ8X* O1D=mF]'ƱBFֱ&eJ,‡1@Y#f?ɣད7 uXjX>ac !07jX4Ui0'ܻ $:,8ˣ`;׳!'^"Z;j{bLVsYFt>\9D-SY*$/Y:[äg#tJݣrIi=%zS f Nɧ:֔BjJxۡT'^QQfq*'K:?kCjh[Tr烥b'18T6H\lWdM؟+0 ':A#cex :>Q#Ly}a :U0N?Wߥ3To 3O[.@V$Yt,[y#˨v?|\.Cwf-2!Wl#M.H. Ęnx+8@5$_W# Wׯi!XX.o.j)0mJi[Cgќon߼ՉY<ſz&?h/2 bƝNv{jJ: ˝K̻3{n|&[Lu =s^ yqG/-.IZfz`N6!B?ɆõkI{c4S?apb3u,pu?=!  
Niw^Z/3Ӟ=cůXD7;A#ic[u*"hfG,U#FQ1vuib{Q=TxN^1rDS{rb HSE,._# @Db]I:٬0t0OcK534Ac(|9aewlY.CTԆVO¶\|#`x1# 4=2 㚡ßCϚa1/-C6yY~D9-/݄o5ve`}0GfT:^2蛟\1+z2e!J;K,D^々 濮J^NB%n6Hz,_js\B\Τ}]Z{ # fDs%A7PiegCo _y&2_ug9+3 N!1$=+"^:u27S"[\cSI+ĿTFt1|kA^=ColT-@ ׌y;[j9Ai³pKsW>4ovs|[%_#*g _vM.@[?|z7dQ8D˃Q u kf]l7t%9b;D y8Xxh{_s<8?guys%0MQR ]gW? a=hW6\Ы60 P8c k6TaOD$wN ?V~>~*(gVlM"*bޑZlֈԿ wPg{YnUIQo E'LrêH}Q<W^Axʃv(i %)_R"mo, პ=l(^68nvUɰwR:/?o5Ǡ ITEP.X$"Kb LVv0NVrɝV]bK_2űi 9%I!\T#gUah>g,&Y&T<5Ԧ?>Oau޹V 8{peϩ_uXr Y&""8B!!?9x\45f灓HT&5_5ݕ7θD4Ic6mSWP b^B1L]:M;*h=gBTn3-:/0֯Ϋuz^X/:Gk}Z0 v3)+c( +uA Kw0*%dq{]=wYc`ol,hI]Rʽ\*l蝿|9\.1ǖBaU6ERhy3KA 79 U!_1y"R'ato*6&*[b!PƘV32$_|aj);ؐtD.#Kqu<;݃^ާ5"9/KDj>x!G ^EƉ#!~o {j;O` wNyIgXeO< Տ5a]#jx'bjǴY-Z8[6Ō!U%cj/&Rӷ!hz; `@äz㒰ز.uآx$5_I?w/]enD]4㟃O_:% E;ЊQ,{g+aJ(׮| :[8s<uf@D0hN\kw;i@8Ь;X}K?"r6ͣ~W D"<y~ϼ®_`7$ϯ09X68 ?Rчgl6N0Vpg3/oC^N{"Uk=򤿛CZ9rap)Q? 3A4uYٱp bg깂ذq"b]tWQDz{YJIylYgs<4|@>z`2#V݁=Dg>L7O;] ` Y6e6m 59L±z s! kkmy| ͏֞9K@8ɾvMY ׇ\%˿q*)u/o`;~dX8nܺ(,}hydue%w?,cokI\j9f!>Ot +b{Ewa*E~D̓.X&K&hȄ2Dl.O4DYlWF47U. 
UC{z5)Ħͺ"b{Qmi-XgIA_e vBU!˿KkX":/gm_QaD6wQ告!\f+E1Sv  jY`W G=/}Yj(}w3%_ wϡl ߓ}-t};L`Cϰ ^2eqL85xuRU^ NBu/5 lS=Tn߯/gAߎB(L89Q7|Vb,bTS1:K jwUҎBUyOK`DЃ% >slV&Vx#^|=шF'U>cD Wr!&P/<:يh#m[|SJs3s0k&ˡ͛#OS=+r&~~5^/K=Zޕ:0fi!b GA4], .QS,w[ao6{H׻{ueh8K݈i6jCڂh'40\]C,?=Gpip@?y{% O,m̴xaċDa${;}L-ǵl~uݝ*1U6eF~/ "{rf=QDcv"kD <+3_ܻ`-) X+5$Z?j`4npnT?Z;iaDMyXy5#BG2$a>lq4_R2;XU`^Xj ncdѷ!fYsAyj`%!X|Jly%PM53)aZih)X]?_dv[bYMלG +l50?gX`5 ~ ]{ I!oC;ܳpq u?/nTH!i@N#ywK%dJa-#lS{Itw aJ7j6x6%cslf+B5ϼڡph-#kl z3Ll=XxH7H0p]Ȧ[ZОT(Vf&Z4agK^*((ַͧ HsuXv"Q2mulbq4r~hvehG"S\W`/\( g@z`x[/KAuϺjH}}iϞaОtGL]9=W:CD}wUQ ҫÞk`Blh^nzH6nytnBY6z,;|A%QN0q>oVlʈGbAlR*0=odX>EF?fHN0*gF'iV.Bs[0wݕ+͕~: mkE$!m*:~RjMk"a0Ǩ$*(ŠM[!~1eKADzэ(h=;K }r` Wr dر[(3$=AtLu% c7,!Fp=#~\kh5^Dyv l?)o6,ߦs`B(Dճ@M%hAۃ!htLWn!cBJ3D9-G30GQ $;=w`=OMZ=_WyyF­5Vnd#7R`0bkFL7n)יv%CGU1:0w(\-SXZ5vwվP1l`'~1;ٞ @|:"$#:'4bb@,M|!jv!f>Mt 42ǘ>ݫma]6_Iе<OAN* 2.w]4)DSs9@e&B}')NAXoLZ< Fs!u S-/l2Mޝ Vn+daX>jObTӣŴ![0UH]Hx4N7}9d(/5Ј| tֵ=LY9mksJ"|#n ƶDDkeEܔ#2'ן6zu~_wwze]47o=k }8 q0}1d+>! MOpx/Dh jlBR^ZFI:>}i~5!vAʬ eIl<IϾW|6*lhy&/ݚNPv>/" ݦN,IB[Xz0&(ڨfc$_=0̶6~}'X-|7Y:[bVb{{۝!KR ̻jAg;˦1Ԗ}j݉+fڎtkZv*M1j;mQȁgʀX;>"U n0 eil6'B]Sx@HtDAPMYӰL!RyɤiFw~zz2N6[s羉pf%#*a~_ 1:fkΏdm<2,S2*!W m]x_X G4)H SYN]nYRV*ck` L5Ԃi`2g%@R@<x6^|R'"܏ M}kDFtTzk),j?"\EI#Š(ƈqeUk*9NN67?ˁL[DAM\DMN`s, dY?F4N@~,>⿡ }͏>y(h A0~0 HWK ڥa5mЕhj:_ !ͰhDP{2ISxQ3? ;yċиeayD-=جZq.t ! 
4mJ=T+hY&&ܹ娀oNCш/3ѝ=ΙG𽃕54~;*SZt{5B_, &Ԯ&| m͎E@ jO a9۩wrikN<%5_a +ݱ\_~hs6%C0FI)eоb`-WFLAy:9*dp؎{1XI ya*7C2$s%\#aWϹTutRGrdjȌwo0w>٢?Þ(C#Q-oUGZo=sѫƈ 5kbp-*(h~\tQEiSj6[u[}R@Yz;1$[%P4vOi$z@wџJ?(vbg*4RX@L *0U|e.؈fݜKa/X}i vrē/_o?D QDe-{k\0Y8IO^r=yeNKi7X5d[^TH RDTEC]uEDgBՏ ZK b}tA4"X ̱[SymKޠҷF>ͅ_3H)lϲ֗z6rrЇ:S/wo=PxS a-o~e>;RUJKSTyX TR蛅bX~pPv燀|nү,{?`%נ?1w^1u"ˑd)ۮ Nz!;査(}0~g)g!z_UrF~iAUSK3m<"l#juwNlZ4Éa+;o?l(çaܒ Ĺ0ֺ`I#r,8ӎt : @90iՋ`)F3_n^|kpnd]??_Um"-JJf6#Ks(b]xU\hŝ'"WWZ°sgg V~\KM`XJrDpm (G*/auܫd`mޢ9I-Є6 'B>46\ 3݂:N{1?3 z!Owe#J|q؜x-daz|?<42n8َm>CR@bYZ +RWK@~ʕ}2,N,Ʊ {C±螏Cx:KNm1Ĥ1s,3"'5>wˆ`ر?13N 271Dԩ{>$ J8lNd= WqM0;f\JXz ծGA\Be"̇Ӝs.SW i^B8\rFE|zXexX OsFϒ5X+J!WezDŨHэnXoئ [bBc?w%DMTl~|t&N0)%`9I7.n\w"wzѪ4%h`pgBp $ˤm(k9euXVq kSth5ѨJ!z3cvӽZl|)CZY i*~ "5?} Piv,.<++JYE/qF?̄CP>C."y.=6İAÂ>=xhM jFx<~B-_blsdq%f/)%Hń)9TW7Ԝq'vv1X } ^yª;x2lÜdŒ< (L 8d{z 7W`Ҥr 46Z?ezO6 =5 uSUfe@F03$%5Fb`i'Y 4]Sl8z6ywu2Շ;Ɖ%0HVS! sV9°bP4˧%ID|`T!?>h~LNb@1$W;j s>(+*]+~3Eva_`vQr#ʦsrF ODS͹ԗ݀G҈TTj3"[1Q Lcx?lr 5½4zG(2uЫ(0G5BO!@"L&:[W? ˰fNWsz¿W4o;$ i^tj /vo}t,#դXM?'HH矏=B*Mlqf3YJ m^x !sRx?>S3_]]4=||bf8o]"Bs&7QJgOnaGlHƸ1I%_dcn3I@[r[f ei$1nq^ρELo/Y/'W|KīZn0r#{9no~7xڑ( u*9fvxmҴ7'},Z Xp&A=Ǜ`rsCp̭Ǒ Q';]6%0g>&>-##!]'EHm<9v㬴Lz+ 9?!~6t5+_gmBaaW: $%J#9gĈ )o>}@zOa3[GsRSwlI'5 ,9]`pf-BǬ$䍬d=#8m'kFx * ܪ  }PWc'N"ʁ:`%_m C[ՐvQjL\V܉ YtE,YZ7"xr5tTW#G꧉ dOYw Y$l LXyXh Vv{q?yۀ i)q4G_5@KAiT}R΀j‰f%VK`RH a]%AzQ~Ѯ &yT;] >#f{ ]kEۼ hٞiq7|$r:f3L\;HԲGLQ1T<㧫{O??oS@CbZ =7Zz aUG9nj~i RːIoO۹~G'W6!x+~)'GE6֋ǞEKbH:*'C_M'18 {šK7u3P/&uEiWiBW̦ вQm2ks„ӄG0mb8Aaeaio`16jhF[b_$U?W~)uBD. 
n}LϏRV$Ȗ3@Q~[J~88y(7qX\\o+hPǜqdLݴ5YjyZ>(ٳ3C; >G!IhM>Rﴲ =A{/ }n$d`sTLAO r'нWdž-{ gB!)aMeuLN:%&t4 >9ڔ ,j~[Nx7L:,۳n$~zyqHHHC\]vW&R/hlm>0]I3 ݏ*a{n'ZdWF7,`{Z0m۷t+7հ!ndqW+k^#{s3q2y(4D~6 #9iP/J*%ZƛmK`ZXc ~Bvst;G>ygݹPm a'>ؑI6cev\rA0,$ VwwTE HM $rV/ W_J!T} r-Z+Yب;-=D(U(t[Hyiku_ e!{_H޳9e$=i|jN]ڟ sU~A?)lcͧ\%r"p %UAߍnWHp)+0sQ'X~WdJodKy9,9c7jF"od/[}ܼeڧZ"ţ { m]"*.~\Ё_F0v;ZhϝE<B|Ʌ1NH^nJJO-'Յai1 JҀQ\ݯz2ן鲇ǃgݠ=L}涭+Sͥ6H_=Ya\>rDZ;)ڀb#O萾-+9;`Ⱥ/W亞oVX 3'qK ]`<$T{J\jGx-wmȑh(r~@v̖ iiv'"]kk^W6̂Kč.La!>5khz='iPWxe74&dLek̵s%j [YM)C4d1Z]Ep2r)_ ]Fw!ra Wj0w>"s)ƃ/vQgij] &x}r8_=r|66Жtab20ؤ|taNuPCWȕZ0ٴdG*n\k9K¡' 3Ӡ It!ڋb/d`i'=Rw-#<%d< yeBjEyGYάEQ?Nۗgڞ!sI9Q5XpfD%t$Z5%AY= O@!}bkG5r>qHlym(FjU.oJ0BCf ϐ>?2jcCS/w@Ҁdf1Gš $f""E>àMF{(D37NX#r< #]TCֈ@؃\hzdkoW`f e1r,op.H6-ԫB?k{t_9R?w_JtC}rx2/ {'1d^iw= Li)ՅyCO]o63Ely@u׋MIIc6/&o_jK}G2jugD'Kx'2c{!/N<\ ߬1[x{rtLEDf:r+׻[ ?v^ G4 9g zR=NfEwa~W}2v6DV?ӑzO/phZ-}Ъ;W?|DB1GFAbu?t0 j6{Y e{k%1P*Lx<K߳#]7;"HOSzSPj-mȻ)՟7/CskpVO. OAvy#_%6ooލ" k@~]n2=[zT, ?. VL ;= Rbu3`YOO11_2=b+9/AwFڸu}ȞF[{:w}Cg? fx L)|}8L]Bʽ~L6n_P.'`ɨugmaHXTSO4.%CȋQR 7Asi-ݖWy@$?SgC.B/r)J9RLpެYeϬۆ;"I1g,Z9y0@/ck7, ^{ԾI|*u|edS=:+,ow~rzij-v&u(Rvh{[)9VոVY΃@kPIxw:7\=y l;qI#*- :#zq\2fֹ# 6 ^tgGfPDzy# *Uy2z~Jμ:H7ٰ;:N CЁmOwChd( +gA+Qp6t \$8vqNs|09K o B=x;!\h`xaaS_L8{, ל k~nuaF(Mؗai"1HEgHd;b{5L3~Txaʡ2CYGl0Ț}D1ʐ{2'5u{.&^Q\,+b1ȑ;~46rz/ ~7?#c 䝒^h6}CM 5F,M#nBK0rWP:RFMu^Vo\&#]Z`K̓ hۓY%z]HP.g] +Qy++D8zccնP>G?`4,w$"t"qNn?I0fwXˊl"*?.C; \Da烡-)q]~ ]R>,JCid_ Bu_a kSEOYaY`C]!dٶnkdwgrH ܭG"wR̃XHʱ(AQ]ܰb,_gСOyX-sśM.^`ȇX >;k&+w1dyEjDnm*5z?9+z _? M\$Q:%6!Ͱ,:x!8LY2֨_5gXg]yȳj&BX0-ECJnoV?͕siV$]duz[u[o0'S;BCɍ0R}]L ^N7_ P>SG) jĺ̎l6jյYvX=RDf -dYU҄?(_5y<(RN˧Ԍզ_zYȹst LWaCu; OMU0^l$i'uF Zb 9u4sg!41xG!zfd^X,wjB; #빽8u&+ro DN/mv0-Ylw)]`f =I ʻvB) {%*dw_8%vYf}uúX\V3n8ٲ[GCP!  ૘$8(z:V?C)]Wz9&?ԀIPKWg#JC<>YM/*tfByW_%xGܗKw VB3y7woA~*2VsHS[49M@!w]! 
ԸdA4mK9Gh׺ X."3qv.$lPE>AN2Zzb~R ZX#lBȧ o2s]cMJ z9YHQ;Y*5D]a6OXwUǧKZnږ&Aw͑PsMv4ʽ~OH{sӀWx\M57Cz +D>­j@R";_!턭YʏoBʅ&IȒ̹"-X<7mgL'}w/mb7Zy9nn'.H"<'j0r~ $iSD~#s)Y L՝m`ʮ'(ɽqyN%JV3I<'Y[%ͪ'>}gvjt}MSWmz._?݊a17|cW?UId/dyqhV>#]唁 Z~iy itv9??%f7^'bќ08gO>DłǐV%#C{J>9OՑjDW/D3y yKBk~[@ NJBaF/| C)ķM)U64intWzc&{\OЀ_&oؐ~S;G h!qYvK9v2 r/gl <#ݬ~z趟4)]24c@ /]@ٰ/y 2oyG:6gx~I&_d\5fBd=.0>CٯP w. v=>   y0&hp5{^1seխ'tao7hP/SA[BpUYbvi?ݼd;%c3yBIМ)3WnIC(#^=ڹõ\K1L.To\" BnjD$lq zx|Iz{0[f`80x{v?_}޹y^c9'Vl!!dK)Mw7Afz`-R]ٰĶ$$l%4SŦY@zfƹF. ^}hi A&u1\KF-0%յ9OE2. kYs5`hK1lepǶAhD3Js? l!eɭnPk\&]O># @ wYvbƋ60o.f|?X>5/i "E\BfGEȲc9%"w2n(lEzޭSUrbclLn\11A}YZ7f G^>ޙg呕ͦPmo/STGax3%-€?!\D- mQ!FB*Lsycߎ2HkN}?k M<#&ǓFį:p}ĉxr@7T?Kԝ/a|jJ 76)x'iY__H"x5dݑ$ eǢ~zy3#5iSq_tD~VqسJ{)}> ԆGQ%W"zos;aNY*J Uvl ׁƀ)7y/ #ܧDq&*dxT5h:eGa1XRo";YyD rj:7$4 ?GǓA{F+ʍ r] <%M`57T̵Ko8曎4uL?{7#o:Yr9̾w=3RtV9?!auf}E9$K] z 7>e߻2 ӯBY#:ϓ3LfZi'H54{f_K`x10&6ϫEHwhBm)Nx#fY K]nvLE=V9=&a WЯD~ҭ3XCA/ M#v+X9 &ɭӠs3*2}n!mp^[?Pþg 9eŧ~渍l/c1'^";MPVbz4#ŀ|T,A'цk)!:`MG&k?85`Iq*G4jl4Xttr+ I&ݠgG2Qo}/-, o t5̇7`~@U$ۅH:a.ʓ;,yr{l9_RS#骆g >hx< bh0YKƤV㋊'JRr=3bnWFn3\; O,\Fdzx)َG~n/~En!Powi+3\`g~rӯ +N#="r /rpUgFFph6ʽi;U=cIAQ=`ybyBV ,= Aw;d 5Bʕ)7y2䑍 ̼ŋl'yN>J* koy?;pNޣUKQ\ɑ*.4Dp#c)s׵ΰPwaRrvE tL1лX$[vA5H:Ys: ̭:P=*(NZ/i(/bb@kZV8 CvmO6}o Np EE`8 "ﬗk>ݠrh6ʼn1zOyw_VyuXհ!? oLo ݛthϑwT4KḲb8Y3ú,w(\L2Yy<_ fXx/|206d~n΅Czzɶ}(C'B nP(#ru7m-qe]#1qL61y״ev rci~{ݚcCV?~Pӻ@}ڿSRr!z-¸ Z}6C_ízux)h^Häs* Wڰ†>Uٔ\#K-w`SnfL+X Ց'ׯ OWZPT mǑD3-LMGUz%e.3N}Tټ95+ $gqs W&h qԁ4&PN{kuV?wJ {uӭaR>]۾kR~e>&|KT[E?ׯ {+:_+Aר͙M75yTrnu~Ogz˛`oæiX>a9~ReРVxElýLZ.r xcSLE>-.#? (`gh4#Us̤!wu^9^у6JH剬R_Tj|EG4}hs r`ԁyDZ+տ2:*a C0./BN;\I+cG+¢jKz&dӣ^B6WѓU5UI9G"O-X'#?1Os{r/ f%ag$kpfk:0)ƃ}}=a:EKzQȔcH$p3~pC7ӴoCHJs.䬴鋀e<"<ĕ}۶BGn:ܳ HCo|[iW_S˞;Y"sADM#d?1_ Z͞8 3jyk,@.6ht(!ATv>CX\"ukS+7) { nQBH:SO[w@bos5rn~T Nwg*`f>xzq%bT{EJ~!y-08 IRXI3{fBTw4 *ʞEBxc]hy7I޷OAkFS0XamMzz V X7߿,WsW*U9gme>3%\c"6{?u&RIzhZ.?J`Dr3Nrͫ9$,m΅Bqo-`k*X $lek}s7fyq1u94pp%AT`Zjy-cJr}cv d;r8r <ԁ||ŎSg+Ov'WQ. 
?:!ϢXKoc)9$]u@p%B;/*C.Hܼ$g{eKŗ;52 _)gr0{u{a ;TkׅU.P!YjP9sĄGZ0yYvXrZ%3iq_qϻH}7 p<$ 5%v(FݏZYC t1ie_0M G?j;bmb}N`\7\ F3iOa-vJ5bjg.V,U숇:eu\:^ɆJ3hinYyX5Yz;u 4/@;w"Lo"r )q~sB_n|".6Bȿ %oUc )uo|Dܯo<`mo`ccmv'|Ӯ6r_CxAN$<^Zr9FWy\.SkhԜ_ #s*d7h(AM C10A ?  #s'Ц; (A-6$~H! ժ,BϻQPmYw~i>3 څ/exиJs6`Ŀ{ gf*WͲmi 84=gA$9+ahSh-)!g%(D~/o@@o/Q\߬a':*L)- _υ-׉GhBt>pdj8sK X۾DL,'KVԦƲ ;*EE"2C/o>ZKJCCAiT=z`F۱-lA{l \$L~O $jgN!a[rŶ(@4T( yaIή{o(Äụ0?,dwe&RKpZD?Bޒe뗄yN ϊjn/+eE܏SȞuZV3w)ͅ5#w0`ˌٳۮ@˜dhgld :2oHнauy:.0Dκ?+%Ⱦ5{!dqRG'X{jU cz皆 PϽ!Aђvue[2ֲ`H|"*:m̜OF!74& 3n<|+˗o!sv;-CuՖșR-% 5>w,}E'xQt.;~} ,|Uݮk痗S=L&ڦn15":Ŭ078zj9V|Ƴ>coŷˬ+B&'{wWF qXz{%CX;2+mhDsB}Q;mlW\~g[e!vڂ8&ϯN~;d~ɬn"[B\G3GnZ>CorvÑƲ#.aFO>mg0`}K;SzB -N!FzPNLGW8ƒ:a5\:gܥ #;2ү%Aۙ7eGy;Xl /J1q$'*I#',KHݱbҡʰO |ۡzhn5^Dǁ"u\ |d$̃ M]+&+RmQTT_^'+1CR4AgjhLB~]Q>,ZoΆ1ۈ P5BMJ¢9p//2|۵Vw=. 8F|`fkO^-pկMvG»"Mk AIj?+3~^#r`C0~gc4WWd2@ɛ0 G>,:x^ymTܽ` ]F54&E!蛲KJe}-rA랞A:mauyhvo t+ aͩBi-";sl,dg3jeᇔ79Mi*| #oH&#AɑD9"q\ )>clF{5`U>ʆnHM-h˥1IWܽ6]-VGe3N$49 Ž@ ٷ %|?/7P!_͔<4R;fL`{Z0ye~p0US#[\/!zX H`]A X־LHS ZE(c>*ʳa!nWqX__34֚ôR,5 YNTnq ^F^kh}}4gv:]Qk1(%M~kT9WG^ #7ikL2$YvFv6 @0dh? dBa̬c otu%-"0p/F z/X{qBqnBhWk&"&*ז%d*η_lU7_g7^:Cw R7mgwy9/RFUUq%wゥZ`YHҽTp3 j͞ H^wsB/%GE#)|o~}&ة_@_QIo\lC,Jᠩ)BCn0H[f y6G06I_D"Q[(yF)%/. :jMOnhn:Jl6?O:ǦmZ\_IYPһOS5d*M_"A;"ah#?hj!K«vʦcD;X:3La5gҐ@Rr䒖 OMՍgHeeuO"Oͯ:ۑSTP ݸ~O2g}l8aU<ފlI0^b {7KCJu%"}#(R1~n%.{C\&9 P-;OBYfyh:)&X);(0?QdR6et$ p:+H$L"NJf r_Aڙo /qkH^(6͍ʖ#?yogՂO*Rg  cIEYXm/hdw2x^91E_7eDtًukWJ]烁SqW 6d-}UNV ]6}n‚Z̈-p$پ]V}[&"ENMf$WP9sDJj>Rol~7D*J}Б W><ЃE'^ 633X]|~I%_Q< S1=Uܻ+(/Pz[t^]@ץ_`O?PpʶozN߮y"DaH1)4mD6݃|!;G:,I`'gLd#L%?zW-iA'rūۄ )7rV3:Àgnڏ"9I7`8X`*nԉQcuށf_;e׵漞YzpUvӭa`zl02LʌF6Sw# ek>}};xV"!(r xjVoHpCic82DIn 3ǨRaU,Ztzvp~L2!4L{^~=Ttj+f|BD+$t-c\0,;k>L:vzCV4rX]6E;rf!k}gl a%ea.Gߞ(ӒuT]">eD~43\``!L yd+/{S T7p}ArPִ,-dj ,IYŝǁKYy tzГ 9_\㼅fYG~=ٸd,8GaM+/0wVsg&~-oz@x`9+PCsl| 邼R18 /#a#W`ZQ,'2:BEq=s''N^5ePybMV_~hн]炞XLP Lh0Xr2R۟dBX oR-.FL~qFYWjWS M%KX! 
?'=MAtt1`'f jaNX18akLHm#!:j69XXP{bfzf_Vә;(ko2xϞFfZ Pbx}GPrI7T >N\NY!Ax*~ cWzm Gs;  '1qCHGgЃQ9X 6̻Y뻠-X R!t_\ΨCup\B$Yz?q)Ch?ou.H[ % r"0/JU mO*#SF9CoU/ً6wO#K}W̾݅|{>=D~mP| ݺ\|~ tj [^+*;'9(i7!rmCH#J=4u~kJh=qbDK?,M7LOǗJ'0*_09ce߻A}?ĝ-MS7Y_i0lV8T_VPFKGi@z I]*_RRq 9YD#|1HzKco.]O=F͸o@ӍP/e+?$3Ec JCwFP"RqDpc X opg2-[Xj@]qi|ۆku'pa8;!ߢȭ㻞OD0"yR2cuҶs[ӕMbj~08i}{*]9Nm:o\Ѱ_N#}!mAt; 7=eZ tXBMa0=Rr: o@RT 7 3v#%k?S=*ug82ҖXE~p!7j)ֻ?mf!oK8܂3dD:&z^s.,?r * 1cD ^.P:,-#nܡ8 蝾XCmٯFm=sa٣Ƀ0,-buG< ʑH9/9Q)uA]={"gT 3K!?ɛHVw&(dV{vo:麆w8maASxsmeO;si8rW OXF ,oU&/O~e頱B0>`3mU9O3\ J>ӡө0a5bb3RyVG[:Ϙ8ʹ!4tt. 7LP5zOOna*L\@9(CG`fNIr37GvRAѶn|E+e2: :5x _`g<q?C}TBN_ЬS5]}`Y^X+z^/Pai: oh!-q@}8^3uO->8&'r 3'IC+G+\h~سG22UAjCnA? O, sXUAahI0]P&Qc`Z5EvQt&F6(d(6tIHʽ3*j!79]u yI!@FݝkG1cSZղJ)ډԾ /D[i3>m<1'oASkc|x䶿+ 7]LpVDf(S޴mbWhr ӧg9AG !兩*`F\ R_A oo>ǿ LD,]nKN駑4!UB1He햃´-ܠ.eǤ7t*sѥ{qQC9~N-*m9%Y]78S椼ȵ` +:6wB?|FߺEy<0B?BaO~Π:4Y#?F-;U 4dz r>d`{1kO?; XԒ~WSj}X7=qHgy̘|l2fnҧA1="ǿKX4tM7~/ %x׆3#r EE[K#5cq{2#9~=vBZcY􄉓ݧ8mtBrYH(`[]/Eqř0/R2fn$&ԩ7'ZM+[0fH}/ֶGBe C<4μ\'eo޺I9s/ \|vc >eVѠQ߅ >'{?" 
['G_qWB8]} Ii;anv$s F ^~u@՞5Ѵ}PUj!23!Kl=ehvfp0)X7K Y\4 f;=YQK64W*ʄhj=pc;rorBn/mD=.$o~,sg$`ԁYXP\&!o+5'HD0nv :Zh])2"lSfn:̚V<!f$ڸ(q>}E="^urP+n|Rl^ sCmBC!F$y &'xGpRWuz<15(Mp]WHD.]$8ڝQku ưىi,dk3\KaZ_йV8C?́HST_:X>wH7.AsoEm@iҭx?)ڪS|0X3/n!IPu`U:sMłK?\@k㇐lT<,1|Hmy^KI bUda龶0RhS?WQm5ZЪ1ѧ1Ef ._zq4ٖDPᓣ)/P]yL9|t \:0}ܘg*^mN]rB[G!LJ?f ݖO0% D AޑT};vez WALvinUC(tT7"+ uRh'r/ kG =.Τ@ BfkZP>:SǷBAJ }]\o 0,g3誂ȿڡsSzMnȼQ%CG1{oC@c^d \ lJ+KgXT`E`܍6rև0*CAl\Udڊ4Aᭆb0wRl:L?P<1 lqP tb%Rš`v v/!Y+/T"-kc1^_ּZYF&NaXزM hmR6z?ux{*%3p nZR?mSIelXȴI"߫X2mLk0SrAZprA;mPgNE=D"_ [sUJwE1nF^w&tu@MF"轣bB˼u_O+eeS%p:tiڨĎS򑓎M@skte-*×}wJ8g>Y0k3IuC:!Oi,ןDv;2K(Bs=5inS ,:09Qrh4GY)^CqW0.w1 q8iǓR>XHqm#>u f}:Eɂ9,5'Cũ֗jȝ[ILB2g[K~̥#Ye=r!{Gʱ\ȦX9edB&`zb%rŲ^my({%N%T3d 'Ϻ*FC`;.>Mn& ׅyq%hT( suLͅбrZ0#xڽfL6Τ ÂfHzc/6q˄C*Jn8kC651@q;35 :bnJI`CY/¿޿nF"8/'hc4Ӻh֜9<R=;f-I@pqL9.Cvmzm;!!#×݄:)&8=`kO U(+ۑYs2]Hq?dL7bee19 3Oxb41Bo!0T= )67{ ɡvAqb83r6WD^niȷv׷H]8sD23Vh.CD;^_6 56Ō {G;cxy7Ӽ:,{\4?ێBν@{tM޳Kx=,ơjߍQH@*4RDa`7g4{<JY)&,d1H!u(/Vve#7W_ '(XLUWP%$4Wz--g*~b?ֿ>v% V\C J#kn~;/k!0hM8r/WSHF mE-")#oPH:㺰q;ͩpAuu?򛳲 o1G*erKeeh9ºH !ssnr \~̀*J5! #ƕi>(~vTk,'U€o (ݡt We tj {E5_r억Np#Ji{SyT;'33"[Dn=pF!,kdG[EG[wD` w FGaxfA3,= (Ă漁U$L g*=x0^'(= 쵰,cwAm3 gڿ"JXk;ա d"ōD*+Ѐv|Ϊ>i${T"Y}UK! rZڵFF&)ף˃.HߛHP{HY;_ce[B, VDrl{ I_Cd ܄,Voaidfx^KKvۜ~¿o?mN$V}剥@߉E!O׆]6,=Kn~a?C}Ի2؊/oC;lBuCtxF}"Q@pdH!RT,S,ȱ&<8cU&_ z*ܯEM4B#$]G[4QsN|xNϫ+@cIȓ/f-ADSpIZ2KackN 6ȴ=8Ct1772Kgv0\:0K*;/$Y ma`z )j6b%K % Eu~<ܿ+:xߝNP{^ c S{.DL9IHHS/Bg3nP|sځ*tjbf>z W#Y+e\,,5(t@>ǀTzi2p6Wm׼DOxVOu-r1F+ IpMR_nqhv{EWμNsG;Qe zAG,L߳р6G0Ps /3q,A> O^*Fn{R0ϑfkzVԿjL3"SvTw`eL[}x,yc~Uů n;iP%P=Y8˟Z 6 kQgzވ5PX7d~}¡YM'.#e_ەd|F.S. ռFi/{>r=Gh[QJ?0iױ;\] [[F; uSypF;8Gnpek.ȷd_f/lVb?&!渙1DuNx8LTE1!}0z_ ؿ5i+vyA2W<$lYzN!ֵۧ 3Hyll_"< -y#׸lfdܓYouCιMU?`8h {ĐA3n$܋#pmFQ\!# /Z߆ELmž*ǮmUAL8}>l[@}$監9x5 3lw-%HjgQ7YsM}q=H];l2d*'*Fgbf`ՙ,tkJCQґh'd]OwAm4h3T?N⩵i^6yl E?a%ַybƇ';'a!pݍ0xeLӟHzVK􌼾M}HwdwU4dvu݆,|nG{ȶI]Ǹ? 
氰I:ӹ8{@ܭJkSq 8~`sF0򣕻V>DziU6k _ŠQY4CCGlͻ8f_`eGAMu;!9؊fU1N;b?AS2;²7Yy>`yu\&t:f͟hBNO_eQyS}8D^zh[W% BƿT_ 鈁 u9H-KCf]#1a9lnI̩붽eNP&d˕Ep?:F <}lYCd7?/fHm @χ!|5@O \WDu*Vc&5y c9-rgDB?H:q0[=Z̕ˡT0pXR+)_5"G I+dTatmG9~^PC~~a&{`\sQ K6 MU}# w>l\շ-ζUcdP ۚ#o[pfhۦ9  9F. /jE.)˶.֮ԟ-"u0nms KA&2 EE٧,]~iNGyTX#Jd[ ߄zVלk ]lI7'zvEC2ռ+$x@*3=NUPgM|D^.h}D^&?]S%BG}~m! :_75s_#:Ԗ߲ K_,#=4K{ԍGV*4dvn KNGUeEM GnJCU" wcַզ0lp22KQ# AoPix\[(S5>{rg#̛6~T &v|}sF- ;j)nn L7l'Ԇr?Gp-9dv GFߚ. מaG?Z w!.M-‘2uV<+3?8oq9 {F۫!筱{^R?22;ԛPC!k/ّm1쾻ī8X4)2KeAnr.#bI1pK0ʷ m%0!V? &”ߧ/Rcr06w}W2D>0%DIDSj CnFN0%}Gze&kB`E| i7_>E[Aй8>:.j_ЋP*@!h4$ӀB6%YҠOz=?'0GNe+cȔnij亼dt:ZFU^`Nadn}t)\~I~l'za mFR”gY0fѐ͉nz f2f(7EԨ{Mg.8f96VISaoxl-oݸ s_,j`*z}B;n Bys0k&6"h$T5Mo7$G3Dʙq+q?+sbePԠW6thl*0Z@_ˮ0mΙ\U +ev>#H1.l;py ) 7d {o> <1{N~n;qΊs(x~-|i4oX7ܽŏv"I"rĨ Xv+&)MJO&J3uhR+ )Hn;ÿ;y󐒠{7msӁB0Шn1 jtaMXm64~vޏ$JN],dFܜ +AcaVi70:3-[IzX|C/ BsqEfYߐ5j/(0UjOX%ޮiL-/~+~ݠU1|xdha" $iShw-X;d k5H*>NI7}9普?0|raFT*?[~̔#@taJ&O\d@ݲEl2{>"z~s)͝BŰ|Me\LpZh_ ) $m7Ԭ_ݤ܂I⺪af`7<MF<> &^B)mA;0](i,ud!ӧ1~&X~H 9Q@3vߚqY 4o^ G:$dx,dգKLGu}p=kv zaWM;SA [q]+;Xݠ_y0]UB5h:voxFwɝ~,;k=o{̩.d昇Ra!\4Kt~ rĜ3LXZ5~B.,?}a-/ ΅IW}) x'wr rvܻes2wO}_fv݄:iWSU.gf gdIf\_|5O{xMK${UḼ_T?!˚ rulݽ, w|<:V(:x6}gKD%TH%S!TBTB"*J(*E!IRBBR$;!;/0{]93^GBV$٣CmwKGW7{sCL#V% gv"lz&I.ۇ"G'tVd;Ztm`Y\ۛNgܢm!җ,wC׶Kv`18N?Uڌ|^ݧG~0MnyFqSmSf-jT%h1ZEU.^*sk ʛ^|* =?F ?Mt7;wY[ggf垱 &ko wM>ead{Dh{?'C[Q> >Hw.O?_E@| ATS:Hz #wO52_PO"k9gB02݅@dgg8t=~',knv,mcn`bPo\ աU!ݶh]}]th.1z.z'?6ƨ[/ K6 >{a1 VHN9 cj\~+a0};] 2 W;DNU\ C賎{8/= ^.{ +t*=I0bx% z;1nc!Kח-f]+mgmTJ oN9 U [DH'!?_D9PWio ;\V~?UOu2|spjCx|.Rбڵ(Ƭr`DlMt]zU Gyd"abIrra3Y huO_0%aG{/TV/7ܱg 9azwr;1%GoWmzDxRCZ:, {B aUH^WB s2KGVjC?WG~0F.SeEsfWb  673(l;ce imzo D_~S1;$)VC^&wu&du <݆ i& V}<{|NS޽KN^}_?:0+nr;Y2 DxRԭgb8go֔ѵtCHߪq*2ܒSD&?mԑr'Y*rg_-7>CVş"$d2. 
kә{Za N)x*c|ʣp]d&#>IRF'3XlZdc;u3͑:ΰzI'֤<ꈭRp8>qZ!'_yF?`?M*oוTJS0(w< +m0~䫳A^,q/[v6iy$;YSFv=(2e_hbb_j5\|Hm*3Lu)Pl`_Z_VX~̴,0> ;39aӇ~JȢUYSzQU]Ol+ڵ/;r{RD)BGgCpSd~gxѴ, hJk\aLT( 1D7uJIGÈ~dE_=;âb-yZj`]|$@ e N@Ի-t*Bv}1IQ|+ddܫr,q\yÆ6sq2ЬTjIZ{^ $o|^꼮=EQϗ$X"IwBpf~G(J!aΛn4n)5BJ B{]QUԸ?DWGx=7wiKRmX޲}n9L>Y'ܐ] /=r>dZ?=5g]{yn\"Ǐ/yQd3'qp}+)R>Oqf9s|c`=W\FVUG!@-lx=T_VcGz+ϟӣې^RrE ̲v-й=/lcGz _hg$#LsV l)p0.blo1 GuS#D?d[A~c[nK/yg#7oN/\doWdyQfSQ rJՇI|o!W %˃Y E͈Pd7v 淜quVa&w z?uɸ57DA_kŘ]LjeT s5.--v>`NlG Az!H?5n.},؈*]7^R1 [wz7킎"WPP{轇> dܗ?IpU3ưg`F#ian)kWN:L[8eW= W}16 xùpμb LLW*έ'K}[ht*zPT5 ^0z̤Th7(b s',2;>TzM_jͨ%TLV=(lrw\Y5v(4ؚǵtGUo{pcAsN?VD?1'zxx ݨUL1y ;-0l"[J =xi/7YQ[-XP]νJգHgiͨU-J "COoOb,.~|mD,2[u"gW! iBZ^&2^ r"7 Ͻ!*k/Mx`΁hrxKc[ 3 )0'Y o{TԽ簘r.K?yPGj0"9: o.!mjM!0+i MYPco: Fg;A0j/Mu9Bgl|ă_utA7#=-V-ӜJLݱncBT{%vC ϲ#mqʝ}.@}Wɭև Y#3,)K*2{aGYվ~&M0C%_^ 5L5臱.Vl0_ˎLXxԲ#]nsK_}X.D?B]c<rn١BTw*eIId񝗿ly|Y`XUt0%L;Aּn Y,} 2/lJgxZfc"7hQB0v7S o4zB&䒀&xf 374e"L} GnFc%H^x|2}QWőpScrMqqw"wH$o,˟p6.u>˺!SNj/r,gccG;H/ÿc]~*%֊@Q^}-=0ɱ8U f {؀Fއ~=:s\ L~Hfs8}l W\$N吠m"^y8tȯdro`m3wӆf'nlyW l@?}lLyl%P =Α!h6:u}O(퇮zhdh[!A4N0x][Mbܖw1ugɲU#={ odAwk心Ⱥ+2z< #mgL݆_sI ,]" sG~@>_ZCt}OCG+rB_ԇ*da 9B2^-}E "}{UçV A($tL$u'(C^ӛy +)?&_"{:"Z#Syظ#6k47ave贞0lSNG?6@f} z=vCvB5fXLZ?96kyu ER=QC] - 3,l<)H [s==q(ZTݰ%ouGmTB50k:9h~oqzYWWI1F&]_6D s5!{Aڀ0r P 2Y"o3 ?|$ҝ5c>\kG(lYEmWLQ?z[>i=Sj佹KVj{#l'GF[#WVy=3l8IK\QYJy{)]}(0ޙ} F6ni'jcOs,} Cř"/ g{Udz\n ߄$sH›A RCHo$R.L98 \/I.TGI|3]5ؕ"~?[-_s:U}ȧk GVmyZ`.< ]3&:M!5qb -:FCtߞ*2\{#R:xAaE쿥VKQ |҄_ Z>Vw D޾lYd<0Cadey! p,2&OV'oY tfD|)g/FLh{I_LCZL:=Aךּ?} S7Kū~@ܫڅIJ!hIz'ťJn"9~q#YPy9u6mJKDwfO8q2)a3"/dv'leV*ֵ@')D_^ЧYb$arהa3hXіSE׋a0PBT 4%) yO(۱XsOD(vѬ#  4!i^)vsB%̖-\q'BB4|)tQ= 6b~2ZәZa\|Ǻt#aM}.C7'L _f2l w]+r~`Y)o+(;ta]wnM2,\u[_ vKXdM]$e<3qHf*2\yvl?A;Y]|37!>\Ȳ_\ \ZƵ0]t7\N_W3Fh|F$pxsM.wrO_/Cihwi Z<{P 3:Sߜ ׬L<ʙw"0.&]fNLrLM@X!q^ Pby2O:$jGw^5򘅾.("AK#G5Ee?G`x{5?U2+J1)7ѦͰ?-l/Bf/e[1d>rW_rފB ك%2ȿEM-a&m"7t<>Q?ĥR'}=v׭I A&.}H0Σ^T?י}W„%3[`ZGA. Fˏ|Y6 eWl ?sF@W2NVy9'grq ƚJtman.fZ7{ѹދ:L5O߶G(i3.0A;NTMi?רxz t=Pj'ӂ? 
eGHTsib/2Ը>,mwDLI|7Z:GA)e5E}>ܘGby5a><\s:ҹ ÿ* cV2<4ZR{`W˸LĶHAXxȞAhhCU .԰Ԕxu`7";lI0/ }dR`׵_h~o7=`jWu.nF/4%:mn]X(]9f;.>:X3orYaOmDI+{dsAj2-^=Yk9b/#GBՀ %=ErJ?>N'ldZ;S^t+;(u6+< (zpp9)RWo48,l*~7 491{e kU\+Eo Pw|tSh3>5H2mGƾYuMf2uټ>]}.ƀ O{.zv̎HMV,SAtOG)C㵠]rsJ&Ln\x{*y~[,^UZER"b2VhBa SbCGOB滬#˿ );*$o^2}%fnXٽvY^]sI bsڳ"02i uJ+GmF B<0u]}Δέ ddP,dyr7HȮYܛ,ReCŒ[O`hg0{궸 #eȯEQn dŻvD`s6Sհ^ijJӽXd ̮϶;UYl2$ zr y(ɁV,sl}yKN#ϐg7p?$Wj:!'~PAkeIз/Qn)~4 63e~:5`*GtqX<}VO'nѴIgԈpc  H*x$=V@4VmK$`wU;h|S 9\Z&-zzՉz!>i%%jiz}T- PR%JJN@m8LQaC5?{{/k2wvAf/4 _Rp|{/`L㊣3,WrQ- _9tt[Q4;A9Oǜh=Jp֟X;Gt-z~*gP5' av/ai2 xd/ ]- P곷^Q.%vqrR=nE}"T_['YmHZ/Tmzipl̬Dz^ȱY?Lowm~y]\ cT}&,u1{Z ŭϗ"kq䩾|?-NĨe1($DL jA~@\U)o^=>L[Lu 韑ǐ3,oK s}ȣ7G|~?t\ baDJnFiX(4nuê˒IK,lBk'곬O%a9łzf$uSyud6 jt0r<+0@v1Y'293TBR@!|J&"()ݵ>_ ȍܰ W3ak=Cp^E:_Zgv܍晏e<ɝ!ǯ@[_y~6J"<*O+0m0En7T_>y2[64"[u҉fe^5nrFj-g>nuV7*aނE&7|ٻ)wvWZA8I>m z]R`JƝXl]!3 R)^ȪrZ 9O!Y3Q3~FGeTpdpd.A 7*.Y&C!\a4O7ݟޚ6>+z^hէ%_aS /XOxb sna_^]N4ɗ{ڏy򳾒INۺa:X,= ~ 8_*%NJ?Ӊa!kB4 ?Gf26a<ﺃ%-aqލ,fX/F#-mr>BTk"?aZaP#IE|i c~d\ĵ=c oa!NdJAGU"bޕh1:ɐIɊIOtɭMJ+a)ɟY @Qxƙ䉺sH^;x_h WFΙMCq)ǪyVCI?o0B<8h4O0轴K*L$ /lꋗ)5"[~\h1 `{Lb/dMX^nNbQZ,L\(maU.`=}}34hsܤC8y}^|R;]%dAjfNְauMhi/ 3Ί_""7OYoSְt|6?3~љ9dfSPZ7R+OFg' 75ybBn'ЍOXQ3MR GNCw9w|O2%165!wGk3qկZ%l KH:#wjUCm[agE͘^;k93*vQ/"`Vxzh7 [X/@cvH5Ѱw2)|l|f[\!o/|yI vI=A̯ʢm*(ԈwN, oKkzw8&ԎRx29ڃdI[RCQ}>"&rd]ꕡϗ-^Wq.Wh߅4݆3[[tKW8a㽮:()[cue†>J"2h.#B/jcdwђ Lvn"Q{[2e/s4hM!\m^$Y'}x5Lay"RUN~&P"h5߽w<۟_ aY g9{5)hUfzF/_7c_VȽ鹷V:t}L4)բtK8[L5{/z=uCY;." ##xLX0_L`R /m|T)r!yYc% "fZ<>ATA"zHwjmMde) 3=ZX) &#3 =̑>hH" w:ZlK~.=yjU bכ|xI^0y35i v!~2޻HO>AB>ՈjOʁnq|nvó3tX2CzA NJzCZ&Ђ+TF1mGoai%/XP5I=R@\9J\ AI.O5{M9ue *Ts#~oS.&YEuj_\ܚ[ ʮ<!8;a\ġ0y~G[hԢ>&|7)^a]qcWMnjHTcYXWSՏ<=#]bOe`[l ޜ8CNr0?}}kuRM&/Y[D@ U 6EC *oNS'XMN+ [Oo_ Ut]̑"Zaqh_q"XS%6% sEٱPWIL n Jnm_:5FŚ?~Ն?}a2KqߏQg/HZ.mL_+(GvtOys.EIn(p>2P\0&w ЉJB3[[DZ(DIgudmSEIݽN9m0c{^ˈZ@K..060 TǍ|G,IMEay<~̌ThFf3nR@+  a^1z!ӜApF4B_>e8&V3HZ?3wiEg N䐕 @nwCrD7EG:-y/u ?c4 5P8'#(Kn{ZTcQt׀Vh*}r. 
i ;<{t7D.}LGx}RG: aXza{MY>_a'͆-CaD> z[DkPq ]6;ov'й(u+,n7;* `9~_.2H*o[k@_5}lnȼ9vp ,ٻ)岬ëѝt(>R BKzRO߬@}I{XW^p^II9"@+;B;܂XU%4]amEP,FK+zsųC _Eܺ4 Ӛ,FBlh!iK7y$kO/CQJvLzm ɲT5ӦԐa|6p8}xئ! 3?,@ Y3cX=KŹ@ly"o^~Kx G9! 5%i-I {'(]ro'e ͼI,fQd eWrb_72G _F޲xdKH3g/'|W,Z^=\yI)@>3EQ_/x*?}/p9~^s=V'YuZF!CU`=G,t(P2UaH>ٍ0O',<9\"unYTx-R/ ,m+DT6x_Ƥ BIeR#]!$qR#rVxad6jE Ƞ{DYeT$3;2 ~|y/K6ؤ`g9z>vKˌ8Ws@]RSFe^3w,<{KXPrP٣QrП=?Ww)6~ 3[Eҏ+3^4>֚w[sxH]C^VbqKnEFY+zX<s>9p̓Ҵ$''c3  o챓U2]'EoL "w]_q_0izP@r2b+ 6ɡ͏/ģYŕc<}y5 v#ǿ gzK#Ul l:xcG\/K* OВz0#lb!lo\\7) yjv˭z !$m|kq> >4Ȕ#͙ drtJg\ Ψėےnb--Wʣ;*`(ClmFxhM+u{,;ܫaMSNH ;t"g8r-iOB^W: [ɾA VFޥEiJi癮9a ZOg>iVwBB'z7 %œQJ{!nQ~?U;HW%[ڃ-ˉ9坶)LOW&DXEw龷}~2b:#[ 8 ; ody4e7~F>C뾟׉y)]9JEeGÐ翰OO=DefQڵ , q1|ؠpq,}{?∾^2iYokwX}$ ;Q[`JO3=+ó gI@:m@}庇8@IE_PA2SR8\Tho}B`dπ OX`7tם45ܞceaEKr\Fq[ݼ- WJ_[j`E<sq,{v7}AsQb[(,#t<Ttw!stA3{>,.$O"C1z$Tw_PONչ%H,z0_UY]nOqaIy8pa=wH7E^E5Lg]]%_C\ҙo'hMZ ar2^|t|<*]Zzy03sr6|~)hD+gN0WvC0o d>@9;'LVO ͹dѐhHp{Fr!>aJ9 &|$dG(~8\M'N⁳2Pd܁ Ph]G(P2Q HE5 jL _ڌUtHnh%mlN`-ɻ 0lJ 8xq7 P*]a/w9wHwTXn:VKt]`Npt7,_ڷ)5eq dYKŇ~0#dz;unْ`+$?Oa:IPޜ-nV%MQ$(-¶FxeӴ, sdgm!7rxJoA^ssK._=5"Q2z#_L:{6ȉFT9m䛻b5OGaʗHJ^G \C6qȘ9}f_7,.)˯b0(L|gV[=8aΝG0Λ$?6tBlh>tkPϚ<~e'1Q:.:9ϑJ%Kx XN|PfhJI!JJ_Ozs,o }q.MzQa mmUc9Za^iXsT&gF€˒qjHĨ'^fS!q;U>zqvƛans \vT㗦$]־T(6?z" ǥcg~gB $ qn,쑁3j0hR3 [kF0K@F? MwG6#dv9g[(n2szv/m e|90s2JxIuXj*kk7Ee iz~<hmrB#S89o{h/iE`R͍1ZZB?eVl0z濭`!,@}#W~L#ۭDTj1F^'3_-}WvR Ba~(={ݖ4Grg'ȹhNdr@72znȅUlEv́ebAŜ0hڤWa%4{XٙV"M ]Wې}zdTVkHrv,,:\ TWz/ϰ:^מ8;vvEU@SDʃ$(SsO n|j|JX3)rbܡo!!ȶWYz2rmZs'ɁnmA>?7WQihL' Xp|L@1gv McV Pti)PnI~/zy3pg¥sr3r0FVK8oI-W[6a=<4M` ,Ƭ^0kޛRXs~QZ,HW'Kvԟ3{a`M0sjh&_St$ lN)5H]DV~0KfdQDz_:W`%Xf#WS%[xUCƒ0g^!BF38*#|⍂M?kOpÒ (8QWbKG%I_M'Un5#s}qwskp`4 b; iEGzd`(TxxYn=lAN^!ȝ/<07Od ӞvvV"$K%`aPgu~J쏨rm uuA/:/*հbym-vƭt+"!zkG8V {mdt)#28FGz [)HV6݀@OuKlz/ F>>;s++g\/ 98pttʽ}UÚt`F;r:vMˍ {/ r VN~2,ngk:n T2m?z2R Nb^Д a}%YÓ}tdX1 I?3v~lZ9Oy)Ru6r\(]Ӿ݊ovmSF[g udN8c$g>=?+c`VEBS ?/|6. 
SUCIi@{>IǦّIןds |+-Wl=Ld?ʾX{3w߲f?6Ve 7|%w緭"ÙSl/ .WZL'j}.< ǔ!W&;-MbҩXdo2%{Ix3Xº'L0S~snHD$:^G3 ֐S Xa JЗip=4Fz!\ޛBxlL *uzx+adКW4I=#.<5Kϖjړ>țFzn Z`ć0II;!cu~[`2Uy 2 x~EvrGD ?Zrs,:GwGNzHƀ }cew'It) @ʏCz  W!F@-:^0sv{Ik;؊ e 1#|࣌e)Hf%УReeMEY=KNNs"2J4V7'n'g !d2u(2z9 ^&%TD",D~\b/)mX\ؓ#$+)R) Ս82d{Ep-ILRc[|G^MG #kXkCh*M(0j1|U C5v,;ͧ#A~AGg`Eɸ0 rFwWrR`4afbڴb Xt%n|x$j٤dda} Y#]]R+,H,nz~/~b/u }~I0^{a6uٻJ41[ 0!?H uו8?2>p9U lFXǍwҩ(ϣBjF 5 yQPzG?Yu%>N7r4,%4e3*'q3w',)ڥȧAuqHʩmW _1'j/aV8* :o.7=O%%г!oZ2GƯYnrȮv'z429+ T+X,}R始4&Ci6tj9(j2Q,uURO|y '$[;o>ًo+-VH6rOdM";|MYqS'vZiքO|:l9ZBWt8<~*l:0R1z,QҞn.`K^*Mr .1Z32^nΎH'}T}X˃H8vfz!kHi0rJQ/< kf[gN"}xG~Y2Ql@4ZQm++0Xta嶲VS0Q@nQ,Ւl]"v4Ns lÒǑlEl!!h)䟤xΩCN6 ($'_ݯ)<\*^2u3t9 ywdO|ȗ֊o/ӺtxGlgX5/cX }LoNƏ`rK#[MFIУGc fe - j |v[BԤha䈤t5Wi C݌VK@|` > -W+Ps]́ӞӳP_Eu< ӧEP|NtKlR{,s`ɭ{p57sqak{a&hŭhzFSY9,.1yF@RG¡d:yE 52Gh5B]l q iK՛ !&UB}\5IP<-NГ#/K< }?gNvOHA;m|NFI 0skt Lq^"DW!mc뺳]!xGuoW|rH5FwT6%hz=,]a_|퐣xPCy}/y{4AƟEQpeU 摩2F q3}ٸ9"5,W~}Po#߁ȿ +dnh MZTAp }:Vd.\Lx05dOT p4ݎb_o 㒉\VaQXjIW*acC[f*Lj=(vT%Cuoa} YY֦],mՇȜB:$aǶ7#˘dKj2 zk[kr R`Љgߝug]? ZSfxzE#w#h. k5>DQ}lTN?>B++TtPy]vNPZ$FŖ;&a˕~尞myB#q}CWn"}D^$_YIwƯGMy]Dv=@3 }}[+`,wd @ k`MQNh4OAF>3Ⱥn<_Lٌ-ɹ'm0@Sר)uWWOd~DhqO:oyOumĜ_u)+u0x%f֏"}MdOխtO`Գ łnC)tGIgUs>@h韏8 s B#u0B<c1 2z3mbL&T߿3 6.DÚ).fNG~"R`+MB299$T4~Ysz"̐\M_M-&2,QGVz6 -sc&Qq p-,彰ӻ6̙Y*\Tۮ,kH~=_V{(U(v,aAgRޡdwv>pD~]K!Fz3dEfo-d+@Nݑ)2JJ3h"}ԋ%a !' 
.b's7X=Jܿ懹H"`r,Ou3\JHuۥ7yMꇩrFH LWl?6TbS?,fD/ŊQ鳋aɇ7s`&s<=,UݟukM&aga2zy C^@="BjҽXEy{u+{&DGٟFgnePr 2c kGz/Z 1i{,bmh E烜 ,5% Gq PT)zA1$|teX4i=(h{2qOч A\0q,6`ஃ:0xIVʝ+@q /hcX.<<9@ dκj:sW/,6(L3$AķKq)"#x7*i+s'Wd3@?^!;D=?95<<qc0OKCd>3[(X( ^.ajl*_z\'ۂ'l}ީc;X'A+>U ݹY'X}!4K XxYZ~D#אUȊ 2\ȿ 8_׼9OЪ0td]}^y/CMTIݜVⳚ=7a{۹S"T@=iz7 gCޗ.CFB4#^4ڈ 8 /Ko5=qfd4٩q Y]iE.nŴ,%NSNbGf:y ;{BzT7L6/\J8<9Ya(5 }o "ˢ=?E'>=sFJ3ΨfАV[vXt eD =ԐMFd۸t`daL.%08`{G_p"۷ IwVnx>L3>iF!r FamȕiBsrk|G"2 (a'9$l{VA5MzZA 2ktLѺ<W#vܹ3XxsԂao6s{^P1%H/BDr [I$ag:Y)|9z2p)+\Gׇf+}se|:n#z;@T|qO Y;$"Ck :5TVЍ jhHthׇ:'~TIou6_m~Guc1É܈Cq:Oȩvnt,цEU'Qiٺd  pZDb_D_ݔB+7)cA>*E>zK}a65U[ F?{nbNޘ~ \UKtD}I.%1B0X;F",|}~BFvU!12ʸtWğU2JQaX-:ӯk4[o%(ULL+釭 _lp<a'w,6Bߔ,"k(% wHh9H`Tl߱[G RyfK9 /=,m?c[nQk'4`952u} !=DxLH[wVt$`y`Q--VK$R2ai6 4̫`OQ0?_4%0}o S1`KYBqb11tBd-M|O^5;r=(/ΝT%|0@^ Zw!pu?ՁTx _:amGlB-!qV(|Oȷ>Y([@xÛ{xw$EVdξ0d g ܫ$Mo݈ˮ|D d}roJ› 4 ut{F"dc-[9:EBY3,O0\ʕvi;<|`fAes].ca ܺN jJ;фl?21[nF9ϡW`=Hˬs0-׳JSſzAgJAD*߇"43U1!|M8 'Cd,m*HP1 OdhvdN7`eⶩA%Ɯ36O4MD7lJks#r'y 6tvS>K\n`'X;Ц,ٯ-3;E`$1w, %n{}/X!X5+ӣɒut,?s8ҷ3o܎ aq.*<$~YcRC*qU1%$=yi K "0w]C^z,. Au7^Px1H {oaǦڬk Hky职ZK]2*ڏ ٍw|&M4ӿVCߋd5)|Ѐ a;w;TR,p,u%1}:̃]~M5+:E$6 s2,yD2d#IfO`dh2,W5읐}vFZ"yMU`6%zH[)mu>GkdƂx_ɸGzBK?.0z` sA*ܑ$XۓM̋؁JdQy1r~@ɂ$>3w∬ uGWoKJPv֔KRCru} P`'/ oIfvR.^g#N֖B+IHK\PwN&XV6 :y>iVQ*x->@ j1E:y(wX¨]/ttZkU2t`}&0\(, nQѢGߋXQMSo4}3;b"ק^~[B$Q@Wm߯aOS?]dg%( x7Vd!} V'N<*Rع1&(UA'9/[42_aމStU$5IpL@pҙ_X?iq^%q/C@h8i}; 3A5OR`zq0~o A" V곪JYWt,. <Ɖ'I[ya~jo%0"ݝ ~5>^O]szrN>ҽVV,?w\W#k.XdNK^Cz7&2 &Fm} _Ka͏vê1HpVdLYB` biI`w LA>ӜQ`3* [j)?۫OR?.$_-!9G!W\ o&ruC}t> 9?w!êd*PIxQh%aK!xNSv-C>sKJKjJL8rע3'JLךgߢXb?4̋đ!$uܓI~Ϲ47ɵ&&n 4FMZ`gzaZVX4=.tK䥠Ķ;|sgAY}42TÒm+n Vlhm"N|Tai+nO^p*]`F[9H=]^!zwߪίGzE[l C0V[o^M vwᨫHiFfܺ c-&m%[ ^N| $VIE4tL)BK `[0پ<+,$1D7GrP8^MދαȰ%!vUdJ+Oϼ5GjOfȒ6djz2i.aє̌ |WϦAd @xԝ60uJXN)D '<ݭ܋a[r*•۩HvpK ^L=rޔ+SڶΖȻUk|"fiՑP(w%Rm<]1ִ$ij^'N,/^Xc_Ͱ+3<$œGљCGax0c_ƾVBETs5)$)*E]%5m}wƾ>G_s5 "E=*p[ef7./I`8lvF7A;{4Z͈Z{=@oGr5#>~'UG=͠[, ~Noj+FcUt(saKTa"?"ϟ_syc-d^#GLK*33AV \BJ:M/GWi>/#f)kCI9su SSEi]5[90/M!`%9=/X > #֫ tFVLYD8M@Uv!*oҕk0y. 
U/@(C4v}gJ1ܬlj3H*L{V9뾜X-)/k#{ohq"z"zwą[_xch/wʼm 9 /yGʹ[3_!Ӗga~5f(0Ҽ?`$Ah3 |e&j_TaQیQ23): mջ9ž|؉|rBksлD'|S3YP\L ~cڄĶ&3P?qf۝BďⷐM=cG ń+ ڋ=qd^;tG.’D׻0= :p]>B[P\s+eTzIxnY =rqK0xUOtvWïF* on2*z~ *KF0msbt9,0 Sǝۍ`D@dvG߽-ЊB4FpA6WRT x֊ y$6Lqχu+HZe8,TZ:Y2 x !E%YdhOw.8a Z}5oNڃ!ih}+{uƋ^鐟{O q97-; 㡙/B?@΢$?N@ ``ޱحePɽZ S?[0#S Ď Iga nd)0܌ J&bU %ٹF9!kd䚸ǥ0Y Uj#6{uŵ&sWΧ_@7K/<#gL0{ N+xm_4m}f=~pEmK29̗5/\Fѳ)Yk:| ~7Y}dOQH9yKq$M2= ˦).OKa~`,18oD1QXNxܞcvgv .iy S:=EBI0sGʬ!官g;H~4i[u`QJcG-X\̾63Frq05Vvh?+9YC~:F=$+a\SC< dI#)DA◉z g"ڞxn-c((2جք{QQiU_(ftU4meOu`#$|uYʓ90Tqv( s@p,?=+li5Ff0+Ej%zaxN ,H}{J])GyFUe1g77#Ǟ+P^WAjM{y;|4fi6,\mq_c/0$~}CoWo/$O%h.ʝ.լ5?xpVbs?ez܊D8`{R.ub$rh?5Hp !d<;kEI~0!<`* C28TB:KOTo}~-0&1ufuiy+ J&_N'ȹ4=YtkD#D3u/`9F6'9L-R< C6^{C) (d~_ ڪE  QmПcCCLXGk:eL^֑(c}Lmo`Ơ3"~D> _; ޕNfȳm4 g Q@n!Aֈ.@^E 0i";\p2Hm A5)6qn$w[)x{ q]7ٺ;M,iEp}s^ȉyQI!ן:Q0#%' ?qPбB+d wϕ\ׇgɓeH̷m#g^e@O 5v~5`l d5 m%-:]v'&9e&Mw)Oswi] +5dL _Y]#g`(4zb<fJ' s90#t>6t["ɚ6яeR3d{:M,9 H9;j3ʄ*!枿$x :4)TMX&!QoxLFyk׏Š?u }Fl9%UQt)tj@ ~7I֖Qm9U8JQv{sLJRDZwSRe_K"Er_٤NM*Gw + ɋGݯo9#kU'% Xۛدޢxħ'=JCǙzK"O9r23pi'2KyK?@֚G!WM(#d . ǢPYR(oE2kV5PlgTH؅Ԗx{b{ 7rr13tCV2Ci2;W'+k`IG^&I񃩑7,X+0CǍ&O>x3i-ڿΕL_^B\aǰ,qX?uVy|S ܺvؙzA;-"3N[ U Y0;ݜV{K?PFA߈ߟ?!['7ڼz HewbģQ~BE\✇6|DMwɣ3$|:zI@a-Qܥ7媏(E_U"ĺ5Ȫ7kwgF6UlH>/*}T\I𫷄9?5-F4T_:3Y 6=.VZ42ٗ$aE0s/ٴj@O]!|{3q4vQZ!H3[LLgPrA֛_NFN>"'|v")C=xogNݗ:{b Ӫ=Y%%Xe05CE4ˊaqg%QS0~yҥXݠMBu 1&Яn4"4cnj>w*0=" ۶ 㡿F>2fY9hYm˾nl"gaXF yLBAo"5[K7yF@G_WWvaIKrFyg r|Ky*XG;CvէuK^krBAidvk:)꟔@.6i Ȯpҍw<$T{`Q~h53"aUд4+\\P&jAW5W_C]obHOym][,-Ban22tg;{T\o5tVu2?.HGlфOa0fTjkۍ\Q< gh d+!}˓Pyt>|1T>'#E%AL}/x}VFbwgyjXԍy0 bUz-X} < +J`Q3~sE̪M:hC=L9o~C?Kߝ HL?|zn%v x\nC79 LA=+'Zբ# p4vnѺhxh4pE=Ҡ`K_ntzV F B`Q2"kyטIOYwg|o?YKC.iT'3Y"7kp Xv-ۺ {E'mwerdZS"oy  wo>vP`tٞx\IH+>O 3d['gGYrz}փ\*9$otox(`W %AQN;Pd50fY xP3xw%ǖDPm捜+]-5w6sô . $Ec)[azO3GH*x^Vh7ӣ`0J 2N.U}WAҷL|HQxad>VW V9Δub0ym,ͼ7sjdcoY Zi=+4hْ.d/ F\.eb3n^+/ 3Ee4;t*CT Ʀ"i1ncݡ?*B*co r w`ZTwY}甡"ty8'ꁂ -Owt+ @_? 
)k"ӓMrv$:0&rBwlT(k7'zܥV0h+02] +aeː!s.XX|Ȗi SgUކ^ݼ~CF/5w\GwWZS;}{Xiݺ%[&!嗾ZnD\O횤 +s lEsШMPW6/{}1/̠nG,ڿ7[zpHls;/@;+@3=y[}~y'tJMاQ!&֧]UP>C?q%"F{ ZlIDJè ̞~}2u~MYF"α"[KS$o$/a&nz(LĺF*AO֟ dHZg%t9Vab.OF{8BmWs<·Ɵ#BIhPv`_%B1~gAC0ɾɉJXړeP, x%lS䉜R1ܾ& yi}rv߰&>;(@Y7+| yE.C'ids 2?6, 镅!L>Ezbn(H4{oX4f`h#4|Y?.%EHn|ȡsժ,׋nϽ{QpB[qZ8@ ςDf OK\T*<@6˧5G amd[:qID|K62-rl#2f0+ :pn=t[nmb%F,Hyh hqpgl) (>XT] tmWhZ_qP}}=fvXs]RE{G[ c{ۜ߬4ŒJz",7yD' RY|(iRt ɏHrdSYE\NK[k6<6}v ªtPp}L}lE:nAi̎_Ql2,FN~7-_{Y\7GΑ-^ʄ\aAQul(M]_:r E jcCac#w*}LAW |=*KA\N}Ʉdysk&)ҪlԌWYWqv$z} ԍauK`Zq&=!BkQta+R0wE_$wFd\1ɔ,;bSH'*+2{#Z`Eruۭmadi6t#,_~Bg_Ļ/p]he ֩L"k`lBpm|wyh N{W*8!cF (DK&(rJE,žE}3BI[rQlǃtV(T*J|Djn1Y rl9wd$ߺxkt1`v.tm|Iw?$}V0&)M f͌7OV,{Lk!;SWX9ot%L {of=߰v ~[mXV(4;s.-XB&u E^qd_5uXd3 X澧c=t1Bu&?UBʺSBa`';[·]X0݇ Kc{=>t5XñNJ1m?@<^sE&3U]Rѓ??#s3R<DO~"_S5Ci҅>g ;kkdvY6y7܎|r~0uD CgߘD^t9 äza=_eS2xLgmL`l<fd q?tSOpA[.~Vйu32Z- + 9PurЃߊ#$SدZMy:. Y X<n]̄j^#Ro4ۍ_uIEs|1BovBrYH*}\ /I7+a<2!31+tڪ,_RaQ4h 6 f-X\CE$ht% .uKEz7LŸϔ td:cԒc=侣7/ iEk*嬫6=1DD98Fu^*3)\g,E/!gr:OPɓ7Зl<@Kk=5x&L%X,B$4xx"g冭G}}SHͳlE~-}_Kr_hIG'+ z=8*#"{/#NoSjh@=UD挫ƌ9-D8rL(Fmn%lq|FFOӂ&砃cR'B<D]q`y'4 =w4^򈇼[)=Jxz~9i_ #;"$˼Qe.5$ޯ7f0`HyA6Kc*nmYpl;&2ti@Jqa(z\ / &<4Is? JKPNT+Γjtc-X]TGV1 :) 0GP \#,* f%R\ v ]4IPiyZ~X'9}gTʝ+Э|e\(4pCmȥZx4POW'LV} s*s]?Oz%̋8.YfhPE0#*=z 9(Wd! )e!Ю.eg何DJB,`m(j){kW9Jd,~yb9̠Ho'Pw(rX# <́T[]#wZ7dbzvG ɮF)H:ͻ  CktCQ_ [$(9._̐%zV_X_V~ji0ݦL *{LCaqU ֔"qly.7#S[ ,;u _B,UE/]g{^9xL FD)10]U+C FoD5֯%|tU>#\y; r)Ӱ)5zuYjP$?=]UN0)}w,Ym.W:i^oe`mjܜ_r^! Hj-^yU*ͺ( ˆ\0uEI%>n3U pѥOb`·竭'g]?QzAU:$S,ߎ]W̙U;GԐmĭD/>I޽R6FtI~W?zARcKx#f !DmMw%C-,m?^& vr& %g4r8)ZY3Õ r8edr,{vaXaMҩ3N& €'Sa={W2@H~"_G|^0$7KZ4+XVGfП;-ؑCFrGo?h!%S_{(~ omnH5,}Fr mB6sady6OaD7;LG N@\INǽ:6C;g~r{Ku~*r>16H3m~ fI3\0$#,b ~qm>\!#1Cyg*Rj9յl ;u?׉^;dBJu- (OgIeb+}wj>\H0wQI}/GwsZ JH& _;qLI/v>p}+c6l&0׵ų-& =)6N{k|vąм X P=_y!y" ʦ;_@&%pISO+,:Rf_{i?LMWb1 rX߸wǐ*wyO>FJ;`V4?w]XKS%! 
8i Fj駹i"732XߓoRtt+(!$* ;`Uz3)d{ 6ר WmOH1qGY}̏(#:9NI/ KF҃^80)?tBբ z }WYNx=a0b>qjyPI2黈<Lbm=_Saq77id<j808Ls70a%ɚ"g {ͷQSǎB3GkkQ:B[y]^+GJƤet^z4N=1R h'b)mo=B}[1rrSm@Vu7/\'ˆ&”ѲP'Sw4T Q5}2}xr|:VI<0X8Ϸ{ATJ`IF,Əga 2ENJQ9i,&8 !σ/7@ (`q_ ߧ+u!rtŶFk6,0rM V/8M̻5gad1xϪ fOR}7t`Z%ݑY}eȖx;Lܤ"uewWFC& ~i|JB]Yk V1?&+_G 5زQ$Bҽo>b*4:LD}s?> jhZ릧ЧQ| jSiayP1 R%OOcA|UK* :mcl?C\ݠg+=mj?cLg-Z^=GIA՚\dpFrE'.1Fq^0\UIqט^H;"1 QPw1 h(sF01ic%fVǿ 0qQ"B ^ XӧRb$x)ˉHPmpx^լÐPq^&3u;迡JtΘ~HW4FAChW+ 6G OrF=Ėz~&еW1cIq aZ+ܷN4"2\֏P$y(ؼAfGHKk$ZGZ)\3ȮH!3W7N מa/4#`HB!9ΫlCr/z' ~^䉬daF~= zS]kBY]8XCr>(8`$mvރ| 1fUF䎆[-aXQl^l =w6B)ύ %6ɗРgt,LV%L*wEcWQЪࠚܦb$|Hyxfg&kW|dG=п6훃XY#mw>y#9GO5L;>Ar,m(:?3BxgaMRݯ0q`zOT訽yN ۣu`4$0wL$: Qۆ[E0 0`xHyIͿH˦ߤl5[Z^{mO?v%QhW :N1( 5'1i:@q F>ȿo,ًB eXʼnIXUA.jeVJe(h)%[ 3KThbuX.sC3~swҭdLx^',@9ɋb.u[Tk",JȀIk ly_d sOS_^e 8Pa`2.3a4_/F}'\ TEAFj۽ r~V&8ϬmjȜRCʃuaћ(ۄwT-]zء)fp4*M0䓠S1 g&yeOs0N4;_2%3 NîwTcDMք?`{˰R @ۣ`rPV\\=C>l5;!ω;W^}C3&v!el9ImgVac[UuTU_usƷ5>!D/X,}[H0%ѿzXji}`WolGm:!7V? ??e7 q#4-ȧU#=OQg K Hxp~Xi$ +\o9pT7,6_qA_'ʿ>+?Ϸ'NJ6ruwj-+^{V.:] cq0JarEQ6.$, ~D $8#ّcf7xZ Żu O~_J]M{l`Ȩ9*pCC3vۦ d~g咉?!UOŮ#on;[B9:JWHmmTu$]& J6"#HмkJ5:eĀan@2}FIYV(e=qCTWu턾vq|MK5_q25_l@S K\fw0yeG<$σEW͕~d6SY\6mgp"9٤Zd3y|%9˟+#QTgKUfR %>9j}W]&t6L|yÃP3MݪsIؿg f/y"I.a{z$ E7ik(PBNF{:?`mQMsQ,Y1Uɾwtd^/k1\':Mu_05 dWƀS~v ,3 jN ,l1x )#tteHg.3&'[HϏ>ڑUnVI!ձrd}5LEr> rhur XouFࠗ̿kØ.:js3q r0M;$鄟X_m9RTX 7UHNFѐGWR~䌂V\(\tjŶy7Bѯmԟ|f%/nC'_u~2Fޥu#Z+P$3~?8QQm\{.2K}g Ls=f4vU$ *}Awc'0eIKIRDk߇bEn#XSHLy<䝇;f["Wu?!/*x^.0vFwXZt->ۏg6L!Ft_v?u]dګ/KGZaba﬏t Jxwnh}f X 𻂤m]yFH]xr/:r*>>yH; zB<~ *t !w҂31O8_6ra:ż# ܐj$7'F] RKBolS;yANeH?%XӡiVP"ŦUo]B޸U /hSˋF.*`k7Y :B)\k ~/=qT&s3UFd#us4Ɋ|6mD'rQ<w })-xmF*^CDNx߷nɧ*Q\x} "ۜ^e~-rq|Gj s*WliW Y5700z "F \ŒoAKf±duӵ' Gwi$[I`w9- N~#䇒akK9ʗϽm#t|*7)s\2<戟0dq٬}zdw2޶ꈺs{,%+qRdװTaXع0ir 7YOo$_[8YշB.A7߂6 P:tg ^ hᮟYĠU2Q5Ё1rkae#݂rQtwiES*k@rK кR>Nw~NЧ0d70NaWal5rsV1x(ÖLJM -ߝ >`x.To].P'py ĭ) rԉ{~ݔf0r|k&ԭMS8nڏfsX31a5b w}B/fA릵N ;H-#kcQ J´]+b8d]$_$H陁,E _x6v"zKfW2^tGv G%"gȥ"+7WŠLf; I ď7wQ6P9csFw~tKv9=;s㧎|h<}~ō}:طᮇxoat'~|m ^EYMwp],F5A9}>{{:Y;b6*ʋ(>QD >䖷RAJ 
C?DAI[CE^{W_SuOx%J`rh#04?芕=@q†PMpeq,*Mrcydɥ^7ڍlٻg"ugko <M%d!c0˾1.˳&Njvj: Ȩ6 F31fR*`%ݜV|佗fO+BdRHr>TC )Ob(~GY1?MҦ(|nat<,'qׁBREj;s96MҶ!w-2IԞz^4rɧdCo5˼'(t+n,&>B* y.[tsv_lq׏,|LBʫLfy1DyU?z ~͸p5pѡK{ BdZ r1"6.LIOD6!ɦԳwxe]˜ؗOu׈$;u<}sn97DLSa<ӕ4S TE~dgK\EH3̣sCzlHz ec>(BQ+ ZD44-I%5-D Ӿ봬֮}|9oV\l|bwHiM% "`&պTbo85ªW$k kՁ]b!ߗzd -s829 C[9֍JY;:}N| `W콖sr7du=8m۔)0>M7œ‰H[#j :ch@˴ţH0. \̓YLœꭏLέKxKC L\+gto8C/_oY\fzdeVўZ/p{pI9 0Q) u@qr QI9!$huu$O?oZQCc;)iG%?b~$#etelX܆/_'#_rȟ&Jn]FX9I;|ex F> F^yȱ^2 _2lTIw+Y;D6e~{WdzvQXɔr yPf쾪*m0\F yhRǡIFu&[H~^]#|WX!o'eW_I"|UadM^^ vp>p}u}+2 |Rt87 {x+ įYQНdf=E=iPϾ+Q0Hx\Hx{`7f1z#ǁlvd!+}J4R?W ҫ҄P8 nq]*/DZQt]{r= Sv5AA{W!u 9O)~qD?=|Z ّɶG),5|9c4t>V]4=xm FEd ~iݻ#}^ȒЉZ YU쎕!HH,.a ܆)uW1N,Ѻsbr?$fS4}@ѵ',ӬI]K6+^.Kf'+(ƞ@ړN16yK#SSX9[Xn: ˶ٖ v5 wmW9do UυpD/h:ˤ%*$>ci&EWErO^?4ElX?hCU y-ӹ)ʉm<X:vDje$9: Mo}:-yѠ~!{nJK̷ɁuN{lw(䞶*Р[V_F29*T¬䩙[fj2~g%V|5`rQ}j޺(x 4~e;ôTۖ3X1˷[?/!;$^FȳNK@TӍ"ϑ;>-m4=ITHvH҉'-`cQs:(| ՜C(?,YdL$萏 c&Ej$s,J/J9A\C'sd\gGC#YC5T>4Q@`g6D鵣~(ב4+OJN6E( zO=;{std,!b-mȔP51_Vql 7yQ=|5 "Ҹ%x~s8KZa~02<<"8;% ^nxfQν}o<9fE;\=N*<ze~/BG9!XsR2{ ku9PH8o1RcГ\"[ceiTVO.l(Vݞ@g~|C ҝC֯bJ ϯ/iP<{I :9#FLU[~ʉ0aW=joL-ǣv뼠B>j`^&KƌX詠3гIo 7DAi?m~W+ߵJgΙ8&iD`pt΃RJlvwu9}eOo km] ]:gixC|ˣ`DQ#_ICҖ.Qh@VnzGw,툎{-Imvy0=jk0ν7|>yGVջFbs@A(, s,s? 
nwb]m9Nݕ^ЯLl8Z3 sunŸ@d 0VAn3Y2v0<捂;FQ-^BgdmHCRPjh@ yl(B껞/Aȣ d 98>#y(L\zCGu0Nt?G؇SOXƒ|{zsBLHRd+;SUJ[B%s`uܡ Tz2b7F ކ4,P.C4wJs#6G!͘-d휘,[R҆ Fu-!|yZ  Ӣ+ݗZEk;9Q2fnb g*ۖ~}(]1gMpa)TX+!_N:1 C nDub0tY9DDU[)w,C3᧯w8n5ukV"o῍g.ɖG`Ƕ( |u*OdA}kuG2xaDI]ޚ&^P,e]t3ObaE=62L;t6Ή:Jwiv|,?썂[DAWDo9aRq aft뫧bu3Oz۱ۤ=@Wot#RÛD4DaRl>ZLg8B!}R2z⇽+Btd J>!X!}*4ߙijsE[IB1e d-\`{λL)řQSB{+Q\xQwk~zM|= lEdΙWCKB* Bb`B,nO>'li!rw wL\v2 GX /Rd~n*GN O7I,='=t9:Bڋ!A#H~> $9m`^ 3JaɏWafc6xu?̼fܿ@E zR R r?FB/ܴN8!Ph<;0p!qۆrm>ɬGK#O$UN۵ټO/GWi /M@ӵ9 H{̎DWAK .Hi ޴-o{͌$ #Qaat8;=zO.#HۼN z^V^W~{?zQ,-VȮ-iQ\jHmؿ/7wʼnυdtPtRsEB,+ nkIKآLY|AzM ~Ht gKX{ 70}ˇ #k`Ii[|_?t\d2iw(Ź?)) hP=T́0EOE|O4u3iG=v^d+qxr) YOWO~>S׫R~L V]ܫ"kZC^n^$=A%$(yvl,F}3\Sw/oG6*V{]Us*i2cax _6]cYxłΆF&t E!׏Ks(p EiC,0M@';EvµH>Y!ivZˮɴQC?8ͼ,2&<+Gg3BaްLůښmuhI1q!u|XB8Q 4hqb5 'Ӄ~!N=NxE ⡼U>Cy8dK/3;lg||-XUlxFkd)6B ʖ}!8_ m`̍OEd}j2˗}w)䋗kuU!v]<\ߗ$lyo {hP]JzQPZfe,L[BH+Yъt03t*mv5<(yG ӐS.eX\~LM&5_+`ȍ+ʍ`!_mVcTPF J '3.KnrSIM XؑLj2;aQ|;,;)>2ٟ ">Р0n@ˊfW#:ѫl.ahjပb6I}kL욺{}hv#GZKHZjD~'Kf2( pH=G#TI:~B65xtd.,(Ő?TabSlk]PSa2X_j:,3=x,KNEpޘCpvڂ mrz6 ;QG*tfBיg?QPSo%ӻ.siX90:h[d!`),mn9bƟߪmco$Ii/b0)e'dl2~Rw$2~P!">o yݗD6(?٨+oe$g|A٫^>CFjDdc9]s`Yw"+Cͪ`*v>Zs'fY ٨ oء^0ogNFz5bSEw[\}N)\n$=Q#ѳ$W>Z<-2$ECauUFKEc2w\T;W(o 9/d?x(̊>#JQiӶFwn 8`v^?G뱖PHs p8,rNâTdM!E%U׹UWN,^oF~1 dEPhe:"E'Q(Gc& \x4lyFm;Φvdb-m!0Z=in B&2H/HH}]3(q?6{KF~.>9"'MC!w ǯ#\1\>"jRwZc7"`,A6{ C*fL-شVz7f w*"ub4;|4tkQEUIQP'¾4 .xrSuҽ]W"4kc̈́/^[Do"ü0 !0)J[fQٳNb02hsCTXnp۸Oy?.>}9 Txrm%谹3y/&zB|kU|o*d>^ʃ'ODd/iE# #ӻ4T=lwKAL{L3|)4VEiy6|KbU= W Ϧ<8Z=7CvZ1五ܑ#&%X o  0A97H 1yψs&ǡFN0](ƦK=]-6.=Q4!§S%/J̐;lJ x$J!Q F< ( @sע(݅w~b(T[CFk\zGÉdBDK/\&xPԚK0ӏ- | @,k@'a_c\5%mT6lK+ܞ7<*%(0m0LxWG:rɱNlw7MFf!yX* ʄfXʱ!669s~7-5P)o{ 7`>NBWNAF'w<U>t^m{ [ؐN.f;H//$}-vEYdUBKqWvمϰOn^_jM4ut#-IbUp)OuD=Ѽf;9oPb>i5-$HC_ /`a- [{?MkHCo3K1ӮV-"_u0UwMڭ?6Ce@S`n5a?}J2o8 գL[|W?wClEՋQ|XH!G+e?RDŽoFzQ`ǟ_(OaK 7|>ŗ[yfd*FW:{ԷELFM75;Y(!ys2Jk̷|D0??c!oK#7Iky0Q ̵(,xG0^ǃP$;z;tVcS:}#t rL_9.' 
)c mTg朏 &iIڊf@WK\oYcPwVGwzI o1_a$i1Y$_`?JˏSKkdT rهNlF=A(KBwXa~`)NiJEy :'~r1فue{Kzj@ѣ=QW&V]|ʙ;>Њp6mE5>%i {MS3m9N荬aUʟh!myŦ~X+)blSwTBhUTrުE&; 3, Gڽr $^nWFAŶR :'yO*e(sW^6[r,P.> ִktCZ FT5׮͆:pq@<ѓN 00\8zVZg4=%W9C7E]KP7_\Z$Qw-"[GQa?km ,I~,Z4Qs'dvߡ؍I1o!5QY䣕)C9*9dU+8L,~;`k=z'XK9pu I$QMBRj$~ 9)J.!Նw\Q*wD5C>ԟX(\,-Ro#ױh^[O%W,畟㐇s:ٓT2#Xn2M?έ0+p[ 'GBS+⭇O \ST{]a ˕gr棝V 6l'z{P/x^~w7͐kɮMs mJj7nyLھ0CmoI0s5Z;{)CK5oo%}{9w,TOwWvC:F5meN5SN_̆Cs`l(R;dс]BľrpnN\2A/ɣWz/s/U+**nw XnsenS]a@MO0t_vЕt5 $t[A^mq&$zU%v 姓^V6SZlf2-&0 ~ZCw)п/mzjIĞy(*ƫd*0>5='_ ";k}aC, x z *2U9K3E¸RH2wQ>]+9Ǫn#׳#}B%|ngfHͨyUpwTQCx'/ע`t) n[NDj1.u*=, wgI,w7x06X u<dpK:K|21&?K %$y,L}T~NPzBwbu^ˑ`jI; G<\`esv~dQssӶesb~9()". Y>kXru?3qHoϻa`aymY{ku'?ǿ~ > E2H*P_GrTWa RR ,MW-Kb~x,x@h 8ofEi>)f| ۶ªfG_`u5>f{hA3'T'i3s3qKHKU>V;+XZ_Yr\|jlyc^35Rok'z%Ar̋E]F:سLɑ=`z`h\; z. 4Lϒ'J$,rʄ3EL\!<@$}Ɏcw7l0'K*Tq P\gMb!GᷳD.^Zo`bc}F֝2ebdNz ,[ms]ms`6RKܤ3c3"슜zTd+Yl-|+j.=0~[;qT^y}/̦9ͣZ$hxi<g;cWI*I($J$ ITB%QBBHBBHd+d}߲wy^za}{]sD}k9.k"`܅鷖pP.RdVCOg޻,fފά~<SN/j' ege f+]"G/V4C7;tNY_J[Wr<[ED_X̛/kWӑY\e^O<-:H.X˙rwdzk^M; dC%7 8f#C!ߐgӿg/"OB\Ųzu -M)DK'v/#uUIW%r(s:{_P˷.v)z [) 39!u llن+)²V0$G <=x)_/gC663{oynWwF, (E1Cko|⼻N"wץT|To}x_߼Gɿ4FLD̋Q`s6+d;k0 q ԟ6o U)hٝ3S}6ϋ5__> 5z{u yϤ7QCg;>c9Nu~,_Z cچ>7TV<+XOÌW0x*[9BET ƽax`Ow3FbνN"%Mw)d*1T;1,FƜȡt r|=|ꎪ;1YdSeasj!,ϳ4I/! .`$f²B#9@ohu9 o.bKj8xUE]*Kagu"D,.duy\@J 4$:3-r#[؏cThoe -sG+zvײKM ;kJ. $ݗ76`DG!1l Б4{hPpeyޗ,;9>ZvљUN1XO!Yެuh?T?}Ux3FwZD)WԜjKov,: B?3;AC?S7/^.=`)G,|oj6fCdgtgu_j"U]~.&JݿljOD"%NN 5 YotX~{~! 
8{;]G`Ko$S7 Z_| rgGj>=׎ֹ_]Qs3iwBa(K4+ ꩇNm"@n'ޛS~_8hyA_ smQLeF&%U9dcEH5]?8 0W|Y~Oy9Wrp;}>t/^rREaa;?)asy4 2~Ϗqʌat10'6]އ>g ΁Hrg9Ȫ YYd}g?r>m+<}?r+4?lF2u(FiR,|S€ 6fGw :Cɟx9y獍!KAxDɵ3@´KLL-,mr&Wh7vb$ϵ4H, ǶHdse)t ' `e)=#igCgBWmCdr2]upQÓ䖃R qtfk=| c:ᅥyNu;~H6τ#zdh,QFRsd߈1C76Q.[]􌐚‡f>V/5#)i 2Qsa}x[%_÷l@v2w\P>w n^?(;3o:GBڞc7VK>OXJOӌ8a 91HҺ-{3 Bs"Wq#9 ?GQ(0{#qxϭtw- oBRiPfF߳lb/E0$AL) \'[sZwvTOT/XAJPGu f~W?Bked|T'Y+K Yʎyb_Yzv"bgsE$] :`iV<`':%L% Rφ-y@k[8y1Dzyf fضZg)EYSBnTU@_  h\zYy/N?h( ;!;\5\1C7eX\$.g QV hK=*6绑ϛ(Ut gBj_]뎓b Y쐠ovs#Iv𕰕?2]12ؽ_ l: 6fEjl|ID \%^WÄGO.,0c_UA9[)7 e"1u[ڻXr.ZfV{Z4Δo ʆ{R0Sw<։;4Tn3U[7  }j/ve*":pT [#BD/BұVNWWXᛤ=b {Ֆ) I_HvlA5M: 3Agy37;AqϪ@'ɍxaj _W(gÜT_VnM__ꍎNP~1"UM*w'oLKR/6)H1BH*2-wݼЅlQ_]tm RI5ݾV2ˆ#Y kM$'jկi0筗?Bw$aPjL= {~ R:20X&G6#Y7Ծx\ľ>M>*Fv%3BhJ"L߫ȅ 3h{k=WC:qOPTfeimF#&2Irr C֚a!m$>pk{)yM0YQ.ϕ vgsᤒLf*f z]HGu;~Sȶ9ȵ:92_Q"ā|ڽ3;g%^ۋ}( K.,o6gM>U:Ww@UӱI091Uff`9ޙPR$s,doz?u\éFUy_=Zn_Ϻ0ߏg@ѥ-0'{R9,E]l{{"2|1Fwi"enMcmYq5?lC۽n"C؟vv{UFX-`^S㎻\}f6v\BףL]7sc ëË>7,ok.4^J0ʦtkjK =:_~;_kt!; j*IqyLZ?ۨCM4s89lZX$#X5;(X$ Hr@rSHȿI̊&??11d}k7{9ets)բv7WYEjXtѽVGQϬ 򼖜$縲fs?rt @Ok#H~)H6^[rin<7#`Xr~d z]Bs{=C0Q#+yX *Fq>d\밍' k-r/lE.>s0(29،wL ˇw%A4-{95!$$}"l>sߜ=il?Š,3Q4Z{܍ qP ɂ \7 L2f7rnf)JmX4ڎ!\q^ + R,2GmE^9fGrdXlhd!H׳7X>?B8rsu +с֗sL9,A ,+5?%B۟nHz3s$n?FՓҹ ]YVƷ:P~տN31K(G쌊SH=Rpoʢk!Yw%Cj&];4#/!nΰ,w=rْd$N^X&w>E"nȶXkQ*[n;jgi[dRO$G.mMawˑ]E]HeZU7uOgJ`H‹rf6}P]1L^[Q'`> CQaWĪO,bZ񗥳u1z-xawP! ?;!qmb`e$īKT"#K 32';Y M%Z<x\-QL/ \Y2Vj/kDATae Ğ^0K٠)pqďoO*9A? ?'|jK(Kƭ?a KKKqy$OזyG/J,ro!vdž!Rx2y}ߗS]]DQH_WFIH!mȝohrm.YSo}I.E1D͉`e>C0}wz xfu\gX:*oϯ3}dxaOslz3YǛIoX:܇v u"T\k:3D+~%4N9}ͿE-n[K0&\RGrڙ0b12/MߜW*{YrQ&KAk4{g5JYod{9' {{mZr]kx;DYSapuWXT#E 7ú_<)@σ׃$u]ga1o~Y~jZqD5j>,DwxyzÒD3(_/+4Tc9%"!0WӅQpwډt I>U5B-IYc *qAPYQ9(C; maFkVl 0ejL 񺣛02`SuR ~M@Gev@v;E~Z>dyI/ǽm!aMy#rm sjTvHZ){κM y($մ{YA~&#eηy*Ɛk:+|"p#A`s^>a8]X5".hT_5G/rx?i$LHQ y_#Φ콁v>\||8r0|9ci(ٱco?F>^ww#w`O,ݷ>ۖima,9F%j#8b G͐Q@8wbS|0Vt{򏋡VL }.qG&EakFTX.xW$td O-^E;wĶ2)e܍Hn2܏ ;@cw(eYƺym-1TAN0+( OȾu{Y^~ {ƞNFPp l;yO@nܡ}TN4,B񇌇GŴ %@b}8X oFΕ]H^Eώt&V井ݽ)RƫE#%SH*9|. kdZ|{3;GIo|] qnCse'! 
30'ȚVƙRQl]P!?]e3G wJ-|y(L:]4*O2nIq6gW|`^ٖV ͏AS+2^ ؖ,D^AF:8zRekr;1FVˣ}df BFIYD(,y\+ wN0PsPjtUzCJ2_~r=` Y^B;q*k- `=bH{-F>Zw#G[fI!,RUFj2!j?E>M_y5ǣ(t41(g$/vQC/+iqGdS4lMdh6E^!YHO C-vd \E)jdxdXl<8yoN@ԆԞ~ ÑgaKP15dф+M4:_0\:uSTōc>ԏvGiZ>0u,w.7=X"uyivC&H#]` 4lu{02w^!/Q󢁺]+E=#0˼Oxf I!aKE v;e=Zq& 2N;aVߗc35!{ӽ- :W,%*-O2aP}j7CwZ$FaxQx-4Pmy[S,$}{]3nSO !fV4ɞ5M~H6hRxz"(j2:h ٸGťSw엍rxjr"ȵTeDܞ7!pMrGL 4r/ 9..V"KsOȸAR,<L'8^?2Z>C]l!jĖ YG !RP+2Y /DV3;ukj(roI4 6}m@~Cw+{hMDyͻpY.%>EMR/DÜ}aKC }ۗl|郤Ϫ|hsG|\rxf8D,aq8vX[.쐉cd3q G7O uiQO d9бɗ3"Pg!V A^z1Uk}C0Tz7Xq I?/_h5E.ذ$Ð@JBv~Cj" 643AoE[8n'(uf &[=L<2'vs!iƾՕݭHk,E /maB9 5nd @* VlUu61D̸#>E#N+q22tY̎i Cץ-|͐|^ SFI0$vj7:Q[ae[4W$+ۗ QB;109٨8 sa bI8d{v+fuw^S-`0" j{1LƯ/; 25$J c(޼Bef/2Gjbܶ}z@ ~GD=zz,}Ձu_{ \/]kv{3']t BBE!h#P-B #Bz8k0_*lO-JhuAF;h˯1 JNemgc]#<.N!yf2v\CL#5$&i0rmy{)_N\=Ў 66[U_Eʍ'@x-Gz}Ζ\sn>)NC<}NU=%ܟj6䃅/ƿFa06d%; Egi\o~yr% =#/D!6L ~nIF>+BH煝qbHxٲSb$cɨx֍~rڱ3^A@"*]|F(4n&0Ńě0+s$T7GƄ=U5@r_o8 ;kƿϐM.Xͮ)+#QuT(,+gxU^,x+5_0tdh2xhardHv1 P, ' r>yaW ow [ >XS,;k{ s+ `yܒMk%Ѽ$V= H*Y b=#Ȯ>3r-%ܽfhja.20{=ެVH?^AW],?m g8܉HN@v2 wM{nѹ~c6n ˆ/S"{Esrh^k=seϔ̊ĬdGGbrmA ,>S)[%_BQ|Jʣ{Prex`vl;."POԢH1!ͯZxu[iw.dH%`UXN?McEx/~wSLyևT3$!r|=Og!b;d$FlF/x/adf( K2qii825::/;Gt(ׯGVQL73"/q ۜx'O0F~ wk-;|}$9{q̻W)5hst#3;Gf$apW7(!@-Wx ;͙舃}XSxTI%5ty2Y""C)c@Œ=?`u"5tEwQ ֳXõ|o>ٜ$yeT”ۍ[`oHxɮP9\G]΄),u2|,0ց>M=Z m5zwIDT%3HF`L&`?u {&{L}ŠHKO[7ԠLt@A64||4&o2Eͅ'ANHȏ?e]&.sjJLsºHmz͚Dɽ,~l`42\<'sքEv~s? 3oNvy)p9HޥdwwHG7pζz|lS6wQE ͛sCRmn`&žJ0R'}u\v ro'uA9\aT*,Z~t7o!weyϰ32E0kYxM_#RdIXO& dlrL]I|!%Ly*)0Q _t}"@⺵'9F2sPbk )WZ^Hmb:?8_^O9!É#[(d;#ޟc1P;`iGgU-"o&gymz/#_|[d<vsUzsmE]E-ͺ?+=#6 s d sN{w_wcjp|m识7:6!$^8\w@V'6[ާJ\f+|ly?&vIh\X FAGE`OkZ1N~%(HmJjET.*.Ch ;/FKOd?\? 
c"#W{CptK(˙E/3{"csK$q&'"Cg ң ͕Ȣy}Ld;} r^7[jl*E!G\/%ym rי=߅ i+W Kv=>g͍ XB=-ga1Iϒ&B-bԠ}PTpV56;yo*.HtsGs1t?5?->)yD,}`q>/!ߎٕ&'cyWo[7޿p1` \07  wlѵ;~-B`Dq(XC<)Vn#ŜW{ ""/mEf$JlEUs12+!1d7QFn]|H-*gA?o{EH7=k$&RGa$Nw!9)yJn"Ǟ?LB-}l:{y7}MB3zaʹCtij 9[T"`݇OdY{Ώ晩禑x^08VT6B?Rr @D FJKh0b"xGA烏dIQ+dl/GΈ%Z}7R# ,a1XzKa<$SR% UyO?!׿ t\ho`aTU%.l|WzS& I췾g{"Sΐ,}+EYmytqjz ٿ%ĪP]ekpMب}#s/0ti5!{MPh= Q aUkPΝ((yו 0%': /_Y!hI&@]oVDryًȱϺm2ߔi+F 6aV!D.6SåwإFߝ[rjf>LQxXK'R`΄aĺ-\=p<%~F=Y'8fӔ Ӡ"1)EOyr.vpQ 7;p* }ofi7=laHS;BBX}{/Z&2J1FW8үr79 sf "sC(@k\}Dc`H9NYhM\:& 4 |>Wzѯu{KQmι7wPRPӊaN}S@+T܏ )(i()g+r G|Rzr|'Z{ x-TZء= (_y䛹<m2t/E!{/d6?aQOA2Y] 6N>?ۻ l[(4d󻜸OFŽ 3#\ӑ>zqd|!i$Dž,^ۊmAuZ9JG"%d=Sgx9RNyۏg2& v_?|Zem/|b:WT9\2,WgoɃDMhWv1El䅷6O*m5Lֹ%mb ˇJv`-;/$ԶBgֳH)ӎ*&"q]u=eXkmuAïʋzsч;Z[7O94Xܓ+W`R!:91ԕ3f~wƜ ]֦T(Rl,3 =&ug(vkdv4 c0,2oU_o0g!ƞ]LKd*~E$f9ħ>hwMJC\`H65aN8kk ͚ O-wpœ{s}Nco\߹IO Kك Ve"R:yESVa=U!^Wu#[w^ d~ЫddgNdmbIǑIH@ncȳ=REh4r##kACku}ěN{ǧaeɠ0Z~:~ E.*0#*U̲$˟=+T5,%+;@V2b7@@?WW"6޵@+ V;y9#B 9}Zo/A{c&Hn ̏J.5#;a䅿K&WdBYt?!~_8 ) DXz+o[2,PMD)sQ{%CAM<vUfS9qfԵJU%#E7pK4 jqio0gY|К s0e{b(hDŽDWz/P%>l~"}8y?5)Gt/cf`y^x ݐ"Vx2YWY v#Ug[$n$9[_bz93L| E NVѮ~|-&}-GX# ?/' ìJ22 BUd] Ez;wE6ۧFsDdߋ G}FXd 4T&>fx!xv"E9OeCK|7{`,yaއOXW7 9Z {uZ"d6E'gaxP/mLUbۺ}{!Z4$IRi'.$0fܛH턡#!ߞڳ!oR~+ ]cxXX@=]j:0.>AP XtHɁ$adn̚Dđ[dn*rR$yG^#(?H2\!l$W`@rOA^Wh]!MCBQVS[X~b7HfnT9&Cq~MخllG睚e{P>:g U.@Qc0:Bљ_jUae\;}"n@Vdqk ǐkYS+&,~2dž|ҺoBQ%q6i5r5I!/͑ZIb'[r_"`ꢬ2]$ caݯ&< (zAyvKN01tS2>ye^rqwCrPsjR4mE+Kn,,wm4mYБ1y{)9h-Y" 8(g;oe842 5DwgCrYH7 :=Ɇ\0U'\TN/uma%OmJn 2QLYu"QnȞ &t]$+t"VL:NMy8m ~-Au t/+> koâh>l$ dmDG^X%Zs k|gBj|^'(i5"?VK#SG33ef0̙n-Qi.X\~p,}=`L(4F_OaJVP~?mËC~k.;s"*Q$Iͮ$P|fLvr wjǛr5JuݕOGXYXr 2_,"mfw~$!=Ր5^d V4;lO%z KV㐍rPB55FFr LkDsp.R'N/!!%?*IjBUEHLUveρX[,B.49s Tú۹H&9ey̩sw!rܿsv(D 9JF /y)I]9_4ّ93=YO^}oA|pu^F[xhopil=7EkYc(t{vc~U!0{RVX9@SrEHZ~T RDy-~#KRX-RV]@n+O{rN!2:k|b)+sG&®Ʀް !]5lκa慊yX4?8f:,r@)2{x!~bXg̸:ו E"&4\?X,W?LO~gsus.~?/ ~Hx1ZK- ́A(_샮b`-$uqC*̿/ٰw%3"~d;${.3Z#7!{EgZrzܫ+nC{Gfէ|y ^g5’3g`g>{r3~ork:d?1|ncia-F)G Lcau-.xs߁R0^/? 
]D(8Z~,Q-3v[=0, ytf]5?m"7/ülH0poWWgu.hҠQPmt`n&Uؐa_"F>oSvd$69 "7Ǹ˺7ƛc =qI+dEE+| :b̾4V9@DZm:!F]_- fi#WRn}cdqa6gL<פּ,z [7o "C;5S&dRk<"c)_r90qZ/c13 IJag`JQ3,Ա:Efȱ?WJRG\uj2{zG9#e$@ա@wу^ +s 2?y2Ⱥ+X^od'% T>X$7VafPr#ګƒ )cԽcm-JjDy, ͯیvd=w)9~nL!s>z_Ɖl 7"Sx|Ud? hfGaq%IfJt(#\Y'>`hCa(L<T[Oko|krLIK7KOGH3ZH0,g .?LɰKj-,|U φhL{;hy($\NRy@B 0j5K!$r~DF)tE(dݱ2K9^|]PB5sdY?#^0uh_1 GZbcϩ^PW{v"n 6I5(h8d(x\/Gt`NR|cʛEd%jHCfÙS7Bϲ_qʼo;*G뼳s)HB>ǞG-/`Rnls ]~,{Y IOhFKT`ťљwv|! ٗ$!WOyߓil+Rٝ9wG}Ma`VQ hi?}`4x#r.Ќ@1 .Ŏ̝%[/p"Z7#=5kQ`ʘcN<,]R)oǦ8")2[ z-=E@6)N2!ZT(- NݱPCUomCa(8DsA"VG!uy3˟p/ivj"6O sdz==$h(ϼdžLZ#PZ'i{.˧꓿ޡlqm$g:?.&d9v,VZޟ醬M>LGqԝQ`0t/\y  @y`TIjFؽGw }]P{wK9& xU~ 5o!9Xl [FHRcۖn.$sv|7t6;lXJk7 /xA'%2kR0bz֨ s~Ғa .jd h +a`%*$|mr!ܕ eGjȒz5zt-ԭi?OXaO) wߤ|sQaYG{T(L7#KzNdQ4 <n"#S< ui&Irt/,͸~w|~'$lUS$d!GGaEy1tz27儮isu/?j3!LS+v*urEdRAy4u L^/w{ l"tS4*\Z̝ E(Uʧͻ<)`5|aJGzIT!v( F9AOoJfG›a?\3W$~wᩱFbŧ6v205ג.CgR#꧑%d 4/MF.O"1Cf>nUf5N5nNoabuHeC @+﮾z'6_E¼r$7Iqħg4}`4v5"UPߒrƞuo#ʥҫ`σ`dϛvq(K21Ecd{JzۇZ=mH">dX=;Gv[_5(57 5fE!wAT10_pQO3 :0鷏+|t}};x=cWQE;C6"QVb7'!ӂڇ9>d3\}=P5qy[X$ ETi]=leY[2oiv#ai$@l~Ws-"ԱG`ɴIЈ3 D'4B0m%s fwMOs'vKz,lܵ= I1˧p 9d=l R+ZߑFy!5y}yPC ӑ>jEglEZŐyS=pԒX/D7z`-ϱ.$nLSBƊ5eIr84 ['V pڄY-ݧ?bM׃?;F\ʑݸt5R\S;ʹFX3~Ne}/s;䋤`b!BCt{w]sc0u{ f6VEm1$h`֡!Tɚ^^zuY -'`} YԍAͥ2K$< k+l:duDh FRŻ~ 6'+41M;>(bjE:Nz=~fdq|gӾ Ua2# dCuӢ0{s.|mVKDtaa""#o;@s֕ol4oJSLRfc}3''HlЗ 3-DcPd`D8q^AK_W"ZAxtΙJ“g y!tC9A0ߺ!(#w*5/S&&*wԚнew zRgaٖ9t/Zi I~': Τ@-Fǜ2vWW6 \77i̅jX2PF—ܯAH>l;@G5U1dB*'WYuDs~~&;?Tw\qe> pÊbݦ?H?ةFӟʬw?$@=)veo%yzTMSHUM_L(n1|yB~*bȠ{1tDU5iȦ7eN_PrG/Z7P)sh< vo2VETc}nܭ*>(Kuq2}f4"@c<ݏV"Tdj{qq 0( VӶs hOo}:KRO9K"#)VYkx p#O=Hat$TxniqmX׫B!۪tr~z2f}p_"M[T·u]o73E!*6~3*%_̓aݢȜOj}FnكWt*ȸY6%=|&\qENƧ"UVG ]yy^[7;EEBbWFxi~I0!/s. @+2ٻmCv!qOPF#'7RԶ tDވO]P`ڦ//,QtZ {~x?Н@ǁ3)Ka_Jn'pMZ}S/쀧/} q M45nW.h |?o0 &گ.úpu;מThn/ +G7e-ZF=ڂ|/zHv/΅LA&MvE9ϞSڿ3 "i_@SݕH]%ԏSы'_"S6ۗz>t( VÎ݆ؾ:Ckm::#?3q0L>޳E=HJہL_/!-B/jYT<ůT!Bh|?7{nj<' sdYKDʝ)HL)e}Uv@o:)  zB:셁 S`m=<ݷ VJFݐ/Y~8"Ś쯈x\> {HN\.R嵌! 
OιsH-V ^@_Cvd|gjr)20 ]DҎ {XOls=zVMr* mXZ$Jw 5G>udu nSb]:嬑Ie2zOEJ"͸*{Lk83P9 JGWaH9WIydQ`U"mP;̌j']jEE;Xt2FRϙEt`ՁK HD z%dwL,2hlGGG#sGw/dߵPL;( pR ݏ׋WG\ϼ-&|Y/nw6)@z:'.¤E>,XH 5S-p4햢}jލ j0ac&Ϸ MFb[ G>4lx=(3#/ rF%B}#/h Z,naz[c-t3t6u~ ~WIսu߽j0]S]~!I_/{L'bߩ"[wְrٰˢ'#5#Hpft8,+Vc*_wh)@p&8huffOn|K6v⟟残@{'^Ŗ l0zW󇳻~COU۳Ő}ۚlhPn# 7?ҁYKTX%|gR|-2忬Ho5v+`v"r: PνzG#)WZ S`)o-J-xzN?Ͷ̆gjs|OZ#7l\y9%C^=а'##N0gy8˜ k7^-芤ͪqa:zLY#|?TjX] zL#ň]QjD2k=ڹY&2Jr~eGsLMX+wd4,%)vпj pQ02}8'_Y9o\=U ]8>jB.KHM/6E a{:Y0+ﻄ^if9ӂʙZ!4ܫh1,;6>L}! J2_ܽ=î}w1F#2sEB o=: (>xw,ŢT70c;xi%w}Կ_/LE/8(c;w 49n}p!K^+-= ӹyrV/xB^Phkյ;FY3t첰/g՟uWd剑A蠜8 S$ACdzxwoc2.OlPBFi>9(>@냧It!9lO X 5h߻wskY y#3o~e9d00~aLWÍ0:E}}ͤGWLs5=?zdF縒+g3F!+CH߇ʨRidFw| 2ki/tR !w֏ǾYS;C犅DT;Q f~O%R"=9[kMdȨ5'[hA&3lczKX+=%՝ǔ9`imo&j!bgaWL"hLg#i! دZS~Y2Hr e RTed#I'lmO |Sv5;s_JDE~N ȿ;_W2ȏjCngrh'\@S*wok'?J;|iLt27&Yr vC64_zBjWN:w` .?dEOY4dY&Fٞ :,qoE+o͕VYNsGLdߴ  -?ZnG# !PEZ ":P|a[mZ k;dT4ϫ8 $mœ`̷-sI01DZ96B0;I l֨z^+X o1O,X*#R<4OY{$֫0=tEl 2O tL  YLH ?~OV I[z5VyN> v!OkKTz.s-}*BBq0P~n"JGQ\JX.9&(%`"aX:+OG# `S%E $9wgmUuAz'gC!LTmU ~Q#yv`vwXmmU!HGU c1mT`wB 2羖#Ŧ@S$~uME͉w?Ĵq~=n,X&?" Kfu6fE6*/H#uTWY@"rl `D[IZN51kSoh 0(5*_Ksw{J1bO?Zr_ϻHbs-cf#Rf'F۸nS1WmYkƾ$>ׄ!,)㿑[A &#!V;Wt{n[XVlb4wÛ;0͜ ?D.]R }җX{~FkX8t]: uX;7ӻȬmWlǧx<7#Urg6F'S7.]}Wư9MuX" dh.=8+ߟ#ulp3dmcVX}ɶ3=560&eI^aF he&D&nA2F`K=f$ Z 4BԢQk"%AG}dd:2n&Ed"aE' ,u1V'Y?<=ꁸqTYa?a'iM9/1l]!$o3bG\?g#Ei9U"}f*ҹtQz+R00wG,V4Sٓ;{~SY/kG5{?#fg΍ "IU$T3K}2D)73 :d+xl=^@ŊVf w}{x3m?e[ e[AC-nuv0xpR뼼 <], ؼ` '5gchxeR{\Dpi[6ipCd)"eb<ؓ5YWcr `b%4wo.v%=X8cI8+b*{5aw|9> 6; dCbcQhi 1t \ ~mw"dXYɤ2 =Hy'6oPHrtsf_DN/ .fdyFO:HzqS=@"C0i<ЭzUR̳7s\lC L-z:0Gםb#5RzHI(JBܛ1Nyv{A x^r |;kk,K!ǹuHL3~ y"\\|a ;>@M>;Ct_S^+8q{V+}v$ !fvdHsb`W aU鎌,L9NVgÿabVMv;Lꄖ!Q Sq}GqVs¸FRDo!|BX6I}1 ['>ф3M!4HZbϜ ZmmJн}c}fH.B>0T; ̘ܢ*=!mJ#A1"+!DICNd[(ǔVM9 H>"O1!ߟȷk|l db?] 
f|SQ0Ioi>iVТYzG-Ҟu=\&9A]1|X1 ϊVABIv9t " Xr6o=nNl录,<κCȾ!sfN*/\zw,Tz5/%ft| 5:'O?9g͈ I>nlY\ Р6e>;u9-Bs@ *s:h *+Yaiűd Xp9ͥOGaBŒϞ-5ׯ"ɗTvm"Y.P02wDpSR2i^dNl[T!W;ͯ26}}"s VH&s:zA `n+d`7wc jhwqrF7yX& Cb6 R8ws?˦u!?~]'ȣ^`|o]Py~IGzeZUgXd_ "L~m_؈gkᕀp4y&G"@O3M0]6Xw`OTH**a,@?]T7tF~j"/JURu^?țɿ8KV׫)7}>5̾! Ct><<& I](mzWP7ũbK+B+I <G9Y;FƟI{'2H G_؋#V)j5;FAvc\"V 1^X9nP;ox#uWopqy ڵT31KCa؃ބ+G 6 -^Rh_71{|z'n95jd }o)HN|IA_w?Y!}o>пf|9 smf(I{1J,So%<]VIHEoxJ6%JH %i݂P9z- kjow*#dOަlc'sDu3t?c tGLM[)>@#֧agzLG֨(Cj6kUY4R #Nd"@iTu?Ӹ/mh-RF'~sa,[wEK N\ G^Dx,ֈL˺/ i=y\7]HUHz|\y$}D كk1Lȸ| q?I*4(Ra\&:\;wGݣM4 K5 Pwo+z:.ԜN1/C¯gE说 Sbe`-$z˪3xi|^y 7 ϦFtߺ4}͹<Kmxg yyWNCA'v;^cT. {0_43sqh/WZn,v ޻+D 5=ޝ s|F?~8^njB4P!$*RQ*B(%IH0 "#d籏<I烼 U]W;\ Jtd\ǁBմs0W;f6j CEj/$$F(h}MW` WKpEl2o]u=㍅ D(BORPP;bM1\ BjJُ?CNь:dzR7ɛwsxJxO1Zft> 43~8, ݷOL>H;UKU#IBh+%.,g?˽ۋ\naA.n!b~]L6b5sB>C) ElrE$"5.n?LG|3-2p  <`%~3u|`X;2짉\BvE.n-O8t5>[=: 5Z_ۚs'~M#da䐾t{1W%bBoPXaLGSq~ - ìPqG&0]ri6ݩ9 3}50ŰOYRK~1 _Oꂼ{L`b ´|`q,>Q&=rN:{9oO. ócH^4 {m\lCIrf+ruK>rrbd/䵊8yrbvo)y{ҕ?zF 5~ 5&sdatibҼ-̱EA;rIGEI>lMpKZ. RrF`nlth+?/x *BL|vm Nxcoᄧo>GHJhnV,ujt'V!tT&ҭ&ΖE~JAw hr# &< }L#`f+mБjj{ S,Ic.$;gAlv ƒ ZQv0uBr%Lv 3'Nn߮ Ki_ِYmBb2 7A~0Cה/+%Y)%[ {X 3$67^}hMG!5b59>~Ts;Z#xG=|.4 bE.L|uA..3ddpYOGPtDuƹH֯wo=.4"BGma9Nz.K.?w;"ۑӯSЏKl # Ͼ/$ kܐi,LY3s8T؇l3 }T[d-d﹖m6 ׼']HތRU}r'^k 0L\[U$yCTb[q"&>-Tg>y$>%:2ekް卸.5<=wX;YY$ ct|z,,/&T7oғ`\>iDz:fT e)V`]d8;;-/fS-In^Nw}#% IJN$D7G7"{#Gk|AFaCW`kM"2K;5MAvߟ{AAu&0bW5 JOk¼ΏB?Gl^ף ( [.H per0|ɒv̲q1F92\)=L\QÛ5ںkwYrx u!Xً~:Ȣho?vBm .4XT;;FevnScy PC:~F}O.4Ծ}f{[.aOX]@0o}6osB[[$GemӥW,"/_rkTF҆jQ0;_Lp=kW5Ո ?1=׸7"q{WQ0ȤD_ۚX*5vFns,M ˶  JkN?N:pJ/=/~㭳Zz'O¼;k>Z~B־??{D!ˮœ¶a?$ 3 8(0w{O׌(:m_zb7B<+(vz=r rd8 ~۲PL0)r SBC%@e*^fWGFGWC9L)%577ݕNJeeQL^hRaK0<3;o9׻ۙL૦ǤNr]0ҵ$h ukՆaPP<~hz'3Ki1%/ ϵx~탚>'*2RNzlH]r 3vƎ۞˵d`WٹYG&Èckj0N,ў15!Ydt+>Iocȿ#pPfr٦haGBf5>4dP[-EkNzȻ$]JҘgΰHY筡Fֆ&f {F}aMZ. Ga|?if_(l՝Ms2ܷn3\2r]\D*v( S:FCcZB__ql7Ri.]E ݷz֐\r^j N޴0<>r1G\Ӈ.;\wXk',h#CP9|{ϑc&t.y9"爞u? 
H*NU@F+Cq 9D ߥ {͛nW.Ml,^Qb~ԑ]Cp8]&_{g>R4SQ7#KF+aߍ6^'2y%&@'ǫҩzx6> r2^ۼLcqsduҿv5-y H#b?`E@Nd$es^ArHPuwn#B435Š0UsC8xj!=+Y"2&nsgB\wrB$%g6 YTB.47] 3EPN"o; `a,~t^~2?=*(R\w"y \kc lQYeOF{0c_kMݵ ##6{$\y"EBd$b[ꝝ`f%Ro.-$"GrTǂ32݊xo 0%@% :.]rr&_6^J)׭`!gþeX}6,?^sA͎$/+d/Mb~ c:-=0dDV5Ō#|729oܫl"/Rr=#KA١-|@,2?`[yj 8gc)=!%iH.ȻzKK@AV.9)^໒ ?O!L[0q9jVaK{?Ֆo|}8:}׬9j ? 4tu%Oqt M4FQ[̿%#{ʺ~NlpExɅK-ȯϤ: &p&gve ַZFO/e"}.;|y w0-#lgj(a}>$-o^ő7eh,y5'UCBaŮx7 k[aU|W27nz6m?dTc&$qK ']`ƟrF=NA`_o[g^ 6!THښ$]k [CCԡKvEm6c$vϑ[6އLֱ s蒁{rmKϑ yʽs\fWz栿wU+5ۥOىLaio"ߏGb|1#;*}YeiAGI)(-~ž LqAnW2{_W4מ7OL߰)9YzXÙ>ֻX*If<}DsGkZdG}u 0eXe f޹ǡR[_2LidFl],zq B1Ro<=f:aǓ0ZJ9B_#N(z3}A6t w u9meoggϜ Q^M0zr~1܋z]!دC!H` ?i\WQDA#,lwv@KAqdy8|+D*s#Ke}{mvk$DsPgA_|;R]w4Zc(2f, )5(=10U5]|#;ﺷ2 jBr}c:&`5Ǘ%9z{X@f72z_pZC Vy2ҕav \O;AOSD:UkL 8ZK/3zS0A=s(9s7=^+_ cC[wGc`b'e["{\A&wW\yFCXNA~ /ȶM>j8roSTӌޝcyϔ1dQ߿]e3l0|гХr3Vp2%35q̏g>םz~]#joەICaK*= &^K_ya .|}&-t9nn:^#\=F('KNFM#gHj+/} )Ȕ/XE[F6XUJԅy';ս`BUUtxjdg4p#Nw:Ipnf5s,1Ysr0v:g6U)lPx;*g=oIPAݡߌXQp$DFijU7QA dl2)2~ IwyBQ`MԙD>pxЅ |? rBySxBO&xBѲ(2'W&22}mʎ' U290 , b+o~ N2܆?Ca97 dڔ8z9b8 DF}o凵kiS.>\4 S[E0$kp䠜3]HtgV}"*Jkois7{K^ ClLag1 ;{bx捷^95dv)]k+&E=. k=:ReVp/$CO|v_XY?U˱a 8b{d8N R2G0Yse R$#^k_yc!ge9m1/$Mhytۀz0A>axzO^3 mGBu|8!$ x =+ߙZV}ҚN{4"G}RP% .#L-icK:zrT=})ݦFda=x OpGR90~,T\ZpگIu遉dUᘿL@r:i^^Ly!i2s7榄!& GQ`LDH_gF~%3{_F|OyN?_kBMC "m`5M^ d#)ֈ&zh8_'˪a5"N"SBDǽKdY/CߥMn{ۧsí.2F¯K*0':3uK$O;o5,btͪL02y˔Rak-0?$+cð%'FjC b|1HIɽ΂~ Xs` {8,nec肺ᾏn04OYe]hq 2Nm~PC͟`v4$tYܚgi]1gd|aZNē{ ˡ!i$)2j#Llu(_s:O/0qM_.5 @{âbt\ySYf&!E25+4YsNvaG+;#ΖUI90A'.Șt[ , եOWGLOu"6Ţ : Yңc+p+,~SVzScР׀|[%sIPJ.:g)eno9-}:7*3`YbSJg;~@yKGՐkQ҈*Ƽ2S f߂wp夓4i̾SOTQkT7h >S@7,e0ǜc smԐdXJڐӸ>LWa)zAt0s0q@(H$omazԡKCsޯ ÝoKא普\b1w!󦓉HwTt՘/\unPM&(t¥$vȻpq~"7 oaG|ҧ!/*HN4ڊsvځ/a5`˴8nւIk0s'N~X:]N_ EJB$S28Uq*׃Wlb?`ޚ8^v?ܶ AwA忍KnP}0ݶQC 4<Y"OR i$bo[\(H.. 
Ӆ+Zd{kR,."g^Ԑ;j[rv e~Lz7IϱxǝpDӳ]rKw=hWa!)<ѤAWaޛkZ{^q,4|:5(ۧL`\釋 s5PO.?G}6݂)mM_ev2| 4j Jrg^Tۻك.Wj{-Ʈm4 a+1\Խ"0eμpaav\1F7*tW^YR>ѱ]em\ SRϯAߣsyHvOK07umI|3WºcQyxE?QFb5>ȲZjr יEI-Zyܗ?{#2-sf2}B͡"ʰ(|p씨L >e VުZP+"4O_ |cܺJճooz;%->>E j`:`UrQX||ҝ%KY?m]`񧍑ds&ȕXOK0JhF>v$}{veUGVo`i+\Swc`nnde) )Q1f}`]$W&,WKd#c0Ja 뇩gMLH<Ȕ Kk;a6eu < ]\ M vnPiSILnMm^Ljz~hWÂ~x6eF{Y*=&6Y ϓWwpBʥ>RV uCQoA!?]:r"mޗ%. }G? /ˉ[c$@9XDt(>[k~?yMbo1jSoɴ?%u>VO\a^QυRwjzc.ԽZ=*MwϚBk/H瑮-*/.KLHǓ$F苳TxIVЖqg$>]طۋa{&=cu,j[ߺ ݭ^[89,; ;sdڼPha72҂9늚Q֒bo舔mw}Aֶ*7G :fJ?+_dM`{4y+Y |1]1rtsG#,㷬7$9 6[0vyb[ ֤zX̅8[=L˛DdB5>Lǂ![MmIVCjI\XiG+1(G*‘r|1@UFfg kH>FHɗ}i-̬; 3gVfF"N+EY/\LQ @N-c+ җ? ݟz9V 'R[믥x|ͨ]wYu:*bίI*=2@W6T s?3@<ſuψ9t3nՌ ,]NN ;oC- 1$yv|v M?YHVEn]Kz4aA&+Wu!Ybb22^aX V>V9(V F:);B9:T>&M x`F{Hrٰ|֓x| |}SUFeBs֏w0!Yc0-}a#w?›`,]-FKI=@?DFC2e`!bߧ 24+4ۺȥZ``GqO.:eEaȲ4kr,!|3ùRs;/#YkJ+c0cV;9>([W|tdӁaB'1y>D@OTa,(mDe`tJ8,ߨ1Tvv.B&1.Ț?\980T"SUT.rT";~9#iS *i1џ-W~5o-ajy=]i'},QP[d~9'eJuԒ=jP}WH. f\ !"w`~#kC Mߜ{c_$ڹ|rcCpaU?s5̾^\vB[j θ4q_(+͢`t%.Q;dXb˓0X;qvS.oVMx | -mm2 S11:ԝ/rGus"*=r\KP;R]La,=,Yl5ïHn7(r[_1+Nbw9c=lIUk|N:Hyfz.2g."DC#QA?$ׅBJ4;v?Oa>r1}3rN$=IuogϺeɆ} Gy_ y>/\IMH("nP2@nѐ}ׂs{T]0C!a?LW2Hv@K\#_ =BQR{U>0\Vձtǧ-ȔivM"ֿmE`(!=?yeZw>:-[#!,E!D5߹NbBX\Yڭ6-&.3jZyqR0q*Ÿ B4t!y\jDͥ0_QX3ix`82\)20eh>fT#rK@,{Zw;Cc#H2> '3¨6QAGy4  ̅$c:3K`1޷vtL} hu7%T͈趱+p + ljߗS'RYUu9>~W6/VⳒ9qBJ,i-F=HE+ωɛ,N ]OV.μcg/۵ ı~p_j0mP^޻WOd|ԋO\`›&cB:cx,/ \u?7({=#E`&q&zc.,Oi- T@F?CTU8v0ٜN=%Wqf>G.U&A]dK:J#,yŃhw._tRN|W8Ӭ[fW}IPN }Z˜E&iD%g,V$ps["VЫjXI9R!*W+ U-$ZMգ#3amֱs$o,wZ 9l8z$e>![6e} DZ ̣变e\k)DjD³wuzz's|ɆH]*owddj♎R{nj=Y~jxYu]N]kT.]TdFUcXoVUↅ q:yf; pN-o?_CFz zZY#_ej%hГ냡I;0s9SIIHJ 1v%I_dIݼ4ǧim7C.2h#,r:4)#0owp3}U y9|7sqw-&teL)*Ζ)7CԹCGAγ朧 7uNH.&S/Z)kp L˽Rv4KX}%~6MwBS6x||LjtĭC4)r9BՕZ˰q_O3aPa@o➟9ɭzזj`0WY0.VYK_ $ ^΅҃a7=yN*`m^ieGv:dFq&U‰0z=OO`FzBg Woth% :~{:uŇsw446,(-ma?`OApX9萳klM.fd_lj<1u&-+E#id6W=F)ߓx>yk(T1+ejv~Di ݐ?Kf79CV< ]&~E}%#hH}J)/&Q(S4 $|;Gv_i\Yv>{`,ӌghq+4j>Ɍ3V0*DxFӾR~l3#UﶻyU9.5e<\'^My.V'mT个Rda,nWɒ&$7W~Z եrv0*mC)4]#\(W|ҥŒsS5!\ez@2?t:|J)s%JlO3,Z/|uZ[![2w*ְWK)V4ʎZ!$zo+k-W]Զ4nM/a 3 TԘّNX(c  
t^bD\lO¨F*K]%̯;y5E&z*(_L LǼ`$y3[Xr(r֪8=7b]!lW7~.]d hx,y  GձCyǑ?یy\D`fuH(/iz k,ۭ0r +?\2>En *3iSu3?sAԎ\<:h2ɺ u o#W}s˅iCXa)֠#2JodkM#__^ָh=_Tv66--!jȺF ff\%7OD]3 d}TgЇ,>@M>dzeaN+0p|/36i#o`G9J/ >)tE{'0ᆚ7 ˮ/NW^[q4k= B 6F H  q%`֟hA6 XfL Xܸ|FΟ#e+faaË7^gn"D1w_2s&MrO4U{C UlJ,lKX[G%>(;ܯ*ęAݷ͖~6\k2l`fB' {rDAڭD>YJ@y>(z]I{sy[!nw>%u~|`pܓ`.pb"J- .~q"o2tϧ}U-OҐ0,_ E?=E&=M.R1\DgXo,'L-~y&rjǥ=zv09rV5 VN:‡⾮dwB9`^MӰғ{mO9eɳ["Kיk 9x~lxodr!:~ʾVmEGN!;o֪T(/E\},mѫ\ `ح@RQh3yɕJ(T|0~9){`-qH}u!k&{u*߷-*r]A'1rsg79((YvLN܏:Îșm4DyPB6"D/I_->riH晋n +u;,9@F۾]|-~dzyXlF?U-m8d\3 {8o1CdPd˹ >k T۸9l cܩ\@5 wf`Lq~I>aK5VַG]mfrjFm/UB#í(ˡjV+{r3ڵoV'>BFڛL_ݕ2#1("E|NE"uWBvR #b ?^8ReLGK/yjeve0XQ\2 S* c$cϒ kS/i]`rAD)e tS9ZfXY:;𑲬Gfa=X3ܙ(P t]R{ v8{!{EvN:|U[5\9֛̽)/~@MUb2_c7 EBsd!H/,/7u/ uIOK绝 i^<]),̷W٩\cT' WDa^ݨX]ITνfV@+Pp7"^|/*DWSjl>4Z,."9`Xuw@ ,VoPp/a++}/2/"Sm'C.hU/r=vy#F^;bD^ʇpYo zyNX^jZ3:Ea@w(C̮ew\ɕlԣ`.3+oaR WMdMCʃf\=ׇ$? mRW-/gK2a6&Kdˇl^0#R^>:):^aѐDCLERc0r#AܒVԒ:o3Nm>L*La3,G(k!1?BaQandLs4e^Exwq 1Ü۶]vdj~h툼~{z--440랕՚|7`%;YR$B<۪Q0a̓td(Cd{ ]KFʽ֘@xsRh$xȤ#`~K>*Ճa4Ļf]֗=Ӂ$ -`P:Ke @_v6WLT]|JF;xmv3x 'ta`*G~6@ʍCc6,δ 9?oVad-k,?אܔiW9N%7NIƣZ0}]KLZٿO,|ː *1mٙt&pi , =|h@ƻVGa6/!'D}wjװwmJ*fK0wAچ<]$[52=iDX"ۃWCu랜e%RӮM-H!_ʞa?lgnCgCP绗Ym3y_:?P?$ ziW9ODAнn`P^!V)K>@#dH}R 4#J@Wm&6k Zг^dgXܸ1Ls"KpXֆq'.!4+mE=Uv12+n93R'~.w3ϲ2E'r%Bp2+,ܰ'C rL{w䠉_s}m{y8SF n@>*h`q|.Z=HpϕYG>ʻkQ 7Xf$LE2@#حdaH/7f9E[>U]jdzj$zUAV=Qr3՟b.{2!?O;PLJS \Q5~s3^HX-RXhXo(g MeXۭ`zIr :0Q -0W}U0,XNHvOx>ŗS|`.o*̌n#{ԛg5G:PV̷xl/DJ iXOH_MWm*edRTK2)Y^}Mfe~*zr5WXBv9]` aݠ|m\ < O/=u$$Wߝ,XׂsMs KE7E__B2_ ]]+:OUPІBo\,/"$iھng8 jF5GXo* I/uwz0>Zґq]WAoXcQR=T;:0Pj4{8.&H@Gƒ{߄UT!^s5XT7WCRhH𖄬5 CJ $^S#wL1Uk t ]\ݎKW=?z yG;7#~u9k TBvJ[bfڣ,PZ3+ZcG a쓌Ӳ=$3O'/lnytq,ֽ| f{`a۴ielH<}B^AM܂I-wtAW3J0~@t[ q+YhQS.%/9e}]n0kwxs{#a=Sr DmDs{FKs:ex Z7f5l8RgJ'> (뵵ȳekYZ~6rs?.7o^f? 
bLI1^s§ sale f645 %buaE{N.zrgNz?7]qCh0>T2XۭނKy wA:'?9;Y6NqLH׶Y^{AvRtPB=kK kAG.A1.T9 76G'XnìJf?0r5M?sxͺͰGdx I{\Q')Qw3ۓˀ7)+"ȹ( d=Y;$g}-IfՇcYo!9DMU9Y0Ұ,"EQҥ+D_8'6nDn0SsV܉[/CCU(jsb2]zI]9uƢESA32z0?\ULa 1i4uɬF *a+풓DqEvF2 =LN!4]SCKݜ [vMly`dMk"ʗo*x29dw sӏP-'dg+W=_=`aЗ*}m*36N~z#yA.$c(-49ogmSX/快6} a60V+lTw/,Sko#)kOG"dW>ydI6?,rJeS@;/Bd:J>k7ȑ́=0Z/y kimHϪLL\{g\c7wd)4O57ҁt MY&%Iم~9|y^֞HRu!,s?9od3V4eAvW}Yav{\`]0E.}&-1Ph++!ЋbX s_cfB, dBG*%ZvF܈0-f ;>n"v>NjW '5p)r;n*e(v3c3s8 iLn8@(1B89cHxv0Co +klte7f;+FrbgN??.~è l%֟פ5hڟgVt[0=r>gfC$[>p3CL6X'|P|Ff Ⓞk Rtk(9+AY;²ζ;3Y /ޝ&;&d;6 # c=0; y7kF-Pc+hϪ;Ԉ-r ;0TfJn潐#qKJh.ň2t<՜4sbsK) TX0iɠšqN[Tdϊt$yKog6vY0kZy-~Ù'䕳!R$`Zq 3b|),D՗60Hs 9o"=P,g/M ʲÚ@ޯއĩ@w, {+nƓ;#DRXuU@TPzBh=ׁ?P_4_mIh{OץCYmmQ/y}^8)n\wrr;6tX /Eס#r04'#$n$<|z Z*q fJLwIH!8R/lJ? e5R ÒƦm+=^vV >Z7Dq{/ۑW(v;u+n'8MD}=>|PO K0W1y6dyieVXw$:H5*2 d']BEFnУH1]i_B`1! qb(| 99},(-z|#'%M ڪ3ȯRqD$Qxӈ|/ O~oyyYn^,!39X6}xZ5g,QZ'݆3>]Fi)_4F#kmLg2r3d{2׎YC%{t}-+Y:Gx_+'.o. gn䵲rvG"ĉo 'AgZ&a'hs6=gav#$9ן v7xZS]0,g|?'ea:gZ0yWgFn;\sN=)LB׽Hg{T:{sߛ#)\}&ó@ǭ< ~17ݴc&d ڶga}3k}`\0G):D>K)̭qc^*,SK))/4)K(?6ڬ\e+ PUŏyF3?:*IeBH/x52Wv>ϐUBs@r(_EVѿ;d7`\~r{%&~ЁĈO_Dpdrq8]`bxSd=̼.w샑h!hWZ/hYJ'fn0y#D|rh4($]1)f-gM лEe 4Or6hm`G ǽ;VMxiw7u y ?~BWAω'NV&r[2D.r⽏YUvַ#ˠ^$zߍuwr!qNhM<~&$, XKGK!uHѫ7G}#WN[Ȩ* Vo5\_=MwVغo;IZ F7xKhZR$X.kfzQ F+Ό{rFTd1*@Kiy%*LI!bGtDT=oHg]/ȧ˦ca2J,^tE/}ӓ0UruoYlTsV LI#͂ }B-Иwy91\eX?p8)^C;^"۝A-p_EW(AOoGu=57(q+]~A0&Q EO>{H@NN.VbݯǞ#)\ >-,y oB=O3PhO)!نt'̽RHͳ59fohA1j$,ҋU>",0j }aXc!^c]3* Vߺ8<8Z)^dx=2 a/.+L#{j`^SuFX{l$WPm,9uXq5 ]\Չ}/xH}yᑦ49QRsm.._Z`ukftc\1v9]O.,(}#OFf !-?<ߛYOЖO]2mJ/rSd^sI_-INBQ0f]Pd=py,l>QZ]h1+_6Pb"A_w$@Y{ z݅> T7]+7lv=fxko&aq\Yd<,}lt{JҴYѫeGͷT/= KvB>f0sM?O/@}Sf=Qݿ3N}FcVsA6[M5s{҅Y¾,7dޥTy)I#iqÎ ;aqiXz!L />N~[)MV0C|H> j4}j)y2v^ܕS̭S1 cgK!rIE]>J&są3 =>[Pc1Ӷo t-\aG,ȫN[@dž˷d#FȪ>>Ɇ?ׂUR`qa7Cc A 1$ 9qC0kk4BK{e(NhW7`b{{4WPkV$XiIfȁVz>-??/LO⠗24T&HZ @sٷŮc; ̠ ~*T'䂜O6UCHp#>lh;Irv3Fw ./ a>ʐW9 t xtߓJu '`pq0[6_CXʟ;lw:!}%#vJԚr#Kf't?կD~yy5s垝]"AQ^] Lr g"{n!Þ$m4D"{T%h4 KǠ& \ )Υz,eu'׻UH9ή{ԩjnG$ (; W:Y`*+/Œ¯.J!])=ֳD]U ( ܱj?2eneAG,L^@k*ȷ_r 
I+iE>#:ckvξg\Z2=`, \ź"5uM3E`%+}o^y1onk$lF-;oJ d3/ށԉ:Fߑ)v=PIGQȸzH%f:tD\^_K _ޗB=qPVHZ3!raB(vEa7vDu9`.=1)e}kojRw`<ԻQRL`l]f+eBԬlV|),s~ | qCR/gQ-Xl KH dv@_R&AaiZa"~WP% ϧ Uev +^W$^p.e 1&:ܠ]J9][;Rj~7@PMTQR$caYߗa`P>a]>D6z 1rD}0r&`W?>^U55$8|;Io!LzÀm9sXO$T!3t_u22WZsֆemu0sV)ĢB̠p NK[?Â<'3% $#yon컯8#εcd_|0 aE ;_}\ߤ6Ӊ_H#6/\,dբ6HT`5Rf^V=_'}Js"kW'at) 3JH8xvZ}g&.lZĺn0$Dyy0?Oqk^}A2PǾyгU hqE8 m 27=|#:' ˆ UK g]f! 5WoDiA Vv:6?y|j>6=\oI ґ#W+F\$m7KgaF~'6ȼhY5AڙgxQOQa4FH"xgP Ls=)qu;nPki ;}o2*Xgͭ eXhp`aPRP/Ex1b,tuՔ1*dDGDX!뽳9/E0Ʒq۷ح{j'oǘB5۰r}">?lADp"JȭЊl!uo _n: /R`a0;YuZ.4d{'`[7{~}vz6;)[#Gȳ{ڼCd~WkHJQhIIдa}J>8$3YcV킕+/>!yK|r|y9e܀FȝM[{uϜ$MgFd4 kf0IXÐ;ݠJ$Lρy}50ld>FI_2n׸E; 酓0{]X5v +-_YQ^0dItsfq Duyܑ ve==,o2ô|m˰D7LF׭ld&BCg鰼.=q3h+;.Ro̿[ҡzv09_X ݵ.V2#̊&6}J~wZAb ̓3(:p0cߗD)PIJ="Td)JH,DQdϖ${}}w\5sykf줹Ji "XdL_qu/tml L;^܍?n̰mʻ&>O\H_]⓳ wH0T?tu'?LDžvy -R~8: zdurV =E}-^QyrBx_ץ?GiKlW&`M7hB*^AɮeQphaf8(H5 =B nHd<Thͮ:RTF>Xv4\[r cYړ̎Ȟ\#+.#uq 1'dPiYCR i!ˡJΐwߐ~Қʼn{nx;"#j>_=XH^Jsj'a84q>RY?{v}4 -fm!Zhk/6y=°oTl3ϝ;ah^V8#_i!Y;ެ%?veYx!&μvJ>Rpv@a+3H뱵ey>B7`L~0Ԅ\ ;@pԖ+v6~0v H{ Gp#HCbČ&Ȟor%3#/G-5=@Ukgf:c, +Tu94*AE25F5RSvT!ÈT,-NmQRѓ.J070:ID+чo8;G޶#ƁlՄ9x4ܡI\tּ㉬mQ-^'rV?@xJfzs(@+|im7Ȼk٣)b0)3loxDJ)NEToA*g9l|Xt:=];R|}Zal#xy,8^BG`&pdȮf(yfsqsoyQr^1o~I&cgRF7L=',6U:ir0$^\( ELz4UAJ.@2!8ǫwY:fT`LRFv*3o924~I"/]u, 5~Y|n_6Ef)?_ 0W|;/2VyLdɔq_{CY0bc`^7vB_y i.x+{&˞/ Ms "ǟJn؎R_g=> F;w}v,Fla Ÿ?\<$q*$rtr⎩ \w% CbMee⁚GY NꪁΉc A]q$ތtVkzQ0ʺ4;(u;ÂŅ%~)S|!:_}Fۮ&Aw}ԼLmۯK/=a~,ZkZa/zti H7B>sP,c>.R<2`n_xwDX_oVCdӽ"~k7ȮZ'I, #!V 0aCR/c(&) uZz%2z"q$FEUWZƿl$)\ZExHad<[.LKaLr:<K;#?GH+$l#r:< yc`hjFͶh8wDvkoz7U(W nޖ E0qhv_,>Y` ISOҤ>V0_ CKz9rYh#o ٽQ/O2=YS|f!df4nBz"IŽ01 #~_+W?B&^;JkӬ)RX(ㆴIqS{ׄYcehM<TZɂC7q6994,/TUS{X;BfQ{%dRa%A#B=~e\{g V~P ۬RTGa낟Q |J%懚pwº'EL(K"ԜjY>?/cvL͈X5O;ZvL;otsUHY {8w9Q7 z _no29t-1ȟǟ/d\~#+G)lG'6Rl#jDwѧ~>&A9/~1}^ {rYSϾKJZ "m -B/Q?]S,~[jz{6Z*\q~0rB0&_lu̇M@X 5 Ht>ӽN2,wZn:Є>zn )jN8PR 9|"6lsJUF"E?+~AհѥP~1ΓY>AlL}0u/XvD‹zdhlzxX;Jsw˻pōν0Cl;l3!z2u02s /2ՎR~!oꌩ> pZpt#OCYr>烢!~m| [F2sz:X*s :xu .n@i|`*X\pq 
&yۉǐ^j[xV]{#X=^V;9k\܍tLz䔏^䏜-kG./ZiJQbvI>O\bdT5kÇjφWhO~.V[V!$:d=qX~r5.J#pu&J4E'!9-ʺ ^eXhb^OCNBWeJ~t=&k5r}6P3Q `0V[.PЀt6ak /  F-&#*AlG3W@&0]!? ;c` k Ryh : ҍlBK=nMC|1IU_PvG-.0u;w: {@2iQ/ '*qZSٰ+9={+6 -uUEdxD}+ "Eo:$+u$1'OP¢A/Է?JC:!"_ ` צ6k ƭ;ǭ${b,~q<; rs٪ZE#Ǧ? ׃/x b[;;ܤ|2?\.YUőu״ϴɫطGQGjy")W||q;Q烤ZwVax FBa tx|J/2<7ϛAOְv%>z*cKݑ\Gzg NY+G=ffHzo_W뺳y1M3%u G}nh4sTl\oF&>W| o] #OO^X,I!t5]/$W]8swAkM`nf"wy >&ߵ9c&YLqо;)9!V_XB0E,̞JM0[,lsOxu ##輬S4ǻBRij02bp4kylݦFr7jMdBm@=E:qq)62=ez~ v3I|YU:’\GPTֿ[<нwx( =<sqNj!r99U'wW#e-2\iB.ӿ5@wJ0D\rtJKѦA8>څjUSݬO8 m 'zWxřu1{{$k:zcCEɐ*T[8ZFvӯI0wճ'/G%8=陬7ȮO7̍brcI>`h3'nE=yv do:_LϏGBhm aΤ\胡P.țv4K˞&rY^W>Ԗh!_942UI~TПo QA3|"eqmH|]# &-7C0Q9Y b}Ȇĭ#kG*uSU@ƍ!z&:SɚJ-\ְd=.AQY(P.]nwaӨc/۫[} j.L /nHMHPoR6r?i/`U!i׎k]/o@AWig ڿtz Noۋ5д#|A{bsE^|3k'*\/Y%=X{9)d:cx#yN$ S&u1A̎nƣ=u0BcdXJp\a>~, `x1 Jً:] JFFw>eǐ%gg rJ?Y~NxMrȸyV\FZg% a§Ao85=tVԕ+kORAx(<\TΦC%Q0C+Pme-E!A [ tH0kuSl:q~ٚ4m7\O!kS:2E3\Va㒼zXT50%EӆMT(r^t&  Lj'J*}[+sd_3CԝSp0[T=C=y4HlZF 2jЛ}\\<(N<Z%x?{> AdqK5^$^תOxU@.LiAl$ ͦmWe dyߩ.uZ" @P??(>v } >clzD3Ie>61ǤI LQf!'[G`ֺ.'fܵaEh:Bz?1-J|`䄁+caz9XX;}-B;a >,>|F%tJe@7)96; HkR&Ax8u:m\M\9mIU1Yh(`[Ddc7f)^=b-:@MښoSi3l GC]0ryj.YGƐn+H^-:lW(!m0X _,x|yM^kun yˌ/"WŸA`-ia< i/JGH1[CC#/&kR3F65lq;SA^Q372'Vk 擴faUd[Dj4wz [=/-{j?}}rTE/7tZ"{4ۉ+BGӑIJ%eB<ߑz`D3h'@Dz@-odIxcv3B~ ww^BF3#GU˥$dI:24όt߇>d-0y{|*J4{BcŗY=#X FaJkrRuz^^f??7yRSc3Qcו$$_S69TǸ@듞&]!SOIݗbIN2W EA\a0񘞜 /xԈ98AA#7\Bo8$,W^ ӳ H,_=i|_˅m?=aF6o@"/t^%ܕō[,q ^]lh~xZj7_\/BջXB{g:_:t{>>YΉAa-C)U<4(`vhU?*Ì26)Ej=ݷv@A]nF ,H,z- ӑ6<${}sdr1ؔAK^s^#Lx=CV>]g}vLgû rB g=Y3b2WL`b231bDTALT>cZ؇^IBx[0k%o&Z_3. |~YD& kרѧEe4|M7ɜbvx0Yd פ*g @5ݶͰSޝvI>4nx ھ琙dTk|Ozyn{ 9U=(@o)o.Ie3SҊ9Kݗ}`һIЙ.w`oH$ﲕt.]uV),c}rYՔx- l'>!gnr9& 8qJ6A^sq#7;'A#«'AF]@-dc7]Z戧&UeƊЯh㫥ъ}+UV<@ʬ󶙈}/w a]ad}~lUdJM?܎ALۆ7!)2-%lDzGOY#Iߓ'waHg~eA|BD%Q# כ&VԬ1Q`:"KҜ-RwB~ٵooa? +Mff#f5=SH _ Le 2V-%l|<@28HOQIЕ5gaj[B$RdcL-'t뉅>˦BĞGD!$٩mAҡZ]C(!Kkw`Qf]~#>֯#Eގ&HK&ԖJ'6'm&zzc]2T q3J.G--HߖbRw.*eMw`T08_2m !0jmq !Cǡcꦨq).?}E>.W.0+nր[\ ~@ 8Bz?{y5^"<:I&1ҷoEcFȠʓ ndTEtEWsBj;_ى]b{2?~*Oֱ.Vlh}27i9^="ǮPS.v%V ;%&nG q+ EVBȼ{vߗ]H5? 
i֗"xLIW!>}q9% 5*wW~BNeP<` 3z'{zVaEsQÚEi*>=J0 a.,v.<Ivdghk6W қ\FW[ rR8ubc;#ξ}ʏ ~OjCZ;9dҟvTw) h<  ?f{+[hػ'BൕZ)c(% Ucj-}&;I+\h%L80ĿFqny)$%ԗN}K|;WmhY623W" 14,gdi4|td=6y#w?mA*TP_̷J4:)~MG+H8Csy% o]ʷEZWR/,TﮄVB=o4G1? "HPsFsk '~|`FX{z~ڧfa~D@ItϪ[v5HK8O?G`uB9sf^-jl`FFx1VfR.MÎ#a/8`*>j<{`xxc9;AHf8,mYzqfnzczrO C<Ep,5|b)? /k }u|9Ux{VE^b?]'6PQD[ffކ B dܱ):#ضOSTK(A)ΟȩE]d\O:Y%U6"]qKzmX:[c: / ?w"r\ro8Úsu; #ZI[;?<2cI^ c gCW| y[m[xyr/F)8G,TZHG(yJ̯`Ge'XYn\۪fj1ȹ2ի01RM{o؃t#RH vfB3G!kأ_%K)X\_]EV,w?Ϳw kYq#3bHG%~. Ig}!{ sԙ!(u|P2BxEfomLlE[ߖ)oޠA++#RD\3듛p(ےL'"O7Paxٽ.h$Zj$?$eC]sK䟵Z7Jt HQJkoS-径3uA~qI=CK$<6폒llR)FmjM&Xgi4pEؘkH*2ܦXU,jt~~aumIkxjՠQʓhC|֩Kn h=_㣛l9Mſ/W. ~.L.ʧaAD*Y'Soxe % T=fc k{ wl}1~D\1V..h@'( T׸%at%.CųN9tUHȚZg'iL6 DvV2f9:32=/7@Q"m0*n&zy( *4?LL^fzƿiްYNu@U2;TS&{0LH0;ra!SϬ8֙ş8H2H%˾/oeswA.^hRiEe|hn~ttvvr eaR].k/ՄB]#,ۖgEaQl^idhvPt mw[X &^ se`4nI4>`i:ni7:Gx4CxIXZk["gE'.#=W2*?HxnE^;RȔ/wD?oU L \x."#͏G}{^Vt~'5Z#oiaX:f'$*ahE[F$xlnlU`QU0hU2t}J>S< s^@CK@U;°_ S?݆2VP)Ԇf<>.”ޱC7پ= =ļfEenC`esaھsr훟#iȜ{qx2{۪?ޟ Ӊ|H/7vV5dio&y#  :8joF3tCWយde|rF%[v4:qITab)wIH>U_a+l0w,HNE&v\9s"t>(  ︸@;v<0ulaq\DMg/ Z'D,ܹq>ޒ}ͩ SdOd*dtx->U}7hiaǠ1 y8eA RINש&fS%o 4(%r-N^/m>_>*"+QwrJ\!Ä麻^#׫ڑQ4\ڪy2DǯF%![_;`WV"l/i3SgWHם*_*(s+J_ZHvf;RT%SԐun`Y~pX9S:N;]tћ$d6B૥zH)ߪZc\hꅓ>_&x-huW&).0ط4/oH4BMSE-9d[ךLVsL"dKo&tB> v30~N'k꫰Ck9>\`2Y)I }ʆ9!?j$=I΁3R(@-2xmJWS'>&T2"A?=%4>yۜ1:>J˂d75|dzF:ml=iaEz!ǽ9l-ό;,d\]Ai̭d-ޅ?Ng8Xxs}_֘[0 `r=-3R1G߆7ޭ;PwPTBXUc̣ ~gΥ#iF6ҿC: {wrXEʁPHwG"N╱_),3Oǔ>@>?x~Jy$W!EtY02ބ?ׇjU$?vi$i.Œ$`*ٰK$oI/T? 0CEaWafbޝB\,9O<Z3 E[|~ʍfugN0 qty<]W'֨t-it<żS:-zml,F)V%RVHL>clKS'7[8wAG_04ȬP#rEdм{g6k}Kڛ [0'OY_)]/$~x+cMDnqB+ Yp;IZ.mg9ƛ&~D_IHnGZ?t'Y>ȼ`9jmKUz䖣Y9?6 4' oْXwT;)/a5E ]?O%c-V~!Mb i=^q-i?N 8serW:!Y,q\Oˤ!#fP(d`.:^^%!\u2i+$ބn{0焙e+ajԃ=~5 1~>' }Ao L|mGa{Hs;<而cMDSV? 
2:(u]Q#OdíKdo6>n )ѷ$JTphxN[TNلbExHRMw1wN>Ct:3zGG@&4:9Ҹ꜋4BKmadHI|M8'|BKHTslh Z{"&w}!uEQاPTHow aN'LjY+¸q!ZZ#/ݫ'Aj8 AgD_r ja.0eRv",4WX]rH9#Y ؼ cĊb4Ч݃2aofbRug_n[n|s.К`@RŶT^O:vA !HS#>_"ڬkq BE[~(?7hzo>]:8{+uG4Q-]:7X:*7L FQ|U0.m05d 5t=;*";a2dnfW!;Hq:N{~j'}E.=I,9]$ HU%(Mx-ZPa⢸j/u螀!u!|k4L{ECQ#֐ʆϝZSE@a,3s!3z<'j4F!]xcgOXt,oHA/+lyvt!|Zϓ 7oMiteD&C,VIC-Q;fUsHYCu$(r]3GlHrM'n0W^S@rؐ fJ:rXzP;6.l:$t SLc'rŏ<'=!d2ާত *( Rv c^H="6Dv]@֧v,o!EMC} rgԏFqpxT?/}t&4 )BvcW1wtL̙NyPC'~鵥ﶁgJ0f` KyNHU t,7m>[l!NG&!Q鱔r[;J'ª=Ix)OCfzVH SP!++!}̷HvP:t7?@=s^\'F7}Y]awtO$UBaUNv {hL>0+.]4iW N\a촵^ 5f^1d!Sg_a(50>]0C [\`&Bk V]=7 {i% mZ#'#.Ba0ɵA-7^]v)n1;~)g2@0+b# r1K!WYe^xY[.:3#EM\鬽Bu̬ 9_>ǹ@W>ITǝ 1X@üi:!YcȒ]Or./>`AigYRE7ϐӸo"XF<J8 ijD9ʤb7E\^Fh]ki"w-@>ylVM8呶gjK2/\0,b]#[Yդ:\ };It\GWd,?~h2vXFHӄwdq}EROng /+HyPAZ~Y Ln3"2= $?{!JEA0X|Gw6^%񆯖(<ig m~rU Pi.LFtQeb< 8ժu {h.l"= Ԅo)2_O %߫ͮ l$̻`P@%lܷ3;b 3N*o`47$#lcL7O=KQ]H]^=ҋwZJYaZن-Mb-<۠}SX3<~ JvH0]ycm'rVP a[&ts(09?'xܶ07(OjƲ]%PÔ5 um`-۵s|/[kUyXN-: NE*i ݣ|b5=\M_ftWGrtdrY&}kl^[6]FJn_w[Uf =`N; </~ˡܥ\xƔ%o$o# 9=3ƿj0#(}{$te*~E(%}VLeu@ǹ]@T?A3/U`Ny&R d^M.:]&Z4 [&xZ_@gcTQ^o"*]\Y#4r[|7i7hg tkcH9/9Y:ejkSseՉ+H5yӢ I wj`)݀q r7}Ew$vOC cݏ|b j1% ]LKS#y"3/2Ѕ?D,q#䑰(E>cRB(HzeALD]X!!gt}_g< 9LmKk(%,y4`ML\,+똇e0C\!Ssc&og*_}( K~НM|P;j̙/^z +?rhxҹ8c;ұVb^5G bU;o@CA4$X]0;KVX8-cߔɎ]zZHY|-"/774 l"l7<鬘Gi^}M$}yظV~ &ߒ bf$`O&ˊׯ&A{6׈N/?]3=Οrgc,P%E;dƮw-6@p۬ * gʔc5OۏxQa0:wA뎿A5kw##2Bb<&0v,i8iޤxp:=!:]1Ԥ ⸢xo[C9lVjykBj {614R뱐M)0-:wid6'd*UoΫEY?[S|V|3aQT6lHxarNXfAF+ޙfdhE=*W99g̮יyW{Ё73G"]UqUV+fzy*`vdju~ ߒ'ئx:,?/t[Y<`*2$W΍@gi!'gOIi/ yb-ȣ8hS:Þ9ܽo/ !ZN"d5[􍾁I_ޛ\?t4[Ct)0$?) 
zLNRy!/2`Zc[GXIi_Ԫ3I YMd~%??)/soA![##ۆXNVrDg[yd؟]G'S6m7V?,ީMIq vB̦Ķ.!9^Yrm@$~;շT,9Nqdu]%HaΒ{g(3yaԈQpH/g!p̳cze;V.z]jM{2f:TVV(m~g0rx ̔h-M`g#>H} z‡ ﯳ ,&Ƨ [.<3 SkEaQM"Xwۜ4/ (Ho.)}Lt:!H\^X.|%#-Ve!I0n3(5{2U9 _}0Gl!+jg>{(.RDG$93$WcVtX(v0">#v5HrSX\?H ,-X¸ɬ,KmRjXt AJ/|%c" (`D_Т)+X6#[-~FSdbz{Xk8s75YAZ3/D6l vU!Hџ^>2BW|[$r7A%~Ȉ ,LJkTPgir8 -gl1U,ZDU=n:zܿ>X?W q!O#5q oz*adǞxOh .1%`<4ךs}O^ŇiJ9HpޅN\%IH;=}d0ر9̸Y}DK/ "Y?HY{Zi' l`ۯḗNGA_C1d)&\&Cn[U0Fk4 9HCW"-;륮)֡80wTCBQ`$Ts{۷?=x$^F7:ٍf|V2ݺOJ卩w"iI.W}9yn7Lq=ݣUx/W&3Jz:3 ?lӵM탌P= wG+.i}# i/gĊ}fXdp8"̗5_{ 66.t tNCЎ*|oj.d:5Np.M/K rVNtG.ڽӌgmL/9I{lާ~ 5l0V{:{V`i`M?NXA _^ LDf&amj0u9 MtMl"x 'wsie4\/y@SsU>Q֘ȧ7}]qi*7k56;.|3ѰRIu ݜhX:2.s> t{E{ڑ6JN(eU\"gc‰9sa7ط!@WXY;rg3 )J@uSVԻ)8R1MCDAdBs^YJN#G%r$?x=qy?٘GeCRC1pj؋ 55sσD>6tDߗR}E:̌)uA?y6H25ˆ10e] Vh;-Vn׏C82ygf]z}!ÞсH?qNL_ x9,;rOsA%Ȱ}c[t0Ҝu5x{W2t1"+6"2n1t;șڰn)`FFk0^ ю 9WDM>p W1BW,Gs ӛIбV^(LmQaѐNYgzj~JT7)Uy;/G g#xaKS@arz~0a ,t_WޟHCo bASi2 nS3uJ۳f֢~3Ӳp'fNq6|y jSVT@ק}g_4X4wJ(;}G9SMzv|Cip]9y[?,0:a9o!m:˜1O. Dw}#NGQ)XVjY:4M3~÷>a!+Sv}t%S더DAQd;p`{[d'c_sH~^l<ٹM фc;T˕кI 4q?"46ṳk5kCt6.z@I?kiR6n,Jd0«cgsV,0WlkJ_#ۜ aY'3aɇ!SQPQq rޖBϫ3 aA@5Ju9؝`ݵ;H[z5r 2-Lx-#xv0Gqd|˄3P9ˏȰ-Q|&i%g:|Ut40{9- w9nfCٱցiXxwIfNXeU`˝Ggd4nެ{WHrCڎ3lPs" 4=##ȭ!x(KcGT}z28*^ʅsǶn@W}q<(%}0}yY}Yi)g2M{E+ /BӟGSc aڛqҪ><Fxԅ%^8[@( R=ROa~C#܅~'kaӷDXQt}C{R!%؃dnuww[M@/w)5w}Jy`hG X8א q(Aqri*|Vݴ0z-}CD2R!OO@p64XNߛ|Nrn`sc`_&_$67zKhGg$mYŒ*4wpX*':3ex;\^d3tδ9rݚ4 R6%;&t.PZ~ Leot1n 4H%5Ŵ+kr]@[Z4b5{NeCb5_Nj;ɱR9Hy+V}Y5 N";k!ঊvX$Wn[!Ge)Ȭy1Y.@W_c_4oK_], [`D`0,XڟBkeրc32k[S1oK C|ΌfMVVy kEQ&0;&nnD^Tz!;L`c3Qo3 >$wmBOޚora:2J}q]*b~:%" HJ^ AxҼ šf=]qqL޷sށu=~#wቃ#7 ȜPb236#r?w =k k{3%o0Qh~KiCڮjc:tHjLԳ" dѪ7}QO&2V yO@O?nO(1l ġuq؜&O)W߶4\y:?~*_q ֵ才 N1'rc0WƂȣMFZ#3R5![R=GS-ّj+*2LdBg[Hop:骭hOwpCLG24$}TIG(g~U85Ck>{!yUj> )D^?f/^gOA7_ћ0D^FlsG`V8ǰ7_]8l|ӍӰ7'DGlaɡc_DA_!9K(IK(ʬ,(_!otߨ 9~:|$BGV.2 }j;#a[?!*!l` Ȟ~ÌSu葡.Җ[1&xm-\tY bx`*K"< ᄜ22]wI(iOV:W#yVd[0!gfeȸ>qdYS 9o4#wϋOݑ(P'-o!|FeN@nZgD5Ռ%ral6|y:џsa0h[B֫Omv=h%a /woS EmkPTk؜ #Ny/S-a9hNE3^C׻Z217uKC!/]c,F XFad=!].[.^ǎ!L 7I>"^.^Ģ%ǥCr&ЗR)j !;z#YԾcfC=32Í6&+- 
tӱ&jَ.O.вs.YPɇ<3{ i@Ex{YƯ(4g\F2|9&H4Cxjwmyf`\=:9z/"ɻ\wߑRDv Nt_?~w$d,up]= #ۂjP9{TJg0q< ~Uub, %/)CU̥ґCdAv!W,6G,3^*Id&)_/p IlmF9c7闞`sg;!7S;>迻Td ,5EI0)d{ߠ62zڼEzWϟ!fM-RHe`$H?>sfɼFGd1,IBgz4/:p D:{ڰJ\#o^H6jV;J9*ȼ# (i/h튁]Fc'F5|nW3A5U_J"z\>B{i{#rx ~ߓ Êzļ *V 1V5pɞl!/}Ղ*) OyP`hikg(ˆGvP|Q;@O˫p[Vx`nB&| x"T0hi.~$ϣ`~lSX$Y'Ej3B/OHz[~;uDj0_^ {t=~ FDx,|~tQf.?@Iò\Ѱn# &$ +~TeX,z1IG^;!i9m.6&{vD@Iv/>,V/#dI~k87jVnANٓ%0fr% 5L%ߛQoUꂲs=g .^4 GOg`0/qI$?Q5 9υSq J yϩuEvrUPXx bXeETۇQ,c0ܿ%\s +gxn ߘ5y Srn0،lR;^p!cHU?ظr0P5 rllOaS.G&|%FjO)os+2xk%!^0>ާi,NkCa {/?e,*S*C~!퇴M%Ҵ vܾVշJqlO;ByKDֆyF7 @Ҟ;God =W 48]LrE{w84N5Z`g>Av)^}O+l!\ |ieHmܼ(R{ o^2H#pNm{ZZ淭w#FِR_{g|W(2*}"ﻰϟ* I_E6~$=-gĉ)i(nEm{Ր{i2/݃,mޒESسc$%hR${P,>jojQś31Z6CK:As%iv{z3ï,HT9| l25ݯ+5!3b &rBU a׊xa'~=6Ǐ| %b0`xx"L 9__ۿAt2nKL oV>cOXXd ҖmD_2ZCF(]Hwu_ayVYGM.KC@W߽8j\b=a`(=ǡ'DW_?`FjVLo]YuNE0l}KeMйۃs#Qh4䫉|.GHP`9ooƲnx_B]4 A+-=m]fdS.*Y~=)G1U6YFRr"<6E/n{{qd')R7!y0$mx5j)ǀzB8t%jC`ǣΥc P9j].7H淭 2j$hM"Ku'ȝjTyV~/ ]AWȓzb+JT#u*2{!M-#CQcJ*ҹ_-GMyX~*~%f򮱾}#V7CաE*^)RʯsڻUlDp0u}ƽv)MO}CZ!rM^WS| \˽Ͼ{`of:jwg`lJ+m#Z|?o_i<ҝo0/t1 "6g$y:}Ӄ s ' a5%X[EVnyvLW(BT X"ammbu5{߯m0I0_h, yu*~xC\q2fT/!kE@J%C]g~wK7h] QشF]Hr_ļpHpmeW_faQIUI5I H~*{i|tv)o"M} oo] t!5b~ͱ3릭,:JK0|: Ҳy5+/r٧a+%+BՍYi<vOX'o)g Uhm? 
SŮ4)Ƕjaޭtߦ[0uR_]}zz] ~"EM+!z8zg8?.XXOeӢP{~;˷6JtTpM+ҧ!cY;4^T=hO.hzIv*̼=Q@/:`%Ψ0L 6~_Z"͊쏐R {ùC@7 Ym,k]#~M!g;$'m^mBOs6]4Z;ܘ|Gt7@߲< R8B#-'`EY/u?0eg*Akz@cPH$LӲ8x1ALVz o]PE??}2|Y0y$Oc䇞 g]EBbAn{0?3XobHq爴} tHDh;+!3K2 #m7id"׹S>_;30.!L6:fq4 V10=$t o9ÈuR~cb3ރA\M0oy%tQ/BtNMu/8/UaiIh PY5rO0 g?(q FԔE7H~ ]i\E093H|M#`-J'/i9吾K:7-uV,?`52;xio)ih4lMj8~quDBBBS)J̈$HB)#3[%{]{s{?[~>: o ELW%am|=l-&NE;aϓ uunCw[G0'2(1kD_̛=fuA%VO{Yr w_MJP43qALk|Q@RGl:I=D9pXB~?U"g]Nf'gBL9(A$i6V{]K;hNHυv&v0oMJ\6K"\wM7H:ŁH5 |S\^qa,"{+ͮ !DVkB Q,v8>gD41̴EDz\V6씽{qWl$ ;q@֢uF+[l9[MbCz\J7nacaM-U B)Rg&-uV-nߙlxR L c0FXxqۆH1mϯs~CMO\O@3{FG/)b0Uyc<ؿA V.`Zo' {+#犄΄q?K.qC"1's=#_Dk2%$z)?IhE$\nda{]?adz`}h9%v L:`~W`>6hl lۺ]9 ВprJ%t-utӷ%q@U_$$9P\҃)q4g`ǹ1XY s瑙DnrQ;M p[ ]'H~oīL`n E\.uyE ~q&Ep^!"f[LJ=^CmO~XQW ol7P6_6N$ga&oQX?8{JeM&ڍ%#o%o`30>)ʣU 0be2Z)M^5CUljy9)\;jeB=Ǡ8'k7ȸ.IYv!+-G59%Wوm*b~#¥WDdݒR#I_zo53mDVOD?V^/kc "j(?zM1[t6 d>gE"Nj6Ⱦ8]5 Ey&^"6?e"БIl-2%WUbQf1BL$MsCqbNݼ%0)Zr!\ qZl2t 4BÂkp-؆VzˆHIȾ>A ~z%X`]+k- W]ְ*Ț|uoCˮ{,^5ꄵUsր#0S=ŀ3,^Q= 0H^5DHxo5ݶsfi*q}nfNlPdkQ XyI,J 候U%0.,Qg 2wV!Po^)[Sp!^EE[ ʭ_q웅Drq#%>@X;^آk8y |f?y|>O~M5a1{.6^w~Tuz*X ?{& tb%6mlfo60kpUA 6ӿI1~w3Z$^C#FDNzdFѳ9mF3BabJ1s4ܙ~_MY=RDA{udve<^_{K_7dq@է.n|țC΂OccD%_bE_F) B1)J5A,?V xAjj;nw  ALE=<C9N2K/+&'qQ7 9gwϟ{yNe$F!8m0'H=18U"5$5ω.84O+t$(FHG?M(3k*[`B*w/<~HmLkD\])3tX 'a:u-(*-ÔX;6t?ՄC~=SQkl3Fr!clp8eVGbD`ᷟ{9]3yhrTVe܁/g{Fߗ,:A\G+'zcSWvu!cr|@]ѡAH%ŘMFcX"b '΃;'fv aVıi%dD3LD6?$$(Fϗ Mn>s7y2QL v# z[Dz1#{qR]6obvfUsB32@5}&E)iО H~ _.eFyhg[S50xbu0]آцp S^3<3`)@L|h&4ՈAG f1%E7i02ppZ]IM\5K,VkV>;-~&$LtZ>Tݏ>zzuw/kh{6 ?+hf0@QTjPY;I!n i`rD$4ҷ퐴2BS1#2yLKDie-Yoh֞潏ܜZaeP"byثᅮJ- e(Ċ(x~aGl6aO"Q=G'_=kD*sBz()=8 KN]yrݔtuxHrZuW"$6$]hkU!6اtёYVRc3;(q1Ŝ*3˓Y c3{0[ݚհ,uf.E1ru0`j*Xv|hU rG>3 aqsƻBAWލV|<=a}i!q+)սoc6T1 &\Օ {ca$2JXnzpk a_yBInŘ\P(('~^ևp~*!j`>n&?{#73c@1b먰XqM}Xem w:6O,1"8hyD{ *le>cG'w`;>dp488iV_zœዉ\t~ ,$zП(/c%O ,N2)r\gޡ\KQi\C%Kމ߉Y"w%Uno;z â lG ˅_o#@ԉ=D:vJ#lMDV#hj,"N+/[kA*D՜yV (1 mgE }  RL@{"9)Y@٩Sui05zT 7}{Ln>] *K5ĝ;w ^^|[0ۢ6?u#@[n tUt9XWcWQUwd90e{{[괓Ki-KCK2UytOD"6i}QmyKG!,4C/:̽ o-f/{0RF,ׇw|:Bo3?|/[ 
4V#n?^B/ul>!&M0E6 uq7tBQYadq; ?.|ytv]ʅŔ/[<qkNm7N#q̬wIp5-iL=Epy=6CGyչo!;Ua_bRt1=씺=X{ޡ at]IqbcdvEbBnۣ09${M=4~g]߸Ck"?lKHb?f 5?fDAvn&lB%Mat$uQ89%)W&5!慃 +5ɖЯ"0r6U ݂(/ cvdK{2i'\v'> Յ}'OzS1"+\$\@DN%VM.G"LM2Dd_2 8Uq/v淿"3IOJW"sL½vxn"? wi_˦awQ>:rdҺQĚQɺ?nOȓp!,#">DEy:!(R4D S`o~vbj>t- o(&KEa"\iIvymN2H&%5]VC"Q/fib`7a1uԋ⦵:zcsakU%heق?шd6+(`܋&D1Lu;qQ+bd߼ĈE2mKEQ6{ @d&- fzR+K5_'C]Q{Nr/=`Iy^v1z6"CJq-u{q??GW "έch&}$]HKS0Io}e>h oٮ 最7 q0ѭJy,ZioV_Кo;H^ZC4,VjH%YS&8,~ 7͹s @H; W%^tF̸Y(rg0Mk* ieO񑣻/z>pؘYfB M0-\kOWN:0}d}Q\%|S_*R;Ĉ|rރhT |FC"7=P.CTk%Bܗ[u(mZp:#Z+K`e$[bJfվ\:u^4hﯦS~DFO#2Ӣ8,RF/%/f *a| W׵> y;u_\/Q,"bW0vgf"6F{u(#Tyn"Jl $pxX_$"]f>$2Fk5DvBp$⦙$ [ҥ԰hcsァ\Hx.zgmϏ\ܡ7D ^XȰM徦NpXc1YይcJ Ike|Tf/*#ˈG0eq)R Y$P=G*0wvs6aO\|?&燰WBmG[*$˰)z,t-=I%XH2#) C Q)6"/ƶ;ei)za//oM]/EK ɒUΘ`y U{?"**1c;n!Ve`["6_T4h%ETb6{՚%~trDf$0Bqֈ߉%վ,X%uN"6\!}ś:b~k'by1vHKhO}viQ<=t 1viGk¿;`7(kw1Eo Մ44"(x;e⁽ib×Hvag9I?h6:!,_zmD%)(G@4"Šd mz +_va]^WH{hDecHcHDUQ>uqQ0Q1HV{nnʍ`պ~=q~ GֺCP;~Y(i9xH]RR #n=+*ڰӜ{LT1<7@fIYog\hY;f#) `KưcBKP hOCނ~ݴAɊǥ>bAɏEoT0Ӯ#:l_bl佘lz1Wy?AɱU"roHfCV=16#$F`zy5ϐuӄYJvQذ/9&(Yo/n$ S BٿM*i;z0sPiX9gsŒ㶷lT-QԖ`j,áwn\ IGEU@u(=LBօļN>:-YgDȴGV{=ZFqs "=P ockDldaRTOj.xeO X*ϗaQL3Rtz:Ei\aBD;X1/%`^c(޼ x*r`b`A ^I&UV#|0=1tWP"2(F6ҀsL6OŌś).J":(΍W(DܳGז/)!=?+c,|j$bY&lAL{I+"lqUDGz,9D QQVGX)z$kaT Jfg] vN +~lb sir5kD5)Bcq./O"XwY]zPD|I^BG~/ "6 _sț3G.HiDy>RVdz cF6úUD:Lm7wBExף( ^.vҘQ9afmŪ ؉$Im{uj֙oq-#LND~,3`l\8 %~ F-;U5ҋG 0=GqhFC4`ځӭu0F,SE@3<$9{1a(I ~;}[Yfy舎:t^E? w^08܂y]:2s$]`Z 2hy^+@_[I;Nzy3uL<^tAΊ P^} 1J!,(oތ zCp\xUڏٟ\iW!!ڞ; 0d֨,סvYn #FfcODѠ7ZCQ oƈ6Ybkѝ[AL:wcˉ&)JR)*M 18<}y-v=D{kvīadůkU"8800V+ofZװ^ً4<"{Mw-c뻌 6jwob@lwɿhBGx o6s" wl?굈p윙YSze?KWFGRހ`L½P1/d[=׎`d lDRiL$;%Es91뱦 Xo $G?ᐅ)֪lFq9Rs­\{Dv)ZXHʖ?m1+Ȩ[g>~YKuimw'6M #4V%DFJC)ctw y6((+X^;i)fG1,M S}t\hp^Z9ChuC$VI'zc޻u~u^0dksF?,`cT0.61,bA5 SγIO9D6<OAKGOw#/g? mm"tI_:b|OD=<jJvq E%9VznPg;'LJLgK'?z`k@$ܿNwt6jʜ]aZlWC=;]d Z\aM۞Kk*t(~w@Mk?]^W]~< T%ܡGt<[4%mp9x&]SZLUaNFW4lS?U O@*!( e"S"wfԃ9p5سG5i.~0Qa=99 ^O=WJ+|߄wKGfaʹ%)b| +Tc>!WS}i,^IlT:VqI-? 
J;r`UKV.rL!xsov, \(ˡ3QZle2g1o$Dx&ѼfD X'@[[:b j@@jcnWU\m\,7l:7΅s"'cG0PDJ!^e:RkK~-F<3VMqeDA;/d47@񼑽r14V/D7Hj99A 9roa헾NX̝:nN{= `ԩ{OXX`YQAtݢa/*s*"i5_)ATӚrWbO#9rIĔ+k!et1W|̯#Ɉy(>"_l5ekÊ(S֊"ҍɒq sk k\p ~_06ub˸+?sz`)(% V @w|!Ѵ%pV4HO=b;˥=H*ny2fc(j7ib;mU'tYE1< ^ea\NЧY; ϓߙ?N 6fi0.l"h V H$4vU1>R;{`A #\*Usx ;qw+vgI;w5nUrQxؑQ~nRԃ0 ":,-+&V(.v٥!Zt&bhQꏘD KyAjUIw[1Ƕ4yp d*^:a!$nNOC^E ?;x#b鋙DG@z"?bmFev˘ĸPhOZԨAn"j 0ة[]tOFa&:8-Y}Is5tN_b!r^uw&;,L1:}@&{j<\CX9h5?6.)h!l+)aj\a7fͥdsݰТ~4.JXzSlYCKt`̛A(U[֥ΰ9CER䧣m5!iL>hCWE)K~ds1UUa)@%+_71tKK܂&aj3NcvD`EAC7r]_Nsܲ,8` UL֏p q4’O4:܉xH޶)vPpgkCXXl¯z!33KVE_^.+" ĝ_;aJ&rVD`%9ߗTY0P=2/|:;v2lzuU]ݍaGo%̅xӥ9Cj i[#1'z>aY^)~JVL'ѓks\xS?ހ)񾞯ER{-XIP 'r#G@E?(X¦lPvn48Kꈤ' 5@4:MoWnj"}wr/ڒ^D8g|= ͪkN6z4-dJsMɽ]q:jU9aaaZ-okO$~Á$r!"mϽ(ky~͕k{06gmfy¯r=޺&!v^8G8S9㮑\2y/@s9t Z xjod%Xcjw=#a4"7g9`ăaXzfdžC`GKcI}GG G0mcXK=ݲhRk^DX'C"ǮkjmDGm;1~yzVP1vZ b* ĺ.~F6b4Aj~3hs7yQ`If%!Rl凡B\3Q kSu^/PEw `~Y+UZop.TIܩD]!'w"#~GE.n2bH㝸>PEJL#¦̗FL?C3 Ĭdt/X3bXX77WE4C3 E3_sZt=tX ,Eo9߇ {wtmxa@MέPD, 7Ç?ta%G&잹VK4fid\E#)EItiuD]j0 ?[ >ϯf탶Fd[ЮrC杒[?fr3?-+32`qXv&M OBH`j05 ba8k|86qȧŤ+uv*G9m }m&案vLJ"W(Fv^/5Z?m { iBa޾hɒ ~AYЋDĤ/;4!kvh`2Z, *7ܡ9‘p20F_M2LGv!ݧ2HCzm"X~8L0)'K`g_l~KE"}B}.ߕH|ӅEBQ&="_WW#\MuWBL{,"fwE\;iWANG)f<>""NBP ǿ@.9F, gߎe}h3ةw5ϖCRLW!߫ [ևhPz 1ntWF7r ۠%}.,Re 1 /F0n6?^DS;OC, Z"~n#gSB ,#rBu/9pG4\] XP?7ώ ) Z=oo{ NLчrXO70G0vPiV*ob`UXo%"*ZIC+/ُCi*58rZ(6oO]SˊʭozJ9*bNܡTI70Ly}l A-0 TY ؉dD)^GݥP~0@ ,׫ l{WJ$a, 򆍢t@u: ?v`4\/liE{~CDS ga,0 Vy5?X~yhs|"JzsǦm$Ml)-o$5@隂no3@zלwi= B ~:]S {sQkĴp$ dk .M"6*eYrkWa1 fo![WMɦk"S+?H.`8!S.l<]f(*~6%~؁"ֿ,ELa:2⍜ע4RX.`[.AU*bM8.9%Bo?hB40J7 >vA$F/nVTjM %W 51w`zo): 3Ei;><Փ]|Bz/Xح;cӟ%ɥX# /2"aOK/,1? :)>u<aMG_=-26CUN܁bhn%X9홚E]]vczƢ&8'EO^VUZ ǿ9~.hhW8R=aL4, 6%=7HߊA#ߤq',#f /&aF'&1's<#TY%=x8N#ы8H6r e~P|(;Za?a46/.d9H.5RqeCG3abЖ4oB҅SNEoS34>Xhv@3N;]DO^x'y95&z!o(;U`dRgURc(/}c )jhJ2OeDt9^tUu2; J WO#Rf>Kh̙jĔ6A1go$P &:?| ڝDL|t |pz=>YX&Za SDTo#M#ŀ9J j*A$޾AVٺb\p? E_3"6Os!Vm,3ޕYJX 3"{a?8>fN^`QPLdM\̅ gxa ZM2n{Ne.[*Jr96Bdvae"nm9,8>:#}a{Xm`$/N Wkl Ϣ:DwQbsX[xޓ|쀉oe!bN;<^},<`]x*{?WELקCK# -?  
aq$97m}:LDfŠV`='+W_pIFb& AYH q aZb߃?8(EOŎnsV_X'zû}D#_o@$Ufl֔//D>L=|(gD; nг[C 4!B~w;eFby8 66Qӂ^9!wu_67}37`=s'KMx+rMcc+`1c%>2* VsnG|9m[0q6172^>ؒL.oxY`.h+4İCӨRC5 m )l %Фs,jIƯxXhɽýZOi<|N; X- }{j%|7Pİ:(PB?%o ~[.l UE2&60y2"ϲNkC?w4iOτoa O]Y͂k@,Q=0=Wk,lb&hO@oLif}1}оvv#F;PւBN&1¯*d9xaLJA"laK`\ky.tltJC?=bX t DCg<)ֹρp~,ETĖ V2-2e^{b.ES -eސlcDoPȴ؅NVA')(&US̰v9̶N0dwX5SPuAToKz(Ŭ:G1,0-r7EQbhq#'DtkRZoVaUOX> Q/[ru<hM/aѶ.\Y1IESBљL"wvh &v Ӎ KXH+}8}r7`ok`~]r9njPYZ.q;4Luqt0b5 ~jh՛dHF"?,!rw=Wh艹HcD8ڙvgÃD>bIO"fzIJeԽ9g dDT)^A{C; ;~z&3/ܡP>g!u >xOmW!5gV QȌ!q׼$|_8dCy2b}Llc)bzV {zZVx~Ey9xDF&35gKܻ ,y>k2%L(1T0A!'v]`*eΌqöT`ӷ ̫a|S3/:30D'jR hQO`v:@lRdlGLDس!仜p44M~2/ZBs'9蝅̀w/npNDaJ+S"DXeJ byn^"mZXMw`FO,<&ѾbDͼPz| #r\f2NDvAZ (>h2{҃qiy>x4԰-T &o- ptҩ`[50bͷ!+bO+ xs 1/19 ܲ7bvq>fX]ܪox#?_TʉwӚĸ@^h òqDs٥naXwRVag3N(3|Q`;I:x j>1D}-\M9bSE,#umrO':B6ԘuD -ܡEb+Ds39sDnw x2O>1sjQFB_~9`{RVc~V jw:*J_^$ ?_6Qöȫ aon(`89U Kj:eyzç7\DՉT-7"T+qS`zʒ u2D>XyiwBfr]MFP%k jw0Pu<QK kM=0)eQ2C *ǎUvjmKax>qC]{nUhB[٫۵JI*L{]"^H+G:ָܽCWROi5Qʻq"-Mp\ >FEҗS3eN% a$WFVk'pxnrnreLݡS$ *tt傦7`"ٸ<*lv޲8hǜIp[+ӠalcUnT=eǥ*X5<NI;Wl/h]10N=|+5Sڰ|٩; a';c"щ#_thOİAy{1/bl}%D@kG}LSA]t$3b2>cB6Ծ j 7a8†Q}+RaCl#P4lA}3. t} _d '}-a "9kI&{n"$cQQϞ1oM!V ~[I8b* t &twވWVȆ!žΩ܈`ު8ֽҗoioTP9}nf8ΖcڽJO=e 5fc0񀑀5Xآ1Ͼ-&Ny/XmjBMnEԗfIhH cbca]IV){qe=h ՞ rX߅86nj4@Vh6i4"i r#/aTkY$&Bv"6ӛ_A1~如`PHUUK`VEVOMIryHC^:?jyb:tO 8(!\4ºcnb SwDDQ~b1ĵ#&mzkWLQOɣueoyLL#z=+ 1+N nqB!jyYa<]*nׇ 3زOdˁ={mx`D`[}PC(uyHnk_QXRv;Yt B6nj/vdv_aַ-A0>6擙|@ y&aJ|~|+$B#vꌡі,rVki?W Z2tۋh22zn&ploΪPG)Xzlkߞ FwTMDOv^6ڀv*r|KX1Ne1*Ck;7#)ºEA0 oK@YiF1h<#hqv'.F-1\֫mаM XǧnρeU/'4AeT4;:_-CV?nGN9-+?l0sPyԠ%1CCFSB6?:[LF>!qO]9嗈LGLWպ!&v:AD a/Ў N*lwMw/Khy=bC祑zwXpiSA-78֧wC4rQ#57vs$R*b= $|e w1@mfȮ|1՜ gt%h fID}R7a_3S'3ְqO'aW4OAwQJ bVԠ%le^ RLwdOfhSW?E8esyk.q0'u#)"|~b1d (AdgDvC/v8?yb0fpwANK XFi%-ϋA%Y#2LVBЉ /p7gƶsAWe5;Q+Gu1h |&mبޘ D;0D QVd?"h^# X6E;1/ɒX/|bU0OTH#1 v{/4V 2KJBf'2Rb]mX?? 
f YȂsup8Q//؂ɋga<۪@x|,ϟ^`ͅpi I.ka [aZ Pg8 \&sts7҅bDǢjȯ 9w$ Q5W!{ғ+NqG,b|&bqT 6,,!\O^*uE]Cdg gS -DZڼ܋*tv} .0\ٰ 9o%aicX%i cyOE%s% ' gV;hqvYfoxps舽F;nKY74X錦fqCRV̫ %܄ջ0ƤC75燋PhAƔ[>0F$tVk;6~[QDŽBwhu?DaRg59mh\m t0={椒;)`nL8/} ղ3Fran%j&.g uK]Lu{'3W`˪H09{:.]/z7fDؕg`s{FWa,%dC+#Z hSh 5V5}TynIY)^8J4XkdY"J;keA6as+>:IYOR*AW4%Ղ2isBCZԧ$cD}:Y_S5aǝ)`3˼Ox,fFB E 9"n#كH^N# ;,6*:HA<&'EN[Mqb>*], ]b4b/h+#6,}R+0_}17эӈ#g$~rނ4&Zx1mEDF8tOA{Υ4W:aF 2R:bi"ǥe<M{İEKnlr@J9΃1+:1@-6#?̧#i6{߳%!̝hBǛh`vfY=WN}Yۺ!:}Hv.0oKDa"~ f|/Š K1L]|kG㘼aɊ1 T,A[F lwX!l!s3t9 ƭ2%΁>?c]b*0$zXJڹmCUOfs4$S<.]"85ȕ%7ijz>U ,s]kz`Ӿܛ=%X|F.ۙ!f5#F3h&S`C__l7yKˡ?$=5L5$b0ǯ;,"=@["4?V䈅2]ΙPI*^/;%X?EV|} ]cקxCz y#zSBdGr@Ak$ZyQ Be< 1$}jU`0'"1CG߉!=i8*DvQ.{v,l9CAgYFԡPழU4w5A)l m~'lQeCLQ{ Qz}1x?,pUC]ARd?`Y"(̲Yτw¿mۯPo~䛠T=J'YI{ȩlF!o4kv#!F,qx(&3u/ׇ-7sO=N0Uy(<]Hb)lupxq-7I [Vy0wz1- ѳBڪf%`v59 .Eèky̦I^&ζ.Dx+ ۮ^[5(JaUPTwa{)ċ(z8! !%fnd>ѭ$*\<[ε.RưĄA$ۏ܉jASJw뼂`Sͷ h%4 ke۔?'XݡߋH?N]7|]:l[k|l"mFY4r;,I] CIBg boP!!+* Yd,bt^E1tC& #woN!'L+bЖhz 13j*+Bbt kr)?G9lRM:}AAw}c"Y&țYnB?߮F m1Ո&Ru.%(d+Q\1fډ=PEJ(+ DT % Qy k=l]"g)%ڊyѫ䊊R[wM["ta\[wb Q_ݘ{ &ݓOC_XvĬO4if"."bq7bx4r\Ddtco)T0_; \O㠋>_@̱CD{B8`UIX4/P,G=yi-,AWF-® T͞DWLt<چ0X Jm@KuoDq=+(Et免#q0+g׍=od^-ԯI@`0VsƧ2^!t,K)E>. O!0eZ4pz7/bB?Pt m"/Pd+E4,ť X}m'ca]z }%fI#wG5Dd1Gyǿ%>R;53$zV:_}w*)4"4%ѿV1-"rNwDTӵmOS~#pO8셨< ؟@[bitxXH=YXme qDh3Ol&r9i}taoV52XM;~bx@^< \X|#{`FS z ͙UN}0E+F71\Ow#ۖdN竰ga3|>;s6,$Y [ksN=N~q|WCcdCn>4 UGCNSC@XyOBҴЗ! 
4|Ev09:cHrXE La|9^R~Gς0%ɣ݊0rsN*b`x;X "814XQ%r}CMqjш=+2n4w VA%a9af!&3zl LMv%{ ©DyS0/s&;PzW$ia$WZ>O]yuVğޅH~8vj{o@4/q}<ˇYQD<aV#I[qCoػ[h/a۟`ٳalzXgTGH5c |C}bmh`r F)p/u~}֩1`"ę\\l*0y I_G 갞~+le'}̸VEu®Gg鉙68 ggVɿ WײN`V $N3t<̅jܡ̫jAmR,\`y!|+8N^JԆR0#a %)8(5aAzݲ~ 0wiBZm|>w&X5`n8v !E19Eq~/kD[8vTJIFԯ\G͋gcCNMb f7pC侜Y{QȟHg~p75 E{ vl;ܡ#d/t{"~oU`ԍ%~XCT o,}_{IT曟s<ɾ& 10F5 W/_*}њ7{MK< m D>\w*2mq;XU=?OZײSv0/{,1?J` zo( %jBb 'kAM!0}/O6l2ʀ0'< A D^ #\pI¼iփMٶCN# Dˢ.6_Rt8l5a(gۗ-p ۏx;ߺKV0 G{tWt?<, 3>mJBh/IK"JBE$i$!KJIMMM^>Rsu}LVհq.J 񺊶_Gl=g ުnq'Z]?u&4?Y(ϼe<.?VH*uT%2|7Y}u1R+]-spu2rO<ҨsVE8xܯ/9#']'kdw͋UBY eUdv &L`1QvM0|zKKOsg'kT(4`?Kr;I ],/ZG5g{`Bϰ#J֏ɋ˒n}f) y]~B#ۧ7Fa0;DdEE'co<*L3dKPUgHu^XɁѧ>ta}w w$]9r2ȇl8æe?a~.}L+ftnW< 쿨hyL$ z/&B,n_SJbơЧ{!bӆ<7I)~w4Њt*eP?Sry`w.zuM$h(pO5T f}H*俈, HPjoK2tݏC3Źe(ޯ0h(,\#C/ ~sZ$??$B+WX1[="WJľc7]d]ﰷd2$r+y#$X}FGfÏrY̫j#$rX;>bT=W <ꖀBϗm⺡(Br O 刅!ˮt ]H*7| q坔J?^T)b] }̞ϵ^q:)r[D0-̅.m"zOe6[Rt|aAYV68ʝ?g{ve i;~+XܯH 3X$_ n.Ļ]-@3~>f3t$?\)r</#68As# շǼڠ6BObV? ]w> 㳓T>}ϳ bq44*YzQ`z|å:.ϚM])P裛 Ԗl S@X:3vAtvyw[L ןE.bǑZ:4o%M͚iYIەKxz<Bґ."GiO~w 3]CŐm%Zr8pi) z7X ydegjϒgAU&t(hc6:JB t Q}4M_? 
4)*LsӏRfzy_^SM@=]y:ۅ}#JjH; yn sY yi0P0D%x2EY~7V_x6{~I J: )?k2h]N+\*Bҡ)`CȺu(:c>E)UH9;H8iFk=,@ܗDpu?Nl6nÊ:oŲ^Xv:6r;OxZq}T(}x Շڔä* DxkivCo9#;#Ҭ`0y ,9_w<}, ɥjs|,\}Y5%#{{&1W\cT+"8q-h+rj 2a(VfuS,ӆHE+wHй.*.X]B͂LF7V~ePDCb5bC1YaE26ЉlQ0Y)|W9 }qY 3vBICHͺm7s?Wǥ3&^a 98$o]B6{A̐O+)'"bAds/@FFmSsSY~mC߼ _cbU ")̚("`OHDh3\ʥ՜Dn}ͭq-/2^ kkTMH%.[Ƹ@&ubiӤ`Kz&9XoS {s(ڻkߔ-V&8$q:"y,JZV\X~hL X` 5X.S= AUA%C"?AjëqNxM_Nqϫq]y$044ѵ~Co=K/=cw[^xjfLU^yH@OoTKa3$lX%BaCYdE&hhЧ=_f\t !SBIEx\,_h?T !3u"G> UgK$A4l{ y ]v60؏]k%IȜ5?5 ?ܐ)hBvMwtM+d]Iqu3!I.0GRig˖l )F=W ővaҥmhc$ ԍU$- 3]'Ff`g5_*d~ir0)ZM{EB~R[,(/eB_'~u!ߗb#r=p|d؊cڋݻÝhPt "'d ra̝HPuv0|yЀ(LR^Y[U`+lRH*xcq"2s$,5)%,Y)~"g=7e䝖:TK#ȗU[ y+LXd3@߻AlT#ulLW R:̆Ғo%(+=JɆ7Or嫪&?3҆9Aw:%wuz׿nہP:U./1O Mu@}n=->@LMœ$d3 [nziTos^4]4X,\U/ҀFECY|jHىҰ;A"ApAb`f{h#.rp)ZWnY-^afKuYXr7S+dKdTXyV3١*s0t;eBUjXrȄg Q3ZwP|)o-&6RQ~;a?N9=Sݡ^>@[( I\Q6T=lX߼-ApN&O!SJIvCC6 (Ķ = ˶q4Ńl([~}+_^IpAf4#oIgoCs%ڷ=!M ƱR/@@%2|a5JݐucR~dKD>rުZw>LkhhC%w(u{, oM[Jeq0#)Fɝ{Éy_87rh]D6)oSCH@Xb ?5a]N=)Wҿ\u5&-âVnh }~"G ҡinM If=;EhZDN;,=O3-FOk0||]J Э3-v %Cen"TbOχgT#NGXgv?41.C܂%: rQoU@THtB?iO?̹SPcNNaO%qkXm,7ȂWR%0kxQ nVؤ7߭ @)mgы{>D`zZן!Ռ5IsV/0+` SIwfRRmSQ4z„nƓ8H~ Hm \Gv"̜mz|BC8\jc'A/GO?deEԹCĠ] >@"K TsNuZ,`ӉvBQ#B2ǵS`b1v%f_(ОD?]d@Eeuswh+toXyro(g]jVM}Ցw6r93RW̉lf4=r^9.f߸7}&xfvLIg*ndC.<-QdWLndPPw:F i”w/|1tY8?Jߛ.N;}R9¶cZ2=o VC)lDe؎ϐG R/яD/Tiw~5.9lx@E]"yNYs(AVt( {PH`>j}? 
/en@e(VjjM_[؛?R^Fo퀄tqz7X _v!WՊ'r^h=̃#![#B2oS^`X(uړ" UP*n2Q c1f U8@{+ꞯlcF.0#)Ibhe5L/sӡS{v~;(X́?;9T2Ciך0Z?nmM/7wag%GoRErN:2WSE:%'^貴ɐAQi~ *GJ 3b~ T5-?tw^{Pq&%;_WKǗoEz×`2C h1糊!^w BS?BT/$(q$BSb, SX/V;dFӡے+Vӭвp\=}Pp{\Xw; U8k%ͧҰwK =IքKgtYAv~1p?wh}*4ڸ ;쇾ϰq foCuW'T2-,C ׽^aR "#9q+~B~މǪN(xterx0Ն#:ݫ~PPnl9s|g!\R?d?/#aʼHr0Vx%W(M7ElѠ#,Zf\6аRL3^PV6GUMCaΐe뤥5T}Ly 6a0ݞt&8 a4I3 b> 4..Pa)!x9:Nq>>t#S]s']/ ZBcWxq%$߳M +J}[XݖKZGW aNh ij;j.6e'ާ zㆭ߂*-[[܁&z2`qj% Ko?~Uj'-/J-;xzD/0u1L^Cy0zq,]5HШ}t}?xe+fmf @ 9X2]h6 sG}dߤN=n({lZ~*H\G2FhM8>J=,rk?s+KFv?oT,Az߭|`S6*߷Y;Aې w|FHClSD`kSdOk AOtd7zz(YS+N/y IcAF "#)-j!BS|@ e/Lm|eKi'Raܣzk?]'Y>t]=_%{}snD,R $sP"r*2=lCżԁޭE <6^B27a:c9$m:l⻪#7ZFmox5\Pu7``3k7< K .RĿvojLFҪp, =(@5 ? IPΫ}n%4Gԇw#7 L2>IMyF~6 HU\+Nh1Ћo a*3{,1o@ٔg{ҖFDPC /BC7Hsxy6K;#IBȜ=$>`2% +}!wY(`Z"-Ϛ8?VkzRKYT{q ~5i&Aao`ssU2٨Q0u]cym^=ܵ ou/FP| 2!Afl1"m A? ldLgjΎ{%D6O_>g6LxPAG\1[W+eINK XϞo| `IJ&q;4ͱgo Ҁ!릖1w?dfX u' Io\:$sJ`]TFk̟ض`*%m hV}cULʽpWdYþlj0/rh2jWNKS?XMT`W(vJ-~ KdhoՋ7 ~r` iME2]IcQ)* 2 [\Rm'rŐ7ȝU76pUL"g GR L}¬6dm Yev"Ӻƽ_}Olhmϧ-O[IhFWZE5j* 1k8 DxI—y}M;?Q!#@OmbqP=V R/}g^(Sߥ:ĥAR͒3!$X*q/>7|&!9CSY/o+؆ y+-"ϙUŬ ;|SU؁643J:–Ñ] ˋ!te]j.wŖOC/GM;|ɖߑ] 7 Ա>;.,%g^F7ݹ6_\dUv_ۉ$pKCO!۳tv2PRESft3!ڐf<1W֑ňC0A>ez2u_SʟĞBϷEN@4ĝ7_2iމ2^X#;*Խ}-z~Hvڲ})_MAΆ65 Ȧrߩ| FxxWIiY 4C%j~FT뜁yaAxGXH7nMb Or<;=~ IQHPzp&-%=۷?һ~~aD#}twnb}H3yf?naufߎr$=ۄ̥ l_e4C7uN#G?Ix5g<#/~bM׽ uĒ}R,)K%|7M"oi^ʵ~y̫L qj_Y[d e9]50?DRn _YY{[|Y4}?z [U8_w%0_ی`NQGۓ3ktoC~"*kOi;9P EN]*YG0+u%tx)\EBZP0p$4 >:( O|R*3oj+U8qlw:L 0hgLj]|UC^˅'S'6lzuG Hf1`Qt&'L!t󇲉psa(Q[Vvs= N|2=!ېm7WFxBbl?E+%AU[v0, ̼]fư,7"O* p#>H4Z"_flUz-ѯ-zPвPxOfoqeloUnD~'WGd݌<N9"׷p9#Ĝs' R2:4"J&$!;sB⫘ـSHQZu;/62 }a0)AOB޻673Z>>s@a7V&&M~= |6D8Å>sE,,{: c+?S˹an]髂0}=:(XT]+; ȧ<s0Y V0<+!Z ì.7Shد#Uf锄1ÔGzYL( %̈gC8qh6԰ѳ VCG۶8Έ|`" ZkomR~wx=WBa2Cc}d8<^1k}u2bâY(*UeזHV2L_#)X@[^qpA4 xw{8a;&H[mSJjs(^BӘ^...pa=G:a;a sw^Liy-{6WI"]p6)"篙M+rwx؎[$x|1?-sd4zȺt|xBcfv4Dx%J4zlN\UY(js:n =SO{s`Js ߍ0X(~j]:}6_ŭ0 z,w^ZC ґYD Y~ۙL#4rYjStB6wq# Ǫ#BH2R[5rF0HQL]iaΒ3<.:A$*F˞gl+TX E&U0 0C9oMW PRnj?"_I3<y~_Q~ B ZYz;3DS^.Cܩ. 
Eړ=nr+JMV G`:kؑuZnJ'OH?_x]W ;QI^9_w7HhJ;uźA%]#~{]{\q${,saaۂhQ4l,ըrƓC&7z5ay`^yRy%LV sE jX>B+zIpr"ufӠm]$^3 ݮ.&d\ކD}I}ԨVN5LZNBR^|f)՞H @Jsu'g-䤝#_BqؐwQ>TRǁn Ҋ rzؼC+_w [7)-6ߥBiHխ3~0 94 }״aoմEaG-{` gWhHsy߱4H~# Jɮ^LZӯ03s~(h:ƧTiSʿhʪ|ƕR cea#r3 ]Y[`^9lVD!7=řkc(XvW\A ;z؏Bq)(P6v?s{V;>f6r?3B&9^n{9|6/FΣs̟BF?l؍l;!E\gQz) ,\aUH}Eȥ蝸<5#eHs=L׷[+B!jQ\/CaCaufc(}L &zx/j<MoU=Zdvjs6^mk96ܺgwND9Xb=UXM.lao}:i? ZW9% AE jhorNYR8 =icnX69T"}RK.W ȶ[C!o}JBkPb&K 8l>΀+;5FmI0)^-Y%}E`p;,5VN W/2&קdՁưL獯Bٱs Rox:Z ]Rd6i4rΊfa%A€[*K٣-q>jQI(eK>Lwη7leOiHuZ{Q S=^9@p|!Ǖ@(h_ YlBHsLJ _FX}!{" F5_zBWx#G0Mg:62`rt0n6 j ܺؿ}uA$^'EKr=dY?W Jjr{ܿT!銾Vgdzbp<V7x3yO߈bᖐe93:cI:m&Xۆ5 #F"ۗᠿuq'[!R|8ꦬCWy:oQb|K+.'Ud1u9F >h*8"Imt/X9ͻq%0\.xs n= {x !B&)P(e [vB>mC 5-;ۅava1XNx1E6\.9q [BC$ ,C=]]eT!4ϝTY3`Ew._.8{[*[%0,Zbb>ݢ(5;h{8FG$R' ewT@]4"?}t߉T7r] Wɽs ޻}:K?E/elս?g0:vfW΍aHDB!ǎf {kR2ʓ^8qwo_f6˪Ұm˥`>yP)<20ܑ4&Pz:V 2)[j|L ZEAL+_&gaH$rNF'מ@זG%l ho#JBb׏D={I?)IWKsZBʂ0켏 lfuKa7^ qXȟ@TXX;dMJ!dE]ݾSDr>Ow%;ZyǖRG/=/n ݯ_8I@VZ[ SWl' ݮAN9szȓREy/ d{TB{)wQx+(tQBAWFj䱧yweבn$7DuJE:#SGezP\=KO_ks9tcGӀgeb!f;CxRғq K@Wh(q3yڤx4<_A!5lYEv6CĆǻErX,ݡ^iM0Asೈ 0gy( K^?Vo_՗ҁ17n=ߖžE%_u{$la~7pzT|s.ݛ+=³T&QժPl^U%ޛɎ.E6^ An>3@ƶNn%yE%y#Ă~3?0-o_]- XsUGُ3'p+oʸL{̩0dIT/#z^eW,ݎ>ӊ= yM7~?s7,:-CؘF'p=g6]ӲThwh%̆|u#y◁L% ):$e(/L{?;"˟_հ7b 4;Zt H]XzSu VZgs( ̗f 79tīQ tHXn5pBk"COk[ \lm/<[Uu+)oi-rJE# /57wMܑ,ah z XʱQKXPC*ѫ;lgT)CۙfH4SJ>X_esiYQF(QUN%8&ԅ@]M4lyS攟n+x3v2`Z|>Dl ,ZH6Ab-MĞΩIIV$avY$҇s[6 UA*[jG|dhxC֏4G^0d@\U3"Y+yVmz "S ę! 
9|7E=\aL1to|r+wĺGy1UȚ~qu/xݪq;KTH8 * =B_BVUĨЬ cB6=%!AS|仼N,1ɯT)ZEKzYX0`/~þ~fwOMl%8v Jނcfow*4vN<ʂA933{z'̐3yVAvV㿡׌߾R( `B9DL`HN77?F7mHp%Y4t)ӹ5>!kJm/k>l…P]}Tsi^:Ѻ:fO`~sMHͻxX3 PSw9пz@ "z)Ҡ[5,[(L&>EEud.l> ?8'8PM7'AO],yg8eD\jwm 5kܷe?Al9%zIct}=t ],/ sϕ\ ĖX[ާ[)c W܉#s`o({Baɔt>)QXۻgC,N6c8MVY#NmegQ-2j#π^BiՌ,N{:7k6'-Ü# Ԥ% ܡiغUY0x,] rGɚ{H~ȺDyOqsBvi6.&IYlHR d{~(P>8 aB&3='íH:Y׸?퓭5̭q߈P/0|ne£PCo2 ~z FM\( 8>vtuPǺ|W__Gȟqц^,N|:KA64Fddy\9""}# "/Qp>ט /X+[2DXey< eO˱#f[?r }SF?%#<`:dU'q!9ȇ%D)#WLȇd*zfzaolܷ˫$_"sϐaܙ/f=QVމZ>غ)]F&Z9 h@7P kSqs*<ѪoZ ojǃG/z HX<V/׷raH:k+|ɍ^ K;aT=/&oy|:4 W mkS0[۱^yPsZA?ߨCOHfrna7T?lZiwݕ{"$R]IEhb~6Z 5ϔ#00ɴC3"/z+ )EU=Vmŭl7}nFOiRvn8 AO~i!3O?G!V ?}HՁamHŧ7ѠDk.R~~'&]ƫHAX(*K"|^dnU>Qba(ю@c8m.{Ixnޖ [Y?ØůR: [,EeKd >/ wYv2%QT *M@_T)yLss=dnnH8t$%Y>~X! = jӿq ȳr7oa\(ʞW|W"8ؘ#/Ұ"A*<2k"3 Y@7+ّdQyզZ9~>3p?CsKq dLӭZ׺ф/yKƙe;aH`AğeM2W~zՂ4! i~teY¸~ B۩IweClLݡlڍ :%vIΟD=NVq 7ON~,ߑxk_ rQG}R\/9#n]NJ߈ak,RyI//J2[v|ɋEaO!e C_uC'IC(zlwy;72%^WG`HFq?Ld\s7nUְDayk|s;:z-z;gv` {3ښ lcfѼ%䌆~:='%62` Iv-{W?D+dxh?H@BT{Sc4iB͝O3v;gcp&6w|ozQg-iEГXvT8|Z1rIIg*3i0313}jЎ;\˫k']%`GKHIzGXͶ@fuI#)?hw=y,54 -,\s}y+}[- ɃyP籇\2Д"?XR1m?Tsx9j^)#OC2Į?0FTs *:v-<;_IhvzFacOhl֓{P7YY9=bB6qSQp0rVDN8Z^v\8zd%1VqB>1޾\X\E=;bPR$ rOvBA&(t5x ( Iw,A紟]G^ɞJzVLMWC#|NyUg ̰ZAI$JR~NF},>> 7/[A#2 #.œ~.0}E]zR8w,qB'e4Cd㻅_!9҆:IR'w!ѪFR .hbŷ$b߷~#+8<6^0<,\gb"iJ:^d::ϽAV% fd|{ds=r-02л{,s{s=W UaL[~ohFl"Ys&|DnƯ鹠 L[YNaQ9$~tw%Ka1L zn,m6cֶCo!=` ̈٬X 祷K12cr <}eQw{!x5 »XFb"4Rz|t(ݜWmrr w#;Ԫ((I@yOQ05%oX, Lk>?8nոÿzM$/%>0H>}f殶}a\300B t*٬\ [ Uˉw . 
bz50Z̨&x`>XpxO怕p:OHvrǢ',QB}Hi3IQAJg[ǭyd+NSBzGU#)0w$Є^B|{P/Wބ?@>uaPkD,Pq;n9swI ߕAdž)*|fR1`,j|h 6̀=nx%OY`.4wIItIrƊ<ĵ_' ߐe聆*=VCj T=*}L߰A9rE,lX]=9Yb= irTԿmd`Vt*I뤶@_+3:4ZE 30VANKNYkEvGx}dnF@#qI1 |G&hˋ&h' 1)^ݡY%7 j~#Cw&_YbO RLs`mQԱf_$݃wg4aFnS.0|+rhj=#D_cTwwݡ f>CRlׅ) a~s [Md2c}~A#p=دFaS[X{b}*ZΐM?UA̒,*^Ά7C@L(H{|C{(_lXY.[àϛHck;^ gP9|ZDg._ߋwPt>rz<%6u*ʭ n+}DΤ :z-xk+?Y9n̓j_#)Ȳ//B >A3\jɽDMC^!݉H؆CŢ@H๛(:lG(~Cm UdG$mN7vC?FUƒs8w`y8i)M/AIC澋/O(j$r.Ir_ڶA>dڽ!R,o2-[n+)B*0+eka,kmm73 } Sυl<< +WA 38o6 RJy'?\8~OUߓ 0m%xmd1?s|'=sIsb ndjdS[&vF) ;c dHoV +`A5z#9m~IpӞ.W&BSyM]D`yZ,%VT2BM^P,?^x;L3 /܁-ⷠWDO/挮F-m0t|vXS8p+ ԏﱄ&2.*3wsJF u"b7ҡeӮ{rPPzwiX!RHF-'v.^`R`3OQsʵX{6f=R~ZP_{Kٵ|]&%. L93?~r3cR<i[j`$[P%8kGƕ1n~yj-aނ 9wK 8$ y<$e( _%éU{) 4kG<*^ ?;K0Yi/Iy$dYͻ>)L_"{"fdOyWsq$;|5տbx{æsٙn.Slό6tۥ13u  T9UJxE]R;ٴ%MOs=-q`zD r^|`-QkΧ"k-eJu]Ԏ\@eT6{','aj{LbGZtI9S)eZE, ЦP(V-_05+yh^.0|4- @F [&xa_ǭfm⭝*(F3c`~uam\}[E= /|@/[HuCHA~{/!UI%a"r}|}SR|lZO"2*$[X1Hc0vЧUz~7*co\ܝ /WCSs圛$?bKMZ$!%^$y &e sUz5*Tr]xdlnsMOTFA*~hzA>G(^~CWȿ. TEH_X?(n⡮c~@ƐQuv*54&xC_uP 4s$1? 0A[ 3Ho67AO\]-v?d5IRK6uj*bɤSE' w]b|A,:WDvT oa2;zU`AQ=Rrn1"?~ׄ{;Z$YQEspJ }]8f. 
3BoGjtY9r(8.![6!d1y& VKCݤgN2~!۲O j"g ށ|_uJ*n#T<^J@Ap}փB EclBA^ֵB9~Rߌ2r;򲾍 Nj+=3aAC+11>d1 >=zo_lX1/SYs?YAP{­E#g~8J)_IQDwݝXFrLdfn$^=wljdm2a3DvurXEJSyufA$ik6z:Sz0hV0^=Sr8*Oat;Ф# I5fo M0{0ㅗڱ3 {<7S-t hw)* ٖ#ȝIҟ&D)}QP2wZ Q =}]"h-jߛγ f7>NxV(E\WQUG.0smPͻtc7MJEqWO>pS0q\0?g*S>^:ZBٵ%8_ԇ;-lغ ji2,/o=#=o fE,Kk듅5C֌0\̐t)F]@G}K0O} ـTR<9٫rNxu;VSM W&Th{({vuhAۢc2rV Er\:K#{"UQ@>Jѯ75FB vL6<8uƹu"Yed툶 &wLe$rgFAwʒ|I3 |T VG?CNS uGv/qCl9T[FHsh^wƸVռ52U#bM3;W)2yQ.da+֊,~K/Z5~/̿xP] 3v,yQAixq4 ,|=i41:zN*C15,ߊ^BW;3jg:D+]Ir]a"O>MẁtfJ)Gak6H3"Ͻ|A沚"veS0kvd}?dJw%䈻j5r .:wD>ŀӓL`Va u7\$!G%Edg `LmKfr31hp˶H$ ̪0'scIZxjs8  !( :+̺]SS2ɇ?m @yYy*x'Ax͚3}$&/?؟wݡnNIm |R~uBi%ݡK;Ti M_InyOm*Mq]8$v]_"̛GQgGa@QLb+ |ޗ< :{a [6%'±~iŒAܷ50jVȰ6M~,+HyIO~;q|yBs6=2dv6TX0*8 A<yմ B7w/O";:s~XIWvz hFaʶo"/s:BK#rgS]Ȉ6%NXUqРSdVT ~irZXEM˔: KgBD?5!d$/,C-GD"ĭ[6"?q ko#sډHNocqH[yJ5_{!4?M?fT1'i2Fv"{ˑV!wŎ3fSMȁʵA?~Ybn+aIPa_O}N> Vy@n˜x2%G< ?_2,a0>"8U9nФ:hJB-WY !/Ώ-t?Q= qFS}D'mPZ&Uh|I6dE9AxyB6ÂkAm8 8xa1} s)F:Ƨ  5? jC&> L+TM`\ }(nH5#0ۛc!.R'IF:;2]wzjtI_5Arw^d9ghwB# GOslJCz}HԣHmBbv =,|,(( #OiEYG{o3ȷ$՞TowA'I ?m&az yKՕAgT&PФuPQKy,IF{P}TK)./T*w︢3r*aqr|L*iʽ >Kr7AŔ/Su:\GRrw9d _WqN{8anN,myo8ʯhh%ݰC2zU=daW2*=䇬6[nEd~e>GK4C9a g5Ƚ- 6$|/iw4LlJ}3P@d;?B;#WTg;NJߓi.~Y[I¼bvGaLozg.cp_ .>\a&F..Vr"6w tǝUtr.,@5I QGF$P?cm6"A9^aط FrC;S`AFc+dbsfMԽDiWSK#Xf)28Fީkj[rAk{X.gv{gk$$,Eҽ=^@e| ^u_fvp97q<Ԣ#>`ۏRGLμ )CNjRk`܇lnF0H~P˃LBȞ>r3HLjXn˝ú}BMUr@TW&_4DW;֮U0p7y8\Y&aaCbxC@=ܐvO + mcήwTđ\7~*d)(~fLG|#)5Xp<7,E_{Ar:R~FWO >qlA VwZs#PG:טF۫-u]lgϕRLV&0r/cQGvTY L =ݏoN:9r1;UdlTUvygޏT^桅-y1R8Y̮"}:צXVm^Isk ˿鱩0nj.Ur8𲕨 ٍb1Bi)^?w;z =yywy<'OޞF9ɔ0u=L_XPT['3C f~8$$Iª[^}O?{fDշ^2gwIzd!҇LR/DܛAk^{٠_u+ggo%ī\0ʪ-0̝oF,)^os'C70H]o|1=+SQ0N7084'~:&ߨR9/Am4JJBJ~(IMswM1R$ \Z'grW-L &|y:sŋ:K+yLaVnV(϶̆j.ȏj ?H푿72 mXء̎_Ax]/iތ_ݼ960LכsUA;y䲗 tO7|_;}6L>lrc"4t[- ȁS19P4P|꿉q睺ut ݡ{NطO!-On -M`V!%xhs׾ u%W|Kɖ@`ţ_màPk+%8ܬb[WݡfҵThl{U& _vUV*9zOe>.w:RDM9 RC]Gh`UC"St` o>,nCRѱf4!}f[ n <o |ˁ5ݡVt$Ʊ";:3[̊*q6igWV-9s&WÊm,|&r"{*:/r9;oC7ﳪ Sv%~|_GL_LpJaY(HkG̀ |j(X0$9^^:RnZ {Z0)\~Dv"#3^AXh 7?lkOa*p?U6-?'AWc>=ǡ|ߐc͏fu m{!%rCO Z 5 <'f3XQ+.{9Od#9 
+\{bd# ل$cZі*yɼXk6\`ɤqk0eecU?IzΝ6u)'sՎqPݡNǒ;dY7؅O'yoK >M[ٺ$ab'J:G-Ґ88i!W7ſi޶꨿&I=EM@b,㑋}F1r]GAY<Ő;t&u|\ףr7_qT YYNn#tlp|Eb5z6nƊ;W`Я{hֶdnBU&0~AW:ZzzŜ϶r3z /2~yb"*_md7v5D9w4Gho5O@LR4dDՅ.w*E\-kg# c2$liVͬ"Cu6Gs۽ 3bO;v rϤKwɼ|( vGParkU{ ~@Ͱ }x:vWFˇ`}B?)+&'];?GQ1p]dcj$0vp/"߁21ADhH'YSV  *wM`$7IK=+;O犌9/!5Ut@4{#KHDQ"lipC|T? ;6l9[my6)!QG;-7c]/2g$EHO~йzW m;z߂|DVr\rN兝]OA>l;R(`7[N[Z_9qڟ^1`ʫ_`دڧ=E=f-+^Z/]X-635)t]Md^BTm&##IYmWDӵw2ݔC o\Zo$kh7՗M"7՞ ,a,-AI`+y?"o*Ǖpf7'L`F晀 8*>""!1^_""S'Ê[N Ìf>*Ȇ O8:HddY7`&atkf·E`O] X=4G r0I}M2Do҃~2˜ܐ4`!w*k~sĞ-/ n @I>U፩ X:q66otSw s dqtELeQ\7Aw _t#G%GĖ`a'bs4hBb0 ^dnJA4nQ;vYί ;3Zs2kwsF#; )9t+bVSiD8ƭ}z##Mav3p8XJ |wFN)?7"o]"氠V JzzJ/En# :$ԠX$a̳Nލ;h`~C(c_2(F0~#+( |oK!Q4.Ό*'zBf~;TУin.DD)zx:l0 w5K~h[D"gF|NE9*X:|&c|a֋/`ӵWaѫlu Dڛu1}MYY;31g|#;!$@?Z}xCQ3us9EӠ~dЗu&Iw wj@Emo/aSj( F"|VׇaO]jl-'ˬ{|7AA4ӗ8~kt37<+'ȏ=bfa8!M/5tAi`]h_CaiWEnNV _ɐeh}Q=.YPUj{11柳C铂,MT#CҪ3`N(k6l OUo*z"葐?9*آaYCEVn n!,lmiF\ZGWꩂ4^4~*t]*EW>EoeIVJ@ij_ڑ Fd3SriFRJWTbs:U2k_ΛoP>|=mAIC4_+Օ=ްF<%FՄT½ W:Er8s˕dOSr G5-M`H)O[TzW"NR'c~ULWmZA #ߒfag9"tDƉS&@o.2MD*i\5ElLG cx\Lԏnir.b]3mfpwż>B?Oj3 .W װ۴?[Shh X.dذ8)l! 
G3{{#T@I!MI윐R=x+/!舴DT 5/rNz`& ϙr} iGX dؐel8p|-j񿞈Vb bV A 2f}9C1(FlWo, " sƈ兄q>^Lх?dݹ;&dp  [m1 3n5,y BOJ<,Kt!9ˆ_:8cy*P$Kϣ,ak3 Q=fPi&V+<(jGZ.KG)4Q| 6X qٍ2VSd@;NOr^bB!iW;Ҍ˃cg5G=1w :z#ŗJz‡{gaSSz$,X8!r_%\jo ymTt2@>5lh\ .HFs-`Wd%mFYcO_nx߂AG Xoo z#A"~\⻡ :]I&;).y Q|u@Lez`sj0bں!V.Glc ~MfnDgFޛp[vt8$ؒ~oJ}R`ط1/\DY?QQE<ԇzgGLw>KI [ybkI;/.:86/n!Nybah7Č&y18^R({W[tCzdl]]3ĚttԸ1PsD2w."PewWX:)% j8uQD:hy0$YWEr~ 9~eKa!{]$ .^~@RΆ@6"þ⣚gB,A\r,ơBm`<*y;,3 WU%U/{BRtX eܰCATs*=jfQ:,vhs>ߛB=20TqBJUB|񈽻 򉂝mw{o!OI~`C@m-+2:>) QeofPi-#f: ߢ oқs%q/xYo>hS)DZ'Gߧ`oé@$~9۟y_wv׃՛;0 |f<|.kF ` QK@ԻjoO0O~憩Fx4N[ng QM5v=Kw1hP28a(+hU!M"~QG쁈2Z~%kS1eE"@)kqII "{0 E^I攦9bǍ,g;6FJ#|pB'x9ol1-t<2-| ʷ+-cJ(ow݈,⟜ɰټ$e4&@UE,Ϧ6s;|Vl(LoT!9=ID5X`]M2g3qiӰƼY5(\oœaNJ;&9*ii($>,(|\9z\Q iY*se;|g6T{?.Ϧl{1dSgasvrD |Q;vQfN8AE!:ki nRb'8F eT_{lD[1X3]73utTՖ`>$24w|iU)v=&PkK|ߦkؼ5d9y`|>O 7Sa5^WLcH[Xo2m|`C̋.X&y$ ԁ@%-C*?ej9Y k"ٜ 6r>ӌڿMp'3J+詮^Cl^"Fiw]Ҍ0!42DYz إD(E`4,ߙX^}ʄ w7އYXaÏ-hb8qYʗ Jq+#qK +iJnu _Iծʰ/\!G~v- I_L߁U:4qУ `I_]3wa|lȯVBO M)?Sܼa@^ Fک#_rUn}Ђ嗹#`ǢO.;YX$WNêj8 iR3Qyow]ct(gix'n5x:xZHK()G t>sԟ@z@; TUęGn)! 
~e[ZNwcm|y+V˸ |_%6JeSk'"&#"P]oK6;B!ej.nOK}wn蘖tU}ys a/syF҇_eb$?*D;Ek`7{%,xW#S|ktA83b~u2}RFTxSQUŠL]XY;7 E!Ҹ;QvFnƏ~P@2b8UED#NeJii~D(/XM]GL\ںbP{,L8z//b0~N7'l!x3YVRN619§?kK cM}xLO;m`e~{A%j`J{ 2 O-{؆Ӣg򏃷g`uG+Z>"àd(X$*cFEE Qo Nh[]%qpy'b )d A,:wuk$"`U pݯcjdq"yQ<B't,|~sR7ṙ8BGcG[p3U߱STisUQxFyj vĒ/xI3?L;0GntOwAHrM2~dS;ARҾ-o(;5-Dtt/GIsfҳ4'&ԫu˯QOW Gc4M=%D@zByRE0ȩ\oļSw/gj<궈r#Den ?|# 6Կ#H~[|E–::||7M9c0E.t+zU‹'stakxEJ9c=9h|CݲչJ #(sRP:JACؙiy!@1a粒|ŝe զ,IEe ?r]CDVY fϴe#.Wj(s%'QL) 3K~˨BM1r{-aC1hN +؜7*[0ȜEo tmfz~Gצt^\x"Isؼb$LDN+UFRوW7 ILKsQT#\/>yPs*ER?s:.`cW ׉@6WIJL0Qn uշfc\0楜E{"=\RQ \C>!{?]&UD;b*(Q> ]ï(4y8재Fly:UTu 7,T1.:S"諺GhA6U1a+԰~d4^i -8oXmz6ּ.ް2w'悼+hZpq0B/pCF^m.vxH9Qo.S[uOpBB&<Ҙ<Dž FUZEFA  <"P [$&wFa9,U?p!)WSP$h|y9?o&AQB-|D)ο4ƍ=BɜAmg)ЖLt f\.MDӟZd 9awfqx|PrAw/A\3' nbbE~_)"JnmLD/'OQ@tzW :K!Vzg(=-< 17g~o7D #fW7w> wEdkh/x 1!z"~|(Dϥ~p}+z?G`7 0}.=T /f@1?EPwNphN-ԏ\3- 5.FWk`?ʼnnQX{]fGTJ)vg,u{|D`qB sP)V@-!-*}Sx #mԚļSߚXM?B&.Y%R9*>vxrIP#xU:JyX?_A-Zj\BBQ]& _JӀ ޅ2b5 v~tObU% 3_Ry3Zm)F%)[U&(RIob>"fUBH, WJ~{CK>w.Rк;G#Q=zä S}ʻ&CqZ R{Dj:FQ{O UHf҂B%CvMj(xr_L /)`jxehv?16J+(;`>)I36Rc## ]DcGhx"L^ENN?[ݯ?bc§9qv9iNqYX07/L8ZUEPAeKf3R;}4`2$S :/G(g#7A~܇@ųzekg jI2h7i* Ђ6R!Wo(v0Δ![d`Tz~̺6T5PfϮzo8z|PdC6Y$;P!ӳg,/a?c†$-4t"#V!DY0n͇U%av2J܅5 `Ygnpr]qO)x}\ס,Z$- 9x< `Y0LT.;VXW|t.tPʺkKPku{ =Wo[aN!kV?a|It{.XƁOJ9W/q e5^vO+TQ1QJCvԮEQ DeߋSRQb1խIX(˭M`ُeX_!:_/Μ3̿C‡H0'N`4kSkSE`Id=7N 6ܡY޶V9xC:ir:sHm\V$u/t1c%8*27کѼ,HuwyztaY`|(llz]iCx9m8JcSҪ aʕY`=q]V-E]2A'T PuIhO;rjgr7L?EsaJXP>w=EuOWhAt0ykCWXݞ1ᨆ N/1;g/ɧR7Y UqKsE,1;㊃GAS7,®Ÿ|BM4r/ruOK+ >Zv-;Q |h 0D zۭjqaŋŖYOQ"ӓ~KCTC/82|åkW"x - }I _{R;+o NW"`=;8(8h9꒴[$puӈE!yb;:"8?l;U8ү~ѷ wm!֞׬q)v1X햇":IJoV01}ǮJ4l{1+Ds ?gD#1;ئoQW:+KY܂0uVZ >?H':7J$9e7ʣ~fh5 x])'!aU2C1-(R!z\*Ƣs!*={"s=.a=_{+;Y{Nxű G ?ʸz(f%/|EΟ Y2xmظTd(rȘ$%fÐC+jrpOCGxK2/k:k TtJWE`A ɯucao"P=gRv]Qf11@cog'g{7", DEj}t Q45"\Ӱ8BPw6`!uK鰁XǞV\SȦ6?of4M;>3ƙ9OUSӘF2ȿi=7<`n\6 D8WN+PY$XwkVuJbw(g/"وB i:8T{&z/\(r{|%|@=S\tUM]K'^߇`XQ[F,Mzt-'LNgD=v? 
>dw;J+ "ʩ~_ !1]'N|!bce:`97 Jټ O˸ϹؼQ3oKbd&M@vTohAT7"2\=󡜾_DK!VloB5\&D#Peb|VPR{b4qpxD\@/} 1=d;7h BKQ2T5 yb3~,s0e ˸/a$»w%ݰ[X A̱i_ gL!b2e8'ѓS#VBuHjXrReѳL9AMD-ѩ#vD)D%;he~ĎU5zk-f-'N Vϗu# {ZVlZJz#DdbjQBVQlE!Yi:L I_KZ@(. x鞛HqHr]hj80 ?+,wF0fG$K>X`=\l>c 4F|JfX)-(f@"cbos.!7MxqAN4pJ/c.תKg|Ϳv@9YJϨ`5dymK;6U/>i\&!3.yX/>ti~ Wu%YKF:Ĥ})]DJd bܻ}BV f3.+c9$(MsX^M U8h񾓦1ĨWEPMz^Ads.D ՗탽ʵcy7B?Mݻ:*xuf'v|r?arf#v @ԣ"ȧPw4H0em" %#>21|\N9 91Wzߘ6&Yv#f5xiW{A,x~Ք"zӤNÈ*]te&S ,"`Hfevus8e OiC]I.""t~|Nia|x k`LCP>{AbcMtqw g|7ʼ8W8ttFs0zoHPX+hӉ)AÖUY~SNE r9)F85}Q{yQwf7!gc&oO#|^)(]p![[C`f<_,,pyN 3ԟUwm~tF35fX>0,bDE.TiJ3NO$P{닆'?y`314t ު"*%Mο8X<{e6d.2a}T\"=vBσs~lXĹNXgSD޻#%[ 3CgGhŵDP[ / o4dbY@,nM~ȇ5~?ʰ/rb(f~?g'jw %8:'_%U6 TvbƊ![Q2g<`GtW)qns%t8ʙPV:0.:VI7z`-(w6یT`*rRdL C)ٯ+6{G!QrmAysojI"bPң8((.O Qft>|Dʞ;(Zpm.Gd|w`)s:|L(Ѥ<`#!|<8ˁ2?feb,:c"DܼE4bI,G,WbW!YCa9*{7#kkGՒjY&ɡ ˆ:Ǿ x^,R'j}F{خ;ŏiq-b241ҵ h' P=l|QȉZ}GƁu#X{vgl@^C J^ǺE@E04TJ֊`EK 3k6E'&^ sI%4pFM59ş1S1z+L 1퍽rfC,f8#^:D*AZR6{2bHo[@_^~#8 EY0SڏD(}*yxUg9,7TW4|דe$RI;zN˽f6S1Xxп1p(d%3lMAxٰP:H#IGaQ>Xn}Re rA?1ceCMyBBQ&om CX~Ojj#@s=Ak^IAY8'>]?+2o8 պ(F`4kZӋA IuQ*&Im8Xcu+?OLŜGD3=F˥,V 3-v2 X][4:,(Ôo`m:6}F] T~aHZʼn0D&A?pJ*ߘW.:/kهψKdv(ye'џ(mh\Zo>@ɪǤ_Gf8,OlЇ<-O|h4iܾbϭA{kuw/`AJV2rx|`|Smg_ML"`U_ )nSԳ O~ƕ@%mv2ڞ*oZ6]%$nC(874I=6~H@b"tA܈zkko&`I$kۑ7'=Vmڃ yR|gL"4C7/z̯t4J祍:oM߃GF@jy y-IZYc"LQ`&}& waCN%UUTwQN, #Sw9E-)1D{d3koy 菝i_Z̧O|,\(tÖ֒?,>>cWȑm@o؆u~ O)FT`RZ.k O"m.t.t1xa yph5t%η}Gi;q.:z!Ÿuڪp갽n{֮lgwab?ݶ(R |+pA, r>HN; "WKG#X-wƺى"J7m0?r?J=tsfяlR!K|뱈鋪JI*4F.a3m3vN֨hp(arX>; QXgL XtY6[zh\YH6i~!(p|E1'>2ƥ̿V2$arBEEE0$\˓5TP8VV)â /kDW|]_9hzyo3n9P,'qb,jFtvvV^67YDt̮Q.]blp'N-!@ʥ=1#G~8 ?q=86t.+@Xo'E)uհ1g\ekԝZDj-}b|crFdb_T~E1_Yjdkx, kUoO]RZ \j!0g_A8a{3l}屽1 k~Y²7QLeîc=I&Bi|tX kfɫ+0Uw$,eO0B3u_,Ht| gML,*`Wt !& D]a4l$JV_Eῃ/a/9\mzcM536wq²YIk7*F΂o=M=y~?/Ys&%4%40&V:zl=NvA1'Sn 5&c@nW|۷&`sϺjEIʹhc6?PʋqoR&NbAʡH4/\3o("jJW9NV|Q-'nڀ(\wG-Bu}9/Q턥1`S ?A >!Wo(oNA'+2^eS{'ae<ɂӈ|)ƿnqɇ&=";U #; ,IPw6 \ԇՖJ>XU1D۠*@<0O:nBދy_ v-K*I|v-'"1:ٕ݊RcG`|$}M氇Qv&h,GsN,Z[k~p|./6 YSxX آ﷔Ruﰤhc-YN5/Dr~LT5xե};σ^#0doi`% 
hALKO`~E~NJTQxg|nbqGqDy?X؟^zIvzcf#8?heo(9G`Q?e.bÖ?y>I12|-I{σ6M69wNc[<=D0kgZ,4"z=?ƾhEm ʕbXԵ0#&݉;})g$I@ G-~!2{y ĖBfޒ>q +Ĕ"-,c]iPYy729 lXn!;gf=x˅_'"V(`xT[I/޾F^M0omŽy]{~5^ڶnU\ݭ?ELrtMIo_7D I|K?[jKmCL&|[ZO~w06eg{jrOi ܻJ uV"=~GWrnx@jö2VyɅ/ `%kvfe:6_ 㮗+|*I+K •> 6Has5a4W|*q_)'M:KW&_~y|vSٿwGm6CX|mIu6)~ QPmψ#Y |0Zp+rƘ5D"KN~sڡGа9K-.xaS V!NOiWs`##9̣LQZbld3w-BaX ^DHmyWK,1n1 A.Ƴs[6p 3q:p$^ boWEP+zW"X$ߩo^2=4BMN*YW.rvyY~5dp=cVS,P^ȸ)M ~>XI? m#oiAv&PI/q,h=sC6ɏ`v=0] jPӢ6&lV1;k=z3oZn!dZe_L~YfyaA{nEO) zv:wmF2}6Q4³q'5G/)~ #ZDiꯞLɗBQkS nx}M. K8ĤC̾&8uP&vd+CM6jM`@Wٙ-u"X/x˲gg#a6E%$] Jux5bŧ㠝ſk0¸e-gbdTez^v@0E̔;Z`|:y91EF!.1!C`3tΛ=dU*c`֩tnJj`jݔ鐹ZusBlp]@b}v"^χEh"v.E>| 15' H`!&B\~LN)›VŬa܅!{CPvKE* (ģv4~o=l߷v`\|7)+Jar&0ɴ* JzNPݦuz SX2$)jwIqqf:wC&An(!3g tABVskVulJ|~qпu:$݁ !~i W-z{v3!͒fTz&0bd{t*$|sElg. vffbg8Bt.2XiDp9"CN#g^s(҃ QhW 0תjT1nSqo| PWȼtb;#S螋ʲeĜ:^4> ￉h} jգgܮƝ}{tיަ ]Ӱ؋ŋY1E ((pxUg&KUF)w-U1ς'TQpĥWYcwD7j+hq|qB "_m.bB,dw1DjJ0U7"6$Wo.T ݆^Eo _?SYI\|1dKH# 2ԶC8&(Gܽu,-on/~ސ^([=NM! ~3>9a=#wK$:L~(?hervI!&_* dC `tp:epгd\-qH++庸|^i@ʳpnkAFK,*iboqFG˝IfaGa":+(S_& zw6B}m> }pQl< NzP=A̓pAWa{A&S6a9vU-dg VE2Q߳ `iE;} (#[V+Ѡ~QZ>GǷᲅynI& Dh>L{SFdr΅LMIX[a!c|5/ 6\O]^ȶͧtG@yZGLJ{W:3b`w (lW0Ev؋x&w#z1ɝ kn]e0_1j2)G3 sWӫ^ ^˦$"H_d%Akޕ2նUKQI`9 nNt5%?z>Lů I$_.L"`ؽ4= =0CeWJm0f&N7ς^miUX8i4*ũ.,kyQ .8#DYSiۺ<Lrߩ";a}}[0t0EvW @ʻv t7]|^-:P$!G @Vw9LNx,^Q4\z71ڼveo[Ĩl UG'"zl ZͺW/=aɑYU U=pVCWmE0gg8C>XjE08ػ__8/ n†FF.pᶲx)K6'4wٙV0QdwBe2^fBLr={KW0~G,QAv+i˩5IEŒawԇ2XJ-3偵tI<ȶcga63Ǔtng k҇\C .&=;x/, xaoi^#Wc bMaB.j|[nJ@#[*0Fg ~'(~)۱o*r# }Wt%DcȖkOahYm6bSz.j朋 z|AΏφ"ڌj!)r%O A2S=f} Qp?_CT_-Y!ݚ˶LbXpGlcx3w;>b_TݧR״ϜF:52{wq1~k;jsje(Y25a!sZo^J#/cL@lH;R+΃pq4-\ ڕFt 4WuUw3F, _!nګ#Bb2b~b0x:<]r8DgO#vRh',`˂9W,kXzW9 :Ě XvDv u?kpԂus9”_j+ƫ[.#ț(G `g'W!&?P1MOc^J&mCaީbnE V"taT}6kL\b%@|wt"1a4Hbo6`$6]]>,MʣX^|bG7؇b` F0kg(|.}%+?`Dl۽F7 Ɲ ?N5RbS7ܼ_cMF 0!KOlX"l& 71ﶥ;]EG`)ʘz Vj5]SR2Q>Ȃ$EO V` O|seI,ZTV]Ϙ4~dXcm)="{ɫ*zq{g#71 G.[7&\u:%KU ˜m( K/Na|e߸;C"xLKCڃ Ǝp=QZH{I9כ0 .'ʣhh <| *ߧd Qtď)!ܵ2\%>hy`/tL~3G~_>gع, Sd?oՐ=i?<`W.X;w V. 
|L7mGסGk"l/a34ih`5Y4mXϧkM' `~Y]6g$E-.zr,zwA geE6}.gTTj[_.d&uAX'D5Z`s1Xw%Ïkg8-qW5%@zP;! 9`&D;Lz8Պsɜt-*p=6},`y(BczGsH3::\@=։D Gӛ* i~Uxq*oXLdžl6'cYhI%jACT6Wj69۟7R+݅(~p_ R0@DLAz62=G_؁\NDSה ܻaK.Dl2kTňPqo0ː&6Onl(a't*~Q_Sisg|;;'El|?r#BҎ^>6K 0pbCEVnRuh](p8My-@w,1 QGEv (IQ@9 {_y~JWm̷ޥH@'a-ka(q@CUaf;D'Z1|wI=GtQܿDEy;+)zY0]6(w,{!Gy+Wvn&!3ꉋƈVN<'Q@!bBף-qGb|x6b^V5@#\CIU9ڧ*.qQ&YPSp0V?ݲBR]j&:uV5O_$X'- N48Ke1yR(xxawUX5uIwIZYA" jF&rHV~w> ;nEM{jv[`\ px@ d>arI2{V\QuâFE,ER@Z C[m1mnyv:ߨIfXl!SՒ'tyUˠӁ LwIMUO3<:aը(yK"8yk9dɩɁoߎY@:FU#Ĩ´$A8ĕ4Z "qr}"nb>-ЁuXj; [[cќֲ8 ˟w?ʡ fuϒ8$vzU- -uEO=횄)-ύ~0#/ϑ aY5D /,-X&"_IZ_r<+J>h({c8=C4TWaK'7i9>K`/ZB Fh9`S-WX/|˴߹9x:D뼕~#$`t:y'ğY0}'t|ŃQӹò"0sAh! Qh>?^(M"_|(T|" Iv`5!m+*ls>8p\.aBc8 {}՚S|UBr0^ZE>Y CXb\Qp=jnBT+ ~sjm3<"T8Xy\,k?Ѿz1q hG'^]o!ۻ~VsL nX^Wٱ|NX2>#`zQl99}ׂz՞P6ɪ-Sa3.uk`1=}y& ";$˒}v>/sP?C?ue}v~%?&13/ÈGpϧlf=kBn׭' vƌz> L($J#Ey=ѕ?A. Pg s)^ܳ>m'xǬD\NKݰ]K_ Hwa{㳰uo5_"lتSP>ʽ0`M~*M%D%`dqK-C4ȇ uK8 Oۺ31Z,MR^e%A fdR^²ZN;vbCL"Ò?lZ,8b{տs FƞGe7; l^܆EX$ 2B7f|чv2-# d3p7Ax2o"ы }Bg5IM Bvv bs9~W l!YIaDs씿 W+Y6=TEM.`+\Xro2y}9jZ Y9rWIuR%@9Jכ 1/ aO*uщ>ǿ"!jZ6CsDޛlSP8Jqj]R~E/>K-w"$3<ێ"R0UqIE \Dlx)W1 {I<O/{;Ҫي.A̎~֯qU*g61.EPN5.b7Q(+`{:(}v! 
q0;8uB\ϕ"`Ѯ yqm0#Euwev0[o,ct45s`4>\f>,.Wfԩ @U;k)Ga0W#KݲmSBqѣhQf`ߢւ t٢dlZ6 +[,Bfa%"09f?A1=#~O$ 38()&phыƟ Y={gNV Tb6lI>CQ\vR*]A/疄h>[#Jj9u#/(=g[-J"*K;ۇ/dpӃ]cg]v2\ \x\XM<~t.TW**㦨UDLm8†<ٽN qz) bGba/s{0sI+ٺ=Y1qJY1YGDJ zC%{+CKPFH빪ۏ-"X;7fHC2U.\!XEi|ޚ0*xQ*X:$]6E,%`>#y#IVRd,݅' u͘@*4T~拁)8/ok.0;Diex.m[GԴٰݚ Մu1ܮ,}s}w;̜&;_?dܷ7W)o5T[~A@jMfƸm\vuoߩݧ $ h0/_ɇt9&P@lOFP-n^7C w|Ip;zQa+55쓓 ©8ΰzW֖V"Du'Z13$:#&gS&2\t1$_'[#^>҈Yuw5;DagOa5m#SB"l=}Zgތ!c}6ш_3~?Ģx*cߔsFLQa?lX^КlYl;0!d[;?QZ}*,c8/օϼDݟ'laC>~o-::`'PwX @~5[ ^A DC+m1hKvq%#㟶KGfDXEfW}q 홹'c(b?{}'.6Co$y8uE;*[\h_&%ۻdi: g.CuV!"}4`9XkכmV/u^b3 kmO;V3HeGwx덓Em˓]Pp#~ޱR6_Z>9sǮO @tSZ^)|ޯ ]Ebs/v lɰ` ,k`@袵+L,=.\R}Ec q#ZL MAm8!fAT,*˅gg!1:A&M 1޲Fto7!`[p7w/0u ,X[k[ӂv AiOmopὴ AMh~;vD+ɈlOD.+bcC;">nJ;=1.]RVn(xy)'?gKʍ)`?s!}xk7eu#Bܣ&"m޼ xx ZC^a|ws1]gd &{CKEi0gI0Y;eWXp\'nF U}ع/v4.}_RS=ƓU~vB,[WV"y/Zw9klűr1 isO |vòe=oc Jxnϛ[~)n=jh>pyJ3]>G@282=j344 1;nAQ r<w,|faHJ  B8\́J9hfK5 U?- {_.n[bm8Ïw_hYsW򋿸QZ4|'s6j"d$erCˢ 88 dы02mPPs*/tFr}z& v^ؔpJ =LDlo=OD/Rga4Ц1\v1bi0c=hv1b:Ǧ.e0I~#RyoÞQ|o-#k5s6麽U 0u⏗g?nAtn]ZX.Rf:: @'yM0 4 ܵsR+9jOpc\xT C;;<4)Ѱ!oUj\ETx\2O.G<NwVB;c∑70\h62 ]T~,^i"$u%n鶽[)_}d̏YX;;;;< F=-wX[ߓ}La2 nXm%b~r# ]C,Ć#<U/c]F"4ZuE\ua^twRjɈZ&frK D߾瘿A,y.;"&Z9QD/[*uR- Q>3h# \F sTofVBd̈́Fae9P@j8m `R`7<,;z=ln|er!7{D%kySѲ)4G 6RSL99I{vӏ!vF*=;J&^"_n8F@k*l)24?*Dk.bhd>ohuUyIW Z )I2'H_6ER11o7=0nx~ۘ&az'uwIlˇ `jgd t<]#o5?Tj 0v$No#f`gD } [|m~7⎃Z'WxC%GοxGy?_)O DoRF[.7}Kh0!FS汈$dS?Ʉt ðyq!GXzLjer3,%N'z --x{#Wa'Eۗ;iX ¥<i``.]eCev"bm4q1l"DqI6yX/۟c`iE0Nf Oy4qcZA,5JوжŦh^}1W&Vd,T/:Wqx?:bsT#ԮkU#]QMuR+lYe&2{̗V~L JrlÖ%NU5؉{d$Q*x$P BeTBJ'{dy朳~OG7$z>sVXv5RX'7B=0_T4 ioN_avEhj=JטSɘ>Zk^u2(\Qg=|ʇU)0|NO}{{[۾ka!W2ퟱ ]'CQ7F7p4b!M5oˈ/1Q}#7ÎF~ 3L0/^ZiT8%nsO>(#c-[ bԙ6LU΄qh{s#a,ZiY~ _Ȍ(_5GO*giaћ-!OZ!/En!Be3@AWt64D`j{ UͽCFxO/u/Q:/auu^#(7c7^f;ӭr(e+Nѵ˳ċtvXnyh g&r6fy]G*2" 2(Eo6'q]ZKzttRU[A?$=} eyAH x 3S|͏We/U{^X&/u(flan4cJk3n< pV܉G50.-ӯ4d 0u ֖=Þumg?:RV1Σ'aE 8}E 5,ۘS@}L5eV2|Pe8~nqUXC;WC g]!Q[%&¦>Ud} k<ۻǼb^ßU{XY#D5X9!Evn$ o!EC1Y͈E-Gqv 8"@wԐSV[-& lU`EqH؉f\dCy@ FѪc/g5b/w-b[B,G*# Kb6iBR'BB>kv= w;E[M#? 
} 8bQasA% ;CZϟ4MX`X {2 gx1O`3GԗvNJh6Vu_Dn Z{/) !UHu= E@\!7z~R^:v$=[W;x,T#>_{+y_y{I?3oJr2;uj~A'%K6||$|' X}E!lh;iVa$ڭ} nlN{ 2#EսS`FQ汆)q ͎˅cy0}AB,$]>,ON m}U6w\`~$n5O`e3",dWoC?-,yR"8X^( Wm mw ׍8a?_IRm Dtk:&dbbN&%.#e;ш.\/"'Qydw6s.(Ҧ.tWHj_Pqɂr{.mrGoar;"8# >hKke!&VOI~Vq'q&* 8D#}4!$|s쪹h>w߷m=O[s`)OJ^l9Ƈ Zݧ֯On='{VcfTE%W+rDDĒIjg?2AkdZ0|(_ccUFyŒ y۪^<:EDc׫][a;Knn|֧`Ws$Mw% qFO t}lߺd@Ͱ*{X-mK/!w0ZrxM>6~[u uymIb&n.;`Ŕݵz#&@9 S8tlFX5]{?|rj .v۱a~vF5̴QX^.K[o.(DC{:piu)kq[ b }.9c|2| Jͺac'7 "SFZTؒW~n\mEyA0M kM̡QpAiqRsLEfnA~g ,D퐕[0fnrF!\&]q7 r93'*59g(k K?UI/ G=R͡~Sc+XfTPWOл-ii5̤u'q.+wՔܷyܗ+^ 2TN.0X8~5L殼/TAXt9ڐ0.xGmZqIm{T}-?qKɼ7A_lpo9Iy=u.y%_##Z;;},v'Bs?aZFfe-D7k DaX:'ĴΛT߭уj-+jZu󧎛ׇ5dIjgžM~켎tj ̨ jqM8 /9 }gțVR*DʧްCYo3 ak7P#,ٗ Nx6AԊ:3_K>ńT!+@fgm)Έ:MnD=Ӏ@̩<Cȉr c BUS!PsOL 5Ie}?c6ߔ/D8 巬u"gdECdDXیvbߜzÁdhO=GO~߈{z2#⮗Gd#Wq1$+#qRq Ӭn^D|K5uCĿjwOM (C᷐Gu3-_v —ۣW{s6:W~MW7%%WX:ݠ-Ud0Jo)37ÌT}z4',uׇxbRjFP̀*!ڽEXI| }M e t/:שa!`+6?awTA_pgi?e8&Rozz}(+}}7!:7x"=%~ C'&.Gb'EbZS-ĸ˹@3P,k֏^6USCC 4m?Q5"c/w ;`3™px"Zߗٚ#Ff|aDjKGڤvEKEb)Ĩ~9 wZ@QycrC@ƚ$١BCר8l&J@cAFX%9'9 4dT&8`RgW5O[.ÒWX&ƸJ'7ݟm 5sl;j b\ ruиGBg`Md$ a1vtDAh#5_QyF-kF!0Vra憵{1ү=X?b>K)xF:.OIjkK9-ӲxD9 Ci|Eҿ6_UU?ó`WsO;q QtfWOvWb;{ãzWjO@hݕ N=, ٚ.zQ^4wo<=b?-P`2T|?e@℮}1FpQh3v1~3ɋoY2tIrO89~tŒRR֠qO)˙W# Й^ސNخ=&a6f%SQMֽ0vAQC†uy`w_{, } ?m^9A7s-p2TxCğ*Q-xɄo?_R4ea\Y 4hv($2RU9ʣػu9°M$^? 
n7R*1s6_tV SKAbƗ oY7/uȻ|$򪟊,E7JwPrrP/.Oc_Ԙ0{8Xt$^b{05~%cJҚoҲչsjP0J}R jCg~&rrpej`*Ջ5Hx{ֿ 9G'IG.bCl+paKvoJ5AS0Y0ZIJoGbNatrTxA?l2Zr0fKGt;_䳮"f 'gƯqzK3 Vmi+̃D#Q [,3\7%n߽@6 8Xu{N`7y_ 1 D yzͤ11ѿ4Yh6EFsշ`si: ʦH!TaAچfmmsd ~,'*q̂mPh좼ac'"aORXyhJ*_0)PFL''#"DĴ2)(bxDiQ] )5r)@9}P cֿΥBEd FIwլѰW_̀]+~J8D|bhhR|,4Qo&b]"}1gZ?E>C4yz0ԳQѬ gEXOO1"+@VMG=x u] N+_:0%~{)P7–bٌ?7pN""i$MGeX1pK,z/ c5qv6x?8hg,@gE,uJ(]|(Tx;' =KGMU`5X{uǗlLa~PG %|M_ZK&>ehHU܍H0$L5 ݸ $J',ZtapՆLڒ08uov$g ft@>]K!Ӎ+T*Hd:^šϱ;\N3o+vGZy _9~#*}Jݒ2xki[{/cGHz%_+d ވ5XbOX2b=s,fͶ.ʀaR9>hzs0Ҙc԰\bԋ9ͧA@d}Vg ƞ7`oDi A~}b ͸hhAՋq0x%NrX3glKM*˖<*9a|ʰTUo!(׊QR}m4e+/oNj|skР0?'84 ^~@5{GߛK3񾬃 r61!J3W`U.LC3]+'2Oj_e.<`KkWumTnS{|o6+DCx}{ujp "@~1)Z%up0Xu5H?aZMRa0|udz@8Xx ڔ'`R9ea+<"/ZʀOך a+Nj$C~gLa0,Kd>@Xm[;fkCQj:: %*옔?I 뭧 *] DgM3o"8h؜8XP! ~;:uKXJ .B4:D q762~BY[T?"ywb8qc=b?1Iݽs[X< Ka~.z.aF:z Eq8ӡD3wģjhʛYS]$'C\j$X6D:M[c ~00~|Yr'?߫H_9uh1mLn+@3 S_#Da95?e[ϟcwWx׹xWt%՗1NceݺkTo *Ɉ‹#RtDjlŊ=Ai0kw%Nt^)4;92L a3rjX8a ,0A锍m)I 7eIlGپ6?气pL>EY 0>ogv5yzM"G6_HyDozm`ma 9 m}.Ѽ. %ňcA͙!|DX Sx3?<=A3퉶xh}ݸLE7@%oJ9aStX󥾡} v_K_HBQ&j-$;.^W&Mb'rAkV/i\[R9pZC/Yn,]-@FG 8S)e)"Qr~w{œ˖3aNcex'ЇS8[wm:HJ.{@MqCuClJZK= 8X8LL>VVn ̇Y[IRQbXTf2069}.‡q,9Q}z5PDW,U:Öf'Kx¡ XqtE!F:gf#B'}3_>q˃$H "KUa[(<~ј1`:_ʇ!ڂ]spJbmQ\6TD^F,9"X] t nzKKA0s,`2p(7 S`UpU>6}랽RMH~SP buwO5A<$N uy!^K^Z.Kۑc"*)??FΣʐѶّ@_FH]d<ڋݧסi645r$"=I,ufv﫛KQ]Ioz2}B7P|e0,?6fh>2es-.:$ Fh<']AvPCdR9Q́b56:y: ^=cک GźIq{eb:pлwW[sɊA'Y^(czVl#YxllEn+뒈7DĤMYEP1R{utdCn7g*dhTR(4A\gӭ S?P].]]u{2`aEF-ON > " =k-+-`(M*UIY\IX\WTЊeٗbr8D2lǵt8p㖢o"ꍾhjl{Z᎐@4GДblmp*:EaGf̓ӞWx"Bo3/m6XK^küB٫ yHW SeKk.P֟J x2 CoN'w{ CW+:  SqB9ߌ{e߅i͢|h'T1MP~alDTgWk>JF4n/6? 
j Wv]}+C-5aV{׸.Vm&]aj(۩~X桟0?)WXBL \jHE@XOw Xݿ[E~F0l  @9>hZ~ x9&nX$U2cKׂ 3vVQ oJkg #߭!&dXb^<xRil'{}˼9~;~O]}yq?_Eqos;4kJE!sP.5 :0Yrr:LWll0->+O ]\*RQuċwbax>\]4 0q~'t.'yZ|$Qb~;#mP䝖7}[F=o@ y ۉvUDeZ$d`>wb<+(El/N%< #ơ/ӈٻ3UyJGɶS#_?Y NJC0hh/< ]=_|݀5U*g1uUzI Vi)ߜp w_Fde"RY*D5rv ?lx0҃Ņ”,а: 4- SϞУrӇ|p$ !fݲ'ӓNs3)ؙ psޚ`K:_#VnRL:=_YYC+CxSq O'j0" =xv,bٰxXŘlpLBT.]jnQX#晦w^C?%TdD\8K%hJo?x|#^ S&;#cr}INЏEamdy$ڭ2^|~޽e҅HD{ϓ ;CzFyﱐےnK2α&2Ʉۦ;xZC<$bTC & m΂='$u\Aw5O?Lf,Y\JF$u1 vX9 ZuvwJ{$`~;1=i ;t8=lnם?Y! YS^3|꒐94_#:/sg= !B=1X|ٚf3;<{Fiv<^8ĕΰפ!fqjĐY{E۶IMe ]\\`籂;c~8k9M RVÿG~02ﲥ9t 2Ԭb8(?Oh0u'T>>B8?^clJLK7i0YQW~:h6DDw)s0/jU&U}O{{ZZ<"-)7q0a|qt{a;8MQyjRˀhS8mٓf1p}&BQ I%QX)suuL݁ݔtT"`={pzh! qLDlb|4XoTAӪc0=%[(Y~[&ꊈZF"&M?NXү6+/ijB2h~O&I_/'ŰôzߕD",[=C2pp%%qe/qPk ()IA;q7eIg{\wߏƸIw,w;wqr~!X3ğ҄#XpY 6ܘ/_2!PusSl̋jsRO.S'5#\EQAyD83[ѹ䕜 $Ub%#w ZCo}]3QÏYuAB RnVQ/bD9mX>jCTf/ȜuYOhI#Y$4b 9Rl+2Dl,H nغV eJ{qP=%aҰn{̗o>]o(V&?߶STePp=lRFng-\R-S2Sl’O0nm,5ƒ]Oמa\Gp&QX]I@ujn0ˤ}xri)[`]9S^%UMJ>:шh9EtYÝ㈺iF~9c{d Fw< g g|\ 5a{Ũ ]<..8>{ H__q>H0 ua"6ueӈg9,̬d]fy=< 㠭vОc:q,[:xT4=4cY/c^:%{Y0|Ӂ5õ Ӹav[$'*D@gy^"Y7n_)ycеQaɺHsuiy aZumؼV- ~ՆpS]rn$^U,ePd 5HhWÍ:VK^_ϥs\5P,Zhʼ5=bs>}s"ޟ@JMښÄ>`&,s\xE ;A,g'y#oG}N8,WDW{>^/^(/\T}&hlfg\1_h,|Хi4l\b<.&XAtVY1}:>PYTZ4E˥׾a 幫=a*$fZ!^S\Azw:&JL&hR4ܲadawjEF)7`׹UFX } G}/bKS7(:\,µd0FxS~_C 88hPf0 ?ɾ8.[5d_IP~mDP|cuP1sRXBҷۉuQ>ňkC*8-u8!ڳG96_/~{#``؉%hV3}*8Cr}UԳD_\K4Yz>z ?Oᡣ)LbjWO1a2a s {%Q#y|g-n{= ;%ԅcƄaU9$a2|K4 *.м>t$] /jϪb,? k:!\J^"x&DUO ?X+`~G/RJCnrXkƴӮ1֫3 =GEl}]uCKuj'=ra!~XutF=m@c.` PfbMA>GasR-|VS/ɾzD ŒaGfa*7 lZ.zVy'!TS:?h?a ӕu I;DeW+DeӌeXQ|EEǰ #F0|g_te"\߻U؟Nִo@/bʨFoETs߬,fX 6 ^H{~C0pc g])l8|`xKQ5.:77 0bN}5'-ί)1n~ҨDLO_n q[߼gP5gBq!}R@<bw$j+i!e?Ew}ğ2f}> K A]j@gqZHH"̉C uzi6;bs}87.s]O Y|>6<QfUjPn1Qu'Q}SW:l0'Γo˼ӊXIa_GNL]v-0,*bF)'Sv M KAzXJg{&UeU8Mgz56l :aCw~j.㚶Q5AST{c'W<ܽ|Ug87܎ʀJnEEDt&1jj;"z-yzXljhSj1AMQ14rFƼ!଀y":;h: f]ߞQ,5;.pFG '¥*ώc@~bZ @,~DF>{#L/[6+nwjؙb66#v`1NO_CS=*F,i_7Aq/{ b+zDhpA0f>De6r11Q +f>tkߵ{)/xADt[ RiȋYWD%zXM%MÂRM"Ejsӫ. 
>Wj$p_`5{뤖 Czu &^~.]_>5aO!0g퓉gae},vlϭ/v/<䂑ޱGg_-;|+[C1s0y=}Ը* $ Yu/>=@Il}\"򜰳 0Xk4DԤGn m˩?`uyw,[+'x[[*~`\7dXUj#c-p&1(tq an\"FǿD1?Tx# XEC׫ZEEhk kC̯M=sBY,WԟЯ!X2ɛ>='IPjtea@/)IZ$Xե VRda|; AԞNu#jzsz`5^M_rWԲ|`vyzNN Ūa/&87ƗoqacS^q +Eᙄ41FspE/(g?T+I߇Mn}A~RCrbaHF~wA#!caRri]3n )q9q:C t0r*H WoTUYF~ΩAo~@\<3ˇ@GAe0)o_6 -;yc5UeDBUi#C^vlof F޿TG@R6Jۜ90'[)VX5\`Jx*)U/~y VOls?#3 [84-v}>fC"ke^ҥqY(g34zaRD:4)SMK8aR-e(rAuZwAD-S/WK!"ђ1ih@E\"1|ET xVfm$|Egf$ ou2:˷5\GwXLONA7&h8)$R#m`U΅ ±7j )!ZO˲x;ĸk?پqÉ8ۅȾuh~L!^)'!}(r1a7 gK:"S>J"H`Ѱtxm&wS1I] YUSԢK0jU~s.G `1YzDs+}QӅdf); A 0b5EyX/?@)R`ɽ:T,[`WI]rl26UF?ru=X ;Uq^ *"^@<t$n9^#^0uƄn[b?_X^=Ks&;DJ•n0)*iKdaHUeDx{4bXzz^YO-ՃZq-S(fdK;ba0V>qqӤ?zwHve B'iOעq0w!‚#`uȖC' 8hB23)ƢQVpb|;!p1xkfh(CW^mM#\;?[zGoᄀY/_~S4EΆc.P{0U("1]r^ވ9 q_ӳ M@~4J$a]gbo0C?1n*"mg$dahzV3f;$ zS|gR_0P,0}SX{ B'?y֫Qa I"y8hYt* {a*o_%F:qlֶ} a_@a0hP]{Z^k)ڂJ'DaL㥉 n/s}&BNF(53?~ H:kRd308ikP DmkN!\oNNG|goiRIC"`3>K w-lY~p5308[\_8Lϛ~Lr[֯um!։ 6ְorř}h chKzAuGifp7dRYDq,:O5-Ηx6Qm"Ft3}ܿ#F31qE,w/oi{6#jlLBXd| ހ]3PĪHM詓-Xo*hRiaؤ:va |P4B|^ %#/aU˷S@Iy{0o=x 'a4,e) 4.CQA$Z\6˝9!o6U SW%?Ti΀/πEȷ@R8~O'zZ̽vNCo=g^a}#R C]`SF]dX6w_[俊U*peY 3a[3qeZ.tMzū`"orx&Ԙd.|YO4YCL:'3,M2319}-bKeK1D,.+GGBu>ġN! 
1S~K뿁Dj uj~=,i?1v~9cT;6cY3=@&].ow4CD*D_")ī1bK*1/_$!zѷM 5%P-CZد֛C;+`L +߼'nNADtv] 1r7|KBtmo#\wXͅ6b.vR;°'̿ R<#nUTc^9L:`POzN~LjP.3),LМ?܂ i ,|faHQjDd=#Vď_o]Aw?T guK5w]xt"o\zė`:o^`N@D潓_#^_Iw?N2۴| ?$1Ԋ hYSH\, e.v)X?~^փݎ"쵯:fhl31z U~i:4qi4߉_r50xtL-.X{Aƙ 7Q+r* T`^sbV}]a 4roLp=t-/) sSI^X&% ׃OGl?#\a$$5My`P{課NG|g%5p|VGb<8vFlQGi!O2_~ʞWǿ ڴ`1 ;ctAM럘'F^S`i] W>a]m_յ<3]/ Ǔ"Ϲ6-)YlLFq9AML;_2OR]`c^R cAMjImz4 A+g̡k+GgS!J 쁃n 0ƵhK{3R{m*dI5ؔ%_wrg0.cdmwѴyL I_:VH^W$u!=z6$֯J mϜ`*lGb~9npA0fo蟞ÓsSLrb ᄊYsMBFm=Ei8\mvV`O|,&vmʣl+%;0f}[MP8('KqZk+̝)pz%~|Є%as},ĩJú×"vSw\~D[q1֖ ]i2h?jۑ3Wb`lj;M .< p'BȿF%VuNqb@څ|ui1v M4ːM4TФThQ$ ۏxDìÕBS Xf.mE4HB/?On^ 2PCA=d֌0>ZHZfOA=ƿv!`gc 첯 RW&֎Ke?Zݿo#:Y'T%gl:pKEWs0jb[q=y>,aG>XH R-{ND-[D k-:]IR[R`+n7MKVX2¨ϩ*l/\qԔ6sq)t_ V,Yq.[X˩~Gn`:\~Չ5X u aOW4RA{XI.+C>P iFY6F޽ѡ L<%NtIQ?R 6_mM^' 6$ raa GS y͞[ IiBbFz$yC<5Op=߫/be7r)JpFAVHPbQ$HɈD{OE$ ^" BG31|5dwRYVDkH'"Ժe ]_0ST4.,JnO;ba'3W?d& 2|KJ= \}WJwjn,jNW?`X7sfas߯N_G >EB=2l#N@?cꚻ91Єu* f`_d"c?Z]fcJZ簀i+YX~VnY ! ^YC#1'>nYp_w[`#!()@ kZj~޲qa͡ɪ/Qɖݺ6UlH }6 /er*L1}PWq+٠`% )߈D5WIA Pӄhsx$¶oQX1zƤc|$rQ(zd2#?30N/E+z&1Gb5_2~No#;혮:,M֓ :Zq@4kq3_-+KlkanWCEz>3 ,Z}±2&į1c[5#FWGfNw؀_\Nu$yh"R`0YR/|5$qe)MM]h1s)j$ba -LZ^Rm>(ZRB:t %EG~#j]6=۫!fS[y`Q߻i|o[*NXRwNU0h XJh8.`}a̭OY09l4D /M~9'8*徭?N0 ŝ`sF 2%K?A0{ս*g+ͪ !/ߴ0ŻsLtS"4b12Rј^<<-\ьEO~8 dzmDc^)3bwYCT#}.[8_M NSl9fa FMY!G2Yw<>n#%;\zM,G aGu^Z(k6 0|1ʶ"o>CϒɓrzdqL'~Ks̫ZWªbI>ؽ1df #CN"M_ +f@D$R] kلa?W˪P\n>}P:8<[a%*YqE3\oQܽ||P0G4"wq;(s/l {Hk(`8#=9hny5s2Зս`*}n6q1bD7P Ny.oi6X9m8[W>V#w#=fobI舼8OTyy!.GOJ>a? c\C?=E|hi=Hfѽ 4|@V?̶> 7kh_]nhy N8UOwB @ECL| v4F4j߾D[29x3Z  s>̯}sk9bƚqFCx*f}W#C=>1k^~Butpi&Lo]RÏ{Bo>^YPqd:E2FlB/rҟoqjc{@f)^Vh`d*ok`U.\ҚpW5lP !S kဨ :a;uBˉ"08~sb9tc ü5EO9Sqqq[<ʏNXߖ`oYQ 6/h+ۏEt#cDMD [%!^%8>:pE8PƎIX^wb۷B9$H`cls$jGϏgS@5L7 `'A(7FZeu_wa^G;v>F=W{Qrm#ZB@SQ)o,\'S~ 7xgwD={@}ز_syw6"\IލyyuDV^Bݥ矽!RvF&WYra*rh$:e:q 7¿. 
u3h%^~ YqqaPb0ڢjPlN#3kn,ȇe~vmQh%]&vn XaYM_ؾxl](Wxѳ9Cbd^%" l6zK5k .0'6)pY>^AKZqy׬s1FM4wO,, âC95E?CqͭD(;tZ (g?T{Zakj+,,c+BKY_Mau0yv_o_@Vuwm/gGGĠmd 1~\eA]3j cؽypWd\ #:u%0Rc~ЁnX+O&2L%QXM1aB8jvd _$sߦ7K<}kLa~*eh8-5!!#&ݶPfϨb924J?OX6RFky\]9&0<4) 6uuGTaZوD$}V8:Jzo,eֹ;`~Ky͟DyYGMwe,3>㳷Gz_-PszxjlX=woA\0ݻ?~ y QxoPޜX.qq8C\|*G AS4ՙۈNB M@{Ҿv'ޑ CB䞴}I^pfK1E"oE4-F=&Kf40籓ɅìGB`6 S K ryoXϘX2E}>gZq0x"d0da$g2ŎᡥeVc(~-E0@!$9hV7=7,J=h<Y$W큦[n01Y| ZѿYa`'w4,=ǵV!#4CrZ3K <-/vOM/Q~MDt"úQKj |[#t@%}Nc& B/kJE,72Tık=}0wK=Vj7/9%0""a2L~9,Kcփ2-1rغ^#pW2rCoaƨG< sNS_QNn_x Ѥ:Z08":W3ݚ-2T#c@0O^e`܈a`mѭ~X:9hAI Z乻u&F6O[}E4A @ic@1JA5f ȽDdh_4 30[<2\~,5H ^d93Tf6U%'aU$z1/Fz֜gWnj8q/Dcrvm k—:Wy"w={uh9~z`c'a%y~YX`z! 4݃ Йw]g$45X}Jf+G8gƏ&mm@D%]ԄZܰu.^+o_D*Qh:7& m8Pt#b3^!ߓ6 :07^ll1.X\{ryV !{l^$`קxg b +8r$luЮA4]@T5V׽ʣ>QϵCayw\$usP&vx ZX4a]?@I<#[m׽x<W )#,U!5"z dUfEcX~/Y/`IkQwh>"m 䴪m-߇z} @57{nʰA ͹H<4({Ə`xє(Ϛ5t^Ħ2}N*R0|̋5?x9|%K]lXD/ƙJ#ڃ*r&_>tu|bA宅I4jF"\rj+v/8D&l2,>%Vգ^0~`~P>Z?Y Tv }D!JGG1 @>6jW EzF!T ~>e_n]5I|9^|O{y|g}G~/ _`QP>iq 3I~z ;;0?QɯN|6́dcvŸDk9cհ5%)Ufpc92=:uVm"R^&'bɑFWBIݔBD=?}t2lYp]ʣMKDl .0BߠB8Z~:iGj TzY[?ܒ SW[5"49B2A @.e޻3BM0ǣb" +f+cmє6)(a`Pb%bfD~ _9℘6@Lak ?ěfa[L9r~/_ZM{d,ز ?$"f`]Tt#.#D1kZk5yf"{6cY[T[0>h 6A@ﵟrP/o? k&w#OѸd=F8kNyp,;?H5XUd3  !k"<+ic翿)Y8/((i߄%4aI<3͇/Lɯ\"֟L-W Ènؗ 9;*d29ۚ{oq(ȿćxB|2g@u@*;*_#/ F‘}hVi ڷ+3sd$(2Vw۽긐(ZdeФȪbǏ{9at҇{e@9GчDTo*] ji"nÖ&>[[:l!!XV>yX[~&ȯircm:{OyVg>q4 Faw0M!|\BGlPqI!0z垍,nm ^A|wq 7=1YnLF'h8sz=6":k[\`$5m|=zMG6e;bIQ5DjfJܭ|h[\ %ܴ-ܧ }>ek0_&}~XT'gRΧ?1FԇK~`V<ʕY ~%j"Up= c^P|0fy"ܐZj=}'ްtܻGaAlHg=D.djGF/G_G<%Kfp-bl"+!/}ة9:wa#0'{': 1R`̸EtP}Jguj -W`4E=kE֫@Uhb:7 $OjLa<=4Rיv- ?_P;Pu_DƁo0KT,2ǫ}pf_7U/#vCMNsoMPd]_m"FW/`[q;l bU6\-%\[e2l5:~ND1CmIQ6ޟgb#޸[ĜP Q ?[`x:ßye `#OI(U$qxYf+`}|Bmm;/V} ,Դ`9[,g"W}iCh$K7]Vkܳ 0ӐD^A'@7\][7)㠿4Z5٬@,; Vzya1s=ǫk &XTk>SI<0/" KMdxEyX_9vO7ݫEHQ0,Hð>ЎA6K yFvso-#|{6gz>L}k7C O/; ¦k˼olۑ@ /AT!2 BLAod#}3MrM+ӭr@; :)bYtclyőz' ۍo8' 1 ؒ9|4nLuml#_`7 *7SCKw5#4([eN̔.ݖnJ_(x#|x~5?ep)Qĸ_z6]QN? 
JAK= Yu6Υ ֜%O7'"z@| F^=_`sc{tBRϱ!hu+,H_MT7p D}~ |׍u6аk?:bhcz޶E{DXuiuoܿybiɁIOS} C[iXg:{8%CdI_~?P_zl,|_AQY;ƞONT㷔<Kb`ZA& Gob aLrMx)i&Np/rݥF-wD;JDM/bS@,ZWeI8WqZ0<ЛNqyſψ'c\&QqB|<_額*į4Ǒs =)b"|[ ?t>&!g 8`>׭qCrQLWg.|Ƶ>W0,*ݞzi61W*/Ҍϔj-J=ۗ4XnkoYh~X9}s-֭L KC^>8F'Lpl +7IHGxKuch9D۵f9 @^yEI!I_h$K+`[;hG>WurDδnah5^dX;聪AsTЗw]& *ګ“` @QݟY&ځ tD$Lfxr.)Fz;̻!Ҽ.w֭D'h)x9hg҈?8Ίm6[&<3G|F{^MJDBtqߛ(h\ PBx f Å'rۍji۱"B~>ݑ| ;֝ưDO+>G^(w2CY "G]Rc2דOn twrγ+=G6 & '?5ns00S8v;0Y rqX `g;SߕV5zS藃^40{]roA{&XYIVH4F} *t ̵@[%KƟl򹝠_}_ !0PܶWVȶ|?FN5!ػNk,[ΰx6￑|1 TZ(kD&"W=?OLH%DW,14|JUss|9 Z+ɝ,[َpUC+fnP-Cۢ䄪 $jxRFR߫y%_R-p#{H3FK0Z,ɣ*}Xp5\S ̺(IݲŸ?pШzK\ N=~e)L?p&a_&h*Mr\ ݿ`\)oP,<{`c:i<n9iV'6&LL \]rϹ):k\޻}UJئQE ʘvXF:8?c/e0m/vsa' #ܰ2=ahkrFt0C .zPa^V aZD,lv,%K\t_<@?S L!0H"q8LùO׾'`qk]02ihe#e-X. ʾ淀ONm8hlk^]ua9m<`kx蔯0lcɀ({3dgwa{:nm!'.oes+Dio T {4*@ty2<>h=E}v eZ`_Bv"!ܮ_Q}5A4qvm*k\"o{eUwFg131,o Cߧ/s—Q j,՚:QX*>҆ f6D|cgk8]5E$Ua@/$qR1#&d ߗca'Io}g~wDN^Ք#v)h0/_(NW atA̦Ԇ/`Tw5̄AW7'Ȼ:mi2,q_\ \lSa/O蝆xDz+/ YKNkQ%֡- S|D<=b6G]Xt8f\{ZooynyL 3Fl`PH~|#T: wQׅ V A*ǬSt z%m D-Iɷ. 
gW GDP1l:_ຄ7`]gKՀ\Ѳ z\l|@uXw(5O'LU]4({#;$ckqJ~3ە~nÁPpXD3d`^5KZ6^IZ%<}QlznOF'3Kfs3fGau8̻'=F>s;<ّ 57,`xE;_5~ۆ 7~Z)＀V`2T 8_X>LW>.bNC"9 D}B_Τ9OEt fׯQg VKa{:t(rEVq@.R&gs4Z"%#Á~|ć K==0(DNt18>'OKKsB5aU*,a" 3?P ϔA;SLO관h3 ͆c0Q̞ݫ &b\BmҍZeT(0-zSEu v^Ii;"*yX4qk0F4k:ڌ]yU5-`+_uiX [ gp9˦?ᶯ&Lnfht+CN|oX5h#&[Dm-ZX#곮Jm=|v1BRa.˛Vj_Fr7J$dDe-õ<~ 2aa> d6v<] >c_*ʠ/[~ LA} T3z:HGu&*46ؾ;r8=GuXy*HZYKuv$$ȟtheO"wž"ljmZD]:b?y z)(p;Qa$/U4hT`IxL[zӞ/' &PNVpY?E eciȲb VNe|X(xemD+iK|sI҄߆S)כWE`{z z F[=2wWF3E5a{%P=y=7UhuiR\^4 ]:Q6;dco|~QЛD:"Eg{J"֡ vJ͟h.k&)B֟]~#{ *r/:$@uɣe0$f{_t1_a v鯁0R)K5 #"%dyB[g6& K CO`!6kgr:A{*ZOVQ Bd'žk r3MDT}!}oB"d̘D!1iW WGS p^?%7HJ}wܵPsz$]$TV2g8txd&\V;/37 0.>.MuaUdTJ1l\ElzbSYL8P~ cCdt~8l7M~+!->?wpHĶ&Kr8,ް%(܅TbQ3 bv~=mæ_8 88XޏbYm0N{1fQ{@ -|4fJ ا_|ލ(bT>zJf y:igc/"8RQ"Ɖs͏"+a pMveW.Z|'uk c6dW ۈ/ QIfN#*(;")e9vLq.Kd0i~>i@jv++AN=5q{3o [&ˆgS@7B49ш*#"-x3FJy(#*'xJ@߬')C(b]k㯢"S`iY 1G8Cza!Áes$"L]6儾Ԏ;xH*WyaggaiVHZH  ".>_(U.?(l,̯LO=[P3uCfIra݌|tϜDt"*V\e õ WeآD<5|;`SGH;U>: :qV3:S(…xZf}8VMR{n"\x;u$pٲ0Cy@+QVqЅ0]qs1'dH_խ?fHxfvaMR|+_>4|iFuyz8 >bi wfN25hi{LzN9w ?t;azeRCEDa5}paϥt'dsšk]ubNXmgq$uIˢO"{`_½ QӎM;T3*I6|"C>{`Ɩ<2J#߄9aqn 32!t@Y `Pـh3VKw/T9-ƫpK.%'c&&26Dx@W|(8Tϛd2~>lAVqNWogRƼF}koڤ '#Vӗ}ZDzCS39 ` nv]EID%`*8z{"&A Z@)DqS5Dvflѓ=sw[G/Bgvٻi0\bA 3wcމEBYۋ@l뺑b}cGH)`wjfJg{&)HN,$~ ~d:}WWCi=51Yw~*L?,Omg6k2_)eN$TW acy1}/B{#lMXDBLH y?V-D/L̓(׫U͐.7nA%2..̚L0f>"M+,ljGHX3[Hs0Ńl }{^MCGjc 2ÇL kaf9~D{*[>^zm7|ǝyC8 9vYDZyʄ%J^D dYwm ʢ8uN|/(^1@dz-jlW`.gzaR{V^Pzq}Yán o 7TXJZAZߦAAϝ׊e0-z$UㆡwabPތTZd.2 8?Xx~ [_xA f2'cWuu4|$sb|]]Z6ǎ8=mY#;}MMCzj)lc\Kqk=X_`yܝ+YBgՔa}567oX[>]~^*Vژ e%籞SoP< +°ǭHw.IoMj*t!&d ilu`>B8iv$"9 :辕_^WDL2[=~83DAfd% Gj1׭!u+anZV2R3 ^[ð1ąHYXJ)GaKFD k +Kw=l7]np 3 Y[˷g!-/V>^fo 3_[ ¶\LN0D1tv,L4͉C{Ns|=X.u?iaZbo+z}מNcCZ *[ 'C3J7A9 y7ͤ 󋤍q0lFk  ]vku^.AgckEG)8 ogڼV=`M̧;#)iD"mzvyua(Z=& f\ʯg] ֖k(*,=ƫӸ3ZтVԘ=lLJ?{[P5}h^hEU4S%cOPh2b;]`FDc+.E3^l9)4T(t-UԅyuEsĄΔÂCװRR|fz ~{ۃA`.2Xx:V$O.3B݄whsG4Lm6LVKwlZo¿Sǭ @ '#OWg'=>lD8k 
!nDvI`{DDSQ}VΠ"(,UCG6!;{8xcBud<11E5PvEդm/O0D˺o(v#ҽԁt=؛}ZsVw:J+T)Ae"eALKa%ab:<H\ȝScvyN'FD]I}!EgQ"dDq:lmV?8^}(nȲ_TX3l=fywkyG~ŢVkMl?8rSȧUj@|xa /5 \ _a󽨿>N[È6*̿Ym׸uD†g Q1l2#.T ??e}Ֆ|wXaRR)=dʘ|q_E&+Y,Ĉ}k_Wot@zjuld`dϻP/fa0r1&|vV"@JW]7k-돎LKJ(l?XGT}5Zr_D,ǓQDNʰB?if?U 9B/[X`q[gac- Þhi<4rV0sSҲQ>D~~ .VשWa^|KwSۄF1_jf; /~1UBu<Iܔ]8lS} mן^Q ס/>SWqXC+]nB'BNp{Qc1Jwp@tioX;OE{[|Hba%De<.0s :ba6r5)w!+X+<: #szOMp݀|Ңyrv RM?~qV[^ ߧt,Uv>ӈ ^@2h/=b&9ayO\j?6'b ZJN^~6|meƍ!04H阗0N0qEQ5|\ bU_~j [!kٰͩ)GM,x]1_Vf5`x. 4iWFB*ظd7{= 1-e0*Dø+Wxia бuع:sC#XeciW*Msml\k~X?nLG=}x!ՑgeҰW&Ddg 7|4 u+~()T)SRKi' s2?לl#y_Uff~Y+ښUQQUGxMV:HDnwtzVQY7Fqo$!lcooq4زm1I;hLΝ(B,/q!O3^v.V(;Ou'}[g6gtgB_J}Gu?Eq4@8k1~R^"Z؟yD+"{1}ů"Y*ej ),maݷr/`e+cJv!o@h{:I;kcJn*}eac2XfYHބK YJ$SU"-0?|QkZOz(xXbU>"r]<~|ݭ-@z-.QŦ;О)>aI'80_@: Hui? -T’JV/L=^ʇwZ@lo;WR0o㯚- s x6޶R]Q=rh Z+Dnd^gV1h =_!d0D-ҕxD>s9G:[|BWfD9e|7q?c~c ?l43yK< #`ᅠej' tY󈚧 F.hl{P;0 ڏRLG`,}UdFt_+A[?7KOE&{if>m( W\1~j]8?L_\Yeðg>FmB/LʂUs7 `b4,: i'Jsoz(ӽ`*ឨ!,iƉ ;=='-oH.r,7"(lz#{Z; }> "*{ҏu&EŸ mGBdc ʎnF}D0ꨋXOE,TBY)MfƹS# kFG'߹x^0D UQAEU$p| q$PT{e e?Hۄ{|5VF~E|%Ԙ?_9AKGMSf.&k.NfZ{v<[[WE!:&>q8]s:8 RuA7C/\\vW^ /mOu}l''a̞WSy0wC&uSPG0P'i3n4]9; oG$e-B0J(fHWf 2Ns¼S.D߬hBXX#瘼pb!W k=پsBA1L?nh_0\vVB'eEI{ṅhG-%eIE{k6nWu=pŷ>aS5wa8* 5w fڈ̿HXH,Ej%Sрs眸QXm`%MmFb^sW{2wD)AYp3*;vD#f(JR`Aj*GGXn^6 2BuOETF yExu\W閉m#`àZOXVΜNMkrOLð,o:s|P [g*`ѺzrL[XڌNwa3Ãq݈*G괾nC 'm:V1Ȇ+/'D*%caL ~`ޤvͽd$a ";JvQM9Ǚ`K DT|y/@?&vSa/?Q<9=C{4ߖ"=`StV/{`~QQ+ Z@Npk<Ɲ.P#}Lvvݒ|rqȘghtۭjUlz`Nd:7ipRR A̕=ffttb-[ZDm9gU}Z#q16D6RXՂd# Nq" Fc{.yO9VO }dIE Pw w ?t(il%^Z(),OsvɌE+ۇէo7?LN /f|K,TUB$[U%BeaGFH$mmO! 
D^ba}踽4,lɂѡQɵO,:B3fwa}FdYd"| |-Qh QD)}aeDظe {'ӟLGRNǝVJVf62a֞/q 0Fh֪ xCGbAc+8 9e^^[6mN}Զ zD*2:eh}ZSݤ{3`Vi2uv Ɵ$RlZL +nǣB4(缅n =!r;1\_p_F&!W!ά['CZßެFRak$DGy7@.t{!kjUH`@S񀡎P]09szU-ETa,.eEs2,fd/|5ͱ}  =TؾBN1]^Qo(XyO 6s,Inr|IX͈_<.̵oBh3~3h=^ H}2\("9mղ] r( :^(M <4 YU&~0_$`焼6a+=)(+eP{yuTh hAKw7c<  H0>䃽ʗ-I?ZZW- @E>t6fکC,+^m&z98aC썑9}VE|tZ&Ra+1VKSIXcqǪ:|?2m)5 /+zO`4v~(.d92{u;U< +`~PwϨ&X6İK\$> o_>o9(o*H_C.9:휈69̷#b'm4hgʆy57?@Endtx`F1,Oͅ )W0wX4&v/ wZzKY#y>7 {e9 ҇WE\%V [v[x18h~{;9Uc!:DsKݎNbz@wt1?oA5NH qr?8X|D\5WȂcS'_ֵ HNF Y_*H0Ne)A\uԛYws9**eg+{k}0[X!;`}bdRK8]hJ"{2e45<e0Ҷ; Ğ4װ Jòy!b"X9PUMpBc'-ݣWR9Tw a]hĪ7^Y,qCnͅF_BYҀT,WFP1N,_>)H3/bJoǼBޜ.Vml=4aJti5!gG^Md y=7 .&cʿ`ۖC _՝]M@ ( 29%z^V{d=`x " - {y7] /"%7 tGvM%U'ٰ\$wHJ[V8Ⱦv݇%w8mfJD "Wo][vL5iPb2.GD̖fFV>b9]y8V01(=o Roxi 3uvӔ'`Fޛ}b9]ޥCѰn*'vItE9U22~g|z] BOnh>kGT#vc *a;7X^p ,LUʧ=ka7f FB[6.gaM0W&_]]ٞ`@,ʙ >Oe.=D@)2(~CitCHo+16M3pN~1, E+%WE˩lHLAﴇO.`UP6xDg[aAwWX2x3,7t6 S^ޢUt있3ϥ0T>~U Â^ XkБۆM[Kg#݊C$Z3eSDDA6|w zęv,O֋#zd d1fS-_P8(6KWڟ2ٰO|,k3qW jo8LoylS9kՃo9磔#nm_3aKV ;`X6\( =04AFW(pg:(Xq:{̝<?XϽh{wx@Al% c5r0YP=?=={?pű\aZk kq?{ ,c:ƈ4#z oxؓ|;jUF4Iܵ# R^wY+RDFg~Ԋzؿhh3֦~)&w`˙)TR§~ Q\ /yјj@Tc2^ 2BD8{B ./LCgG Ϭ*j31Ooê ˮOc|E}vNa2xZ"+Z[ϔRAw˕0%Jt&,=\ݸ2KsD7-\w!|[iӰUj8s(°4x>Qf  OQF&x"mEqj>GX-'mtmxp$jaݏE,Y IAwpw'Z[LB{*8#378Hr `mv~AgC.W= `sNjaX)2}Tf8(XOA]s0~ҩ)?oiy\neLZmes9XΙGҹ"ڃp2>SgH%8 *I+}΁2ED ΃գb0(Z"2,+gg~4 :Ym0t|Q` b XLaz$Ȥz-vd:]9$I2,"R0BԅwnK@T+!jNutVU6t݇;_adv1 ~-Xk{a&)˰r+=,i Gg | ;KMklZ]ʸPv6%OYþݽ>Sn."uCdNW9$aO-Xx] |Q+rplZ9wԓ[h]kDAy_$itBjb>yeL&1 u Vɇ%KG("+[y!smD/*BBlW두u3Hȫ}-Xa7f躚pq!y7-Fxp{.CGJ9/JJp! +[K-G_Sw_Vο8&uu6~GT}a}>ycfT,j"=[1OhGuqO#PE4IȘ7v|䆃 >';3.YTuА8<~Asl~nuzsI.HZȑwB`u6_іrG,~-ưoo+CŝG7V 6Tjĉ] џ7o[Mq`單=+`:"nVn7Xa=xyrb٧!8GT5kL\)V/. 
zKBg^9DBy) w{̀hD{5a@%w ፍ=a5:G%#>Bߐ̕NSo ?UZ!y Irn^7r:gs_ 3[~p:mWD?(٧GؽkY䇭 9(MLWL\qs./ Ef?hSZ#żrUó/7@Wmp KIn`KW^zXl$8GhVk#0w\4ŮVfOez>%‡poF.#<1_=!Q.{ ;Rʈ!ɽ]D>,_iT޼z ~ww:`l݇k0ROG|jqn3f78vX1O:cM`a2B|~0)3d3{`ͤCAǏ^{ SjC*?H*A>uO|1IOt͆>ÅaP}Al5 #%UR~7 ӂv@tmW`>DM[%L~_fi%0̥tD=~~w0LT|k~D;dհR?,I:"ED OGd1I8l /&g AZRUOa|<~T@J2Mv٧]aq%(ky0iL9gorr%ƣC^bt ؞R% ?GqD VU̢,Z%3w9>}غ&Ng" {AO +w $7`ݚPX?-eGR'$`'VS"{p?l]DU$ Չ "6D&Sy K/򇪲n=~e~ckAC2wh %#Қa`CBɔqMs, } ~BSPӚL'h{I 2.CDžXvc]gX5lSN C?NRݑ_vVT,0.W*A]<˓~= 5-I5'cX$CcN9Wݩ$*X T>Hi\8?AAYDVQpTb˺b`k9Zi/׷BmǍOˠmΨӎL;dHlݛXOdRxࡰz>1Xo~{|Mq 5?䢫Y̯ro]r#+= #p ֓ i |:k""dBtXg:ׅܼ ;i5s6'ìl ,]/5U!=uXͳC'n)ބF\H7x +m-`HDNh,gaz}юEa7zSsQїl28 b%Ծ+)]{E*6D%G. U>ڿUT`~& mDN刋w/PzzLl-<Еc ۼ15"ͻ?7(%l.3C-ɈF_!P-QO+W1oq-50RG>#Nv!Z㢀# :e1bG*`e217a 7#ypȮv{w7 S cX)c뀍]YsZ2S=eZ #aY1e%i~8~R~ye&Px" fK|'7yJٺY%5%s(.i[Oh.fk_I&Wܡw&:- KiRjܽa;uE$O/iEd7,UVe`1D5%fՋhg{ ׌q2rh Úe0 E $\SN"+hZ0>2nڸL~̱X 8r=?7S?az DaB>D"w{ջq z8}HWUoaU!K4Hq}-1'^y; D>*bD@G!JOáZэ+CL BVx͡=J3 C[K7m r5nНwD|/"V{+ 93:5b} ]y`lyo j|J ‰8[:1h k?9tqX/ꦢ4Djye vx.'9>7?`UDI MbEW;ð"ZI 3{Mhj_@ӹI'R.B?s454Gaٯs}-ڿ>QNrṡ ضaz O}A 7_!r!qzV>s xsjmd󒴌VM O\7Oq0cn+Uvae+{_ABWml `PǮ~^D*mGdvSIy fz Lp:_/;S+t~S s(N|$H\~R;(_<ŢwSaCX兜,)dKrW´R+\/ {꥚ܱ*xT@5)Liq_c[cTT7o7q\8{:at@?D􇕶b|41_umB,&j+$L%˻1ѳ T lK{"+Jz;, Vw!\MB!lE^g5 #|2~`r~2|&\ί֕D.Xi>'k;G?FEt=DMmD80sV?4}kQfSOZ~v&TF] )sXz3?Y+; @&.,|驒M# VfWD.GSq~ %xܿևN/~ $:M=iLЂOf܆ٟ76 $/(.Gahl]N_)Vՙ~0˺3-Cc.,7Ȧ`te/?l{QBC0 R3QS|ϐ._Jb;JJl%&-i@}ADNځ(mFѱ$ZbPP+Јj!'[0oe*pJU`˴dx$6 w`7{J}h .eDA!h& k$ a|׳;nAi wzp0VƪNZ=,a?*љ:DmDv'u_;MX%?Ae ~K&-B k^78 #q^_]R`EL3/y6dH`GT1Q+,}ʡ)"ЩW_bhz@_0ǯ-;n۟|aSF&_Hh_4D"w *ȃtDM6nHi|Cac;ز?AՋ?|82;f9D5uK 2;0b'N;+bI j?6}}-/b?}4nq߇t4W0 (2(D43_f"}s51:6|,c7"yZr z`8zgdE-ٰgհLj5کSW`]Iq,]%:;wnGZVҔ߿!l|QyKRi&;ь^h:C/y@Ŵ`luz 6WXBaL+ʼÄN2Tpfύ)hsKZmj_Ť\r/ 9ܩ<+˻$2 X/dDR}9 x?\L3 FIX│l|zX_Eb^J} 2O mD9:DBW.ELI V'A܈J)[9:oG4 ҭ"޹T{3e7Q_.Wd|> !> CD$.g*$t8S-%-z1Ӻ9tWP >X}H~z=ك7-&J2t{n=Lus˜8 xF@h~;X+6(~Dcwa:nȁ*إLQVO`n&|`]z0 z\Daċ=*'"?r,?>,YlpY)~+l$, *cȆVyPt{]}[/ -s8T3q\JvLr20UJ ,I;؊Y"_$4Xym1[mkHQ 3u~3'^Zָ 
0=c2[yc7&`ws>/OKg;`)GXdX{g%k&c7b k"!F={ku2 RSNܘ^߬B>i-GW,9I2˷MD`ƣ>hO wG#e뭈gj/X3새Y#{y~g}j!ɕl|D%WJ3g>ǐk=X: F z8'C*ǿ?q,d/NnnvZ$"yQ|Y@47<UEB ")z.fRvQŮ9X̚vatߤ vfs41OyMo#[gBCať@vXTNu =>(""2u|DJ9/(x=maG$/=ãB5uDKxxQ}HKCrE=_\W\y2*i~V Nj .gvA9wn ZQ< 9|̵4xa_!Sh[D§{ Q?H]orj(uuL];/+]z.c%2z+lUqVpVka0ؑOtiˍO "6z҆"ҶHaDx9i:|Hx~x&ZW:a"G{2OIݥqR%lr,Iq2];_J"MTkCSw1W9ڃrlAmwJ#r[I~k50}:֕+ࠝBN"%[)xv?:5d<~o s"Y̼vxX =ު\g Ha$5'U8"$].aC#ϑXva%%ڈG/rsB~>oؼIkyyl-QڅCba;{QXa਩65kS ju`Uыu'@ᓹxXVoF5kEٳ_{ÖRW HK߃ۊ !IPpj^uToZUkֽ7**{ \ \yx8wޗ$g4+Tr#֢~t>8q'95aj9& OGu>Cꎿ޳RkNuʋ7#G}H^>=*3;ucv08۪WPi5jnWĀ͐vǃb}Z?-$}1yيu+A7h咄\_aL6fMN9_~[Z?6or=ycS7nZ 'T+Nhrj^G;~s-NNM~OW(zڝlyb7ras-I@i'A]-8 {{~C~|[]~VN^^䖻#DE/0dOVv&Gd[Jl.>j4u#<lJYS 9-Wf"ߏ|I.ϙ9K,ʻV5 bEwTaQ`R|yn3`ӲuP>+d>@!Ik '6uߣ OhC+dĜ;ɑ-^"7'۷oQGg??9{/X`5>9 ,d]Z VM8␕k/ӥ]!n\aׯ~5ɢgHv?<^uqH˥.n ՞܀nKޘNySֺƷo^̉u3(rS>][jx}o/ e.s3L In[iւ+I!jIT!G<;QvN|Ym{}Pe}Vݎ@:t9~]kzGmoXJPW}@5匶U-7fq#ކ BC?l͍-Z`OZ%蟊ג9+xsy1ղ-AhϓnMp${|;oAv}sݷՆ|+1bJPmp܎9?Oӄ_F~~CtT_t}tӒN3] Ubncnngl|PP_֗,_&AD`#ibƑ0pV poۘx)ԞCPz 1m8Zۨ.H$G;7ZYlhtw7hDF qxB眽q4qwCMGM6 AM4@^  Ѵ!Ѵptic_Y󑈣 q4hn1q4o q4h~lqxO!U=!hxq1MSւ qxr|:A<>A(g峹.9ƸyIq(l&ø~q&\)<;Ƶ1|CWw߯ĸېsA ~`zE;eB~<ߎaaoq78;A*O5qo qj8_BnG@H8>q| q8G5#q# 8q~=X{O!>#{q GP$ 8.OFG2q#Y#G #e ?qD^D!A#i#ˈ#H0qּ8BCGh#tU< q q޾8œGL{q m8g!7G8f#pl AsGۈ#|K?=q8Z6h8ZFC-S"S#Gu (=qhUk!h~qJl8Z[8Z-8Zqq燈w@爃 AdIGAv-E~ W#n<!@ay:F~>1@yFA?g#w`'MGNx<ܑ S>gD&'1GotW/~@gZJ'OG}q$GF}O<`jK?:*RjN:n, W @ *DZ%] S*]#NTxhݸu'mZՅN?m$qfc m6DZ"C{BA(CQ-;F~=[^ ̂f&bj7yM!Á8Y%bB?[:>f 5Ié`h*I;Bst8 tl+tc.t-W-t?f d[1.:miRSKGl u oJPߠ1^:?:Jᎆ.mNp ]f.8>xh=gjh@yz_]4~du-y.U y7'~[DG/,!zU`u*Vi"tn3a+n[Q~]Mnc[>GlBQ|OtJk 1uQ1m{̀īQ'1[pVBŎG?`M#udI^cTOuA ib@ԭWgD̛)]S\wMulKi>U !au[6 K @4!vp6_C4Bc0zZS܃^m'58t?R<z͞J4zmL-jAS5!o.U1%Ԧ!фo\i_b= VRz¼\{Oҩ=@=WBh>͂>?5kう _ 4>gZ|;$4yM07{HwTHK{ TCo7 #_"_]mCD㢨TQ&4nրHM2FSDҰΊVz#~H܀/ѿȻbA=c ͠|!޹/ӶnA@gC㐿W"1Ol;PGq5_GAH'$O)/خ9 ?IV:%>]8nKX"y <]b0EmZH?e?dhRC&uD)gCL4_UG3K_Zѿ/8S,x  ̿b`;[n{itۛ5ӹx؊Z$Uw/'Oכ2L\MNWhxz[e-ө,ʔ UX [5#u[Y0\= <|͟f^a:Zd ($Ҋ8JJ*Tm<h .y)chLa4g[yELv)`%%1}%ׯxfN16Ֆq_( QH2C.k'm4bhZȎpN󥜭Jbԙi=s~*cq 
d&KF0^iFAK3tiQhr7%-E"@*$64E V\^bť+)/R/j\JyJ6hZt.V(8R`ea;ݯ"ڃuLre[Ȩ2mIUi`-92]QގV2! 9%D) F] /K6CRe(&MRnʨJ*RkӒ$䵬%'vs9ak::sEpimF3b`QXu -,8gp ْ ^ioMETe U[MRM- ,)wSX=:,\&XjX5Bv̵_~QPEI}LXc;#Ċ]^j^ذ*D 4դQoҨ+'Jãu]UrǏJEqJWޥeQ+]\i \ָr`C4NPDHy܄ʾ1!7**7&K^z"!~QX敄dט6Kx ؅A,E} ̗uILYБ;]:M24qc+$YZf@vHs/`GTh&9[2#`C6xĜo=)(-ĥPʇ)__}X[k2V҄5A&R "PH;O[bTsا,7Z +OW1O1MnKEjypSͅJ*M^q QIzLV馠4)h2z&UhM0+64*JwG]fiĚ"l|'u9{9H:Tw̹-yu9.jѐ"V&inT-*ƺ` +;e |(/GƏd X44cqv(gO඗ yPL#!2Kf1f$rbvgS1dT\1/+ua6Hj4>=L`wxW6#'XlhKLޟC (M$ -N-:"eySnVLP%JsK1/T- ']Y&fVx9>_DZiyϘO7NkᬟޙŭT.KݤjRLvTje2&0~Q :_w!lp??=`~y5%0Y0ef߯/ʫ8/̩S8 WJ{Q(z߲?oKb1ǿ.ogsqUoj^iOL'SYscz[b/da/ʺ\\%qo9KK+9f_[_[%`B+H)}JOw. lh-8[Q-ȒEra}h|s[Zu/*(QK&*B*/M]c)4cW/YVxfǫX-z|ɚ{\*nW!c+alƼ/ag8S34S6`x%@Uؓp^0zWàArJ`E)J ~ӛd^GNTUgd}'K_r+yye^Qkҍ][6~cl.K[i .V)YұȜ_m8ϟS-SX-NjO_g;]Ts_ 3bпG/# ,S̕^|G(?0d`_n~(Rh^/R$TTPW,ʲ[Qb)PJ,껒_ /ˆrƪW` K[ܛ4WFeHwnU,M`W .u;^b)Lj),î'i+mweکmzQX~.jB3m4Vz3"^[ʲ滴yUi\n~LE|[i>nP|j) 1ߩҮgϸ,/8hQTGqyl¹;/@Wjc߻t*7|a3 N3 %.jygqwo2ק:;(-J-+_aʱ\1V׼S|^UN^ʚό) rVOJp%b6}QE9ȸ趇|98:7<6!x+Ӥjr_M2ks)DQ6~"4VٺYꎫsuX|d"`W6hx(YE@5\.RiZ4+IFVʞnOZ*Q(di[,%]-uNK H2JBVآlK©DJR˹EkPX=3eB˽93GV)"X @@!!!ÈL C1DAFD) (I@(/zeiF$86I6uP B^E( *@ 2 - T@8 M A_#_*UG~{AnqP"OɃ9k1Ud9<ǮL94]m͔01b6wR1sZ t@:6f#s}Oإ|oX5jܕ*N&lrѺ8Wz&=6P]x>7(*uW~gtri'N6̈VsjnMvL,B1K{H/\7Ƭ#@,0@ |)ƽ<l{7cѾ |_`X1`F-/z 2QҌ:ˡCPgշOŤcbqPb ʁRpʫS**U_#-͛ -Ԃ)~UuUqTuM(*Hvafn!̙L=m[M_ҼAj4-8hyn㦘$sHU'")HEk,$$)[i D!"0H%b$%,`!+D6S&bBP{U@"L)%ܯ 4z~KgF+ 2i@ʽ=3SMJII0E0g)(#R"1@eUHJ9 0Lx" " ,>xBB*")„17spatstat/data/bei.rda0000644000176000001440000054441012252324035014313 0ustar ripleyusers7zXZi"6!Xp])TW"nRʟ)'dz$&}TӴ}gAu mҰ2\ =[FY gaBD"TH|xҿ@sgpQĚ|Zt~%m W64 11J;@\~maߒ)80=nu'ӕp;gЧeQ ͥBRL}$Wa ^7W!s %FcҎVz/}]Km9t!zGtpOcfGF*FL;|B=6)qEi;zzh-ڀ] tYi I!'ЧF<|Do|<VyhrOp׵Ѽ(WLz\QH-ZŁvp$-;) >I!}%0G2MЦgX&#е5% Fy Q(~yH=!9sD-\k*@_$d+EVKb]~؎gTk "jCϸf+v>D'w\aSPQ1J"/b63i!S\xwgerK W`O3p*Ϛ7G Xbi]ƩhQW^5Ȧf)I+@_' f/c7X01H53×CVkGF_biK!_`9 B E\N^]*"c0k{(Ϻ^KG] ե'ߨ(:j0Vl~9PǀЍ[W,4,"\u+#c|_k)c^O:kpќ4 ?R*Nܤɷ8>p^3!䠘1 $8n@#{>d\.i$p(l[i#fz;&Moz!<a"iC$!5A%,GEZfЉ Ap YMWJ̺Zd::q]Ww0f^ 
iVj18kY@p]4:#%83CϞrE /b p!v}#}quH99yƹA=zQ:tSIC$AjJ|`d4Y,z;jozGHhwd枽tULY;%k̃(=jT)- _ Z5MY (Zׇa[^J/va9]k;{E#ʱ"wOZh35`t ތ 18cj?,mNtfDL$0 wuHS&`EИ"2 ,LhmϬ)J.hݸ T"<e3jpNe@q9ķ$Gc[7vXD8| `K uX !22}7$^{Lkp =Mzԝ"J(yϘj6KX}"g@D}9K6,e wOyd}!n:TFzʭvxVvӕsr2c7 ؘp4Nuk}FPIQ9 {ls޶ԢIXYcA?uA=nUbLx|ZM9T|iEH=NZƜ9$m.>ۘXOA/zլgԻ<,B|/ `r}nIr%B_wiUTauooU$9AMCjs;,P'~5 iNp2&/tK Ng1do~^Pb75 ]4q@ {? 3@cV$;H*_^X?Wʬ)ϨP%%~ǤBRSBQ{ou%t͖=`foW7W {*t+r>.ƛ2J2}i0H2-ءmך~H7B.k6qDF4pLG}w١m_8.BV B `N ~l2t2s5[2l嵂лvV Lވ'Õ`\ӚؖrwV$~t{?!r 7wìJgK_B ߑ(/"i͐yPI9:y!e޼rd̰7KSX|r7bQup0@}@gΞ9v'dUڗN}HYu o@ jKZ7J k5wEB5&WmMM m">>#*,ff/"dZO]6|4Ja[+>+zj' F{=.Zv_zۮ~ ,\"NB#t4KztawW'Q6 . `{/Q K>Fb*'ڏ 8z4M{Ee"&ʆNh3:H <7r#-_ vGe<0Zةbyx}yB  E1nXϾM֣I#߾(wipi#4mr(U!UhYul`BXF}P۲kՄI? @\h?܄ F^_F,뇤ULuxum-{躮!_zyaKYIb'de[2Z bI?+X>'4)?%ĐX?題vwW]9VDJl>GNw5/Ä؜o̔fv|u=ڵCeg#l {1:pωOGG,|L =2*9!cf&QZO`dmJL 8\%2bS/`a?;r7ga2^ƚ¥4gC P&:W[nImeZ*I аШB9'jDoܽ uCÀܺX5xGF",T#)]CcΧu)essLi:!ew2cp;Q{:xA`šO\Oa dbk"4~Ug=̘ -x g܋,.nޅx.EG6Q 4ʶweZU1$瀱>;O?,5xu.NAN'+7I<`mi@_pHz5.#Tk˟},)U v4k1C7qBؾ4O / K.?UZG1lPFAuXx2Xo __`֡RbuM,8i'af2ߘz)Dr@v޶gh a=V5_d䧞'v;5R:K&vF9W-ȔYV0eb?a`F*FwҺj [?}_knpg u0mr o+ֆY6̘*!=s+#UyqaR@6-&Jd"=Т?4N2D;{k]|2/HWf FޮƢ,&n5GR×Yӈp`a R^Go*ո HsX+6 U-mh>M.D5!kJوP2XyT]ydء[%4;[jOTYB7)1~$36^̳ ѣ(CaYb/BŚF-ͣr2_d hAV6!^_KIPx&YV^D`o-fqL.sJ>rf;ynvC!:?sS/{kTC2yp3Զ6M=[>xJ I<# = *^؉sQkPv*,ߚ4Y%꾸? [G:Tޡ{v(њZes{VKb^8:&'%⌭GRK0gn{pיW&BVV;^LCYJ07^-z4\âbgrI:WIS92U0 3n#c^/Q\2W`ؓ~3!)qa_, &bh%'c.ҡiNo>7n<_*?8L}_r҇İR&!ٮ,= Qj+sGZKRQomw!"/C.lfI9nU\硡 -VQLu'ҫPH^eS%k}Hg;%1onLM@WQE$w{YU=l8l8GD[c#+!檮֌DKd[:yq^Chn3)j ^AY:rhO2q !‚;U=υ. 
uʯ<ʊr4ţP}O8hM_铢15p>?h6H/;Chixձ %keUBRTEԕJO)4qº؝mɜ ZJ}Fy;'1T$jc|L++Oݳ{1)k V߾LFhVF9a' aAeړU07Hxf *"L_qN)*פ!:=Lɴ׈n_Sw[:g+iX}uZ,ǶfЪ=kG䵬OI,׮9#PScWX qo}&n?dHxۏn orB <|ZޙhƤ߭;cfuKy<(&'耉\}[iTP[]'WёAoxPXnR_F@ -.JtE}V$_o(M1QZWlԢ2 ʲ.ISnf{,c> ^G(H4f,۩[EoVhnBgξS?lAPq4Kl:,>N4wP-[^ jSȠiϙ Q.74 ([)†>|35F+2t*2@{ )C$:)%//bJ5tsti/0Lw?n VHH5|Bia*2m:M, OY囅2.=8)C[$P,C4^ ;-|˚=`kAN$G6g(2Ag?_FEpMtKгߙ(/y1@.Ev6f j7lgad{:7 ߜjm-ԽDGegtrI9BmhCpWvCږ#e%SM|*1ĆB1-ieuAV,Ϳ"DrAuц|8l|}[j'q8ޡ.^g9e409 :AGЬ T\0V:̍pAf6꼼'eWJY6hFaϙ, X?ϷKO:ny.aR`#JbP$RJ LXt୊'l-הףT(aѓc^K0_\c3_fOug++#znB(+2Q2ZdI5 @&\) 2^e^.#ThCE @S1 {ޢ3v,QYyBtt1>i!ߖg_O RUF'h &'V녔<)_QNn;q@yI .hDŽK]36C{njW|ڳs4?-tgIN.;0r%fnp>:nFBs bځS8)]. |zU0p; "LY:|{rh*~&bT!Jy͔T{g[Eɜ<{QP[җGDWK0|[zqvylJ`:}E:}HÝ:k53VX9eZ +<2BI$vR1ZN%^t ؤێhvS!i澣KՔݲU_DiIn\ R]L0Hư&wܗC`I;#SrN,/FB{#S0#p Mql1Au损EnTzl؄UH&|/YԀB oؕ4Z/-Ab2]9~uƫ2ĄD،O"U|s"Ũd4&oϩ1WZ/Zbo +EC8WW{0J z ڋ J%H"!ro[" +R^-@RFY%_Y HAA{e1a` ,S6lOx tN$ 2*ѧ=1y//>u]nc :EAbt+*?ț?XG'sm.Z}Y!ހ1HmK߽y<~OcDz2[6<&?ZrFxS(uh(-mFoRb-)DX *RoklJ N)kS_QeF! d Wix ϖȚfƤ o?R˃U %$mdq6.I3 (+RUPF OSpbf|{op;\si,~:\(nO53Oڤhw?cGwJB\ 4 l9k"xt{%k[ r? :׺z}R7y\0cMMz!⣽LHO<+#pa"ij{s y+XO8GTU_ͨ D)O9 Xd?O JW&2en/KvU|<P$3NE֗ܠ&.`Ϸi|iIUTߡC]s%2,GRC0W`Y0ơ 8vCOݶ l`eth0JO⭅1czdh?fbψ$)MGqv'ĸd{bX6>aG1+{tDWX}@N|fКb!exxr%Mh7S"y[q!8'8QdI#(V ,q[ܳBx4S\^Qr+YΔCCU?gzg6^U`aT,!ʡmLrW0 <9#B>uaHS>*n.7PhWܜ*BJǭP`JT:)0(iJMZ ŝ%O$`2nت&щ3J6D.J%U)<V~o=[_1FXG? `5ҕbqt5O&/O^?sf@GQH ɧ;%ճX|\u* 9^`eKf(z~-57G,E9a'3KG #^aW gBeQY7[t,pRZs3G(nn!ڻ|k.Wi`$U{u<%up/SȆU#fvtHZC,Ʉb\Cóme҈gO8E<ٳХП6kX #҆N`|r֬ڤVqj&Kk}sw}-F[?r;Ֆˡ%]5F]XU^ޅhK\蓗Z '?q)*H։ &{_B*h| /UC<Ԕ`j ,6$TWPI9Trl|8w̨9\uY$nyr,4mO] 3i"ґ/q IRE(o~|BSAOU@op31YUKjZX-QQR(r rfJ!э1 26gK>PM/Z!$LՓ^ qaR]κJ׭ˉ|Q\\wQA#M@X˩j|0T>9u S+I>*uٖIPC*4:} mOH=|s^6NQr(! 
.!3;喨"Yrx,yЃ~-kg˝E9 iiI-@N*(iM_rg4: ]Ry ɯl@jDQK_S!!'vOG:Au#1Ez1C}T l`VvTk| ?mwXmcs{F&޲n <2EȊ`@dJ !K2: @;87{C2 [V1c1H:[Ppz5߽T ffU-V27FjdxjL0eʀWQ;C]vܨpDn0gPDw[jAь'Q:`h-YBIֲPS>£DvuDV& 楬CG>/TD0ttW`P{#Z Z!AEÙJ(pݹ9Qx.rb&WPPU0+W^_҆}1.HT#I l'>?0+D*5xS-Կ#;[̐T)SV%Sw6Rψs"7PQx%M] UswB7[x9^DsP25(VnN.mNb=FZkwcH``nCef\e1PXPD A:8Yv@*Gīþ%皐t 8Yac3Ts R]Xf;75E^NsL2g`o.M ^| aC}URbrC#4́hk"ʓpA ʨʮ Y߾Ͽx$O*7̊тZgYr{vijw 0\ZoӔq-{ۏcx'uqv h'`_]N%rx<急 &ݓGThf{7| ZߍQYiER{pѲT3h0`fꗪC>aT6u׊>}ؼVz4;vɾfF v96/!h6VŘob *Lj6ﴍߘpoqvFR}auEP,KnUfKJ W$t>|?&(j> bޱG.L&ȞPD33tvB޳M7˯ɍ$q))/28mFK](!u!:i?(vL°U- Zt  B,\ $Σ}pvJYlc ٢b<@7#J pCPKIvx:ӝs8WfRSiQ1 ?m5“6\G De6ڗ#Vj6]j܎mU %X]Nt@?_SnCmH9sۉA"ZV7Krv ֍A:m0$뱶 FNx0}X|7ռE{7< }{cv%0p#Feru`Q6F}g8Lsϫ%c6S? WG Կq Ta8wY_צb} b* Ӱ2Aݝ1ϙ]@h m,+|3=u4zUɱ LR0f "HSA R g2Wl]x ,g+^jsF%!-|D=0/[z-$OeB*.%'8yiOMfIrtU_޶C*uy4 |JL-&L0܎A P-HП NjӒ#h3Gh Ku!4=x/ aH"wMUf8ڗ O^UHVugVc? +;T5Lp>3iN%g$L{DȏDC1Q@!  U.L< Aڳs2=4ࢊ?2Z1 SaG읠/(0GJlklhfm\jtiIIZ&|Ԇ6Nҋv4f+7b4g1?E=v4TzV7[sQ6ƍPnMUXf}ukR~& BKDH~ 73y*Ey%byƮo1x(Lh'UVrg%_Hk9) 8:V{tӿt7U93kZCC͡,p )@V3h}u-ȑ ih&*2L͟8$M`^r;Tm⁏W$~o|Bp>/P#S0F344vKUk[S(#ϛɂ61\ V(PI"9%}H]Y9.2t73T(^[,avȑnIJhH(hJVI`=WtSb虓@_K!-Z%e״`l|£ oT|m{0.h?8jԗ> X cqӲS5V e#}|%nI˻N 9Uccb?/&\4c6 EJ.W@23rO9P&YSqaeA_2\ :pz\!?4zF oGpKYT"={H O6}sk cڞpe<)cnyB]<c?iGcOC)(AZ^(8 4QK) Kv`2/\76KAۭ݀lݯC[(%;e+e\.4?3LPDEʐ]˵WBoC<[RשJ&K56m/,*mFTg_I ضR5KZ `>gYʡ],40ZyruR)؍CӭKd Wͤγ0<?2^v!w6N{M-C6 |䵷+ ώyVü TҸm,I6pCh`7IOٳL21 G/8w̪nt5;UJ8XާѶi7, b{.;>QaeO>DuCV=|h= 26)l3ʭ>h5MuulwT]WSDȇ[̈SOXe9s]6>avlp$/hꯉ- m!͚%'rf =SZf v.C!-%VDGY-אJ3y4砦$,d 턾&#QՌKWAE+ڴnNlc='jcu4/~wW>˕QZ nxIWn7bU jC@5+ic?k}#Did{z9@L\(Aѻ?sUJ"%Zj9 .hevwz8#1Eߘ]qq Q.`%B6,!BA,*?l* MG+)r6t`k*7qVX CK>I$`<5$[6*8!A='5ڔClI LJb_ykw4vVn2Š֡yCE^\Gw&Y^cl=sK7RGoW{Nif-jXڴ*Ӓ9?VaCJ-ov[X#,U[4=3oxaBޞQYVd bOQ5^ @&{Dj)2ߌxL @cр-QLSve*ĬL~H!ȕ ii6㐜_f7ߴ?Fm^M5vLx¢W#u2GÃ)5QfȀdv[G̸OQ8;Cn ,rM9s >&+p…'i[8OG}HKP9_8CϭOR_8A`ș𣌪͢ XkͨFÊhtn7>0%_ưܼQ/蘺{RRRYuU:qʔ"v?p{h3XY _>?Th4r[KyϺyۯH:HqF cw[㯐1?}to088>7E?:l#^@[< .NQ$ OwK!<ނљfTѱz(Vߣ@/3{|#&Ç,A fNC @N/AGmnMZ^ 
pssȅۂ+ϗ>KBVU=Ɣ\xuH5SE}7edy1@e֫GY^gbȲAERIA[Jm2|ta(j_ā)Aȓ"2<* {ɭ\@ d; :;(S\\MCs_%}x݃Qr:C5DyPf*m?eǯgKukaxADBl( 2}%}!W:a^ێ#G I%^$ b&;%qO*J|+oR]kdF4N/8MrœYvD%יfue[1 IJX;q`2p^*i&<̥jвJFꩽ{52Oy5G߻ ^dmtṚg`I\Piuv%ߺ- QV Qk'e׬xѾE[&5IqgR3r'&Y곁{rXCkXq'>1~K'~]SME0lBo3ٹ[7IԅvŽϾ|nצq++Cr٢@ JWJ%!%! צ?^. 2X[I.oo"E~W*z_x0k@2_h3{ V:b,pRFkdPI ܲ%gߩCiA:Na󉘃=Ӱ XmwԋFwj%H 'ȜBŰϐZzm;'#rwCݔ,Vd ٬G;g֖+EvE=bƓ`5D>%zf@!\}<tR]CuV'D>.gopnbq7^dGDR橌r (\tև`L0dЪ XCMk AbsLdDWk!)½DmM&i^}\HVap+5On oGe hŏtLomw6=@|ڙsԬ3~xk1ё_˶iIu#'FƐxx@'c,V,޷SŦ/؉pW2{>iY~$Bg8.>QvZ4_.O18Px!f3n,C{YnZ@ pLc`KVf})RS<A@(ZHܯ» |H+ Z/qyx6-ﺝ9/XW[ՃG;#[W 4dY?>TU{DjŸIUn3(sE$`,G;)lgajUb7E+X+4ܴpiz̳H) nEO=LTnb$@VO7yNX&%c^@ɘZuUs%CZܡ#J盁z̼tw:y-C 2*V14nTtq?7T3hD$ [DZ &K+f%Z (N)[ !7CJ]BԎV1@T'Q$.g4Ϛ$bb]YS5b jŖESa_w Rcpapq C:Qwz "O5fD3;.\L/o֨"˳oy,e],i >딙/= r mh|˹OЩȝezzI%ioٌJ@<)o>WϪY]9O𴟞*ph-r@CTjh/ DRmxn#pڹzFkr VYXIYu1t3PhbNyaݬErvX?*"VO  ^΢7hCC{㲭Hr]);RSv9=k _cT5'p_m4o8YθoD9pK fDUZ7k;7( J"KG=K6abjeaZ'=uv,t eF!p$c((NS{+fT{؆,~*culjz@1gUYÒp,.%-T*tS]]`Jr0gLûѼy KF{k=9Kl=ile}=D].6ܧPb}ϡgEI@hPd5E*5θҜ52aΕ$F[bOCZ_*hkrW`ęۣ$.iʹESsgZMA 8êl K'Rno7?r[gq}Y,/Ġ6qZBd~$M<Ye +&RA\LU.-5ӻ讶batێ 6'Ey%!3ܘ0fϋM鶄>2D f)yStA՘s̑}Grw1etuEOKf֧s_aPY8UcXvOt?k%a$-qi3fĹ:P{+dp&U 9Ѱ (Թa-紲7`BW{ܰ1w'_4(g$u!2N^&<UgQ+Ǩx7󲂆+`Md`%ǵڜ]&3 H ؓZ@ JbeIMo+[e4 ?{j_>>g$x&ӎV(:_QTq"M"G|s*\(f^F_vuHscZa_k9<.XLt_/m[%ie (؊\6Ca(7zsG {|gp$Z߅\8.Vϟ'^Y5֒ދᗨӮ0-]3Y,1;yBޓڽrJ 8-MJRɵ4GBy@[Us?Qx%+ ipt̔fPmFYgҷxjfr~> Ģ#ށe-?%^ЏRdxKlK//Y~3Ƣh?dWTۯ ؈o}U*N\d8- o>4 2eBm<`[*`՜_ I-*sYtSN8T׺AuJo$:u٫P `_)Ve>t%0I`زԘ5T-˯-[ܨGgkbkRsAL7+-a`pM } r_y49lU|١`^f|| V pSуՎida?fvK*"f(mP1ƿnh=t!|PIw=iZd؜M/{po_,(qg?h}wn̐u>Oūu+vύT_ `iD4 Y"6wWIlkf7jl!]q- X|9$0LMN7Hk|P&[ċ̟\(I} -[R`¦3OA 6B.I~ki5#ԦI<^}IJf'3`ƺB\I1Gn0]):uǞ)Ph1nC;B򂰱8,UҞ7#sAo}w]Tj^<(\8_R[FpH$ ^9HI2b~$qq4qeL2s]f@c!bj;lڴqc;O|% fm${=.qf,xANαFȱ'.r%>`$x|ɫ:$j ?\4TsyT4ٴ%]Z~lB:A5C(|M *LG& *f\5\u4(X}hJN E:H51ANƽ!}W.6m9Sm0yʫ|&x&^ubr8"o4>Io+t$@Q&b@U0Oׯo(8*oOc6Ax Y5?5 ^:z^DTx$|%6~[׺f&Y$b ޴^YzIx2(|LWuPOA/ (Te{%Wl To!/$U*nV̥ غf%{"z uѕi6!T9b^#CHixg3l>=x ?RUE fZ-U-u/WO\oLKVFrȘ{{6 E #"P'WæF;{7m&ۚ¸&)^F参'K~Ko-Հ:ԛ8Y 
5ҔI[oւ 845ɿ:yAE2mҫ5ܿ[[h-K_Q|FJ̇RM)^q`C6iAub~Ofޞ~AhF =[$JH>Rz!%$dx_(8 uD])P`]֏yP!*V뭦 K5DDX Z,DpgՌ>@%VenΥ;bv p3P1uߤ<䯳D\jO@f:29t9C¨Yދ;n=x9> ݞi&*vc]_͖ 0>ʃ׶^x0f|#]Rǁy`ýg [DX}A4owRq<)Yk7" g^ C]NZ BYyȩzrU" L+Wi*"H% k9d@[Μl*k85PA ,֑v3j=5 ±%ɘd)x9^ [j"M>>ǂ҄|#/t:\X^m dda`CB.d||~@ƒ>2~bSQW$XҞ a8;MAp/Am [07[-U)^g@ϥ?@mJB^ʯN&Zd[aY0Ώcc>2K\B8+JU}k8 $ QƂcfQroT$^YwK:DZs}pW$oSUYGvrKkf@%e),:c}rǐI9oWӯ)'ۓ`*%8c1/!^uo_ʫΔ菕F<(bDI>`B{ףmfI{<_*>Dy+?+ 66>;~%\N{֙?Gjg&DVgJI+2c!(5FM#v|%ZXɳ1}B)C; \@/AZvYN[E4kPm50oTs5ZM|wHc[gÉtIZ*C:$Z#EAHݜ5Y{S{GE{P)V sfX2ԎmִLqN=z|gfJ+7-:0dlZozz!ny,3 Y,#.+n=:{ަ+Dռf 3w좦DjDkf_uau˔Xϳ-k-u`>e<@-M HgwtX.s_Wl]{+;LP4hYb=gǡYm/f12w0S_^vҕew*qrL1_#67(>ר}^bpV#JUئ>Q' w  ]h|@δrO:*/Բf$.K} 7{_K*Wh+|ijY-lsyӑ%*vWMv1 Xg_Sff6i)Ux*TP9caЬBF uYG^(|<=#׻O;_nSXGz\҃w+>mZ$aUUn9[ATMk%iん_.@5%lYi ˢ5),G.x@G⾣\ջkF^3CgH&%Eww'|'HW-Eej)/XlOVJ7VM$RIOȃ;RAi}dE,nR~I:42W%%oļ0~C* cO.zqgz*m=xyS Hv(-8ӵ&>q~c;.^Q.Nvŗ8|=TK9+`U2f+QaS2l7J?7g$pKisK[2Ge)V6Gp;-,v۸C'UDHQҖ?y0]"h)jҒ2_ܝJ.,y/,Z!b2eQ4i_,C"*60eBm 3e*l*jubS٘1=D T!:'SP&ƿ.ៀgV%XCY;jGi<3Vٙfڿ?7~;*7SB~P-,>,bXH :CCPHB˥+yxpc_.6 %Y3f[͗%`H9# R <1c1vcVWE+&ӒJjC/oBx59Feb=vp$ySiXp4E[)"#hm-wg.aE$3r%6tdϱ#?%a{ )}} ɗ0E$2ɲ%!f0ڥG4 W7xh _G-55'8u8 jwޟ]յחXh,̜`CA΄ IiJP0yseJVzG23vrF̩R50 BDu8u59;"7wrf,(]Wgr ,]J .qKW#WM; /`wʄyE&KpH^7EΏSoÍ*NdatS4McQ@I4)&&VM8jDaDAHv,WΐSw 9lbjbMgv MD /q5!bYH`5@R&k|=.w5~a%kf̎y>`BLn9bթjHGbYKA7nO2AQCWz.dO@X_wߡiX~iUP+d/YT+;$h%J"IWgmc1;3fXB8b%%=PVm&j 'ҪboCޓq6k)dyyEu]p+q4u겿vc\+I4Jq_RkvUcASi;# Ѱ(TxG_ %/ܭgO}7U?DPKsQ'B*ܽƖ8aQ}]rJϥ98N{;hx]GW\Hgv{AK^b\4A`<,@dsӕ%jt e%Q5ͷx:q98ܵ\!דnt~2@ZI6SVhCJ4W8~ 2[Y1*gf᧋4?PN"eX#(] ^7y2^Wwb,C Y"j!OVbLE%($aUAogDF̏JY=fWi(MG EG>Ɗ4d<)[<F2[ծ.UPJ^()Zݕ/\882 O]J(6/"޶_fm36DX Zy3x('((؟D~aVQBl$U-O?VYLYԔ/o1y N"kM;g4LEL+寖"+J# nG-M!_9 r=WZ烟0 p;Drb*_-B)sUk]x##5$WM 4pmA/zZ,זmfwɔ̐Ψ6]KBg9?#@߼n36FҨw--by[};>ݝZz k\ZoLxU51!*wo 5Tr˳ۏ]* ^^%Šʖ7C+ 08u$½SqsX!^TY,9X֩Kwxb!+Y+ņ7iZC٫ -Jv1tfE.T-i:by? 
O?tR8a j~)<0<(.j g k*>95cNg\)֊)t:̗(*2XVfV;#i8-5aoZI ަv\??w;F!~6Y(P7x4xGo[ ^PqTqzӬ3 | &2s&ScS{4?F\}?e]Dǔz!"Hj*/W Vi#9ck6?w|Qę=-DV@"k+79lhV+8S Zxi*?C~h=  rqhmc\|hM'>yHG60a@5Y/lJIuReJS&o*nlwǀU,7/`l7éOb2Ix^i`>x|$GY![B hlS dُT DnO:SįQWF Θ[vd]C`Z6*B{5kI_rcݚSH@#W`,g9(kzHO%  WY&c`C)řԖAS㔋!HX頇3LxGrj01/C0t ){s6OV3ay[=9s;18isW}k/crh?8Zo:@8ۅ/6lA)6kpAFAY?$8iPGLM]r " `Eު:rߝvˬTHD{mʹwsIVhM L::#uRB $'-\}svZޤ(~33@fIm>WT)&fC;/{.l D/˿?ڿffMb #oq9YzǾnʉXçnĈVU728T< c˲\+U.~?-7}˺]?ؔ>)| zPjQ _fOBp6F9 JE{$uէ <գDE^7O8T@U7@.W\mL{Mi25W 63_mXQ /$nG{ve.`;{$6!)\ċ[7DQ1ݞ<&\3iDW e9~;FQ>/N%0babN^ǣlo Ws]}=rbJ5ϒUgbA:NGZyBoe=ssJA\ž|mI`)ad3q͸;+nx{TY,6bT\ռu@A+.mtg@!(V{+AMOb8Iyؾq {JF+remo9:&$g=ψwq-@ wU˪#:^_݃SdWcNQM9u.t>e./Xpp*A͊ Xd@_\WsS?@-!v9n#cJc^;+!K+Y#x(2j|1?\ΒJdla-65$:YU'#[\#?9raW#кIm)c\OHId8ua n2 -_o!$ /ך#l6Bh?؁);-c =ڶa@M(g`_pb?S)3QqI8غ. 򈬪 NkDn<07}>D&e-s0g Պm ԁ؄͔kglE<mq^s`3ϓiGVs*NB vϗ/_hv،(\ >X- la{:xqa~]+uB$xJdO\6QTY#f!)L){sM&gpJ4Lqss0Wן@ 5!cs|Xj~2ឣp ƚKȭbdch_>N6>WFg{t r̮\(kDk?~_? ̛' @SGD!Z" n{b-٠NR^ 3h }@ZYI-/yx71a_LH ??_WE)Y`h3\tXGIXf#nq{KSLAֹGl#5LњF{XLF{= /miL: srP|I4,ev{7ܠϠ)&T=>gYʤ!T~  2ZE}|,{C=ʼn[]tb6@RCn_&>A跆NFH+ oB㨯Dق3┰A`GכG{Xѝ_e09"$rGY ƜvMq֘ 7mL&!9`pﺂЎxe+{q@v=g$@޵g)F -WUKrc5ۇ6F@]O'P /ܨ8J\ ΐ$˒h WVR"`jך6߆nc*J+}\ju$csK*TQ*Mz1)kCb{jhSҺmlCfzSŀښ^!,W5Pp M qߤ-l7XqT$:5=/i L1vwI@'Wд=>^z. mV j}utx釷.ޱ^ f`P\44eêxhpW }U}h*%:=;D;= ;%~9O奔w ,b>3٤h]iH Rtkgp,;[:(w}7/(~]^_8~|H Ñc#xqa4F'yVy#4i!t.hy4>4 {9`+>#*p ?xJ9埫:z]zȨRUJY93-hߓM2ÆmS9"iw{=E5_[_Tې84?}\Su/uNĖƙnqM2U"D`R6G%_߆=F`vp2N-a'zz ?ʀ׆88D3@F0:@j SC@;2Ok9Y,C-;{ud*2A0ҵlѲ4W`%+u%ƤehȫgdXZ ~d\F.}, 'tR&%bL6f!Gl:j*B^.j$cb`]  RAɧ[J7{lnR\Bā͛)F*>D>wggb[&CAͩbW̢?lẹb+ycZMj$Ưroh?#1`Q]Og5l&*=:E|k6"͒>jT Jc2co[߶Hki`QI $ @qP!5}PU$f冔W%xdfU'ǝ|Q{ #$T]01hkS@fN=[ }r}"fM.7dPܽ ?(\V Iqh"qC4䇨۔mX{  _ӄ1B,1W"kԚ)`縷*V a;iX2O އSErh_hWBNr{1F-U0f z)8t||s|)>#48D%\&Vʾ2r|*bN=pjΐ QVYQzSB-^auXRí쭆rg%)ȟFM io i{82L.:'9zCzVaXnJLm&¡sؾ gOQzuD^19F7m΁~@=Y2ك#Q;n4cZ j :,r5jЫW!Rg]ZDDgdVHlQhR9{DZ*dI W5_! 
14hbR}8ӕo isEa# .D1Tj q v7&CKvU{?͘ lӟ37 D}ep4X^[b>[,@I4[3Pa0d EʗwCq?++CQ%x+!S!n?0ȁ.I*`7HSFmh"DCs=.8%gu`_KrBK/L/Ȟyey(EF z,C\E{T3J^i7˗a$jt.턇NB ݕNzOA)N1%mDE>D,OL8hXAWqiC0sRt%elS$g+ 1ɧκ#%^TV 肴o4;Gn X桛ztݳ-7|5?⊯boXm:$Cj^5{lBgrї%Wsc8F8B0+ `u,{-4QEd4?M;(_?PBcuIZ WjʵZ-^Iew5̤$ *f-eJO ĢF1~)#BOv~R*) D ofFu^sӳFxb{vV[P;ێVe4X2QVC(QK5 6VAșZ,#qftZLxeqgC!~ZlR*j8 K댼v'>wY4|fT XCJW 蠣$ax ո=?X&Yꄷĝ)2( c*~!?d>m屽Y()5[jpe:՗f/k@̊)xU q9@T y GOܚ4L}5NFHbQ[\%Jb ns~0 =iu$cR[V&;|ƧnEZzm!Y; }'5Nt*1ύle튔7q!Aȑ79A] GS[=vq9|.qq(W'=>3Ĩ)/cAPA-QHGUa !TyK[Ц"XO2pʛ`s,r T)X+9R[;m= \ .Op)LiJ%(1Њ~(Lahk#Qf8'Q}r)eo@m$B '9DK<>{WS x{A8m bGpi~{Xθ+&xvwMA rI5uP'0*'CJuؠO;u}$Gi1p&qJ?db:AGlwT`` rA!3Zfyq̠%mi&em wF$5){anZUzz4̋@ǛhK'< ^ cAb}c*\9"ۮqG6!lƱ33&s8ݍ?,ci* O)^]NU'( 2nf]ތ.kL&Ñ>2nT3iß FÊa1z!$'A7 "#Ԧ%[KqWcyo:\Ѽ qrJ<葻q9CF URWH.|wv|FRW>2JAx K trwb"ja'"wt:RS0y+1s#mGH8Ws:\O9 k>>=2w9,{C98trBQ3OWxH\HW臌@M0Ըj(C))exB>Pb+JȼY|6畾Z{Dp+=^mq#P?|[Z ^8Nrٔ f'PZ+`&R*c‹3iZ|5 [,=X0Myk>vTE&֒8T!l6c5i3SY|dg t/*2z?܄kmdCħ`eh,y>HQU&9 a:9=HU CpcJ,ecS 8:E:6~FIo>`2D Hkt#%n~ 4{+C^UQ@zW婗,km'b n/7u/Dfj+jC}aa.Ϥ'&vN:գƕ3 VC=I}hSYCXKN`IB˭;c2))iE-h~o̮_B>̙kljޙ_K*B2sV6pLFPUCGvo"P֔?6Aݸ+pw^ Py!duU5ᅦdψ2U~u$og&WvhJKqz(W' =J7BT$\ c1 G=^y<$i%][W5. U# Ua&cpx?;U[AF%\My0o=>IvޡIVaѾ#Z+4 Ht,aC#QpІ.,S35\8VfhR* f_z7 la`}aʘvGF,:Z,8V! Ĺ|}G q|sst4cOMSPC%z)M:?rlŖ6%7t:O< $m +'r;>-^Mf&f7V[#a`*OfI=8ƾ q-BlLh5o Ն=Hqg]HԈSluHrz#aa9q0v|}GNJ2z-⻇&  [N뻁oC!C'镯|@?2i 89*v& -f6CU(Z|/^m13R»ˣ8Zo1T}di[7h/KºJʘޞZXM8Ԉwـ1 '=[k3^W,H_mUԒ5Q vl:@|_8c=I6RƚrO 9ЧFEKZ,`6аZ]WקLe #rPFhsn*9#߼)CϚ wQŘ4_o $r&jKQ \), OVy2QE 4 8y ,mQ$O F;$6^el¥O2Wv^@nBmB{B%-φr&Ve ^wGt"|cl(`6s  \;]{Ϟt.W3.i7 \v~;}$fQ`#|o>ȓ P6N+WbC4 UH})?#'Ch+r+7vFX'KSk[Jw< KVt4y 0C;2 S85?+jzػ4j~M-1m+ة;gі-# ^F?0esj+T!z$}1+[0sУ0YWKÑ]풔W}~#mc+m,WOW^=>kWRt_Z℀y ɍ? 
kopW[Q-k[9ID0"3/C0w|a)حѝm)yo!lo#QuR>Z6E%IRd9aͭQ+ZX2e~1"8%ׇّ`"+mu]#NYםEWD'0MA=؍޶]ǷCƿa[r64ɓ,Ծ| V51r#$Le0-Xbop"~)aזAHLw*&ѣl OM r egJ.;:Z_q7&^| ~:YUS>$ -|=hs%%<-Ph 62Fֈc:zӫ8JH1@qT9Α}"#Ԥ]*ovPݶbEW`6]w?P,h>{:}$vys4+dҒhGBbmo](hVTf@d\\Q-tsRشn$Yyu8iS|?J#m8MIZ'%?j{7nkBqIop}W0ˇ3e f%G뙧ZJN8'Ks.Jh@d3/e2V˄ԪX/'}eԘ5P\@|O;Z+׬:E+w) mrOXwH1A'аO!O' \Q;݀airfMχPRWUm**nAS\>cBW\QZw<])[ dn @TXeZ/a.W)6h-%$P#0~%3GU/Qh>p [֪0cv4wj"3]5$kI# ):bBB )жӏ .]CZnL5L-#ڋz_Pɬr gco.beSK?Ӭj~g1:H[ݧ(;Hd%heB GVa@U `3Zg]k,]/3tO EB Y͖51.v~ٵ;#~3^ Vκruf!CFrD6QSS᪘ ?9zHPLS`o%=VMM8>և M:<.E:^%oI5$ nȏV[ws^=ze%bj `tjR‚i]s6~gqj"Dt睲L9m`Hbi D)1$XVXYSmj dO٠8w )vN c2_VBaњRQ=~:5ߝΥ흹Ȉ2YB^`ȐlEcr/QZ3 Yk6Aa领% B'q#r 22zyGת.AM,BJ?]2W儥9ᥳM]O&܉wZTZY9,6[|<;jIG-0wįNDL"H|X%Ռ&U:Q:'oE(sG9:[PUa]-$KTʍ'Wh!MO"-l_gw5&D&z:/{׋c40:34Bۮx@^x L6%^ܻt'ɛ,M bݝ߫ZcUA4WGlіR 79qAij'aenjMy8hJꅲtS>8#RT?9f:"⵮}\Y{/pPV(RG Ɩoo:ws>;1g @:;N{%b.+r f,ƿhuca=HHQX`] e4K6 \n]BWُ@JqRXTiߟÔƝZu+ XZ 64BKi N#g)ĪE/e`5lwleRes|Q&e$7_%LVO1n6rB@5DWצ?e[6;??؋1"ĹM@= rǭYu>t@aBllcRzuU֨Kf_Ozq ~zfzo%uDXV!^֡U@0VmrJbڼx̐Nn. m)U,^tH$1٪+)*R8ԙd6{_TJCnFSK0ڄe,Ac) +9 yu{$hL=Gi15o+Ĵ]5evuJMhV([հ=*YL9+mzPCd׋h>ldX$,J&WOQ/̆[Yْf6_]Q=1 j Q)9t$멧Ijku,|18jsNr~7f8Zi4o勴,ޮn6Oz68(c!۱͇mq=cT:s!?"9]G_vsgTw~ *_K.q.*_){~C3kXfw 쩡MN!yCz1qXo݆ZŲE\ pr1Հb4\X `%7 @@pV=^rnCUCEiiA\HLՀm#ه'`jɸ|tYx_t[nd 6ِy+3I.歫Q&zL 6.xCbe~͙u \Ft+3;+l٠:$hde~z¸Ki4^6ݙ9œ=JN%pHm -Y\R%qN0[iӝ!P'B($\Lˇ?>lͷ+cO]NS?tb C`5][[{*9$I` }6 ˃Z~9;,ƿ8(.2T)GNޣQeJ*"VY:A4bz>! Z5ڧh;ᙯZRqٟN|;š/LsJ =Ez_N0.89n#kYdI͉_}=RUP*]f{W̸HT `Д<|x4>ef-clzZ`0sUr|h\n)_4tWL/bq3h:X[=^S |wdtk__S)&r8A 6]SbduJXy~ܥ:TYFs]gsrj":Ru[ f=nP#1)k ;el6LH*éoҫIuhM rqf-ZD7bbO /%~ȱ}9aX=Zv&S[FȠ_̝p@'WAy{E8җR{K>꿡 ޾FΛԫ =lc as._6tzefB~׎{幹)x*cDWnC) aLvz?9~N}oم(=(RH;]<}1lە@5YD?N D ercqf$Z܀=*l22 p<@._T^v$JM{1ŏwEj;h{[se~[|~:Vrt<' IaǩmtgSCbz&rUq)ݹJ/Od-YH~pCe 'Gj@0SnGA902ֽ/l} +˿إMd+EĴ!Hf"sz!_!f[ޜPi {fLkg3u6 KXܹ?3Р}kKa]hzڈdT",M^kmtsзbBQN6z9H#Ap]g$!xXҔ5Ko't۰]U!=TWhD6Ȣô)E|D7'Ά$չ1p_wư_<_[ƒȺ|sa@4#Ͽw[ ebT~E]K!Kynxi~s*9-\1v DĕrEj +Y/e≉PѦreP Ƅ8][!^)!nr7!`i3,YA%ەGЈq3ҥ! 
TBXQ;r0 )WkEvFywp$atܤw:6["`(=6Iiyv6K'O{uӃm ="]5k&>a6둎Zvg6蹆x\ٜ(O{0]5im0DWRh2UiiZ-2+ W=#0 |B0PYK5c'd1s9NJKM=Q{v_x=7y#LT4Jmt2֛@OB`,=\=G M{X[#.R!1J|kRF[lKڸC؃b#hg`z iHFJMFcS>1g3gܢ;ہ{E22"B:襗}RqҘD`9lӣ*﹫#o S\y?'ٰ 8κAPK7f^qSvM*L7q}cX 4iWd!~Pذ ݷ&S_o#- NH~$~i:bխm8ҚVuWݢCV41 *]"n^ #wS^ݰ}$bX U簴R D."pKR=c"}1qWB[S|-GI0"Z4@c^e#N%ΈbUgUEOe1{ڮFN^d\1%{oS#YpE+p2$|Q`:͚w CR//lg@ٿ:*16*RK@EG$rUʑ䢉&p!>jRݍz^NckaLN3z^]lߗv'E.HRu۹E :֗_l"X⤫0*D\. B/n=-噎EycRf~N;ܑ%ߎYQ߽ DJ,UtOan,2'I(b(2쩌 $~)Vf_1 blU*C 6KS#}9z%-N DYؘ]cPKv`]"u͋bnj# QPo#Pj 5>Ug 3S8gƜ `m](֧dDفmOډll 98!9:%;K9^oUB9wNR^Ȃ~%R]\qaEqѦ>|[fǟyt3N!2-yDx;ĥ#l轟3IiΛ;u.@?rRi_Nl6En5,8`FBCZiM} zѤ_{IudzӪ u(R.sdpqR}p)X뵾僟&:"MNRF f8ŷgBc|k@;H쑼U"〓eYr=_kzLќrd/I4L|iy:pA0}KUmksΘG畠r&U*̿?V5juI~ٖq>vP)F{ru= Ij1ZH 7:m0-%"R[k"C GIV`n?zIJ-5gJ?m e/b&Jx-(Jl[/ a.͊_QzX/Mcf"sm|kٯCKv7-+wzS}Ɵd(wݟ?>}RsDoK(/ \f5䫾,h EV%AU%xy|[zuljuy[E rqLRyB_+֑ ?B@"59cSycj+I|C@F˥g-t8mZd4qª^nѴ5ōXX|Mk!FؓC,;mFh*4%RB5P{>ES# $ỵe%rl)Xﲅn{`rZ&.3!Q:іy;e.oK$3MdK:^ASt(&:SM)ع\jp[7p mX\;$l3qgrHBך#>S0|ܲeᓄ@^2Հԧv;;(\c?sZ0ˠӜcFNM與RAR`xH`p?jC}^HƵI4 2s`+wn,dUWJ(҉}6d Oe/LGtHP5yZ_lI%nWqcx&0E1-Ad adtsu8IBZVC!ONU0v"PC;qrKd.gѬ:J8pq]}U44aoqXI?ib*Ac7\T'&KH,"jSXӲO; ot͈4]~[: eɲm.[>?2 #Q]O 楙4iY-&+bZ,ԶJfFML"͸%RO9fNħj$@jo3vN;z1Viq.oY1 sh>5,|dQ`ǯ\\vKys+tCY/q(Z5qssRy$]) eYaUPeEϞ_t:F&BD\#*ӾvQlO2UMSլe:yq{`i(;_^Q@mXw2!;VliӭV!8lڿG%fAH;$Wb\/-Hi@1G9^&nX_{Q p?,[9Kj MbZys#N! 
vI?^y7~'-bXCզ_ojrȲvBcCƗ'sԘמ籃pf{NXj{)UX<]د3GPfo*tph(vJ=.h߈l ,v MQz|!g=JɥP,7H5{;i#!H쀠 }UHpwK&Tm<IŞckc`2&<>^M=aN[Uc#ö;*"Ua'"vs-hVtzPG@^x\)YcJ^+v=*-^xHC[ x}Y8aZ8شH,YӭLc[O{ U@luRoRqj1;Te+'6`nOo&rJ0sk0.>Bs7s߷ϸj|PÛ?bͿ0…+H~_nNM5jﴙy&;_}4JgmnLr:eF ~-[=h/+lRJA2#%^ىHQ3JexV ~VwIy<"q,Ca:?ypȃiAϰzXz׽8T"S Hc>{Lk2(1c: iu{|G=C+֧RHlЁcIW~ڥHvVr raO%{0[S"YMH1 x0Kk (|?4f\=\H:_Ƶ:< u8u,Gܨ?鹎(BPZ;}3C$+8Jɠ]L22MxA!ܹz(#'!3n( 0`+k >2p%}ƺ#G=vgG ouI%auZ--nUT}ݻ=LI4@_ʿQIq 9iap0DT1&yժ2ৌmn/u$a񤾈pFh k_;(B]>EM4۝TvP(guDd7 e^} iWk&c[x`0?x ͨ2+[d;tYuXu.7Д^k|ƨlp ӱE/22Ea{S*fxn #-PIFM] S{Kx^^zV`Up4AUb~S 6`Ƈ3*8190̟Ez*^e9%K˄m'Q<\:y#dIO@?Cuwt+;EpC`qq4kҦsP r๻Xc  zKuҶpzH~ tߌc+9HrV@!͕g7!mrn yjg%rlF~8_vXwߔu3mG+v  p+ ҃˺ӑ=x5J3 .Q9)>^=6u6%iic ^QP}|!iWL_e}S+qG9(myn{\j@v tgiN%w`eVGYx&ڰGspz|C d(SG#IqDZpW = \_dB7k\Į#T2ݱT^ZTXc/IR"A XADB1\Be4XkQ=a볕CϢ9اH=Z6\O!QY;"~&$ƠJulvdugkӄyc-K(ěS E_Ny?>$##h{%ihGJq &r5; !:X="iPy%]Rŷ@N_qx>۲%Th&PmSlU Wt-A7Mk_coeY @h[fq\bN֢\MQaJINHr a;說Mot7b@*џucn@(.bpz,PM4 OX?Y1p|=ϧYy-ux *лa3$0TG`*~7Nx PDvb-$\u#W"x;]juObތ-4S -@j=Mr( 7QAGLwbn0*Nn'5;e@XԿ"hVgQz$>AI7QO#d:7mJ?(GOΈf.$pG^~ZYz#=i}NEF+2<`"hE$~alNeFJnQ[{b DBVh,+q{TuY~9ը7d˄a+D"1}!dR_-݌u|O>TiKB+Gw[i|eAq-;iq'dV>!{8 e3sԎu#]OPP؍5/FS?#̙["KlHхGk $~*; ta=U.أ?Uj&̠3pɮ%s_@aIʊC2OJ%i~8]>^u;u4Y&)ҏ4D/ڸ=: #Ns|֍ڔWx*> OD0*hTcq"*\m sԔVC{V>,qcmH$B$W##_GPRH]b?2 &rkB e*̀Wsu)}^"Ũ  qܔdk>ܶ;kY˼~EԮ W֓g _{g @훩ߧyj$Ru_7;:#eY7τ8g /ĉj{{GPiw;U iT:RaP Ep'ZTTTJ6GNe[D8_P$\\S ?%#rw'v@k3`oDMPS Q5G&=V2%to2GӝlEDq? &(TLKV30=Vt'{uE4/&c( "XȱpΫ9>q{|23H+%ahؙؾ2m=0-3| X޹-#1p V)2jZMv>UtNChKn#}}iG(??tIuT-Oh Y {gc?!(193`.ܜTg%%m1=ݠt|ʜH^5vݔwɝN LJ|Ⱦw|ulc {a^^l o'v8Do1TMoHF/HGPr2"PQ]l\.!BIGS#%{X[KƣZ{2 [ )Јap_} {h rv;v;Iid}oF:18/u>WUK. 
b}6z-':) kŵN=^*9H(`Tzh ۟F"ו 7=T4?*i#zs50M6CS26T ?؜,#T/g[GHJsv{}\k9OTMA1fk&.Vcp-Q&ۀFL\Jv$4_:ȝV((!xspo>UiKaxv`$PG'i=ҳLrpހyo| 5DIƘvY>.BWTaH8ńÖ˷!IZS"t!JwכIU[) ,h֚mX':(~Cn%FEļD&IR@ߘJ :;p蛰v \ d~ۜ.va1j6%5 Ȕz7%i%TVEEqn')&Z?%vnjg`a$Hts{r?7\],vڢe@2R(a8$P#d 6u8JNf;7yTD SE> J}C7&]jX W:3Z4 );1i.ֹ!T ~P C{DɃ[QGsyfI&PSlc$Ad%S(yZҖ_Ua2/[{:u2 o>u?`rd23hj$9󱅣(BD7b` ax+| /)ŵnHC*`|59wF\5*y[7xa)K/, !`Z+֩m1>}NnTz9"P<# Lu˝E-tP >r\.XXSe$zD$F_Яn?"Ub#r xvXt+GFҩlW;)QzD4`Pe8LK'>q>g12@N^Ŵ\mP]%GG*nFꄝfM4j@dIuB .xPb+7seHؗa^9 eD Ι'֯-*Ϭ Nυ\+WU&*_/W_U: ka@<_UgHo>549O@޺&%AX{ eTY-u ֽh:2P)^dU3s* ųۖbf$}?ZDǖTr*~+UHD}5<%YZ݌ZAJ_02\"mґogFтXK}L^Lz-:1Oəy޳hz ,ɐEی1lHLo@՟/_n( hWިuDtaoսԬx'2Rt&-7^yLF3C({;w$HbD)V/assQNs!^g#hd?H)^+ dg%<6u1` |K G0H8!E|W(ik d#B!aR[puve5|Tӊ!>f׿)uvlyAGTÕ9í+.0)zgy^F8ěZIQ/)3߫y|fjl^ퟗ"yRzte w0we쟪s*%?M!84Fp >c"JiK(y0N5SdZkDϬW Oav?:l-f zC!Ccbʹa ϗ"lO9=03.8&D_v@M@nWb'n;;㬰Њ#S| yQwۣ߻G+(a&M) Ə [~ˮ I>1upK-Ϭ\!GrhA$8/̅<]kHɉ "q}|ORrTXdaшG ]n\y^[I\NK|n \cAno &rUڡEaM4G|$8{(Jr3w;'? D@X=kYh{yZ&>W"i^)rC2;T9oZe+gةC j!0 Q=y. ;(^߭LZs6Q}pkE}jOЁtT'`*̈k2S@E.'DoтhoSoE=aARP%m.-m_Tfs5n͡1~acژqݍ>Ym zb6 }MLwn[IH3;/PTqڜ#& T"t$^8[}&5B/pF!ibj4gx+ '5^#Xt-DG)'5n)pkb9 S/IPy3~ ͧ~MU0: S?/1a㩌Nur>BŇ[y%C[i׌Rb[tmrvb>~\vz/x!vzhBcSvn3\xXUX_#(7%r CixwF$es6mXWݮDy(CmurJn-UG((tbA%}K[zNpTLl*=% $CKho^'Js; jNF9%iUہB^SGhv ވϴuJv˿(= IQ ĄzkC>W(0ol.3c 0;z)4/0޿s#G'5F7^|ʲ;k56W6E'"/zUY^BX|pT"TXM+d%i5W:T"q[@{Y!Լ Hw.͑SÌhXrf SHI΂6Sx{-8Vڝ8chbme˃2j/S6A?}g[Ks3ÆzDrNf Yw1Էdd π"'Oz4I]'SVˠrh{6 vxfoDTuٶT_y;j̨6D $("HA#h ,(5R5֎ pIy} 7)GM"#p:@:RƗCD_'&5VorŷDzWm7䲫=K,.0?8ՍLNpp*-l54^Hd5d8gwyjR”б_fS&$=6 ~pՕpRVÔJWWJq;jc<\Yndv@Lpf;ur7jA\X$?܇*d_Wf}(6rLac;~ŕ -g;+2F(U7Y /`iZM6!]xf=U'mF["͎ Ԍn/pZS-O1Imݹbw,K:g @Zv$9duq2?X(f}Hjll ބ[xNI4 #z(EYj2ﯕiڪNY7,kDWba?{3ZZky~h(_:T>Np\pMX>Y#;<.GEu~ 7*oOz//[ G!̸ji(AO[}_mw&B$s`r`xaHF?|SPKIŗ. & $|F"$-{.],)oC}]Rz[BːkƯ2)(8#e"G4MI@,7'ʮwTYۻ<3z9:Bݠu$_&WB gW:·js> "|(W*/!ydVjviLkuW_,rN.0%H3"EVKB G /髐vs-Z(?|z, ELf/H8Y4 ӻ^=A@9ٰ[b8q2k>3.E>-Zb. 
>]j .wi\XBtY~Hj)`FCUƱFdB ɑPTvp}nJY[\ы K|zY#'qHxtMa, zZ^@L+ߡ,dvǽu9C,)[v,nKGI:]%T= aOan$Fjx[Lry(M q:)B`]d1yZxng T& su.OGV]8I0Ƒ $K|oLq' &r⯸X>ϊtwBUgl:H*5rnJh_8QA>ql`8hVd l)L@z(^B}178 P,յTuDG|7S]OUPrjX\g [k+r!roکs>X](^mXݾ8;ll $D;$G`>2['e:}@95&s#Y:HV!3(s R0:˱]W"+4^*8i 3DiYIcjг\WZHFw:4Y_$ASž+EV"@A-d<<i !/X:(;4I?yJ1CY!'Yd1Dv.ϫyvG94W ]^1T!Jz+՗l#8ƛ7b-n ) |g$}qP09^Z".[ )B\U>(Okmeʲ8ll\.>q@`Wf5krD%aL +2Qt][^ox٣eJ}Lvӆq xF]z{!,_Af4zrb3&M-~2.0,Ί  v@ v飔+^X'j͗GH⁢d"8J`\mB複ϧ>T~-]s|i oٷMӜݕ=Jkm$&P8y^[6cqbr1Mڟ,!+I+T$+ţ ldxz~o~r)<&1>dddx{ VhL5dkG )|l^y CF) PqTPSKkFϷHQ!B xo $~=V8^kAOVjtgb _X B ̛h`g .Jz[ ?l?BBl3fk5`SB{/~1 @4H)H҇^ SZ2p8ؽq,Hj=a"6@EHl7o 4;Ju*?Y퓂 P!H .r+b{t_Q+{֚ķsl ldŁ0uԯM]d30J6DEK=+ c};uEɩM!b9Cft]s\o Yrn4.gJ g&>:ru(Uuw Ʀ[oK~*́7ܰš m*xK"2!=^\v*2v7b:{ ;Ea_  IpFfėh2t9W ))75{ >a *#`0ڐb3lrSERh\i,ɲXS4I }萍[#r$G˾I_6xj oį->\8 n\IS`?:,\Bzh_F7_-{ł ح~ٹ @r=^F5ѮD+WS5qA"4&<Mkz3eQʓN3{eR$n:>W۹%~*iD EEÚUIgut6 ߆Ug(EYHp은aSZ>EMc$V3YMr=gцXl ݰ;jm&iD21JMA9`iz^-lz?aZzQP=^9AqRhQA/Fzf# gճJWS h&FH!I 󔽌eN2%Tۆr +mh>!V/}M:7rvrK GLؒHmȘHJf^6-UW 찄}Q ])&tW A~]˹re;f4FA7傸 j듧Zߏ;<(Fk]&2u?QIii2f[z@;冰<Z¬_QH`C~ŠIpz~_%ǾW|~W<ټщ ^soG8/V T0w( -[&(\gX@"Bbl뿗CzcyLTgwYD 5e%K k [R%7.r֥sV@ D!(PJ ZLnBnڔ>oȗ%2_qےK1Sm-J<5yUօ_gֶ_oCv@avԞMsYt VL;9=L9߾{ 8uLL|IGrWqhjK’6dr"Ôµf>Wul? RnWkt^O%„}O;|;\&2J5(lLK pzb<$2\Y9:x <Ⱥ3g(Zִ<)J=ײHr';?8κ7( U}+p"MV?x &K&?k}YewI9("+d @b3\W :s_Q(-q0$t佽򓱢iڎ67Y ( G*sUIA57Yw_4"zZe1*&&ggBG}Fz)?@&cӾWa-lChRjbG~H؅HF'#lpdMtxQM2 Sp89|2̵z`}dͼA@dQƺؐ]R#0 V XA%Ozч(j)<IGHLӋkZW3X:䗵X%,Wo8*ֲOÒ$-s$[Yw϶-딒)>=K an]!|G @hwmI:.>f1n)󁤜I7d|(JuZG N;Yɱ_(<)D#^g/\uRҦ$}P\{dm@(Dw+f!7Xria`] wPi2Fj\Q"@[2Q%\帛nA~C$gzv^uQ0to ZaCUh"o:$Es]ZLBPHX66uXԪOr㾉{mO)niHse`s hܰ/Qf=;G.*|kT|y$GΧ`g-v4䁑p0HLJixҝ@Wxkt:H=w24ƠM+"Hf90jEGWxS~QުPC鯋-¬ݦ8T# 8iN]Yz̞#F_˾0'y@Ъf}B m?_v#mE+C0VǕ5o9~}RLK X,twokǩS!,3U}ja]Z/,Lr3zct3dN÷{@$Cޛ*MjOK2UbFR>i@!d/NLl.矀+uV5PeZNa u[{A2kZ >rx!sU`A ~]b[mV($DsisF9p?ӟ75 ,ഌ>h)⹇yvPcFZ*wVZk." 
;_^bMUY2-+jf&CON%ZZ0HC0[TFnU Rn9>DB[ 69YQ $O³C/buoz;*+o>ua)9I L4GB G z [9uk;%~A]2(ID\DYĤZkW˱ vk,p!>Uk I~h3JE=C4<X_U= Z{wQA_whTSe ak9^D6N Z4?"[ = P_ aˮ% 0$CBޭd$ix RȎD] `WUk˹2Ĭnâ|Ze{cZxU%XhsfSd\Qޥ.a|!uECّ6LvvO+R)+K[3b@.[u߈nf)%x \a$E9[Q>{Hq_-ѳ>86G2 rSGЦ'JZmzw(a͕{HOބ/:z> isbeEH\lzsc,.*I_x39<~ޟ)b ҝO>Y,pjQRS?3H0>@.i_kNEr= R22>eֶ]l;\7I 랋v(/KPyTۣH>eDfE|o}o`ngdk2mVϸzT";Fꐂfd{2w`9Ҋe1BcdG 鰇t{aTSf~)PXǎgJO%S\òL)Z̦j|C&Wo!̞8ؑ3:#ac)js!'T!, }B4MzJDd`lWhVO\l Fg }k&WY Y&<_Bw'ID3]ZoV]$p}rX2X5f\{:D6=T@ RS,) {딡qw=p҉' Щ^<^#l-c`+";y*>FZ N2۰-gz8ϭ";~-?uc]\T%@ɡT|.-$}n*381}xsl= %joAFK#|AN䍋_5cCf<[U//ؤfqSĕM$F'p!Gy^ĪXfЯu+$ *z$ ^$>?ob(A/(+luBc0 p}TR<^Mڷ%uSUQIFd(/p`"})eo+dpZ?i—ߏ>bH0p=t">Yq>32E77:u՞Yw+21\_RD SsIҦc 3WgO>^sbCW=89sq27ٯ5Xs"/4&}S b7"̤۾ݸcT yߡyqgt~i*3`S. !Iod/ERcFH3[%q-Qb:[JCJvcBs!fs(@D́HWp#8Q#I_67׊E|Llڂa!.呱O;:³| oG\R*T _}wْg V1jx7%6ND> d&%>,EEDLyІOk`PALPph 1'`r:/O,V*ڍ% TvOɆK[ȗ״` p ȆGY?\ . e 4 wLim/O˜/+"8+<_;ȻmymHiM-\DҪ" 2,ORy=Zd~*O)?c5 ^ L e.d^_m]__/qPѲ '2&6>h'J7 ;m04׉6M.koVɏ͋V.zfnIIm &]r}Bo['4P8Bh5j{wG{|i z̨:WeMrEKgKF_ N@sc{j՘ؿ- D :"QV.(Nu8,MU좚Bb2!hfe?d`ʰMY6(m}8u3كh+~g ? X|E{rpO]$ia;n'&5-]mv3/NVw?4jl!A(Rc2,9-uWVݫTl"QA)JݝT ɨ>3k9*Ԗ-iBRUOt#n -vy:}qW1k{^>d#jnqXAZ(( _1 <@;Xũi\/??Q!9tP d~"՗x-F7Q+I rcYb0r*,-}h[L6ì*IאO% R?v8_9xv@Of[45p:T\,Դ3wW6;'N#cR0-R?t) ;s*C^^SD:[X`Rfڧ]~#5KO+\ngNCEe|>"[p]k"N =$c⣧ L2il&QU7Kq(; dFݭQ 6+o&0B%~E-cs>)BO&8idb}'5Deu@b0!M2I7B?SB㯦UO )cORZ֭|V/e/U;`-8:z$vrԞ;0&qrPwƎΆm6LPJ,!~YZ$*CAv &=#&AI>R%hjRK9Tb饋=Bܳ߫6k=o!dQEEF9? $t1 6=u>iH(j P#Z@!u;, Xo@߅ RraBcf-/Ocd kQH;*2s2g%Q{KuQwhOCo"M;x#?ŽrLe,~jP(w`a(q8Uf}K_>*%f )G 6g1ބQmjc)4 B`0$I!;?bU\/$ KgdQe00_S|wꢤ^*X"S䶿]By}P Kraꥠ&qn-!;oqę4Š,"'1zJAUvOKԩ|,D$O/@6ﰜb& Ͼ9NbRi25MNdM럀wHyuRW%Th?i>T3oU~Գd>zNlPh8VtIHp/eLI<:vHG zm+L5-@(48ƒ@"MGD"Îh;}pJ+ˎw#oA?oӨJ1ӕ? 
|=j/$ 4vj;?{<,rs~EJ8 y`IL|Ҭ( W~RUa5CT0=f_+ {íYQ@*UҋC*W%aVO}+HMu \ug~Iu=+%DhYDhMe;,=}75jX̤}2ч?sypAF <~%.Y Esܧ0d#BkޙtGnXbl3C\7582Y|vw_p{BE@ޱ3_äqѥZt3^Kc0sH5޹&i"ټ$NZPMS>]k{WAg~.7UWp}u?Ʈ 5) eo/qXrPV˹d^CS?}?1{u6U/\ /Q"Iagl4 )6|DXJ`:-m1PXSFmH/wH%#$E0f}&!,105|C쌚6!aß~|&Oj yg` ֱmouTnuM1f9 *M_:MgJ٪mғJ(" |c|i+`.T)z/K?Kx\ȵg'X'/ǁCJs ' \(cƭ{֞3‡l[T mTV_#{Mub", g8֎:&+#8};p6*Cg7}qe@kB{ɀ GM}Z3đXz6dOӫذgg$0}-XT$\mh%"J{Wy ^E{u3@6,"bQD(,k b$sp[w궛JFx4`~F:F- 395@a(`J_T; ا RqGxikgh@ЋHuOSm)._zyYݭM^n NZ?)6iYx|4Wukvi|o8HyJ,Gl&d<{"._I!L-P_{8sa+XS;x%\<66'$&ʆ|&vS3e?>voEc%g(Vof'ۖP[$a (:8Jn#ˠbSlF6;{`"|D3HfzCe2PbIW(GWDtѦzez6(PO:Gr{Qԓ M5)SYi}{'0*lG,n<Q& dçɛ\'mQX̔:a߸_RJi +2 Y0l,BbY xYn&s[eR;Rj0o%aKIKbYdt4jD *sՑ5<)ΦWv>tvr+"G$mxWq$bAx4USiӴ7-e'KT8]@LEMȢg| L:z٠Uu8[麜8d:#X2ďgP0-2RJڰ6B3_2_weU)v&/)pok %-E\F(]+7Wæ?ރ5!zBkͪ$`~_8Qa: J7JMuDN:QOb R{7{6>!). #r=BV̪ +LOvf`JJ4N.d:57'V):\Q8+h^m-3CdHv?y'HaNuzQ.l[qO}mft] |-9`u1Hr^Ou_5>e]i$VNi|rrSkbsɝ]h+ YByx'5WMp\h z~6a6ģ2HxϞŃZiD%`XD僊9L>D7n8{kЮ֏F=]WǹGM)HX^ ?<$bxX0 76!]R+ʩD^^8}szHPoř2M!.B5tXзjԭy󬖍ˮ1`M0E^Ǔ|)mQhv,dKRTƯr̢ϻ>۱ra7 `;c0oƏRPb[O&~iٰsps)FuL2(t"/)^?ܲ8n(ަȝhH ˜bR PCeQg0y Qc  }ϭV֤}>Q kޚ iWif!2oҤQ aO@+\;#0)%ST\ Aܯ>|#x5QrA O 2hFOuUa󕉥nio{CnaQ}#I,/jz4'|&NaH 5(ml- !qDi>)t;X: VUghNH!Z([)@5C ߿7rX}HPܷN'}zw!vcEi .YIiI  PJY `iD'aiy%WI[ UPYj^V]1HtSoi]˳ٍ)]E FI]H LO0 l6?a>Q 4mK?R?l"Zj'unJAA{k4&~'&jvdb F%U!3}zlq-"ߔ`C:!d;"49r1Eg|!WŒcc8Iض Ne<X Pe,mQ+ ^]1\҄ fL65fX/sê*LF*A#tmd5<]xE[A|:nbBO3_R^x:3^fI]v/iN.ޣjl7S-Z OPxݳyW*pmkn&ߩalY(ޭ,4V2dvJgysWiҲT4z%2 *Ǎ['IW犆[U*]+p JA XƄlOYmo9QOkm.qs |y@"Pb1! 
UD^PEz;ʬ+$&*ecSTHM,i‚a$"Qn7bIW&!,[g}\c]myрI_vK vwG xS4a+%0e /)L'[O,wݬ0zCkN}!3B\| .[1i(ҕ2NܚTY8{Y[v=>"+yބd40>Dd@-ha].ID K0^JR YE]Kob\콜vl݂~6O*Lt/:~T|P+51~Tow@ϟk|MH\WXOMrH;n-"޲ǞIGŒJQ (mTd7 b{p[Y:~Ct@c|z{wsX[ y| w55m%g]qkvY6As_ĵ928j#+otٍH*J1WOP!w|)q,G@({%Ox >arsb֡oᚳ)S.:Iw#}641IJgxmOI9/E҅tX_S~,_@L8e#tS-nNd!f x$?p&]Cz#@.t7]BQiD=RFY_C5ązً5>zw@{gh&0a{~(`S1efWYĠjlL\n6ɴLک\plW۲S#r)Kzž4`  &NAWdcNA/C^=yrTF:;VFŠ̿3]Jh 爯bpe* (g`8H97>yv^ ھU=+3,*Kgwl49=6R/zF:W/d(!rCԹ@=?6wݑTࣕ$?fK0w s,}gcE!h_*}*({{h=:fY"xJIi!SG=ŲXYeS< Q)Ȁ[qr;̈o BÐ >pRG~|y$-NQET >SferC*hW/ȒwfC>"xР#MEvT&'pHK{GbIܨ3вQނ>i:skd3$cel]Z <* \$ӛ|VA:zމYE;lRoƆhƥ9^<^ARLDgEx(FS)wcS!)+z81rkS^ԳS[Lhާ6hquP#TK)BEƁ?:"f!w1]] ̅Z.$jrh>k7Le,_B3y͂]Pr68m ! @# l9~ DUQP,H\|I-]A[JJ7AaU ˰7` 4/5+ƈD$BR%B=丆I$:\:jjn%ͽ'T.Zr`{[D}?`,'.xwOΓ!Q"̻7ezt>ne&3\_,TcєiuZIl{Aʧc:^I<[RXF/?Bl$X(<P@=niwZ YWVz˜ٴ DP1ыVbJm=RMJ3{dOx茥?5PiKq zёIjL?-ά fhGr, ܐ HIxGl7Cz.uKe߿L);p>)r5^kZϯ]a9wf}naoBbD^ &&N3մ-CX'VrÙXnJ*dMnFcHRmv4!mX7&6c@?p<d#e9*_ P%t"7_ >G^[Fg^ߏηK harZ5@ nȰORS?R/W 7Yr*.y;H Bs,ϗ-~X)xV8N(w[<`MYuxQ:'>W*?ȑ| a#OGAekWͩÒ;{n{oWRܒ=Om6iEb&X9=+幉u(H$ҡ*%N a4nd-@ut~5nQ%]pqVR;T;~-xiTs!+zl` cC|_bFoiںf|BI8gi`~.U]Urpv%&zX0J.FRzѢRnh#=gu/W?&f-y$s3EBHK lAUVګߺ@>3XOW&HȩwWv›s-j8 M2Ɵ TāimUg;!jբI앁ZpY5LnΖ0t'NUX!E$nvg)~ux=Tf2gUt_9j,7%λa}bZqHKon@O`&x)T~X!R;D${I6/`E}J-[|K늲yi֌/so֒b?[PBjb]wOE~^FEE*'sgf0儖~]f  Քdpۖ*eU1"X2SLXUzO^j@bَ^ό8r)lٝ-^P: PGreƽNWs %pӉD"eH-qZ :8js]b EHF}6@(7n Kd++d;4HH D)g}&ߔ v N,D1kot) _x հN&pYe\6crOev!0;5 9e0$]4LoSuFcFԺUrCqsuW#>@ bUq HfJzEpMq/p}e7ĝ[nb?w9@<[gR\^e5TމK#DַH[QIU0U?za4X|E#4}-5f`MP~Qo,~ +u&"{-QᝊFmse@:JC/wDc#j(ocƍtG.|3KnV4ٚݭiD)G/r&"K)/ ӹQq'm6g$e֭e ·X )e,^2E]ml',TE9sD?jܴ[GhtcvI(,CsV[8(ܓE'Ly{o/kWHHJ= o#D6Vi:ߵ_̩Jrb~aQA}3@꾺=jpcAvWGoY>;4AgF :_S.rRYTOQW60Ecu!2o*:aYlψ]j‚Tꪴݻ0OLӂBGc%]03upWMT 4GN2Բ^=_[ @@v˜yt?"lX*> ~Nݡ$m׏T&pM IN>xTh>yjՃf65iePS?7$ez 4Nށ]BܰfשDzr*)O|Kwx M>gz*R.VKm6dSe:єZ\1m*Plѿ3Pxz5cUpą;saм?A:%x, 2 OԅC3\?^rͨIz3k\FVI kDϡ[6} 8Xr]ضc$^rӶW3uZ\>.:Rx(S v'?r1<V@IҁV0)PWcxd''NxM`;nC@BOf_ !;Ta?_S2XңoxZxdNE} Mv%-_ftV˫ʧ``^`$Y֣r#Gsx 2\xHߑTo:Yy@Z,j~ dvM(#0 SpK=d 
czBc>5mm9bI/GVFx7 S۳dźQp1#:by8&ŐkVٟ:?I}!oݻs-V7WLŎL)@4?̸KkBJ2hh _*ieUcv8ӃBٟp=BL)+%C>gC(z_5!<=S_HpM:L7+}G!@3UJG*ZkKNplYkts泅,P 58Co2P<끓]fXR prkQ?0UNKQt]ճ-X]҈F$"b\v1d?yS Vjj'?$ GغL|UCx[ai?d\/},6iȱě=YՃ(4SP7SFTJ#4Nil[x߻Iʝp bwM;E2h)Th+@#UDɫ; QxEXT8E[aM\q={ܽɠ)U8]$zjrSPKwPp ʂ#!w*ЄLW]n,o 4.î<0ŷ@}4[SpّHݏ:gҒcLH وTU Z/I9ex@>̾=>=Ww/" ˜~P~}o+\IioGX1BL& hRcO+-ZIcf6S)aD5mBoHܿ3kOnUFƉe.R8 R #4A5.5k,w_k >#vܐ1Ѓa7ك3):`a]J w/iɣ)oƼ,$y]Al77 )Qm>G+|J~GkCȭ!Ty;?JF np-RsE %Tb[7;5w[^;FҙA9A.TR 4nHs~ a> vZf .ut؏"[-roDߛ{'FF-|:-y)\1 i#Ux#AfV`4&K؇8W׽9M05͂9t/;з~&KUۥǓp;',y46M3NoՅ _+_;)woD>`GZ,7%+L"栔" AdQ3V#FFV~.sW+6`idX"y`Rʮ" O}Fr V e[ui-?QGԻ^'1HㆂCNeDk;IQM+ּ? pprZ,uOH{kM&g+P섖Pi=҄y4(`(?m2D2eއq1q瀧1H2`=a )GG0Dt=Yg/Z7D j2׭=J9H'Ƥ-v^pzy>B#2!oЯc -Rn+}qyP0v)MvIH6ℿa;^]F`^MQ#zxIiR/~Fخ84;3Z |$9׉98o8e"]tO=tkl t);~g]W+x:g:9l 7c[kFy/О XxVBO.WH;!~]@RN^'Q2?,z{sVX_9He>E*U k\=X޻."bg&(*5vf NdS9(+T+dM¢yuN N!cgBe#j@#ӂ_4 ن2jAXsLKp6cffOw>IcK:.;eYsX%} T 9坥g!,6فg')<3g5@}:rY.6_d1 [񬅸 w0vN"%ّǕq%HLL'5\e[twa()1;1+ⷂ:y .$|;IWG{9$f7L)JpOVNQQ<؞ L#h=KF@)/Y+,_3#Tlnvk›@fQa]W3XY O%K+V  ejCU/7_fuBP^ L<Ь6k'iإ]8Ru 5S47Pټt2knͿa 3pG U/HJe9XyFTgO2..)ŐqAH`R5aa5̎Zbji񌱐]lok=ܗ8s ܰczƱ{P%_R;k}1ZDt(=tσ` LIr 3ؑem$0i(͐n\m}W_G֬N4zVz46GS!Ф+V}"muG*W MQOc*p c55ok<]ՒOXFI:::pbغ CqA쩸EDO.z(ޝˎ] hR6U8{$M)L,닰Xx };GJ41^z'5$`Vk>_ a!K >)xeY>WM"sqOPEb(X`S7#pVZ<u*qX5iP0+ "kٴ:S]ɊXɍ\-W1qƶ'ܩHo 7I)u?K݅4T 0V;(u&,_k~U4$~Q>,A"r Kh.H% <ᷰrx˻ׄL87ѯ fV}| y=>"{*D@`.ڇܜ дI l)>o.0?1ٹBhy0\/š)2In0Q2aN'5]']%&:cOJ:~q6R@ɗ&*P68+F?ɹ3~fe9j퀇:`=$Y#Z"6L`-^q$a^C}'7ބ)fbHL+P#tc6tR:1x/'Zsqw,r$ P`&I@)%8eL7{\CR?hV9n*늆pÅYnPI5cb -Pebk~;9A,1Nj ֟>8J;q8KxT?`;CZw.0o2BIpIޥLA&.?y+N q Z8`|jo(!,M mW,~b>zD=—͛)Vpm9FAKVM|CR%|V^  q.Xtcyaf`S_MO|\s=R; Y#8̭)<>V4 W)'E3i5)XZt=Fݿ)Ҟ[HJ N:Z@c ܭ/J&^ ˘'#nD' _&_/XD즣A$M?H? 
~H=U+ IU6}v@$*C==æQl4CsQ=%\\J 2~$gp"KUYY,128tgФ5?B@opc+ͱ'-g~QFibړ2 ̈ '1FRYZ-ܜu{ ;TS"4u3n"#j@/qtt3_$R'11#Ww?>u۝1d#ֹC0%ƸV5TZ _SuQߝ%dXLktD4 Zn4oJ}]OA(VK&|"0 QUei5XY^KJPv}{`#ZozquAfA!'N.̠x[QN]gڋzKI뽽Lxךr˘pLt^x)LP@$ zˍ˩B?8U$v@]vXGZ!ʗWG macXajE3׼M|e F9B_V%C";|&6c _RХOFD\=ɇ EiS<$7f2Gb\; K/seۄ/*>~e(jXtu9swmp4ku;] a1reS>9I5Z]^lCQ3MA Ca~h]ŋ>!'"5/{On͂I7V#mA)Jp,iѮ|h=-!u=/e8LGz()aec7.BOk_N' ^Do!nF{0n"!VPYcGkS3v%ek&VF.Nd5K4j4C#AȔ@_qRHо%@ʉƇCˆ I6g$Ę :CGIRI]g.ۦ1L2zm YV[wabׄi|J>j'5Bݮ34wkU-Sa%=-fA0\ta#$Vx~%yKUyթ- ȿU댨mV~.>瀉Y`"aXdz؟k=-اRׯKOz, ] 6bRE@q*@^X/90Rۊ8X\0:tiڐ}yE>~qS^p?[h*# nޯ-38Nc;dբYgcڟ'7A)/hyIߖn:u׮˾/)t%E%]z.uFyDl%XO~$6dRU*cQ&G-*c}dxLYDup]-KQ렸xnV+oLZ+ܚ`[=0L*(lL^? ~Y>1W*lB߈qBNd*#.q+*{H"g{('X<*P? !b?xŭ U 5^it+BkK|ӫD˸3N k㍰nbJ5zc\RЯ̅5`?| =;qi~61rv#̅Ʉ-qf$76DiѠw)>ז +t+xE3P4bymf6lV0dt18ɍ"DNf׸mD5{q~6`'RW m8N0IkჂyND銝{Gԑa/%cw0⠣^JB4G\ |hWT&%t{G=BD&{#`4$Qh81L;Ք-\s]DT(=i =>)]VF⏁pJ/IhED&|? -jcy. J9"]&c?ރ?^|#Nߖcn-3OM3l< %ױ}FA Pnh8Iy},^trX0qtnۿ0Jd,O;a*̴ר4)=@BAj.mn-h]Lp=8!uzH7rBoJ'=»;mwu8[d<*n%]K#s c-4:E֜ b;=7(aQ0tٖXC-ofVSΙ |.{&/:@c8n2VC4XNnRluWsׂJa6œOwbϲkVSh>/5>sy`Fb8M`8z=zۋ6/D༻i޸265B|krp8K,Y%OE{E@xJXkw2x۱y qhPK#eO2]SMC~ox%-S‰@9 {fXt Bb#~QMy]thM7Y<N:9MmҸ W7%  Z1eߋl8͙x  : "Pk+_U\{ 顔W.\2oVD~bSkZ${eFW<@h$BlVh&~#`J@zf(3_D 'tSTb|b 21 t5:*क़&abhaBo9Cv4\Ƭf#c!q1ǛB5Zcv0 A60BRɵC"#X^[Pi=$].e 6T1bfɐA: Y+S R yHo׼'jfAz/9U W%g_qgbq2ٓwɗPZ$?aFjǐ蕔y%r.\6+\FB>*Rǘnz[Ng̙H_8혛qrQ$ ivĘ'5B=L~&`yRAȞ?N#8ue#g+ xVS)8MvS+/p=$z-jlv2$}lcYIv:MY^kgtkʦP}I<-п;$KzY)ֱmE e z>ݭTmkG?^+oyz7HwNk>m.ūkR o Źz+?fh5zڽܧ<;yO'w1&Ͱ7^@pb؜b^"xޕ]Μhɩɵ|eIS28MPRZE .bI>h\ :ɸ+W]-oyBova_(C+6 REi"Iӂɯqp$goE}㚲{"UyMM ;hfc [=5Qr&bGn(\DaFN@ZBJ]yb n=TKA߾ϩ_=Fe0DaD}.(nrYkNgZ#&Uoro\pJK: c lxg2U}rsƲ\6). f&ĭN>;{IKbΩڂZ-N XNq#_s&}e2lj}^z.|2|S:1Sf n;u@SA}I}*LgJ(mw=0DP;'7 6婝x;.>8?,qZqw|38d6]"|\VH\1iw? έpG,`-7gԈ DGP }-Q|r9/Ύqv3n zzd^܊J.%Tl\86o%m>aݗa5@"QQԁ &q =ϵ>PU\1Hm1}ٺSs>C餓 I쉎vw-W )8琉q]w^.XE8ŮL?UΣsyqkT“. 
͐c/ez":8q88rBbUN)r*PW9{ٛӂ0H,Z08Z`*{ "|U-x Z.= O⽄-gDk-^n{k-!-hvE9&"s~*(ڭݓ(ސXqi'n%Zsc~XjXP7醓ͳF'zJzũԢr'.VTa@ًTR\ NSƠq`q#j|=7GQgg @- 7h8~t{7ꗲrT;^]ż}ZGﳣ1:.xeݮU[ lq/oz2ΗLJJPwC3 zLgyLok7psݳ,!$ X= Md!{T ?bwRaﲯσE钔a&,S#B)}"^nOQkQ95ՎV}.w9mr $fLF(ʘRW#GI\\֟wcn__r'Zv%iKAB9jJ^, C4Sy_3yZ7s=v.tԏ\7ZDS~-p] ez;t_'jfJe5Ŗ\ll2ZSgPe>[y< %` T_飡_KX+OީG*7a'~GHD B"C2G=65  (m8`ytteXN B́J^ ny7ueNI^G^]J+jx mg]uETaE &NtNڎG t% MASS-PzF-1˰L9jBu-%*9=@ տ&cH*2XI|UO\^ݬGGqtSE[f59kF)/Ғzf_fp]+dFL} 4O5 a# Vɛ!nٙ!ǩt1gxq2[ǷɄ%=5~^$nXqe1}L)ljuY*,Iꮼ.YF#醷}~`cQS1]({m*/|M HbT D%_Ln~4cQMm&-EL/t+eRx"= )2@H?byX(^NHa>HrT#Jw,2M|{1垡֭(=:jbtT/a`P+z,fcN'EA+p¨-Y5cixKT%nAi0vj)\hE$@}R҆v˄(ʟNdٲ1kqj3arTlIxZumo+@J0DQbf/ehǩy<(z@aΧ:]SDZ `6 N|΂QM}ze~fZ7 2h3҇lVEUFGo@1TkȬ R%A@QwU0?W?՚ݳHϢe}nX[PUJ\| F1> f옄g̗ҴUGBP Ќ3^j0] \*([1N~3ۆ"[u]+˘kYUwǫoNJ ׁc7q'R_qsе4N6H&n3o== %\6oJv\"AtYDu,-Pɦ2#*,w|ͣP ៓ŵ:X2sp֕w+Fk N<6*[2$y?wEv!9v} T\JLä=X  HPplrmA>/Kv?mT_LUjSj蚾 D@L^3A!ovJiÖ=כ8 plRxsky^ʖ|67gx^-6/DHHb2e$17|}tc3aw!v}&`gKQ99@ 26WO$}N b[yn Kc;fA2$\ GՕ+D.?zLX]svGCu!,XVVb oH"ZbWpp#J!W~5)?Yz>]@K]v06?XC taUJ2,CթDIS`~_$>Ao@Yr.`izJhnWY:\"ww?Aʼ=9,:P연's%J4}wP?Ѱ3ouܸA d#@ Fqx8r|i(+A߀N瑮m79<%{,Dx@Vb-Ҁ ?A+y*㭮%uˠ6it)))a *<U|64Wi'툄QzNՔ#IW>dP1.KNxnwfT<ˁi֡0BgIu/jx>HYf,l/r *X8' |39:G79s#ۅT1fا FֺP9Xxj8O[_[\!"zԥ} k7UDjHdŢ ocw7<`+qlO_|IЉ1g59jľ-T$%O WJiiglWeՏ_K{-G ppE8nMaL}^m(/ Xi,.Ygkϲ46Q@?[#[!0<>Ic^b'kTsPDKVͲ25Շ(W+#IvR~&ɋ1l> @zsҝyvw{٨9w D3TkJi@X0 EMg](W;cy+ZLPhE8[ g 4#~SWr5z^-ߎp3DRMI:[SX+T7֮^^0ammQ }k_hÉxGS[Sv6l!Y'E7w7 Kaє@G8\yGk@>r+\U *jI2ϗRHJUQq ˰xI$ ?u SOL8" Ŀuu㟷Y/ȉZgsyw4*<5BL|Su0|z3k#U]C=RZ)c]Ʃ{SI?4:nuU?,5Z>T7Z>qHv /jKCA'p]"J Bxx JLՑc F|BWu!dcNQ+E`lܚQB:̢ti:6lIlxfD)sc|B)|:) ֭x~hPU,<?zuU(H!L{pN ՃMOTLXF{ Oauw(F ;YCw}P<eBV4_d[ҩ\~k2J@ L9D + jfdH18þc0#\ _@P G;-%|9%?|{k7ا˟U ņ $V ɥݙ)"dM|Ҕ/v?8[Њ" g.e9oc% opM@Il, V^T,x>V"o И.+n@ ĎܸT=0k"RJ#R'M۫ ug6Զ|M@+#0b]{;ojPVh'Iɥ]#/$#1]@ ]]LSI D?j;G)XhS`,Sd5@ ]7`]edا>|לE@$(.;<JQL{ũ΁@մ5欙jUG,SJ[yX{9 mU[+F߮pFM0W-u/?x*}LXF(2ܖ*yK"ѵ1Uk87*!7ZðRq}%VgH/L 8c&Y%1&If;uCuի i}'MHYQc8<?l-pP [nO4%M߄_Cӂ }fTHqbe?PZ֨EK^LuߌjPb*QmfK:#5h2sȐ!} 
M_0Ufu]z/eT@XCJY#]9AL[o puf3=FWU4V&j:q d-!vrip :@vfxTU="N p"(Fg^k&=-}b+'U"sjS~<"eRJA=˩Nz!+<+Ls*o8$v} Ǒ<,|MkLLF8EYiixLocM{ƿM?/-& Y0*q2~fљMhmSMsvr~,VBG㠹+Odșвڒ%5m!) kP ][o B1P+EY/)Bk@/m2]iɥ^Km$pm!<8+@va(Vlrd}I@z6lBT){jF&$TeP\r'\;=d@oD3]G%9IϞÚkHj!p7㺐%\YO vYX`ܺ}?U2?{3/?z Q:)-ll:{ 0;j:E]1W8IaC KyY8`^SU}ؠ V+ON۹< i l+UW)jiO?'f`b8n =܉Q)Vf 5I*dz`dӅ~X@<Nȍbr KLC]c2dj-H+ SlzW{9ւr ҡK@J|k'oh{b>c°Nsj[IB7~*!2?$a?c)7=fY %1ݵbB$Z* MIcdb,Y oהZ@1i!p8 {2  f` g^'Hp5O}dt÷t mDve~> 9S8ydIxI'xoA䷻iT$3"We/;vb,dnڬQJqzx2tzsΩ mF.&uمLvĎ%KF66GZ& 7:7PpsD5sПXA EDY-8IiӠ{'3qԦ.sZ$x(yxALJsɰ<i`8l1raNKwrvXC1hng ;q3՚^-M(xfhk3_ -EEtцv՛w%:)D(nwwkazz7B+)Q5OZ1Fԋ\V+R1EDA&)qn}ݚc[Rk?C :޳'Oͬ>BÝ&+[FMݚsZnM43g))ꗔtT%a%$MKƛwqKft)bwWBL 5fm@MwY P;j%=rHz!8[ȀAy T/&gi`0uƈSE 6-t;* Z7SO Dt:X-w>s&dML9ݲA0E1ymw*}"D׻U%^êEYB<8 |l-X#Φ?x] >5e^(Hzj 6{ߩpkXIp3>ymI.uޱChTOoаD %24rrv>q0#XG.w b˜wZs3S T~};ӝ= Bu21ô-\ѩQG 7|dլ{i>cJمѥ>? j)fo;;M~-&j[\kKazB7Oh}]y @{A1ȯG [8 CL&퇒DYMnMq|w;Rǘu4(o]]ڴ ,yEЮ|dy ʀY=b.a Sl[yA`ҾxxYɧ2/KV"B]au\#'t1zEi?-ݤHp:6KS*,ОE|8ޥ`GJWk7X~CNS'ҵ8njM_xnNtڃ?ɛ#`hР+__ ކ{߷f-́w'zHxI^@ E,Ihj^.+ 0,VWva!+ E"MXk]"׆r=|}AAo|Ed xdzcYqoSss0`"}& tP~ w[<, F#LY{]͛Ps ORY6!*-o:l&?vN81H*/ Se#v`߸^˕+ͨ eOU&6Z_g9?wǑrr .mbȰ:P/ʖdӏxOhk"QҘ v6T]ULMOg4znltC {qU=v5htfob Ȋ^,H˼Ku]VSgfvXx,@B昰Yj2f3.ZŖ/t=zs4g.ra" cCI .] wR7 W'\?L_ @^<*ēD@*IܖVlI e'7Q *@58Qٹ=O,6`x^$%MRtbS_? X mhk|CBsEBǁoHQ *ٌ,87QH#,O @seQR@sqN4 t&e7Pji;rNq̾r9SiHҾD^Z"(2A2B̿#!ae{p鱳Xw43e/W_V r. 
5L?˞/ )jEl]*gbQF>7Lq:wYuﴏ_bt Y4FEԜ$X;Gg{q_59igLL"m7uJkzc#H޻JJN="3])2ۑ/r}.6.c}-\Oz5iN~Q-%o`Ǎ-ݚ0̢,g",fRR34\W2Xo^O$XN&6YLاln8nmY岡2OӘڨrcms@H^cP/~pX-\j%ah>oW}6oUk9t](-aA*FuRL[KȜ؀wJ<_z8<`e1܅ 8pd] @ZZ:p\9q5= x[xEP[adzl#̼ \Pt8dh\ʈiHGs7;,hٷKjw)[IE#5G(%Oޠ}{E~ခ:傱1A N yQnl]kzTȷ7+^ZcF!n|q># ?Bf!c]4I"[<(yvu}ݠB[0MlwFg,N${>$$&0HW ֎Vā4nkJ@)[_b(zzD.Q+& D_nɃO[]WrI8X/VܕYb54K|" k =jz'+ W19 iR}t'į*HrM*><_3>'(fq`ЧyŗG )Y N6mk|ۤ XM2*W|%xn x NYCa/| "oܐ"P9I^2_xt,2P+΁S1=0n-f`iF8qGl}չ1ڝF\\Qګ#d.לPEB(e`hTA])> `5a?@VFI!plbVLYM5HȌd1wplxFHvSEG0>S\I@S|"U6̅8|i;rgi+`X uf_VE.XeoR38xzt;6Ss^È Od!W$l52aɠtվˈ?!YSgcS7އi.d =S!&I7RUBgП_41xB R_QTr6%3 /\\Py2LJSF ftנB',1xr>TQ`TV/e64|Bjۅ Es;uA Be 9 43/y3@a d#OO<ڴ ?l)׻9`)6 ;[V{K^tpO9e0SLq.n&t7E O‘;[~TZMY\:iVAmftǡWf14o2Fn{# ^d!й.ɉTTwK8ͫU ͡D#[+F,WdInL Hpz.w)@"Sw^$ThI' (sKxkM8A֝Q\)[L-0֢j__;o@N˾UL"dluƆm9W EzFO&:aסcj?p>06.\}4^\Hƽy{T3^%8{GޣȀȇO2P>qYVuAQ z Dwn>6YK-Y4!U&)ϩO>5jh% Q:\8mh#$9ΛE&^k";짎JɎN YKLa`BF(2׼MM a ݗ5;P+YR0G fPfw@۩5 75g1Ft:@D|GKQ`^W@*c&2!hm`J7jA[s<@A޹sL1BKrh0g29.-%lW(DFD:kn` deǓעӬ~o7n mCC/b,ۑ~A% x4^y$:<4NgteqUϲUBI crF_\k}A`a=WPEVsAE`)+WM3PmTc$07AV$gfՉQsR`138j0:(kN^ sE&#)0aL I0sZ " bB-S*5X120~,D<S;vv,LF\N_QҒw35۲Д9wH'2Q[_Hl 9I8*W "TO5ƅDfD$98]9" {oa%&wښv:LU҈rno])0;5G [ >)mlXI?=/qsB[znZaƷsH,Almk4J Ͳ+<4t?'*tEA߄eKH5璔˳_PTz;HoGT\Ζ tWែZmT |zԐ{c{Ui6f MSЀDxf}6+/CWzOyF?uIGd [u7>y=>~3Q<rZ/z$YYbӕNv \ZW3# 4$Vs3Y&菽'$RWƯYNid"Zj%Uϼ]ҚBZgӅIfl7UOU<PΝa#4R5Wh[Myk &>}ea%]{:?P pW̜kX> vCpiǸلrB%pjx^Ēgl(Z|.QҳʑPUͱrtbT";`<&7Rl"u}JßfFsnv Ȅg g_hwb)^~NjǧŚz , ݸ_Il +;۶i]Llg%CI >[c=9aHC ^fGsIvu +vq[aoDچ@! 
QyۂrϋyJ2VeЀAñ錫* ǽX>HTVc/L >zBb;Id4l٘FLr6h֘)RzG8tE?>s0ϥloZ-1@(Vh:f' L٥$C!GqrA9'T.q?rmU:9NLU2UyBQrƒCEƐd?2^,jY"TG$ԙm >ΪKS D xk&2ن4~eWf$7OFΡal>I=ޤv'7C6&*_:]7yS"g a,+agi-ˆz)t1vGs?&~#mb%|x9FzڈTSQAGr O}\tEShoCj͉}9 4hf}f&^~>U0 QE1E٨Hs#}}ٜ_ȓh<Շqkx]GPEo`+RM(ɴs#S@E`QyR~̧FR 6DdH^=:t̤U_Q BuDqt}mXlTOcy+ifHZZ4FYG"]5&ʩI0;-% ܹ%5P\.✀'2ph=Fai#47< [vaV}u7d/bCꢧ7Q06q6Y}~f8$2IJݺH# 󹬕d5+ڷ |Z㆝KGäRg-*񂇲VM/#_6T&D&ζvNygq0`7X,T*%44zR,NJ|V< .[̈́&V\T )/@^f?b''ݹ35+Wt,P6G#(叕Age< c&i~`%8q |C(1ct]/Ѽ% `a"LBFikCʭqS}DC *ܾ[hEw2'ɗ$|%OZ:Tzo*z)ay'Bwj@w.[yCі٤{wyxt )6*Iρp~61ՔWXRt@n%H,5O?)̘cvTCYkReKw]'PrsuSX4T5D>SŠ5&fXŮ? HDV@*/IfLNwh-O6Uqka|n&tEGڱ4  )rZ3vI e4 1*T˸@JI8:넁g pf^ETg=la\0W&ҍ2, ;`C.ӗԪAf{#'u܅ xA^~%qR)|~^ <&"3e#L-т m Q}@@ͱ1J*O[N.N>n׭M0E)Mgpsj[a0\HcL9a#щL  \JDaȩAr{^3'0Z7]C' N+ Uµ]Ն?}c2KN5vv!^aѪ eҞz m>Ԓ H"G1>ύ}$Ulr!4%$jNk6ϭͦgOO])JW䉝7Xb "v^^Ap)zqXBWQ]Ua#CiX&xM;~YriD%S!ѵQ)PT / I,N z$z֫T>vk'a%n)T y ~y8X!Jf!!A%CYwe-.X gЯh3\L| Y(ɓ v?%?te<ߋ "轏G]CrWyE`ݟ{!ՅJj3 ~_ڑe&JXnC7r?=>'u%Yfg gWm2Mf}G >${i?^J'PSlbV>X%f=b5A{  Jc)T;+tmu΀{ cy_ݙk7(g|&mgk؊Wlp_C~ h4%cǤCl+\lX gʘMغ Ùmȗ3K6u}#cYߣ0݌b2P4k:aqAf란6R'lmU%/#ͽ8wb:^|m~*XrB7^zQ| WMtC.oVtT $1!<͒9E 7 `DPɷ{҃DfDJ>gׄf[r\=┤BruEFN^`{}KjA@BXP%BwjXb)7H`yZ<Ğ>ȁObcglyIf $Hlԏ]ΠzG~Tv |t|qJ mGՕ=>Ԁ61n'4LC!baļ$^$8B_Bo`n'(V ^@`P Mwor%L Xc2)u%e]#ׁmXdsfVQײ Z@HyLkgHw))-|/!7 oRm0+"Hwo[|k" Z4ۜǙ=dmAsRx^*sX;^)h,AhC` Ht(RX#G[VS1oʐp;DZ/%w/.x]1hdKU_7wWyfZ\v.1I-^J?bWeJ+&Y8!+"c)'YqԥBܫq Lih<@†?)Nc )aDHH] ztOːx[]%P1Tx߆i 0)S_0XY)̾)29S<_ BeypDe $xwdg\XDDMQYsO6QMpgsz]ft۾;|&=ƀ٦a\X.W_9Y?< mL,C-JlnFԄ )!J3Ed41q w 3sCˏZExqB9^FP꿛Zy6"o&6U{xJ.n:{sTRrXm.RǣYaSQ]V,[ ׭\L9HJ~ڼ`D21:Kŧjg5Y{Y_.Zɨ^yd4\@!5j{wv@,a3FSBШF51BVJ$gt!B^Q$ @؍Y0)akך&?Dz]C'F3YVqë2U ^}_)NAUjaa2iu͜G硩|g5On,h=#=n,Q]4F^'$R>FU-N:dHjiyh`P]ES)qm֩YԿnۗpEeVDcN1S/m 1-- UWˇV%(ܺSF#ru m;9c !91y}yQA!Y!gF[hk.G0IPU}0{{n^G 2@%R0kۭ~`9K` '8GZ;X_Q<4bW &R3?!W1%I(э c1xCɦcs*"M`5lԕ[%сq/2Z]^}sޣEz܅ΚNؼdF@WA ;,A'=Uni;26Ǝu#~ޫL GhI,?74}Pl]]?,nMD t] -P4tʊ]E٣DaRILwg`OS5mϕY-n-:*TjrPjm ӹ啄]A rQXs2Kv+nk)4:URJͿb4\9 *hukB.jgh_MlCVj/ J3w֌+$wzP# $]d'cR 03fY^uJMEuB|fPJpw<Fy(- Vl9-oeV]\fD%z 
"Jb1OkqeFzlo-)\ 8͠j)Pܯx]|q’U%S Mr ݹ_p.bI*VӆcE*j7<X9K1-uE% phi.1P/ ,d6f6 kLk9Fn_S&!P{JS|u;wU, /stѾ"):LeޓN@_ĉGOqiuKOlOUvN^1&0DY=fԭy'#LNA]%A8y6Me$#<>`X+/ꄡ,s_Tګhj9X>WFcykΗUNGFc5/kr4;ԫ_P0f<16?; Y0B p%<ٳi8005֓\{<+t\iiю$z[ڄLБoHh:(I =b[&M:I-kd# W߬tcBB_xܧ lFecX YAcʹaUCsܳ|MȍÙzZ᨝s`^z8p?YPt lmXEUayդq3pU"1>0Op `"o*GƂK:à(*n6wigM"zC ]C?=G%hWi& >oN|o_\5 A#Eyߊ|\b?8Ҷ;oƣ~P XKNmr7@G\ӄ;H6~RQ _"! \<߲rQ1(sE2/B'r K(j B7}0<δWHtb20 `AqÔMY|b_@<1AX+_3пB'u)y0lfOhi.[\ΝNo[*n#<^,jӢg)!B8?E3oanfODz i2.Bs,77mfc = 'J۹QNt4';&3W|hMw-D)EYc0zb%E]TBŽHzSF1me}>?f/"ܚzKC>ߴs_p-҄&i_5J!W]")u뗀yX04 uJvO~{E;gIK KJC{}"+cCDRn$nUa~<|Q9杙f4}Vv.+kzWD6nYGS}?IzE|tmkkIJw=#Ei8VzMIhcHF'~RZPu9t܍HÃd`{٫Iw8u-N\Lq\&nѩPÍiK--ou\zjZ*l# +#*y\9%[ {0zbm h].1Ys.HA(d&نv_`X|,ϞB;8YeQMG(7p)Ih0PDB)5kTvS9 \MG2 fZXWôekJ1(Y:)E o!Q\J/~Y9C)p^~c~&ԚfIGØzepGQIVcLs:Z+wmt%J=/Ŀί1dq9PaGcqѼ$4 D@ g4>v1=GW츗~н'8UH9!@?ѶLXi&s]u@:Mop W76M[cr,ai~hI^*-(6Ԓ|Gk%"z810+**ō2('nȢq sTt;Ҭ\Jb[=%OF9K߽ŋF <7]gĒH\<([xo1mVI /⾯(͏ɢ*9'6x;>wӠLP%q  sJm{kin_%f!SD5*z/I#ۘ^^rܗ7ZHYV^q l1F`vl)3"e1k.?ۮP`&b7Lp3v4^S])8K^̵u16O:I欤'"85RF &P-&rd5WQp;,޻]ڧ%G3J[!7n?ϠF^yf5t*-a-8%ф*i,sZXauAZU7z~)1%4d0Jx4a9yȚ;#;Xfv1* ǶC\1na}h:s33X; '^]4Pdai^#O6g:a)zo΋yM鲖4Adu2)i6vS GӄCOC0~hQPluX_IWuА RB>fG1$^zn*k"3jqWFb'/ЕGI㖣ݷzm&pCy1|E A#YdN<%杸VUoϴJq3shqW,2Kdx!qM vk9̚M a 8ʺfkkLML6$``m.џ}}ϽjhnSӝ eά-,Ϣ P.U6׌qi;EB' cLoh r N}mZ|ԡ*[ baȃ Zh?}Mm lS1S.Cx4v mFC8~?}pTVj+ p H K!0?fP enV. 
O uD.uOO ' p2@5)ScXSb^^oY:8!""z<.&s_{&vAdL #`\O7iN-)xEQL߼x6hEc4+&r}ɕ, nP2پXd8ùbA\xk\h&eTK\u]q c.NJNhһ$21Pz&/=iwhk>f)VsIޭ/h}(>DY1q5% =QJ3Դ>kIN~/S'U8 @w{YD;7p_NTZ`~`d{eazN?iKUvHx{ȣW4q\|ҵW=w:zIiZtJ ]K9Vԁ^%up}#*Z3=X6gzr G AwQiսP0#k5m^9~mc̭g5&b˽Q^, O߀=/\6_u-QäƉZ.Nu`zI KH ْQ?r#l6dc𭊜i p=ܦ (IAJ*ZcjEzG^A?Gf]Tݔzd#@ &zD]$&*WM k@HhAdJ5ҭGuӄ҈ T=|p1!M ̅@\ 6yTme.6ȲJ]Jp4Vp-R鵃1F\505 +nQb8 c$x{/cU6-sd+fk гM^ڝFW60?&5g\!lRDAKSP^<>ߦ[@b~F|SM1 [tAs)O옿=DmS-@cƮ>IթJ"*ǚXVӯxΣ"{/XZWT L %sOuy5!3U0 aqnzHV'7) IHAb;x|)GwHP}[R {y&$Q`P (,Qwu-jM)N׻P|ySq_ !%x@ńʁ*N/d} T7bWahR<}T TR$w ;uUE;n}*hPan}E v٨iUN1EF节l183cDHMxIhOpI:Yf; C|}Ur6)qW}sU/mZ> .X2p=QQ܋ωTӷg&dGz(FLZv$Q;$in 0$Ɏ WKŗ@HFߤ-S5Rà3+Fd5!C>mJWHY# pW7 7wEԯf0T9\RFVlIfN=34Yb]UxeoT!1I%B`2CJ./|k .!D%˸nV@d̤5w%@Qer\a3U;\ֶ*kt3e=U^~gi#P.~263#N`cV1\ gvi|D}P `nh1҆㼿hgnzr|RAںH'DCt 퀵eD\j3o@Ęd~)!J@?8KyiJV;;0z[">r(6!6߷ k/YlT'|"P_8P:;zrCǺm^T2C8(l4]Ȏd}TFwRfv^$iZ3>ihަ)ys`mFY]^iBz 5aJCď"_[4z-~?We6ǫЙ6y?ؘ4QNf$ {Wxŏx0/9dm;ϫ6~5 `90򐕓,~U Y#[,խ$i&p #׿]&->]<_mZ.6tkB> 1TwLt۹chm8je@Y~K䤌U#Eld61$P;zLPKMz_Gis~Bwk˪:ƧȶZ~$LZA1ĩjQ_´ǶSY)v}KD#W} r{4Y8m\TOv`{jw0<+4-Fٳ' Zs @Ѫý5}9W:fz_Aɪ-x8_ ">J} oq~ۘB^m`& Ąε^6U{c-DW`W+0j=.C@q6x9jh˥7߾#`]r$n 0*d g/дȪrMd4oG#zɛ[X&"⻮G(ve#0Hǵ!7X,6sqTZ+9}xvK H:*&2U)=\&NAyL!![|L@|nϸ|}&ET@‰axޫOʀq#1 Wҋ[ɚ),"fr3u3'IoiS5iMZ3(  X[Fνd hu U} u6i>9m=L{m"2f|(wǥ`B߃$C(ҢĿoH5L"a/a-M}Di382w/#"Q8M]"jǜrD/sܷx9>(bjҰU=[6rTO7-t YR9OYem}fEmH*m9gmlɫ_JzNW䦨ԃAMDk?@< / ]*Q(cK01WStc Mɋ*_* AY]9%S! ko'}'DO eA c/ay7pcDhba*F4 is>T2A"CW0(o( 6kb[bQ y]px y}ÏĀu|REtIf2IF/na_w C ys0=$^!V)i*!ALR̿X3}#8{Wsz+;vmGc&A`s#)xSFIžB0J"RD~͂! 
$VT*8 ̤8UAO +Ȼ:r3@ ?5A2Em ]H/݅cU}d6*|i 3sLp7Of t`* Ě\1\!H?Zn=אE:yfQY@C(^5-ݙXG4$t-( W=_׊8D:%Uv@ m<1CLUVzBd0|Ã; W:zug?pD9^Znѫ.O @pxQ/sdv@akȐ7aތ24IOιUBqVK6lD <Ft'у!1m>VO.rM=i5sҬJ ,~vrMq uy,>zwM!Tpi2ukRS.Bzf km/㵙[]3NWny|?miw"oȨʉQɳ5>\ ўh߸ :|ly+y[e(9'3Ojp}ar^}J ma_=4J='P.4) U&ޗH҄pܞ˯rl1nsN# Xp~_s)5[4>4؃+PuX|caGSp7){O8嗣-ȜϿ2kLZȭعOh/а>>v?G-&LñJ$3QAX$N50덠iSpA|%VA %gh Lkw pR3NBƪiBSBE9%cƔRBx=~c]'ANcܝWLs Ij"JF)XS@f0P[֡uUҀXB]Pum eMnnMa8I CԶ#Lt_\h\Tb1rxAnd7E-@F٫IZ*N]V`ӣi2ft)lM4o-.JFBr*SŌ] #AxZ#YaYy:T4/Qk(?N{*tg=9R c# H5-7ӤRIAJq6i,OLC8Og!-(q] 7/Ĺ/} B^vlB#"AܳqI!-"PlĠ$<S7g:wȁgs M7cJ9X ~Z2X]Ј{=-"mL,];or')#+& C;L 5Pa[ hMlBP=]чyf' [.ߨ=?75 $T@ffAg8ӦF-o&&vo PfüCu%vC]֏䒕5f lT+7}Gl`qy%3lEp̆9#˛F3bK`.l4'C!3l\F^FPT'wqi~d6LdtC&f~ @ h9rSJX 'w*TWj3tb[= Lvli+{(\|!G>ò Ti|tJK cLA DbDLCs27U"'*m?؏)Wgj1PяdzK\~6nf:Bؑe@)K]BZ_zeك:Oqw rr7j"D?sNTl Kεl>fDvdMK&Z~Y2vp-[2*2^'6\7tP6N,ӻKP"r"V !3Vw@R6X^9;ê-C\){$>ީ;¸)xxCmє0Wevkâf+ RX֡k妴ӅgłM{gKo i%#$vܮ_ Xfw:d+ ZLuQ;o{v)ϚUvZ ː"oVv@ecAyg>FsZ5.T\U {A `;SIJaz"얾v6 "'tx UR~tx\C﫮5zфtmS,-}Yx $՗ h+43=A%t'yz`lCh<bƀM+{ޗ1c#tK:vkFX9$5le>@#{Y*U"aS.eLIml'xxv"'ysCI|T3<; ;=C\OUdC #& ,Jem~;ڭ_p*1B}&EgXFW_n`Rgt$Bl9;7*O2ڴ鬽7!"/Dc,K:Aٻ[te UP <؊jWc# &f`Y hr˼>s<7K_"&:ks&DTaYzm}?> HyЗzD#STj֜XaW­BpI#k J O/ ƉX%x܌/}6ɧDukmz*Z.A~e[zՁ\a \=po\ G{BJX̥2`zPeA QrS/>H͐پ8c yt-%I'y/Fq]M>n] 14$6x=Kwn;ؕQnVBfm;ZX :=tp*޿Q 08Zt%ME| pVs.4,rҳ>"H?H8-]@& i8PF>yj|oq.k/z`_H]W! 
oFL jI^m3=t4udZ|g=H(MMJO MmcΒVmd#76ݎ}Cǫ b8r+O4W9 O^wybnh33fGaVGD_ y )O}|,ݾVLQ^WxQM~,.*$e}ID;ߞt$~{2p()-uf$ lOA 8h2/8{Y ϓ?-ӹ&z,jyӀ!0w,,ܼW0^9l{҅7/Rݢ q휓(%WG OӴP8(ᄁ* ld3*0=zCt?y>:7o=XZ&P[vC/6r=_Z=-hu,1&-#VJۆd}@ ~un"Wk/`.bsiAYjO_X/ ggQԡ*kBMnW J+-#@U_dlW &F='ޕfvcc΁~$?dMP;~bms s#f49L6w ( !=\z#aFufU0mF_E]sTBi_X8U>A: 8Scor҃)S=Zq0lv zUip,;8·=Њ<4M$#z=U|w{inHzo3t9ҚO|ܣ}|#]6'w~ugL%!2qyDʳ`.{+v}xk˲ DS .#eޤl_ {7 %֡9 R|dt)jT+ԦpKR d| YΤv2uP)lI]ZD7,%LP`2q >n:MW홫,UzƄYs9;ǀ7'YyG7=o _[i e*rh*]j6@:5Ո7AG>9"1:$|H$$fVڱFA=+?L:LzvFW!rka$u4ewa>.FqBof4 O$AcǗ& tEvx~9*wD0,JƑ(˺#PqN3GyJ=I ߜ{\n-MQӝP=ktY i%êp9KQ,>^:1)*w]B!w-SA6T'a|-_~`I ᎌ%*YD(V%u!Y# s}YΚUgv 0NpY7&t"':d>aV5ZO?0kk]` $sV):@FV9S׻cSPvr_JݑB׮p(<]]t,lo7kܚӔ(}61m?܇fkgk$"k`/*Ylٱ^jTĚh$Xwsnw(3ߊ-ĝM<:>P9jEc0Q0J1Yiueq^5J4֤cq$c+a-['~ 7Ľj-ش$͛3/<9oքi} V\&ȅf(c=Y ;h['GGxmƢeedh3{E~gz80~FF&p>,_9`AܓR)m7E^l-Y`4Tcb<QO&?= j@}1X>j}U͘8/aOtb5"~_7!fŨ^aƸznY%=z/P-*l]"ͷZ|`=MdJ]$KQF/>V<~:V(/5yK#DvIִӵ 90zi{˱QC9OWs 7!-tVz"2S:(sqʥКlu׶U;PH :rs4V:}ܰ 5qeTw /Яo56[#z^`{& ̉6Xzv' yu‡h;v()\X_^λO| (lΗ)^ϼ4C=U탇5Uk<t JK|ަvN|Aa6XH܂b g$n%+lO\ AE mPRyhZS!M-M|06kwO;> G8ȁ*-o-y)ޅ} RAKqS2tuF|n}v־9 &s&â) )1:C:Y#%$mBϥcoqaD# .T@xYWa^VuvJ Tp9l3D.)'Z&<3us ۓʗBa Tz:S Le?a*nix#7Ü1Vlw?3_s\'.gM9q|p-oVf,6bDB,q|Dj_/s8Z:>"|gVmokFȽ 3 ({T'J≻ךO,-tMR(^^+s 1@xђ Yޓ`f#Z:e#-l~/9)4^k`*\‡²0$[/ql~ZJF1e>;R]p]W$ >V putW4r,1+%?", 6,q.Q5ZP#{[Xp23Ru(ϻk^Xٻ`z){\Gl~ZH0)U).ZHPΧ&Ey9h)p;'@雊~G-ֹY%W+YQoP9:>pCw7^ Gm3L o*u=:RMEF"St " y*SiqgJ 9ꯩ uWERP:l/T_6tRtو,\fjIkˤ7}JaYѬ7[-whNuKV:L9Vjp@j Doyn`KtEIȢwQ•Y|prQtRgθr}6-W4Wַb=o~ĘW˞ԍ />M|w:ȶy]SŠbB}a4GN=cmV[GJo$wn7u^կ,ΣSWacEGR\s_pۉXyNS G}JK7"byVtɓ|}f?fx3GTwŖRmCe; ° uUI%"Γ ,3LQS Ap]μpAK! 
r@,VRN⼠*}3V23gTn&X\ d~tg- ?m!';R#b#vQPP䊦H$b;'v{?W'CyPwb +$QKCWsK >)IÒmC-7Ȋ+^9iZ#r~ I 9zŸEa=V0j:ku:zw c?Vbx7/ \E)}1¾:\cͿJgq(?B-I-yLn?)?, Q] :/7=r*2s*کvKJं$pd픙4Dqrvt+7=WZ.\&fy|l(tyhq>6!6 !|ڶ\BW@`s-9O7dNyYO05ÿƫIFlPqXO jI!-؋|,읤O&bR+ڶ['i"k_"wq`~%q7fdT o8i}5] l?2/G߃{3#JIVtO*Fm{s7N]"qf*Lȁ6=Cڣſ Å#!7r ҚPq`r3hjTOYL3/x n8X%\nW 5wH$w΁ 斷IyusLcg'|)8n`4;϶բcU:~hiQg{gZx5\iPd᫂o=+M餻ѥ,h:ZE5N瀳aNSH<,Dd)YndA%AIJ?E2vL}x5 :̹f+-jtn78huQ ^\?>C,_O^3L^ŽN<8EG&.`m2SzZ v6Zl'rHX9+QC$` .Ggpq%7Se6-/0|6]8}\Psh1s:eDSjQ*h9 \i"e\&(z{kNk7ǘi@vdu||ŷgQ@ ղ}bgw% 9wOjTN⊬!Hۯ]u gjBUP|vMjYy] ((7.m 8-8vr@?m:5 M($|^O'4^C*PWR#$#'̻ٹM7KWM2K@wG~2 ^?`&2#\un\j] CڻƁۓkuӰz7quwZ1pAǨzaVG5W]P:mf s$P}/8{ϊ}a'A"b"ix 9 ƶ8cLd |N搜waZ3=aŧlCL 91_@#騷d2]<upPz fūxluTT?]ľlS-|ӝ},mEP#4w5 D*hb`d~UփúM$P$måp$#DE1?H&.;VDż rVJ`IP0#MsTlLtq=[E7{m3V_%ե?yT cl2.n_2- t:=$߲,Ap1amiLn?n7'4zţ9'L"RW怆jޭPF0qwYTmږa5nJU␬6^@7_Wy%n Bx4`_KV6"#.]3Qe:ѠePbg᝽NQRs6f7CfRC.dM(#$LP3{Pe7 FRYXJI<[hZ@le:=.PhC|=>6}l噒KJZiI=rω}gm͎Fԏ3dQ֓ۤk'{!.4? -ʯm ;)L714 IgA+ŢzWvu,Uv ۹c@| ~ fioBO3^+xhy Ddr^2}ɓ "$>N>>>c"EM=. Md &0"SSa k^oe SgL1!4KR 2پl3P$5t¼\ Ίm. 
w>g Ӵ0Se6ϗ@A!rS7L@J9q#S̯c"2*hex|t 5i53Bw  XBM͹~wE)9I箱Ć$;?'2O;L^I=,Vi:UXzKEi_<@m.ҜQ^KBI{-dlhFf06Yd0i/vo;uQ>CS,L޴ tRҥY~Bj`BJ&>Yad&$s]IգldG4he}+WVK1Z^(NxY/DFcJVg}HoKUQFژ`q2*miG%Mko (4Ot`_ضrM]>=%K:<5 -"W:@^bvu5-a@ @$s[#PʕGMH E~Ni>gF#ʵNG\kxQ1+D mR w:Rz/Xn&|(Xkq%+P~[xtΎysaX=7eڑ}"O\3, mţ_O@=j1àA2 vzzt2!S_9kCFu.k4.I 6xfZuQcL(P{ KB YaPPaFhH "[I8<" (9ܥT ^=0,tsg곓ڳ\q؍9Q+7b[CJ%OJ -)׻ s!UXO͓sN8&bt|YOS0̥(BN`CP*DK"PjWnXbEw8:&Ei9'E3-:(`#0[81wK͒}ܮn7ic Ruc&$^&eV2Nj6!Rs;SJe?sh=\ů8csʲmQ^L"U\04vȞ<'_,'Mj%x6ad0ŧJcIçmªcGl|te`.>%0A{Zn:ؑjV׳o.[eF$[H =GHWS.6nˑ!Er 4~'dNnؒ>G0J& ?#1nqrQ`˗4P1ghSr앃 uB6.e[b#Ak !9%lG Sc0({e΋ǔsy&+O9?h aw%O7 ʄ^ %ePT8![dR1Yc\Ʈ3VvFLʖ<*9ɍ Qz&1v3J26bܦ[_F$mRh!_ڠc_& z;*W:R#No&XS[ %)G1cz8.&#u_'נE>*q=-]\&Tm.VxXn?G5sh=q`*mMyAb'@D2pGgԃH35o S_gliѝ fq Oz#7| D[I,&v-Jsh!ϵ4iJȨOsƊt%l"5j áW递 _mR!%y02}xРSv0wqDvO>2Sǭ띬UbX%5(F$qVNLFi+e2瓱b4^H8ZbXm5;E񩱵ldYiACNU5ZeSpvRjʫqO@:Zt(K_:hA9T8Ҳ*ոvC8&%ij޹p4<[gI.~kؚ [^aB)>ehnݮjZZ,?HFm uD( |>?wnM2qi壦(MH濨ђU]>{ i& $)Rlk=;,P˚Ztx* *&pڷ>XbwItѾjd˭^ p6hupSoZ^-p ~TAy+L'ކPDJ9ł6{4?u/Mq$>Nx.:)!:2`}?JDnXTNESt0%&maaTd J#3Mz!rXN1G9vd"r$ !:dQ$3CQ'ÍcIJ?!̍@b'+K,>EOA?ۭ0Tb1ekQzyeS-d)ŵ +;|k ܸ\Qf@vlSf,`&)n 7ALjEm].,}yͼa)RJϖ7'k=͹dʀB4/b+I4YJ6sAC0+}iVrn*hhDsbʏ6~)b"p*<)]~M o=ʩsߜx{ixV;:dE| noggVP.2[ĉ`p^ 2~Oŗ? 
.8\/a^I¦ͤŴFC-ۋr䟹2bA6swJwm SZ\"4Z߶?<ú݄kqЂRI;xuJV;~#DUBBYbvZQK-P?+.cѓޢѥkqQ29QB;S+߆rj/U҅=w '+yDx(/y3RDܭc6$j; CB@R%j (]IDy^<0:!Xh7yy ńfRoMULL\U#{wn΁]\=l{P!VE-:35}ZR9VS*ܦ $H-%k79mL3Nݤeaĺ=B٣G8J<%G $P71l49n/ _7穹ky7;bLcK ҿ% Qxg:'{a\VЃt3tw쾹E'cRȼ5@7gjq10Lf vrX0߆G]Eiϭ;ډo~UfeF%hϋ l551 x{ .\.ʇOTSfKW<`G]j}ԍ\"أĆi@g mq8uma`w^}>Y}^ܵ(p`ۘ+|((d6wn9\KkV 2%xHvhTP\sIr`#QQ?Ny`apޖkv+p9ſ xD$B=3\0%&+UQ8y(Cr^@MmWGkcg^Ky<,IeJcؼK>j<MB#3%iLWM IF/۬*wK`_ANo %a4IRws%o8Yc{{ҙ›Y/35XEKñ8|^py1e$D[RyWφY߅&#s LYض9>TۭQ_6Ai]HL)n%U HzVCuG{@Ŝ[g2Zʛ !Ģ( =}O3.SzxO_xWoPr(P,BOq34W[/ *.jx0!?Θau_CX"#:ϛ(UdF/=$}?wAQ}eD'ͧtz]d-}n+;\}ka.seڿAlNEW "R*"%r!(U]1V&VM|H"-@^h!mZ|YDĽ1kBT.r+偿55AҟKFl)a0N}DϹ/q!x݇^6kWRW\dxk>hn\Ϫ^RGgf\3 ][g2&b9v/RM֌Ym]OBڳd}"-q?(H wOsmV@Rջ,VvKڙ\J{T;J m+v'|90q`pt 4llz~7cj|NHX67lc+ $ֹEJ@e|a↶B\w:3bK@9 ^ i^y5J >I2(vp(kK,֯bM6h!x4A5kI NOk|2- -~ W-RV,h'`΍KFsoVtPZ#{q?YrQǤхzl~:}0|3Doz5CBj8ۗ"EYv'x=*rPMcL:rpswx M*lG<oszl'Ūt7Ƞ#J7Q 1_#@ S#%cs-lemRG&T1ޞ_(OeWhW-RXWYt3٥z-nWZy/|:tG,5@E vxK],ۈF:QRx4,̻RĄ[p0'[$(.pLKHK@\#ʂ+4ceƢ:`RMFn;5;kQnX v2BKrrR2C7oj nSqyVRܯZ0aHJ6^FjëV/i$ėq3Ȥ$[ǂy52n{ wI}w4&6siSTH¤Ϲl΅ )WT~>+BGr8KM s*Y~ (:ƔՆp,|ikx4KazU"K>^{If?~qk ; OAe,].tS$(@YM%el9}MkT&d2Wv4n{!Ȋ7;UП>EU}!sԙCbRkD%gE9DvpFŨu<Ľ@&kHxžjĺh dZȼQrM`֝fhgO1?3&Y}1+?C9+:P/`0h'ǖ=*M0RouIŭ1"Sn$rF(}@ܮ5.LL{@aĦ~ĜKiwDR~$kqYᚮP ̹,sJPcdo뱳 ⎢MVo:B)[4Hk:%.ʉ_J,z"WfB -^x%Rm"ڻL_LϝO Nb'{41XԀ;tB|ZZpN'n~G VO/*|#^7 DlTlIE嵔nyΉuᗬDJ[PdzězK(}yHa@`"kJYVS+ V1fI6PR< G Һs ϓlPNi-O-5ordZ}x7̣EfwGbhL5Z{~l̫ď1Ggj_5 Cd֧.  
ab8G2ḟV 7fL$ ۼjDr_bx߂.~#3 ,uw6z@*2# Cnװ%2dyq0D$͕R.*8e[FǠ̅O3Q?a^rq9*2-"!.3Ӥ\J09ʙy,U+bm6iOk-Y@&qןM,xJkΉqS#_/oA|T q_dA=ѹo]7/˫2:D &G`1ČDS.b[ڸ1rAۯ@Mb!RBYtty-$R^ MCd HHj5߁TmXw~0;8 =j9 ͓} 4ֳ,2З}LZ lɎ ]ڜ !jG3@Sp7[-6\pdgW7†} ;\I5)|L'#iMȏf/4X\`}䵖MxXE<],R_:0m+2Km]FW|,wJOvqo6JhsBkPx@ɼ%*Qp}FH $ULpФoM+_ ߰κ6<Ҕ4Ie.؂y=3MEr KnPY\[{Vyͨ:xRaQ0,Gr;*-O`*)2 6K 䄁K6jUNUvVRKio6{ |)GXvՙЫH._XzQzr0XW,2^.5 };so\?xPVmXmmMJEVp9g)ȆX:/sWSŜa" bV(E=@1a[QHAMceoz,t9#/OK"mn$ɬ<lo (墔UQ4@B8Ih)I$RԌ s@sS"u!{{@>Rc,&r(|g&4d_g:`[4))VU qN=~,4p?ٷVza U{q"??'ga&`>Y `⤍psx 8@(chFeL|1.,1h"SH>O,Ϸ 2w'ԍt>W?,Hoc6Vt[Qua̾mW*RT،~~B{P0E꿘C"I܅%(ȁu;p>m#-g>*a-wQv' wKS;ca08SJ@[:j^==_~A|sp~E;c;Xjs2hes\- [[G"pɘ3eJN x2#bN]ۂA+ߩB1ܸ7kvIiGtq5Ih45e:3W($S*0?]zb,$W{EFݣT#BaXQ_ CE'u2r=m[H pBJ*!磳hڍ.U&6S/㼌[WG8?Ka5q.[EvolS$Н2W xPj-/\N."gb^JsY+r|䅫,2C)NO~rK/ٚXL*y$ Xi2E/jo7xy$n4Cf ib?$5RW~)%gZ}v󶥡,t~3qCJh6/.eLUm\W.a;|]x1`'M?eCVaTi ӛ֗*Ow(X?Wɖ /xW2veҩ@lO^omi*:t:RLQ/`3^(Q}(M+.J~/L{`و"&V/;'F̈\es9jpL=R5zqexZCjqꨪ ύOy)~e{c?t8a.84C{@ވ/=p;]n IieQ\2|N׎1w!cY"O,pPSa6YVst8LjOU  Ba:8,7\QUU3H m"fRIBE\:fi ={"Y'3> w/0}F7=!;I!e +E6ϸYMli_bd\ô ''؃3J8Kaj-#w~/8~aN-9S2\~եOKb&0PLh ,zH{irb{cܧϞÈ[,>!4UHWԎa5J}<(/3MDDUBB?>{6]7[R/xUb:D,~ʪ=uS/gKׂDL.Ξ67W%_m$凜\vA hɜbz2k'-#9* b9K2?רL?vQfH(xW /QA<&KH߇DQw K'βK^C=D/ ?hl( /5h'PK%rcp>\Zt%"c6vAE g5Ng/agv'a^Ƕ;t!Z $X 9Dx!z%5]F#p*k$140-M-H>Yyh% qpLT:Q'] > +Ɉ ~sUDy,KmMwzq"T;E>~s\0WOGA`awP`OB#o_4_XImM$Hӌ4'mƻ@CWֵ1#0g1P݌qDvK&ϻxEv$H')zFɐD 5/NuP&7\h_U<1)"qnYsȑX=qw B*de֬[-[Qm4Th*2jK.ʆLƟUmhF o[d60i'Hc76L4^5pw>'id6=SA ~95^y5!(=+~נGзga]OXbP'«GK% RT$NjuX\dnO@*7#"S#H/}*8cG'2W:}0A贤L4L<BN$"W垳>MmYsa1ˍ$MExd$ɬEQ}:#ھ4$0q:46dK')U P-Zh?oc{_l/&^X]ŞTîewkTYNEJ(>|-=JJyʪ1OQY|r2#l 5 \T?lسF$|w;ÕcT sB qNԬkgm*r5yj%Ȕ^GFpUO"1O_邎1[+'ֽ. ._[.J.@Dp(7ǧ 1S 4dռy Ґq3Z '*1ЌlHnNAXi?*qfQ@~l*lpYhk-E. /wȄ\i,3#?zy~sR7q{m ˉ0=NT[:Ι9!+7(|hѰ@ZI:7uļ5};d(,NDJH_^Zl@d*xHJ$eXWlA!) A$Nhf`2F]Ѧ~?f i䥣H bԘw(W]"VKd>?૊b?9=q(G۬Oh+>> q}:d&jkaPhvT,s* (_1̗ EkdHΧ⦵{r@0|0ıvt袼yvANKWO&U|k! 
fuJoʬeeˀfע,-s|';ւ\;"y=f32/6{C@B:5gwd^Uއl>E:D0# Jڨ)>m_*3*MfH- l"e!ƹכDfa:huKHLgr;ei *#=ɒK#` Y}WU1 gt).KiѻQ!"NvQ.+njR"[yÉ4,4+qA^04R,;M^X˟I^=؁鐖XV~l!f3|~a{ k! K~v~pUQ3;1TU:ٗM H%=^e [q5ce`si bBl޶dllr&v/|g1N</+ulH{hn5c~8t-cˎdm ڃi1f ƛ`^l'scOFF|]7O%nh ITj"hH$C7B 򱭽ks%!Ԑ0, ߳^Ǟec$C%m7*8/}@D(뻥} \[]6~JSݖ3lKwu&/~tW;lB7#(i!`y}9f4 !wOk\ do~cӉ] 81u\ĸ98lwhX}EB;h68[6laQ<-&g~1l`^ :7Mސy򿍹ŔVmHҎX_ o6|zY,1CKFl-fD=vB_RQ1ѹ wPwfZn=GSHvފf v? 4*qXiU8z>K-c cU%nGmłč5HJT 'mнZ&13yMM봦P2i-Uoȥlj=wl概ذt7ػmM(FeX˹mDv+tEyBXO) XZTc J^RS$.k/ޗf7F ho 6fJzx\苟K ğ?~7 wfBw}ʒXThӦڼ“`!z# 0.797xYI6FgdЭɂǠn0 wkы~l&垓 mp7cA<,a'0YtSHCM}8pW';wtu״[YL52ܙ!`^Yl چ",2]4|(Ť{8a z N?ZAXF-ǹ=Twye{e+xl}׈H?) H$@U.P9f)|銱c;Ju\樊7e73 R]_ers S"bF@;D!.sn5pTMDnCOC3!P} Oq`$f-Z~Z]a;f"[UNl -L؅NkNM1top-؝}J41tf)'V(V89!ڔn (s#9A 2#(ɖV;jA.R|"Bdv{fR%EzInt{@sG^9z7ȰrW,fQ&g{SUz5ABhif5$+T'r)mE@9#Y榒{_c&U!Iv~Ι8i7д Woϳ{ twn`5Yf*%4Kqbs, ]ptD%iPRWs "tM՜Be V;i<'yexQS [BWGz Y6zj!6R +N|sR֯.'<Ƈ-u͓zSǸ |y颀ӳZS(ɨ-! oJoh/S$?g h:gnGJ5뤱4r.VQi H$Wc܋?v.LEo %  e +4^㯋\ϐkt UÂo9ΏZZAX5!h~WLO SڃS*¾}]QmM9P!aizeš)QwH3P'I&4Uso DGf$O:fnvxF" 2T@GwͧKs+ vA 6TZa)ϋ]W5ⴆ(HؼQ6o:ZXjԿM܁qAnPvt>aKݔ+!}%6{1/1"mYR0KU.|2 IDp]j:t'N#~~v+4w _}JSpM4nrN1McR 9*}P h3f "ji2L%ҋ F,k+T"NZƯ1Ǭwݼ.ߓ)U_f8돔ɑmbnv@N5MR:9J$j<<޹8z1=n=S[!u;B:)Z8qR9<ץGmn/oao }Pš?u+4M | ỴLĵ B3#o/K^ɽ~mz4nR9[+%1 ο_wdMcG]$+{sB}EZPN(xFb '. 
,I:8`-H.٢l/GBY&yL٣JbbchK8~A͔,N6~]xշ1HӪE> Di1B)MadE ,6/8L9շ"wPYňZDXK56kMM REYLgࠖ{ޏr߮eTq.mdj%UՃB$%Ow-C}Ħ;Pez[׺[n-) 4unA)I4zN3ۦo ϔR;B WD -xY7v|юA8Бo͞F9W9֝܅]GyboV29Z[ѡ`|ӸoU:|9;'ȴ֦b7 /}BRl@Z]5x `2ԢWʉ>'i Y"+µ5SDJu(!Wu^} d^4~2cd XI0NR*޴bIzP~&}\M Tq?)Iqr,o3vGgQǘޟ;#piܓ c֐s]T"įjl &˕nV!,"/:.zvoc~ G0GGW宿 ;("nԗ|8&UCiىʟN䬋ճHԪw8Eߒ'`$h7;Kс3IOr $ a識TcW;b˾!Yx&.z?"j}t>y#1 B`|"E?!AIp -NNi?8Df}uۤu05W6sVC1b[N7*&.^ hWpjw tO{>d,ǫGM2:h/7s)9j 6cy`b+@?ŭH[f>R~N@aN( p*63:3СCY՘>w6EYGvTŰן.[cWo*)W ۰2y6!U -NQ;cl1`r'u67nDZ #:2NxZH67H75pY•}4,K>)_B>J2_F>˚;x DÜ22QTi$nd=f|ALW!壅 {R#7'qM{:/_x*!Apr#K_n({*Q{;pY9n>;h ;nVe&5A-1i^ڇRǝ-buTt„Ỷϔ+1oJmߞCYp=iƵmh}+x:"JSӯ2r/ϰQh(;W}_++9ch`+ڽd&%ApLū}"Hi>1˞%h0)f0pToUNd37Q8P4uw8![PK~ oOzrޭ|hW33B–Ԑ{EFr1D%I P()Qdx"Ld#N3Uf|lɨt.Oy3?;P}07WFg]-X{-#󂹚ΊYYAyԆЧJޠg IH HG׵~GQQc$"L\]}ONZf:׃isږ_`@lȐ.##`3T=$&k0;L|1_^1LWႉ0=B"Q&ܧN_AXggeѼɔzfh ̺3zp{Fz31> ᜦЈA'Eg=hq_1bKT,cu\wB7G@HDk~ޱ695,71h #.vM1Q%'&'^H?kPT!ڧpr&ϐXnb\ڌ7*} =hhmvU2 ɦOEXi׊>2.%T\:shР ~MwCזڔU!ʌ̂j ' }V:Xp7Z!YagmQ2% PȓI\Gnh[LYu}}2êaQ,,e*@/mh%DL84*v\O5cWkNZJTr?ր!DyzI+ %_o3x cnZ0^*f"|C/ސ]$#l_A%1A0hl& +Uڒ|k}yY2^@^P}~=}cL#h7zӚtgC;O?jPJ")<A<)9mWy${וW,\!Tr/V났bw0X5?wTVAhIʠflk3+PJuKDBD#<_<b8`/mUuTj.fM*BQUdLa'9=1ET~v[< x] p}Nn!No#b4o^5amy7p U \$tC/cAo*рyvl\Go%le|w}Q2f׾ÏAxW谱e;|,n#~?;&@߯}x0e^s̃t{BVS@] ;fL촜 bN3= 'J&V̩pMY(=,F^.9R\frh7|\`sB[ќz.BKJVר e?zf pqPI:Ð PI؈՗rwX7*?STOMd=Ow|~ 8ko?a"eP{>n[Ws'BK^Fk\i$b"eX#XHP=#TF{=|B<@(W>/FOʺw3٪ (Ye1G)*4nKLrU\FJC=gOE^zBST&qįoVxT횖jr2z_t/ ~m1ZnYUٲi iU{OZU^Ӌn<&<VzT괟|B `=˛EnzpX"y!!* f܂LSȗ5xaʢi_@TGcX[2W_.$y?v]iL2+n10g;q5OЬtZC"UBy,^bšʚ~4J"OM>*w,qSb0lܩ9 j bR8Ԇ>oHXP^DLkDo}rB4> NP`SƱ]-4z| 1+ϩ3H3@$/\|(Lj D#f>(B'iТ*>csk),AB̑8||H`/euvVp$⫨^& Ezk!Z9aYU9)+La,v 8*25:ǽ{0}tvqSʁD}Zj /,xq:% n.A,g#u-T̰SjdeBQ] M8Pp~FB~#љ)VAjAܻ@`{wKJwCYi#2ܓ68w )&PnRJ+geZ2vve^yE@'?2i8MXd,|tj*7^nP@!J7.9lxy/-r.w'fyj4I5g O̾LxY,*BGCX>Y(2[<8c0D5c!-rE0lf6X+g<.{#1_iQ,LYNN@Nt>AXꑫEToJo&F}wʆ!n͗ A,s3ùnE.qI!t\x"fb;q`snx]phDvnT?ZJ̓EЕۡ< :.WsP#*x8X&z-X~ XiQK@9yˋxYЈ_w*ՔLN=}e~N15~-&2.ے%=])/ko; ߄B~r_5-E,r܆Ƨj繡7&Q;ۛCǾ{JW)]) *bd$ 5c+z;O:l% 
h-흂]w!z2ެxj%λVc`]IdGJ2e45B[~4i:)4&AP2s .^zN#^2bFQ' @u쪂ұWgI"^%~fnLX7rS%]k)|MFL d5&I=.C}= \eK mBz 5}˞Y^Yv(]@/?"0m(SB]0en-9rƁPGw y3{00̷ lڭs T]՜/al:G)Z2:\eߐ寣ride >ZT%-ӥ1_vf]{=r JBysP,*é4kK Kvdk!^`MtC v?|?k{g!X"f\@eۃ.f g җxIiv#AC.HNgOY:!A@WX4[oR 皶B~s"k˽8b9ܜ~M}/J5"e3 [~QxXAʢ(WuOݳI_ԉU9'G4ۋk;RNc%a)5Igϡ2.$F++` +('#UBWkw!+ix)ܽgF%(ĔóoR J+vuQYX##o 4pctu {@*39?$."u9au>+Fim ;HqH>buFUJ,O 3ZKr:tE yΔRa-#gڐJ%g=QLĢ"hjJ3TKJ4=RImUh rR+ӎbT-2;m \pkR(b\xթXOg#^Z\ *;mC=AK~D){D@IcҴzt5CGm?lJ/_uUA,[1k8HX18#%  YLD~76] OતqP2qt~iH̔\[㐬7nJPhɍ,s`!&X& D*7t{n!~C$E66QRAZC'PDgrovYm(9i=W)TJ[&B ( BtZw Sx7-\8E;1ܳ\RE`(D 4 -IЩ_2?pC'erOT0BT =c20e_r%&ZjW~$d 0&Y2*}jjI䉁dƫcc *upI|Xsяʀlʬh Ժ :9,]3 {J,CԶt>AXg H=|nLKG2TEW l}H?&MuЏ.JYhs<Qkj,"7r^~,:lYZ<*qu;u,M 'q(C0 ŹjM)+kFyEt$=L&Bxa)䢠i('yk>xT4#'fLLgspM*"cTݤpG_X4JjaDuem`P 9p`dޡ%S;?_w)e)$/烻:ɞ2ixZ/Il ژABZ U_rIj;8WL YP$JD.ZVvE-ik^Y#h(9%m ;O"<˧kUpޑ$d1+ݢGiiz~dɹӿd>iÞ g&4mJ~5/@ ֥ǔ( (IrQ}D (/ a9;eLFW_ 4vY֚1+e>"e+q%>pe}z!֨#ѳ1;,KC]DOÆli(b..J_ID#&j}y2vU| Ze5;PC/{Dg2ӭFah؟Xtb.٪!p$ps/Nӟ3vPyq"m>JJ*Ϡ8AD<åݒVtǀahMΎjcDCxDeWs,7&Z [頬bţ#pXPD8{<ކ̞$FZ7/9Kqýz"ktTc !Ung3ÃmrZP , 7װu!8ڏ@LZ.m|O3ZY2m d2 ?q[[VEԪ4-irQ:#;Šho,3(`]#>?0Sf#M~&@94Դ~ms[hve_aitN'p@ 0s=Nw"CmiT}Ih4kcS!W47>8 *R*ej{LmvJ=:ہ>(Bij -ITm ڈ|!x֐ako!"\gkF0^NXp0mWJHS] c=a)Y 4S&[g+h:ut9c3EUʗg'5DsM[PN + % _@XN0 HÐ31mhh+"X3WNJlWU(nLL6.K `xq%=jz<W=~C&uK}_8"EQJ|pV!ς+4~Чwz~ћ JӫTbGmoU/`ӌ8k8c=쎫HIqp`+1RXeaf$>vqe i-܎SdT &Qwbz]UE@yiI e8U7~G[ݗֆwN!{yOͯ7\p+ȭޗ)^Kܠd!WbF8R9\O71@('x CEə0dOL5`|kXɖ$w-h䍧ILLDkCqXTah7>[͌DPFB#Bs|&gL4,)T$cxe@^ hvEu7bÔd]Rԭ6ց{"G_@Ҿгom8ܴ{q4-vrNȶW͹&]8> L>c'B'ML豠{*`t(F4|ޢ `ÒClmGxj#Yf74Ƣ6ftT $\MYaAqIhkNrϖ{.v_hZ6SپjGOG&,IAbMO ;2DҌd֛-y6FU׈7'LdYΞ` %Յ`Z~LkCO ڤ?n ֈwoEFK_Ċ;Ui 62blA4%6ڋ_qe,ݏQg6(EjٜhĻ5`L˥Ju/0$1'cZA5!tMCou{H.KFT*m[ziI_I^̂A.tCgsuuXl 0sHDH؋Stk ~:4GTfCrbmV*H?BuMj=`` MVKkt\U2+rjX)4 {D0:V| +e!wg<ޓp>;;W>>Cw+(LKx1&cӚ:<>ooY{ g 򡘏L616 bQ5@L(S@ִ{.P%c,%mX%;YŬ5!'d4P GǶV.  
kFղ`fpX7O}NŒOpW<T3%#d2#Nbʑ$cGj4J6j3 1z|~3b?ΨZT:4l:*jj-4RHQ#DteoԝMS}yKQq DnTE0¯-y3%h}Zmꂲm`e%R*³'l%U.N9FhhNX&aJ ,f < ]ffK`M)`)J?g%qѴE, >Dܴ2Ih!|M\&kt/Dx3=韮ԟ)CZ65oLnir`4&I4RJ54iHT[ev P*`Yl W@-X?Ϗ̪)J$EaJ>x/]|/v/E s7>x `N3m˳^%;D]Tvry9CniW/v)3ٵW;*GV'Ad ^tfʪ^^WTN6\ߒQ{7J1y F*f:FU.GjM!˔/! vY̕옲¡ p[TыFfwdęx9W*a+u/|"VSsz+b73733j\A ~R@#̑]ԀK8 (L9-@(Gn ^VG`0$d 7.)Rb^O},W҆B7g=HR_~8mӭ H+Vc)o@߷p+.׆m eCoF6"9`rR޵ARm*dKMi+m# AIoxU4!_Xl_ze392k 9]Bm->辶KU>UWaT4@-WޫY-ܨB/k$o0!=A;qAM}4FS~<&楍˘1SN֭YAXV'ndm];f{x2>.TQ1_/nƥ,u  ]F.X"#R7P>sXIy Zm+h5O¦ YQmMZĭH|vH O³#΍e>mi8>#&VI/8]:k^B~|7yQ Y( @  -"in-JǭUNM,Utvh_pp2+qMlEPOw6MQr }( ( I$#oX9pSo|/W0aw]ɩz%O+֝vb8f%s*ih"r] Kiɸ9d#AFrd*|1m*DXI xlcP_'E;xy; ;`$ kK#5|M[H@H}"ys1c':¼h}ͭ3&Z͜ei"Bw|o-wҵv|חkybYjeprCh:cуgJE+QۦêfJ* ˓*NxZIӬ0T- R@! l@ 3?\ޓ$C q\%cpyRG{7|P TSIvኼiٮ18`[_e,n K\Y&;Tg)jO: (SSz;?o6n"|rIr඼?-($FY?4,oЍ-a8լ~Z T1LTKs#@at}wtּ )8, Wp)03Xf! Mca+کVh g L忶Nm!WDǨ6jO:{:7O#u:>ƭ^9-c+) ~>kGWRy-8v.XԈE"J Mz*3b#9\5^5d".9/eIFҹ,: 9.)ݼGa (eq:Rԥ gVt|ƍ>VʃKqw֛ KmP/%дx6\>gnaD_{WF޵9U(P$}\;y$~Zk.TW }gAϕ<$&]XP(a(ε6NJ:ᦁĊyL,辡IsI`m9/F1}k|İ&4X@GfeՄg|s-? 
{+kbߡ&*U{ߵo)N=V2`C4 +1CȐ)NRe_!Ues{!EX˶%/ r}a]V9hES*ʲZ$ yr"9svैHX>v#hrk66S~4*d$'4{Zfy(& ImgiA|pLL' E#]j)۸vqKΕWpo9Y5 'l{׹eCP8CA*Pf,UlTlG>'vR|'ޞj'(Cxd|J3ܺlQ2o&Npk\Y-W )Ve8-DÏ8$ZO),厱+@ 8Le)g,jzQ{tДn:nJnz(~CYK:zy繬~o1Ɋ(3ߡqwF65!U{ g_q A%B㛲HDq/E-QlI[ix~b"RE ZK%&p`_V*m*l@T 2"|?U ӓ.KHpm+#GR kjo#z*ڰ XTiG-2'@)f#F~lg uD,in8eW}zT]|)F&ADUhҨy/Br틹 ܾ/Ywu<QCM$.'V"[낁TkT#}c5isOݖT3fM۔]Rq~V\x~ޟku !HaA`Bq*ؾ[tM2/OYߐ33؏W8S5vA3цnKTpE\/bE*˭ۚt+]񑸪{v]MZ !=j=Q)K򧟝RtG͙ov64y \Z5Aɡ}3X*Usz~5N4%nҏ ƋUq6jCF"h>z*'N@3\"yMZuTسX |1r).a#^7=0-ĉAn }b"(7^JGs4.ĥ,^o_ݐMQ.| pdk}7,Źؼ.nԓ:J=["LA".$]PmQs7LK׈G84 rntesOhR)sbgjlfR j.h;F3]%^q yOI 5Mx/}k?*y +%'t~;^j9|7 5.j}A6hF)Z}Zƥ86~ͮ]J_SBQǃ5¢,!8-/ӭҠ/p/v\޾0hXs$ sqkd;GNLHm"Oi@zx'G]dZXo'A`rV\ro߇GVںe]k?WJP|>i24^Libq[pc!^7+pPUlG*S޻H9$@ӾHm RA6w4[4k%%gk9 5xU%伟w,o&=(00J1wv^.,\k@;7㤦FLqĞ&i SD(jƔQ[ΎV,%lK^CxH'͡ Nwzt1p9ْO{&KDff9u{=473/$/M]H&%U c6t5)<&u iA?/^J܆i\aԬ${:T+Gy%;#<D`a0T{jo0mH؃`k4.}s~R EnfaN|dN{ fI_r!i%?Pfֶ|1 #CgG٩|y֭V#7^Ae[r&bb50i>F\!^e`  >0 YZspatstat/data/lansing.rda0000644000176000001440000001573712252324046015216 0ustar ripleyusersBZh91AY&SY8,Q]꺪U_=Z) π*D*J(iD&dR4I%PuShdi1i0& ffd`M4M'@ S2A=4d d2 4SѢi4m6j4C@O2dɲD*'4BzjOiMd 2h 4@h M"hMCL ROґ11?2ƢdƚSOIMf&L`LP40@4M0m'L#%"ڛ#HdD<~LT5Fz=FQmCzHM'FFA##Sh@x!&'r>_s(7Isl/V*pCbJ$$$= @6) S*IHQJHbA H XUUV1%PJP2$RNaaj9L@CldCԆBflDA]^ŋ1:eQ- z.^!U8iEҡ< ww7Y;g3cTLKJAOT;<@o(0:>ԉls,v_%)%\K$p qf.\ ؈@<ΐ1ȵ@tfP:@ҺC,a!2TgvŲŁb_Cq&:Q;S.%EAP4总"BtjjQt=6GRVL N>C*FB)$@){xf[H=ҏtrP>(nؙ@;StHdȘwo7JAι/1 bv`<\~ Z*sL!}}\ނ;4d&a3Q6 .0 g +DNwǃ!MҰƘr&0(.fWA 8LߝF<dJ\@@=x>c)藎h^f:!@207Q9A F( b YaJ#墈$v39sԀ6;M_+bEa%VUg.K["_ұ*pGѥ^Y)rCdV46?|2 F6㾢*nH+ETs(f*_hk`N@Kzr-4jfd-8#q&g0dU)'j"j")$=cԡ?i/fA]WK,;a׉(]p!4>wKjh $x&!vYDJ\嚘j߇QRK*Hܔ^W.g WG[LL#J cF \n@i䡝0 a3d8̔-MH 'Z̡]B0+-Z3je2ᘰ4L`(+gi<Fy5Y VBEPPk'"{$ٲM: r@f ^J5)`j:@+EzyǗT4zOثΊt_ 彡 h;?Ȁw~7*#7, u^3Жڅ)U-[svM/8Q+el U oB֞-GgZzg]q |Z6K`"Zck$Ԩ cʊAk.NqКe 8pW⧯qW+!qcbO[*,0\bу[4FA*>B`a<؎mi'znhȄȅG)~6C.EWMmc r(wlLvs1 ?C+DB#6k>N06w= C nl8楛F/-#ĮGt@yKNe4Rj(YZsAѳw0M[q֋-t yT~aN])dC1W{w5q:]k_OsiemY(5yRZrt Ǻ3Ν|zmb;;=@x-2>H"[C l&9 
5>Bx0f2(l!NJf͒'a~*6R_C^~(Hy&D,1WQU!pwVhv*2|+>ib 45wլn k @TPIC*;mԲnb)m&VvMM%osvj0@UsZ³$}%"QyK"30%{GW3`s5y4CY*h}X4M۾jr{(uuwb:N"IOhg I} `6kJq*4lObYkiIP__c4IRz m5;v`9uKrڽ l4Gysz#i)]yŅy1nsԀUyӒ50ŒMIN΋4!Rj5xVKX†V>'dgrLfk xca:V+HL)(GJ1>U&$|~UI W rqJ*+thlj"X8?p0dqQ1Iۊk&sf;Yh30$/su: jwvM>2ι;ۜè)=K&9<4hBbns%_C fރ "eUh@WLEg2T"_A#Ό+ s)~`dL$ [rjRЃ14:= IW06r;w2ėcCz$Zs f? 4%z'~HӠlB=&(xz#MqwPT.|6Tf}< ІKppmXXPܷx9B{ڇ Uļ.8j[Gw]$6әڿK}Fj-΄#sTUd(m!d_x \$q5M .~ G%D tBlv JH]taˌƓQ,TUen"+/-e5 Sr`ޞT3YЮ6aͻ\SˈL9>~Z[\qQY4: EWewDQjpU|@̆{CUoA!6ShGh1ԕ nš < {>Q`]&Ը\&@ύΠ!%~>ļ| F WjWK2j*)GU]%#ucF^ i=}E1rFelW1ħ]WrҾI y5Tyu>“;T~ʎ=C GbJ pʪ:QL$ ;a)P&NXxPI2$>/Z%fJòglK22=.4̃Sdv.FsR ҍFnj[ͦE l1T.eӍV[Xo[[={[*kL ҠbnJ9YIg0((V^uE*l_c,T@az|uJ]fm(6kZHoѓЧVx*ah Ri-d#GQ/$T2qB`0KH:wC[M['5S%R{Ԛ- %Q:La2&؎oK,n(=nGj.T]0ɨOFyYLЇ\gSTZamQC{eB|9B<6aw$l4o{m{MV^~ú8P H FL4ҟ" MDiT*׉㛔D-5*" "u=O uA4#4mɢsP5]zr] 5 (T:$Q"8خQ#؅dB歄t(kܧ)stT\pD3uŁZҨYy`[-5GpؘpT(!@臢D yxFRmҕWQrDP%G[U(@Ç=T@ xE:059U+6>G{w =2qE vH9%ư"obb28b.Ҫ uaL^88$_,yir>.9oכ4f}?F!և[&I!4iقϱڎIGUDd;13h4g5cha8IG40+wHq@t P }d/3ƔIE[! kMd; s$_hǁ㮽=j ?98!xor?׌'K]2<(ɳ$+&kjG٨(7֊:GGN !Ҏ @J2WHs 3lٕ֔!p|,Im i-KdLa~ScNZԒ-u(Et;mP0<=[杢vwpNY8Ƣ\qFaH1J!zj.R"_|؂h] . @bU6Ӕ9H{Q$I ,KHR h oCO]QJJWC#x*A: m$-p* !&Ѿ8(C}d RRBr CU@͘.̰0uA @X*AG@}b, Ű$/>?BvG$ES6 ުtж` ܠhJ$nܔn\)UB> )QC RV: pB< !M̹b*Օ/(&_G^@EnXJ bP;S~'% S%R " 4rQH7Ud tˉLJu>Q=(QՔTk %a B#qLpt+Kx`T a' wp  - B)F"1UtҚi2UʙS 1QΜC4,U/^d&BI%ZPH$TTIPԕDFHP!" ^WT:G>Y%o zI&q#  霐iv`PL."Lt3%L aB"U*N:nVf !GP"P,liUJRtTTHJJQ@B=~za0nj(,3l }-*f 7o1-Ҁ Ђ{'$ m"0QAζ9‚^D@NZqWJ5ZWPXr,ϔa*bP@`R #pK6P- {tnIIBHi2ɰ*HH;}3z#H B H / NtnDL38QP6=Su6W3}Ty@O":y. 
BQP:gH&:-2:&Zںˡ&00tD4(ĞvJ&eh/"'NOrE8P8spatstat/data/bronzefilter.rda0000644000176000001440000000734612252324035016263 0ustar ripleyusersBZh91AY&SY{߹z}CiJR<lmJPI(M45OMM0)7CQ*~oMSm7šb⡧ÉlAz  DH RdR*@,w01NE&3OoibzжE/^{"DoNRȓ&C:em>>ek elE@D-D PEH !耸3H_-$2[lif:ƅ?F\m69G1y\nAHDoAC_Ad!#|:=6?98Ҧܓ&x.{1cdMsP{OBGhW7t)^(j $TNcb򽤸OcWI#30&dGSI'1K.7һ$=|&efnaa͓E'RT 3@Ěʭ,V z$O>Ue2|<2zHUT8T2dY6`ƶ 1r?#`(`PeJ6,}=:%S)VA,Ve8;h} F@L"'DeU,1h<Z8K-[*]@1QZ4Fdc<jQ$(k~)TYX? VȠQ4Yӗ-lRt%<&AJ_4^sR`%*L[q)Zlk 'N2Jg p޴%Lȴ!8cx}teVB.t1g皴Znˮ2ͱuGH $Sv.H==QtXcs_խGC x$DNq'0ebd4e{cX􄚰tIᖀ"Cep9+x*5S LF c'7}q+Y"dDS(E$uX;pur<9ȪuSHY l/u|\L\O6ӞqqUȜ7JbT\{_'c3cK5I1 kkfml@A D_Ymtz> 1foF3ϚX|\?VU)]dUfӦ +EDMw zIxChҰl~#<ۯƻkFhB퇉X2WIpa`szTCH|S,>vC~9(ArhEW4ks&ffxm󍈫x6?B%?Q??S,@%Jyy?-),X 5$TxC=G ´m 8\0"w=!* $MbcI MB" lY!Vksj$L$FLLŪõ/G(zA91?$~aqQbUg &,dE&}t6pw~n%㴇|v;9w0˲?ef^!#$?UP .4Y,ɹ+? "i!Q@IAJ!횹8céEw_VD#U4( #3%4T4 6:/lx%\$~,⿓4U29pMVsYYssXyIBD0dz;F!2;Ah4]G8?"U\Xکv6^u[Q c$ 6P9mBZPG'^&<(BRP !t,e *xxxث.^f2^f32e@mV{-S)ת^q^f /0NjFSn:3IJl*J@a 8u@+m,  W1"s*Op(xLUXMzZQϹ$9l쫦.r5ߡXpbQFܡLeBvŽ5= NHZq,x~7c[ɴw-ğ1$L3H I `x+'~YQm+bcڟxo[nR@J.2Ć\ !p^h%Do:VM&MdD *Or,R)ȧ U$YIZEHئ]^;B(["I xN.c&7u2`xGn殼s]fzs(K* h`A8@!m(.p!spatstat/data/longleaf.rda0000644000176000001440000000677312252324046015352 0ustar ripleyusersBZh91AY&SY?O=I$I4I$I$I&I$I$i%} ԫ8vss`7q`14a!Si0L*~'Щ{I?(=POSSL~QѠ4*4C@h1@Fid hb hɀɦM 4i#ML0T 4hd hb hɀɦM 4i#ML0Q ~E$zڞѩi4Lj 4 6<=;BAEXE/>Ί|zOdTȱ WgbGv4I{AzlxF/Š]qzR7 :@ -Jti{=Wt<'sfW15kftsmR֛YZC{IBdؘxQi+Tn0˃-6"cCX\'Ns{|('/\6Mv&iL h5NIYeu&Ld̹d]mo8bm"(Rr2(H\)R)HWQrlbRQ@H!޼Aw`qjE!d$YFGQT:&h)ZDUjxXQ&%e D鸩ZR^ZxDe$UEz[!VEHiEDi褪fZanz)QejbQjfFUiI)fH!8=@z }O\B 7B)@R ,BUԴ=<Ԭ#P+KL3p!SuR)BTLrMD$ (Q !囋Fi,%0FFFy=mq\.f Tꁚ83fAcg;Q%̿Bm܈dNVVCmiJ(WrrW?L8 1c R`j[ SW.T`@voQFEqx `,( +\ε-T۴Zafaٲnܪve8(!%gə( .Q,( ̌T~=Zz69('hJ 4\$S-Uy_|q{Ϟ="@ K!T*Kc.׻HUɖ+ci͢8 M1Nt[+0b,lKqgM#=V=[g$lËKL;#U{8~0,ξ {hQZz(CtNk(+1<r G?H/8cl2 Ӗo"<6'xYj*3 6}uyd5K2Y[wfifsKFbHXC aΞfѧ!js5{*O;[fgi8sMC%u z[|SG[ʙq["kmĒl48'/=֛Xr//.d-t[a\S5TOK.(S &Bh-Id]$<'KDyPͷ &Vg#5mZCʢhh!U:Y^mnHL]I- 
gjj.bRgeb.B]:_yE/!1OmtC1+2=ky&MF$X\ٚ]L]OH J1Hq<쎖y'&ѕə".$.g=EWvIԙ I 3u]ꔕcNܠ$h#-9Kj=!=+='\RQPeE\FRʡk'L:s{{ vc {gUaA$ hkr(.R*(v95|Ծ54f㈂PO$lMM<2`e5h20R F/ ȅ]7 rz n^ڷtzW72LVϪ0 luD5s9pg:5.ۄ˜^NIGrϰls&'[#)=xa{]W*< ܇m/sq9}X>d_#u+sxaw׃삦":^W4W+_J:O#3K3R}WNUϴ :@œ.^T';}$D %X|2}#y->篜d zr\` W>@b8FIS!pɖ=Z)Hb:w:ZC`["Jn֭Xű#e|iizuR*̸ ]MMz}lޯص><ݿ_{k9r$K T;^‘Kn}Y\bM=;ZzdMF8^Q^yE^UPDWDEyGAy{D^@9EYdV@$$lp4zwM2Ͷy;|ףA(IE*{(.T2fR>(""}5cUX슰(TI^ecE:Tbбbr7]L1~lB; 9SJa摒b&e"е>*GçNShTUiU6gvm@ps'? Cy],̼%*oC; $GP]Cj3s$2D>L+C:#Z˜ѣ{[` */_EayCb' B| T.SS|1a,U!Ou6*A+ {''A2\WwV6>5ZbT2*R9ȭc&WIŤO 4-͛jչe%N{"RG2ySzrE8P?O=spatstat/data/hamster.rda0000644000176000001440000000417012252324042015207 0ustar ripleyusersYilTU.h$!1sTP4t:Nкc(!*&QB ]6K3tPp[&w=wsrɶX,閌ĕfɴ J[ .%4 cfzɲA\ձȞ 'gJY\W#"K{ALkւ"<lEv;8.b۔ ˵[Ras/bA&dKܩĕp^: %$b.l&%mbGϿlɼA}}c97=)1&) IOQ"3>T+Z=6",slwo;SN^Yc̐ىfz쫿CXR9|=9g肧Z+(MзE3$z>joB[ϧo?/NZQK/VxoZUqV'́YU6K0 OI,G~?:@_ØUjrD ծ Z&ae]I7$ |Fs|?(L]%q~ ~:WߵҮAOGdžq4w_ʡ>>|l(RiZV ޴s֝ K~f6D:U|o&a}=' Ս#L]aNW.р-_>*Qk|-_cuyW }I3cvv.A2KeJ>W:OHYP7\ƃb=dQo89 Ǭ3縞qc`'1<fnx1O57uuIo"oO;'v ;#[#)G N-1y܏ӏh'|_?ꦩ[)Kyx!?χ[}? fO9 xsK7`znL]L6}rCx\o}aX^ė})cfFn"OY}Py̹C=9/A>GQʡ1s@9#z\`9!άs\Oyqĝ#QǤ<>9l+!_~sd}zǕ)K>7}x9}9U%xuZѭk`7 &fA#Ρ:G>Wɿ- qmwh'_zϯ^=ÇXg~E9}7x9x6?0p| ^wC\w}r0~Kz]^~e/K|ybʀ^CHuikp߭o]_g{ՠs@Fs:r޷qlz˝p<wM::tF%J sE'ȶiM3oB:o7wҦ\*/n!Y/Ws3cAS~_Q}{ ;dpbq`?iv:A䷆m*Lc#|ևrb\92}W *H7KXsg9=x8R6˘ap.TiqCsʪ涞$3-[t+|yS?rH]m gˮ(yS.$:=oo(+G wӴM˯B{\-_i7 -^(ьKu9U߄C[/5>zv o^1~6XOp]:eO:;bECt7$.e;ZEд_c|Zl{|]Lb/EyWi ҭv,XfayIUz y JYjF}u,X=v|Ui9|3Lߧ89};UhyՎ,ZzJo~قzAy_XAx;̦0](v tz-Snm~wTRxگ@:Ϧߜ^f5~8N#, o&鿏RH#N $F8'NIZꄅ[ (xl^KԘc~u6IFm`O3iH8o؎H:VֲVR6?]YkGgx'A2-66_]OBrg +V`b ܟ~v_EyR'#i #mYV!\kهo +Ю7bSKfQI6$u1UVT* )͂6++Q]CQ ^ݏY!VfsOt`CB̿1oտ jZ+iaphrn>&EeX B3~[t 6DYJ1s. '(QwF3A 9QUHbEbGvڡ i׸#N@Wa\X_bʙp}e=% _QXGџ&O<;^^)|\\+D&jq,B.ڦK!F!v>IjW{10ruc y_¸CœzH%xr78iRcUVYKY93Ÿ}*Bw B,mpہz&_,? 
mp9,ǬFQ+7g.1O>02‰7S81c)􍃶R(IwIQ=#m~"jvɐL7iT9jp^55VJ+&sny{S]  E6GI$ U8ukLJ/K))%%% 1Kkzjspatstat/data/swedishpines.rda0000644000176000001440000000075712252324051016260 0ustar ripleyusersuO@ Jxj f^$ (S|AQ1`q`Cwk.c's-M|ֻ>mK"/,[y|-  П-9ғIyW3 LBCF,މCʭ?2.(yU- L*kY"\d=' k)7ҋ7It6KS:@cNzf$ml*)"Bsy-z{cz0<4< 0/`i^ TM* WF'lB`N4oc5ܓK>` 1[oW f}a?tqO0kO.|aSR1T<< 6~ݳJfsYf2k3tENpu N#m/}ospatstat/data/hyytiala.rda0000644000176000001440000000476712252324046015410 0ustar ripleyusersW{ΗJ3Si(9 =+xSJjSӎD D!QB $<4% B_2,Hȋ(@QᓶnZ'KY`P.;9 ."F dc0Ӿr?ap_kWxq{~$Aj@ w:˴صxxh5p:ħZlrf{OJW{@fٽ3UsZ:fEGK/ο(@qߥ) G"k >{e7JRBDS% $tp  V6Y+fdTR'Xήu_QOi݊;^mQ!}P"[xaj}T'MBWF%kH."9v46ژqP} i[+^@.v|q`Ӽ(X*9X S(ǜ(jI{: W*D0/_:M;*>{&*+k0f茴% 3$|`h?h?A Jb#6]gNÃa/ '.I}I#mx+K`_bfVόh1@-cƁs`̒[vZڀA0 #?ݩ1~ :t[(#xުtr_F@{ +zV)V#ϫ F]GҤ`Y.F}k=0i e W; Ѹ!~,[˴n,yBJ//ɍS08 #gVX7 fwH.?K5ˉc`n2qk4ZVq)N~NqU]͕`^Ǧa4LzWQ f.-"fok_q^nFgm8ڍ^*;4vZ g .9tӿ M\ C{ϯ ~M1 oػ}th_(͗ѺKRZsHȑW>GO6jHAXiJ煗d?W^́2n-pEr{@s5}SDLR(kv3n ̫ UuLDqN2LZVz4jvhujaEN"ymӎ߁,a`̥Ozdc!}Dޝ ؓ^ ['0]n5o21Lb>l2C&9c''~0t2_Z١\VH3Xqh?yz5!h8Y8{CyD?:V‚\j/p"87*9ځFn=wpRuVvP=AZ44)n;_ه86twRwפ `opU1*}ObO&&~-;jP<δ\'KM0Fe圾Z`uL<;s\.T?E_[4N-v\&/,'}I*@m4I ]6RhWO$;ӣ̕t/ǃi%+;]%yȈĪ_iyIѳckZ axf 1Uu{whqWU8msa=Ⱦx#,b'nI{#e#o=wV&j2^"Y2?Q8DX0J,Y5JEM-Ir;Z|]*mDz9eiQF^b)%NP$rG1|AHgc My-S74yѣ-44ԕ CzI"\ ŽkQm3ȋshxH*#z ݏgAԏO4P[)\џq?{¡CƒyL p 2e"gyǫrm'#w&i#)ļ8k7p22R:eKa>&#}ӔkScp2)Bo *#FF`M;PL'\ _#۸Òf>W\&#]ߓrJpO#_^0jr޻ j=ZWREUʀ|XU'uĭIԐe}9ӊn3\bV@T\Ʉ*9fC^^pZC?<ͳ_m]{X1 9c?_T ͅ<lyIxg |((#B* 7gpUm_qiuLt)۾,3SrlA$ѯP*h*Ϻ}2jG!{kmsy4y.&Im(BfNM-.BMvd2Cxw_"<<34 X.I`AG:^ u/aj[5IS-D {{yhcKZi6ٞ)qȊB?* @r  ~":VWx 9La~x+$K~֢-*fZn)'($ ijq饙15@^Bucެ9湎@Oma4GW_wY3 c~jpVE3xQIFiצL>`0Y8~;>¯%\"RUt1ZxrϠg{\Bڬ8 ĆN43o~FΆdBnGdw S0kQ5V5U@ X{ir [lvT>z#,"SZiq<#(\u &yk2Nj6@BAAs+'>`l{\6|RA{XYr2OGQ-!ʰT}W9uX6dvi 5}R$/(Z 5 n-cVxQLܙ/C8nTG9q?U8T&CM.g]A>򳭊53W}+Wĸ}N+\l+w]Z tH\JX6l˙UO /jTB`G&|,cE9eiP{>*0To0RVC@17wv!| `hf@]UؾГ Y$51$1_rHQY *h)kZu- ~6 dNqupq3Ya-XRw(pҺi p"t^B-syz(ƘbI0$_I 0of|uŠ7vi7,ņ P{@dWjw2+ NKx/ybg?;QLmq7I Svuk8rm1.! 
ȟ)ZTjן%gc@avg&h ` "@ ?^#+C%S]ȅ"OXA;Ԟ7Y,h OpV|Njc:z]؝T;D|Mt(a~30KQIQk`e ,N0͒&|aXa7i?iB|3故YK>Qa50Tt'?rՄZP4%IS b*; Q`R5eo' Sԏ}lWl!7Wgv/ uX1IV&PI;j?4vN5F'Z=yaXtZ<TyU/XڦCp(=WNRlscߵ~D{ ,l ɝHS x9Jr= K do`6ǝ9{{ ?d!wF4/ױI2vbo`(S) ;(1Pk*RPPCeæL"e:"6 %KLP:U:^nW_ۘT _EѮ% m[9?2&9s|%X (ÑQimx]α|~Dė%6Ϛ(ZO'&?7Ur5`:MP*ehBϖl^xKueOݢ~7ey.OYl9;"nd"ѱ-g9/EȲg9ݔ ש|M1dq2f2oJ} 7R,H,vl P 酦JnVl&z둋or#hs[R5ҥO7̋|>Hh?yp 3W۷@|fDƆ+o2ϑhȊda/);+neFq!RUJq)lO96agbxa>( eVfkku~5 z P&[u`fP0UF?E?glMN`>L#^NӐ&( !hM#a(띵JǟiGڹLV"s2t.gݰ(Hz/K '4o^(ܱղ&[?8;gdQHS)Z#-2ݪӈZĺ,ֹth R"$Sԅ@t[h2͂:o68L ھ{7ogd.SG;d2ޯgXӟi3 +a+X~ S|CHCT%46{wDy:#\Z"dD*G[O+rlSK\_$V$xUʘԞnXsS}]jR76KEj,[w2#B 1fߦ"R'^\1\#2Vi:93 k5NQCncX#,n ®<{y|Z]o}g]kvbB=ԁʺpE N!哶Aw&* ~lM7 p8ϒQjw`m0r):A] ه>'xL _A9ūדz>گD*}Ad : J.΄uɊ|p/Pڠ9!O3{Gޓt'iEb{]J썉M(%O`QSg&'b( G+L:ZsxEϩ쁲ڵj4d6/CBHl>/Qxdeoݕ0~6ly'2xUɤ R O]/zPUvXqay5nkJ{4y& - `9zR)Eo\*Pmqc18ĺ6ST~|nbXiܥ_yS(ai|`3cFj|uxnqa#!\>zs/i\(cHU2Vo>aT_$)BCHF^wM鄶bµ!k)(a ^GUg#.<l3P@WU A}N#pn3l|Qap^长=G5W<O 8#ǭ|!+*wol&aRO6>誮έI:;b7)cz蜑i0 u*cU{OC%[ƅwT1G"=n(PZKL9_L8.Tn~£YQKb}wU8}Ll:$.?gU<ɠ_expl.o Uf-It[1CYI2&C12F 1P?pkT6ʰXAt^XӔ$?ŵ s{tXlNRO @)RdhO鵽>,Z|.ǜ4N t8tS"B6nѣlBa= nVݽ˰<ô /M+{`Y vXU>ʋJE0ڃ22ԐW (~T`+C~ksR6:wI`‹|)q,]}8/xhmu)|=ޟ@joކH]8h9WJ+y3-48ܱ8`=§LW7R ӏ2:#0SaJP[9ேoO4^x ,(BSrӶ;n7+ xB#}&iTBw K{|"tKm #] >A~Ic6ى*A%?1a}#w;\ܒ$,rװ&'y:Na>V /hp(plwO]y<%a') ݎUx}Ե;y)Szɚrsos%eu @IHi^:OEQf0>lrJ<HK`Sļ'}d2bڴE̫%ü*vY~=gZ- D> z*Z4\]F}:}8 ;'KǓX z_T4K=WU),`Ft^J`$`I`Xh™Pz6]\!xv*-SI]RNO[P2SxcJ+ | Gu4IZ:L",6 M[٨ުR*yũ}ͮۤ"(Z :Q^V E{ ~ED!sS65}FܿGbXe[^i4Mdb;f{Jf̈\]X!L%w~WUSp'чH^|gpNXZ$:o<: E!b{u1@ r?uArߏ 4zwэH7u|Mp(ACpkEqI+ܹ"/,ASy "G [8Rza\ZÛ^dئaDu슊Z̦BuTwwG9w"t E U%Jb\ӝ|nd}tqw,a  ۾{uXQ1ntz\18"$wN:uϭs}Z@#B6죱 RP~ "E]"pO(K(F WT'FA}wi cEQܲzy"G%R qs-.1&w)Ffd?L.Yޘn~ &sK_ˍBϩ݁ A.pGsSfjr@ƚ8ܭPQpI4EU͍w}4`* g fxAkriHn,R%jX,[3-_t5]W ;=p"%;2i j~F^^A|(Sa]3]*-Z$yX[)6#dS fEHT4lZg Uk_zqv&RZ|hHro6Rʡ46(u3OY XEsOrBqM#8ԐAn|{4uG*E*%-GE` O074ijYwoj4^M—D򌙯97 `4Js'zރ{,6zUZ@ޥ CFڻ;|֦6sp-==V\ ak:Ae+|m!2zӘ;gT"57'{oqO/fG0R1GB2WZ s1绔e{泐/Bpt̿Ҧ:蓤*ӓ˦ |I{xJ3Wed;d3ǐAR3xH#+V0S&CX4LN;ҟ3" 4dگqR4Q5 
.YN9:uᴑUa2͸KEiXWdˀ)39iBE1cȵMX)m0S\U'F KV5~DOeh!'Hl='N"x}?."" -CR-~s 4)䳱@JMV0׶gɶ۰땓5 >"AH ;tՖV;{gݮql-׈~ kh*|"jq? "1P"z!<BJIhivW\&ˡ_O"fX`4s]VJէ3*Vw^^ PO[ Ry=T1m,~Y䍘JAQV L?QVL)J@Ej~rzvJ槚V܁Ż8R:F'h̑T'3.kC,3>gq{԰N& ibQX|tҜexql:IXo< "%̳]: G7TDaSy cO/ ~`ᙨ=N X0eWKtnsߨ4I.)z,uFrkD[Kw1[wu4?ywTQ97_F#ׇTLIztߔ=]83Lr"ڗ?wQ8O8{eh1U54΢=x¢gdg"8 qm@Ns>)]zk$ y|Dcމ'i$<0YSYҰ3^EQ–=YO`UU#Wdb!G5caotLZkQ)5?&.Sc71]1'4ZŘ 6zGDu9Z 6+8-|oI  M8WC{jO4#!Uw%%q`Jpc[ - '\F%Xh"k[#0w;й"AREꮷ`;xb0Hs|doBFqEK;Ha);\a~R۴~M7wJVñ&hvfnah)i*$Y{EZ]5ܰ={ɿ$1K`\((Fgfy֢<c1QT@h8kǎTf60G6?ID-eLo JyxPg睻8 N ~nR{U 9}jh >ON#lbRBjO*$'-݉=>U՚6תS~=xCnu#{lDL:vsBGQ"}erR^-KUxWU*: qߢke].|ïA%qA2Ӧwpjw# ׼"28ٶ:qz n\+<{ sZ&W3R>AMB@Mj"&NpAO!DM0jw|ڦv^>S`C 7X耗z6l3) g{(.ʁBO^Jx-F.9,5<=GU) JT.Cn~W] :؁f*aFc XZR+<$!{^l3܂ ? EY?p'_bQ aZP.75?ՂmIJN #!0nZeL Y9`EY̕]N Kù-Om 7V YKzxM):co{Sxz4TFbcgeROk:zւ=wabYS+eF.X*z@BKMpicγ$OΊB$M|vB9'a\i6!ζb[( &c)IXO) ý(m<;kѶW36x//~RRRo1[H#^gUwјRi8n2pMDu!n1Qv4=]"o gRG9 pV!O0` IST[ |{`lنS/V;SjZy'.LCz_va0W_L8ʏ:q|YdVI-|}:MJo4U|)&ԀR7B&b}h0chȏ[9Hn Vxz6nC>P:/ٯiiyȴ'2d"m2ZG=u~CPmHBBBK%FQ}--;jFAȬAb{73 -~y"ʇR< u=|O_Or9M͹TjcY"A)M`T ?f.OW㵫uH/,wn@rːsៀ6-e#ʁ`Vf~Jo~E /nE?HThJVY}r4ՈF^#H>ggy^g OG|ɍϻL-G|tcF.߿/}P7uv^H3 djZ;deu]{W|7OZeXy.M[VK '߱1OA^8:VhSk\ &Xnv D b^2P<ÛGa<d~'4loဏݻ(մ \Hs\8K]JNώc:s/,C3;!j#txj83'il@$J_pO^D] buB%$VC/fnThIh/7lzjzHA Ih|kmiv9zZ>T A6 f U/lM9$fej#Br)x`VgCL95Eat3 uǴ#mP\b̗yZ0aSf~ֻs\:)!6USuP| WȮ4Tq&WƶivbߞMGD4; @NMފnI?w&~@vF) foc3=B vAưʔ8YTkݶrVTKn(!~N ԙjP0iAD~S 0MFܰl_>cj[o(vl@}׫v+>bKaNHj W !]k}N,5b*jּUalvq&Awr1}/ÞߵqfggD|b|=z*r-,'cHa6UM, )_*iAO޿"'ξ*y٬3|<9DF;_Gl@ξRI&w3y:*[;ןѓq6۞ֆ".-yGG<Zk-Тm7-~I b\㒛')#\U8=oa=Oj'xA!r _Řy P8K+ 'O"8{U%.gh"SwZpEߪI_pcgMěNRT_4|*Q)?*.}JDDNvS'1~V 3"AX́n>9fDݫY~·xH:1kX†xo6G։dyQDOJoG!,Oc駿G\r+kfTYGMn}m%/[U]ff AqpkCqĀeԋatjv[tURhh3 s#\U  ^. 
g0|I6TkAv==۩CCE.!|*Xg_k#u f #MK幏fIHBE}h`cb;zvLgA˘ )hsV{iKCۑx<.ᔶצFm3ﳪjeT, LլDZ4 IƬ,aXv֗nx??(;\:W&kwm1G)j/m+ɩ -b cde?Ur..6}O[4c,[8!*6d*`rȰ@S,'-y v#Zvh"m*񛘦V>*=7N>>KwIk cVp ]aC 57>'X7qLb) nNlB21sGX{>iq9YYJw`ʠ4ؑWThs he3D:lEVұ"Ƥ1UDӉ"3OW]8e[7`Dݷ?u3TfJ:["7aģ(q*e{/+ŞAͮ q"|oMӁQXl}mqOMk#p*Sރ[boC\݊Y> BU>Z(3gM)1p 'lP'X'tݡejwxAʰ48&*_'?hKԲ, udTGqZ,#(Bs+ym{.>X/2C!OԾ|, ~bFj΢JFY^ #nsc]I2rC 7 9!GKG {y( pi#fM/~4q0yu֥!1)T5J¿n^'fj(kX#LEƭ"DBMGqѕ+_w6OO5`Nݮ0F҂g/$Q!gu?ML[L0 V#ńpX;/Km)Q^M25·:2;̋љkyF`Q9CNSPB. DtM8KzWTWƛpF?aIJ3RJ)!@ *C*57nJ'ξ h??&3mp-Ms0=XȧYMmDJ>~I]\]~)lJt2u˥<]v3h~N-w9Y y,2 <9༏9CW\bH}Fn'ZYYwIa=#7c3tif6Dr!6 r;Lr tv I)ׂIGf +qD;/L-׃#"6KIzbd3o#tu^&&}w[nP-%dx*ՊѤ)Ő`2Ċ)2\gFIOW{c|(TzeӐCcًzX_O<(jkHjJ練`0.~΂,%RX gs֧.Rf 7aagr=/9gV8YQ6 B|-j UEJ !T dX ʇ $ҖOc6WGiu(bgVDs'0J Vz!$A÷o\Z  37U1UV d_lq'6Hoc $x|=@)E-|pWߵboYHtQ_dVnjQԊ-"%MTfoI>͋eEۆک)ύJpi>q=_[.*/>2Upl @2S FkWوvN*S wLpI O}\noH-YBӗN݁ |a)Gz#\S쉻[׾>A̠] ]eDFqդil.lH]6ؘdCPLTIz֤!]<b xěVʃKP[b nT ^e~Ů0?8SSz?ԕH.$gm@ hybh"!7FT:=ra.ZW%P.2G"#W#|g_G|Ji~IyS&AQDXFL/g;/`džI+^ZmGoE녅a;LW,>]_Ŭ'L qE*R;ic*]SX)5D.|V6Mޞ1̡ y3!H czvgMƊS4a?旒ޓI2Qb$ZZmw5(0,3<4i;8ʰ=гZ55J7S}ʹ2lGmۧPpaZZZ⑺ 3Dr< I wS+Kwc3{oH}P9Z?8jfӨF;ctc#Uu lCjt:W`lu.~E'TCTsic2wg"͇X@~ϵg5rD5 'kIAsW46֟@+Zt.^龷wO[Y@Ċ&=9ul(˞/d=ukZ44W࿮55\E :7̩VxSMD)UcAO?p֌P#=Z/RtM`pU(R*`EL[~7lD]-.)d&) 5ragءxYΊԬzy_xV|C%Uмԍ| CL;Cq!:-ްI3+q{^j 9uX=|6d@R^yINP`!*̩u>.~4c)ЎϥE!kVa]FK4O7Lml}">Y6$ݖW#!c~f+v߇CV-P٢q`^0R MEa&̡H A2#&}Ĝ@BR0\M7@Hs R۳޲SO 1e:/vqAqZ.RLfZ`,#!RXq$2l9l4P)T70وx|l$I:Ȝf1+n48 3<YMZ]T T[E_=gD@<0ͮTwΛ/K+ b  V$ Y9Y1U.#dr OuAQ0% 7;'85loHyG$+P#9*sc9AGF%!F1NM I5XlnZ%ݝ3[W #T /Ԧ~+ ux%luh^K2>i6F[l ;:ՋA~& @PE~sN`xѶ!VH^3Z] *Ao_\*Uj i}%@J "D2T5L'|c?mo$=E(. 0n"M$W+^]yћ|Iba!Zޭ|&rz:m4NRCu$~2wmٍubf+-S&Cr ~Y4w]E8^F}6==^LWNO&`ԪӺM1JOz#+Z&d?ߌi+k+V BexgCg>EV-;x`ŅP}tAlVDfMΥp}@z%/]h\@p|5`i, lXCe ҕ[IQYTÈxKgR@BbV #C%FYs 1}C;BqH^Xܧ=SM>62dfA ;5-#tO/L}m31vIXSOWO^Q "2u;Pدwp[Ô\KxW /#7=^R6U˱*]y#nS!P;ha4 $Uӄ*Tf>X"2+x akQuc(XăE]1凶inV2Pj- 3vE3HSfUX _z[G*wa8JFB`ccW@c(nShWY_K*[J! 
묓efZjk)G9M g2_ ]82V>KhELSc0^?O2 I| Ƒ#Y=-puD]l€h>Ai~a)ϢIUǨ5ޖ7 (YߘrqxUB~[O>yM+Nj$Ɇ /H>i3CJ ie:-_m#{z2}/ 쑣I̧^{dн-#K@ =RR"pEE| >, I:>Dž`nl/Q"yZ0zs="Lݑ %wR1X)|\k[/E zs kA#WNk抧Gö- e-  ?,Qj4. i$LF˳D >hEMAQ WgƮzfn'7sx>؀v0>T,_RB3lCq\>WO+o/`^`OՁlh޹B,BFhHۛ~b=0߰0o[^vǕ#6TDw7*M5eF\e9*VFr`yciK2+AV!7u #7s{+`-FKoz˼RzŢ;[V3s,1aCw -9WKIBX8Y'[Z͢.?ZD|6{0fKAX@ Pѳqה&Ș{i3Dz@ځ@;ĉiYr`b/$!D0cxmژ!.QSFE GuN;WF"@{ȟ UYZj yYJvqP{?(a;)Jr|+/"0B D]t2;R}?I#Zs^y8hW&ߊ}iX/EI(O_g]>ndpoˢ@TeCsCUM(g{G﹮t)gLFZU";?v4/-x@qAgK3̈ #ЧpC?YncyJ&X`7(ך՞x9~w^q͗[@)ˑmTjrqi Y HJlei;*/rħT-7 HB 5~O/  ^raa>.cKPnݓÂMYULqŨGlp\AKzCUV]*)ѣՋ~; 9* DjX4?^_>5i"yÌ"68HkgN+ K?ZdN/ܿ yX| &AYXr-8Jg[ .~+Ύ;x*%ΗVI?41!r"_e vY9\AΣ.H!XV>Tӝw 'j-)Э>4$[O/i3wK0;#,/vD;x׹;%?W mE\+&"M-tp.fg*%Ga[\GzAɇ ^xJ (> 9db9eMQb;Ks,VFeњQcɫ5o6;nY\̛Ji7EfNH}ĂDžҧ-^Xx@5Z] m0@i!;%ߡU(ڭECj d.H5gPP!~~#eJLD);A鈳9[\'Y5QpT2cTg`6u\Ծ1B{fx.XÃE_AsW44Y|I8~w[.=} QZZBg@lﯨlK}/Ɯny65ErDa3 Kas5^uPzbE{_y.m~ڭ[ -Ah%14$WUcKoDJ 9,tzͿVl5@}D;UV7JIh)ZFPfj< p魖,ipDB ]*p{x.%h N*TFQC*ēAZYÏH2Ίi_r.iy;&&_ #>"9 ҏf Dž}ygd XkaD# 'Kߤ^X|cQR)+r,c'zq{P5 X>5KvM٬q҃ŃЁu1jN7wq_G? '@1 ۟+;k6*0_*fڟH$(x ~V8 ēk62[Yf;mbr9aKo[j0qa'-Q |_#PweƗ݊^ŞIԩ"jd\1Z5 9(.RYeAw!?[?)e" o//f0 IY22+˲fpxBS8b3gt=v ]9f#NNTB.ӻqfۋ62JEW^rp"Cxbeoj.y@ވv1soֲK,cpd1IjBߣ69"ȩg+SW#|j>Ӵ9 [n%+ ]`u (u 'O:6BYaH $8D9ЅaMFECc"u!jI*xv>2S%&=<&7݆zu:|>KJL@0*Oݔ}IS۷v.׈9$ 㯤0:EV‚[[L^ .P"FpV#![JZO}tg &E i<'d"Loл 4T8%+19?F}{LmD#8xjvE<(ޛ7&FL3&mI `|"PIܚ0wTY)^LW:hPd:_0O1JCU߭l I k10Df H;bh]nt05AB{VR/8%IK?*~@(Y)sn–o~U{&" OT87? 65V $ 5_B 3gXa7B~ gژ?jyGvHcY0qp 0XU;Q((%԰kGȢ0$}= uM)fGONEt],AL cV(J;1&(۽[ᆠK s'gS>[SeqVe:(b]K x1j3%éK ZBXgN}O(c@̲{b jZ=ZrDǠMӑDTQpպ! 
(~0SQVAEُ ~)s XF 7cmN@+{t>AXf.+Y6 >2#I6oa-z!HU wX Ca&]挥ԊɭN<]FuBk=G)[e,ͫdYoj ՗u#7v%]NQFZՈ:]q*3 F,OۆPwKzb"{үH=g&Aɺ<,Cψ'^NuKp 3Mˌao[X$T\OW| }-A ՛  2b!?"lޜrlKܮ`A\V^9feTUI +ļ5 bD 6/b?"t<[M?JyM}18vC3oeZa"lNX*KLa +;ϡn**d,eE JPMA"= ݄mz_T^֚Wһ‡5Pmwo3/SߟN;j&-,ԩd:<d+ҍ [T5?60f}hV+\/pgu* 6(@5m!݅'.WAIZ-_XĒ>hht8t^\F*^ƽE( x7W"pmkIu)f`Kph7uqȶZ~S'4ve`5H)x5F$$"n#^38|>ְa<[*FUd43Z2OsG@K@/XvJr>9ś 7D z$5[Ƒ%r?V`<̴o#um#hb K//MaXrDHq0_չAH n]XZ cx4c*G@C&jI tah,?Bjt/p_Љ0@8 Ku>Q> Ɉ18Qe48}o[*+UVzw۰kBfSR1 ŬX^d2Ӑ]/#^Y^Wv8#'hnmQ6JK7ٚ/c۶EOGsԓGDo{*}fA-6Okj t aRA+ ­Q5k?n #ptk"Vx0Lo59~&Y0GF2ֿp1PHĹ08i D(tB/(HKH"2U⾨$pTIm'c$9-̟8`\)d0 t{J(ȗf&-W TřOD-6Ie@gbVMˌ2l>6dĔ s,3lh bsҤ{hsc@Rv F7/FxqEE~w#mFXМY fX@r9tvS xaJIdi"$aTӼ DN6!1 #=!Z?4*t}6 =j[f/lyJ9 fx/ݟ uAҳ4ƊF:1T\9ěw'z*eП_W 䜛|O1{rbe<\ZuְQn&‰~C.c͔ЖYtyۗMXSj7<]M AJk(28 Dq2F<+ j)>s:87Z~09$I[Kw o%9W;_" f(!x2QRm3lXc*!(zIц'7ZNj- ρQn]<N}w{v4xB㛌kc1F+ƬYqD O櫈٫oq 60\՝c8/8OaPTo(6JG{XVϻMFz> l ɍg`>{ē9!/) %FN|~B,J&ElBe G.t!V #۲6ung)h!`eOTKm$VFޭY06.Z<Έ,׉VB'[#i/`ra#wzp"]{7 fgfSͲ"Z K p>pֶj--D=O1iؐ/xg[2ag.}llzf\y]E?*F-=KkX,w?v~֫'7]{y24Sbګ—ad'nQOD$L,0|u|l8 T<,^pFÌ{Cbf Pg%GĈ;`ρn^U'E;rsT@W 6#)O?Sidl{ |l-|xsϬ yԜi%O.3?/r}tW@?X>ڌ$ l6}t0WN=R(D] :D}+ S1b|X9p4m7֤i`ķP ʿXEթ_hu>י񡒲g&KH 謰7P9vRq-R`!ĝ!2A5t &T< (x+yd, K 6L)zz" ОqZM nPOkbZw! 
LeDO¢1 6gwkЇn!@/M0T,ڂ-.rrG*̭keqn[o&^MoS[7ߚ=ʓFGvZuJH ob֗US%7p0EH.@?+xp1ٓȻ?]`dݙ9>loA͠^d=9ۓA;ƌz>*ª2Ayc*?A+ndÓQyGx336W4/O>FN??\ D>Pp2LsG AQ͇B([}u@5l`vGW Л,Tv(0> aw |:Wxi1]4P, iIH2ɁƓ' G ,_PZ9V%݅'z}$O/E" ՚eTgAT|xUY&ZZbUF~ s3 [}>ݠ{jW[4;Kc=kKXn !M}$Z!ک7zXF: Sxl"ŌX?]yM',; }j{BeءƜgi)JN%ˎ%PXo˙S{O=½*.)Bj9o;i#EmKB:E2 Q_&\%a޻nQ}< C:>1yBd- $P<ӥ`|!j]q1,d[;-[L[al}4n[!PyYI>cwX[ nzSI I^'m9(ƌ}7^ oAaz݁LaaM *7yGܮ#sқ&@CdHh s7^Y,!bxn-%ȝ hN[m{hqHMFUcjfY36vбGBdDOI1x M"xo3t ˍNس>-hy;?5!+k+jY'n:rD{Rίbk׫'P5W2Y"}-4Z0, `)B5׸l95X|TՔ֦=)tM;*C-Bҷ]?,lJYy]e.߷-iTgU,Ir'}G0klCJtqwye*jrXGǼwJ8|±[ނ%?уlFΐ=ʯVh`|LG{܈:+nTXT[ok}~^Tx42H,`~P՝C!OtsJ{WN<}Hs捸ׂF]!`<'C SOY[ѝ*CFda_:3ۏRRZ޹ ߱ F>t*#i6I 3YG{?Ci'u{2Ap\tGUy1: I' foL5#`ں˅Wa4oi$/4gLV7?DW2,x~<;j_R>3] @-) 9:>bb/p%x:f םWSCTOt.5f^oJ%%$WJ3 +cO۴3C+_{8@QOԖb4z4?Ly=sT4d޻4bH8?9 !,-gx,+E |_C6Y\O1fÄzAWgpD(t!b` Ɋu>@ۏ+Gڃu{1PS{Μ0"aTbHn"XUe~0V$`fW;dA;){ư9_^2#[N}4Ua<\kV*^/M]f e 9qx@ );NV V yOE:Zd /+Ш,,FNK}@XI"rҽT ȖXw 72`Ԅ/hYtw*=_+{P5|_ sl -{9{5 $g*K(,GvqlCr!X< ܡϰ 5F|/ =B˹0/y:=nc@dR}oSٗ,BE.ݟ;c$#GRt$B,H.TXg uw8Hڕy_laSѝ"<^89[[QdZ!ÿ(h{qFaHޙK;3/Kny0nA*I1e/*i=Ѷׂ+k~]24=_ ODT-h,̣~k!\P/HI<[ *9Q!b  ϼn:k ^oBrw ,r~ Nb5uib ڢp3U<AS"wU"a#oہ0"2"qq"DXh^xO.8amqEgղ0rzЉĭm 4 "8 O K7F-]ޗq"AB,;$;E3GD(ov![/ |0ן,[@m W&Q16b.Aꤊ}~2U("t 5Tn;  iSc셝+GtsOɑhE:z1zz8ot}4ƹʷGḁzBZ˷6YLܻK1-*u(rx?i(NQ>iDb+ʳ Bs QQi¶YRZ9TXcfyСyu:4 uHcII Qjש!xZ1>R a֝Q* ډ7w8Z a=J;@6qN}C,ol I@˺ i'#`Ϲ.>50D({ckk-GF֓' !LM$a$*kyR8i[>FyHט z\n48'F+<_Q[Z4CtDԝWgyvIekȠ!i+>L>;,X-rqǚKAU1.+n^?h#OthMϕ!0Ҋ^t|f0|/40 CXٺ _y@$ 8J$P#D|8N{tz׏8]hݎI@Cu\ \sA][X1 \#_ PI-{^C(F]7c;MvEa^߄s0?z|܆6_ObCEnʭW]e#==rp里؞VA :|N[<}#6U=d #g?D{H]@-E4x@v>U$LChz\/G&Ua+ 4wR_>!/RUK%ӌ4sqb쓦y _fuck!ؕepsX?K;%>Ķ >2Br:gt>Kq73dxl._5E6z:r[*]5Zf>xS/&4US7gF?-g#e_<)!33I=S K8,qoM.}ATG^6//D9)ځ4yN\Ͻ_E˹(Z,OS4zi~8zKB*.HaUXt~NtwʐC}}ыvk#2Es!?fbgQkmw,1TY-LTShhm/\dןHxT Ժɂz-+d1ɾ&4; WG/۔UYNG~y>JL*:H-5[6֔ey$(Vq~WΨiEk[VnQƳ &&6F;`x+1 .^=y@wd>, ݰ-(ކʖ}csy!aiDbCB(.긇4JVp982mZrHLԆ0E%^OެL:u7c 8) 6Ñb7]ϐ`5Ė)㻭>)};(s'"\F]:^nt\̢T跨1D ѐ˔G*$qZQ ns`%:7_.]09ؗdL?F|UY~5?I=TJ[iL*?(lP2@0p6s!vfɹc ,M{fMu1 ~*mg8nULNR90\ɠ+"S 
tNs::K)r,|g?iȪ~wFKPW` J F#naY~S`1:\.gѰWi5,200F7hBo0F#ks9=$ɛ1r 뇱m,r{#)lZ¼x 5+nM{ԋId,$x c}$M=Ӷ\iarU̜q3*e ?[ۨ[ZU{ygY0A CͤI$h_in6Kvx<_nvHcE@T^A0 {ٔ<>AO˄Quqn\7%݈ICdaZQFo@;hfA]E;9O]nqIrDFl%غz8;nqѭ+r=T =n|㓛Ly$D:q.bW>C0YDܞtH_@v2w/::E,/v2`Ր]izpX:J(85* hw%CxV)Fz#ʴ4;?zρK<-w"BnZqH5 ]/eW; 09ДVymf1T>Kc9]_U;OsYDI<61H69fjXڞ w6':\P)hs^t@%qrdO+9OŁx7,\[|TO`s;-ON[ ;-A2"wM6%Y.)lw"Lz2N0)W,ѧ s3|:w nv]'8͋P?X˻m@$ ^=wS(l ;g S3Z Цi_O{ԍv mY c]86=F}qZ1WGW NȢhRGSCj8ua)3ŷx|0el8΁cwu;bםJ1\ M0'OqAeiSpcj*uE'vݰ6ʳH5E#CCi)ڄYGWD|^ְg>Ů;+qY0F D\]1Jo(ͷ鮎Y*oҚ뭬)+Hky<1Ep^!nOto_ox51|2ũ, 0§HR.IA =/sfl*Mh J6ʆ6wq 65p 8D3bbH|ȓZ924)APMAy3Dؤh.@2`AE%o ?E-[C֙ݶ)f|9!<\&]c2m-m3Ìn x''[R?)ҩ:Q"NbNiD8rW@I] 7U@#*XI-|Re"$I=<6gAROlL €Cz Ȟe:6z=oDn[^PHTD-ϩwF{jWd1@C3Kd5I\ĤJgyDj<_rpo C z*ÀI8?]0FKyGqNjjP#Y9͛r+—o^: bn@1_jJo|dl3/? Znܭ‚5PNF ,WDJHJNcgL'GH(CE\s58r֍Y#CLe3rs=YY %jY\5\t9Jqϼr'ڧM_9Īxp&Qn࿺TXp,i,=1jM>Quޖ|-L\Af|- |7XKG$1` V3 TAj_e[j6ǜ+U_a\j[,N?MBIL O~gY,\*29r!u=5ϓXsC|aҝ1}aꔺ8Kݳ!:/NwWۀfi+߾E!DMyMH1%ݵ!V9Rv=Xht}\6Z78Wڳ0~tFWIvS6i#DOYVeV*-%ϑz.HQYUo%0q] iH{ jzehΒ4v/-fM+n*]9d`sL kMtiLKW E*E3şß#76k&Upvz~NJyd-zǀub9 <=xԉ#v4\n̥VfW(ˀ A*%QpT,w&j^TP0}Ks #&JPt5]xi7],x*Z){Z->(ڕ(DWVM+,Z W:h~xXl4\hJNo =>c-PVJSЅ07۝TXK#m{iJJ53,DUê~"p<4[#]VV kI0ԤJss{;Ь-sbbU¦2D~mMPtqHts<.:bȤtBhdZ0۾B?8Ҫ`2JWX5$Gn'EE9Fj 7rrWy%Y3KZpTއ9cz4RF`9^#Z^w>][/,sHDXbY^NTs,ѬĮ'pNNᗻa8 Aa=he*R 2j>lzH;F􍝔7R`##އO-*B&(r*HH;uP/:Tʨίw? $*|{p3nKeC?(}ت|Y 9@1cʨ>"mϗ<e-  ^vԞ< Wң+ ХF+BP*n~+'/KSJgpQ[C!4<{mMB {Vؼ2v68Rtu}U(6C(+&%92'>%Vޅ 8d՘j@{FNg>OHdSނ' @Up%LY|!%jM=.2V.t^7{Di8ͯ  B^R[W-Di 賍[/[WWB ۖmɘB:VVC3'YUSDwҦ. 
mHa"q{jTkw(]+ ,Xa{ca]Z|JnZ[qtԤ k*ec&5ɒޡ'e1滣Tٌ6]Bj^3J80U(쑑3/;i_E@[pd@nPM $Jeuj1Eu^Yӊ c3_hnRϕbaToW/Hax2sm.]W-4NiQəgGDbI!\*.JHSi+:`&7s3HB.Be2}ڝvW?~eh됽vta[ %l6 5'g Cv()R_d-{iR3P0ź{RlY]1h3u!k_|_)@\Ej3Bͦ Ȍ#-5cxO ţ"*V7Wn0kTAlGSӺ M ԝ/nt{5l,C($np-,,cvkʙ1H+f2/\(< D|OU=,M fb~Js×zaHҘG gLZޔDqtSn)~,z,\M~vōt&wdRs}+m7Cq2I 0=kPnC5\\IEJ1CHCT&<:be&)HsˀVz66+GYZo;9iE\c}S%_5IZnQ"-Bj`56^[c(&ijXR -+QiU<4ŕ2?pWa֌hS4'3n=Yz~9){d!ظ<|QS`VXQW K?]2]eNjd"Z6>)^ Ŷ3"ق;8d:nya35>,L+hf|&q^<}b"uv>H}Ⱁ=8u]yz3hww-g,OSyMY$)̼%c:ÖjӉV j'lq^4xKDx(,Erɉ,چ8"#tTiC𴤱CxNp"+DA+98|wAӒo!\XE.᫥g`g[ s 5oeiͬ*,ѷ`o!)'ѹ5QMl"hPou'Gv]nlX 6^ -nEQ%yM')#?E1x gXFpΤ.ulSy|҆ z&ߎo'n@Y_ 88$A0:ʽ5ac*˅ 68ws5dCpu@J(Js=Г"N+`·hN5ktt2+ —~yDXRY"Rj.|I ^Vk2Ѱ) ^tXbi=^bx=diGίAry,s]fNAud[ZÖOo`6!`UA8<ȸ)"YMƳVog1Z" |@DE,;1]8e/H-MƑ:JDvUOɣŚ(Ygl+7d˝"-fzco$= 9Ug$M6+4k^] )'xp+uS.[~pt(:=4dmh{ĎPGnfAgeoM'I:U  "4jaaϿvZ&ټ> FU(g~n5::ILۏ)Iq%o]AF*D r;4?Z=qNh?8_S,Pٛu uv-/5?8}*1 My&>Dş4ݩ 0zfYcߊz&kUFgxKaAcQ"ZI%;#_D|Su )tGٷY!C$]@_EN6هc8@|>Ӿ@"ym*26>zQD?mwAԧjqg{`7i&]ư)zjs(da79{eP97~=BZgcvMo%meԡ`iM5G聲 W?S,U׸r܈\gҋ VPI VgcU瓴G@giODzF }c[xs{/cKRCX؄yT pd̍`09#V,Vb ୬4(W߮Sah%5n]5>Qi ۀ@ƪN>+35t- ico2V:86jCuZa/l5`K0?h1n%ŏQ&nLLEP]B(y: ͆k6ӥg}e֤h୓i-cz>He}jO'Tz~`ڂ~{W^ܨ Y6Tu' ܭblOW3;j>z <`#ၬZFq56pjJ hR yUC˸FYk&$0x]V.xy܊[MLK2VIUNJ(|2ppZ,PMQ[S{2xGU1q"Y2B=@GgMt։K\5eGn?x$ Zl+eGȢ?ۂr@ [5oÍUBl߇?!P;O( #ۨ.PYlJI<:wORCQks؆7`y1wqM )4yҡ:*qd k!Zg\oݪ.׽Z9aIXl gb&=@7gC4ᨙR~5H~ 1| T?l N~4T .1~fY{9/O$m+y\ i50v]?Љr?{w|\Fsf׽\Jyv9|ARBi]1>ɝR+W.։}VM1iZL HaG,@H+%)JYXۦ'7d]:\Fɣk_qܕRN,?>惘)+ o5YnE]*QXwseWzMeMF線cEqZ.f:'n%t$Ũz@¼O;9cs|u |Oy|F_9V4DĝT=K((,|j~NȌim2A!5^xBud㗱/tDžUԲ1sipDfL(zux D'䀊vz|L _9)m+`W!C)LN^RTnj6`LCv\ԯP `tI1+Zڋ<"TꎩiI:.'جѴQ6`F4Vs [)$'[rl@nlI'23ˌpkdc)l^=IIX XoRif\+z~ 1dRЫQg˅B-L ;\x1-wLr~Q".)e*c]`@JÎbnwQ!&Py\KcʎDs- u!!-0rMPtg~fɈ^E:U0Qcco0}gy򓻅Vk5a~p&{UA)\<)apI 8Oև*rf@ d8grvڜ<0)H&3~;knFR=#pa$WZ#wvH}!op)|aU5GK̵d,ʹʛRÈi(|Ga\v“i Vl 3 k)aDjsVNjNʋmf58L- _dՇ9&R\~UFsG- ,! 
"e,(9LVr@kȖ=FBoSۮr&ڡWcakǪN3jFER[@l&I-LbY'+-sil8/0c(<9_g?Ta}\(TY#qPgǦ =,4ÒpMUp2gy<4M2j8PtTĪmQV6Jjp([Y!FcNO͞Z`ݬlM?gwD+WnGN+ ("hہ3 4;5MZctV?V=1M(`&ewk`&A©缟bH£^̐l2 J%/׉[o+%ۆt@xFn".}!.S25TwR:,ݻ1셤qÖ⧪d3=su -&6|5.]~8{)q.cرC\] _?(޲'H*?܌IE0E(bC0LZ*aL!V -N@\(-Ȍ5gNZ6tsYK(o0IZ fC~/=N8 q.4#,޿dtJxk,w=ȷ鼴ToX,L]yy Mvr/M80%e4bDU=^q3Aӻ :1Ѫ%2^}e!E%r32>7AP(OwBy%\Ed֬)ji ꙙiޞ\'{NPGiw1&; ` p`JKJ:%g;y,^:b/E7;x$5_}/ss4@O -mv(hS"l̝Y!+ײFL1lF[u%(H L7m?n tܻ*!d=LMhwSf`d?\ot;P2 M͢[|˒XDɳ. Z%#ʏ M0خҢkb[PeA2pWMJdX%*`t}! $l$$\Y>4 ul%W`aG+:cc0[&bE~}G2a.uw>z֭2 !%RZ~+4B1u!'{'nR$,vlHp&Y,^ R@-gx"H:8%}]9w:eҀWgb/Ipq)R-0dsIc;$<3'TP/2+1`a׀A8f`R\UjۅnzxB/ɂ3jG/׍ qL_l*дIT~S6qUzĦCO 7>nHW*"~1n5~N=p8Pg{ez^"JNMz/ '98Yi4hszD(δvmߝ̾^8ɳ(n0^e C .m2x Gt {#/!,L;dZT\ 0ihc\ND IJaծE>)qb}0ܐ D /o[_6@K~Ү:|F-' (UWvFn8Tkv;qFt%3֕ie&s>-'H+WBtM FNugL"tGNj9>BRU`]It3w"q("M!!o`.RjF&NY T(W/aT {KW7z.~tV$dοI$ SهEΟ`Y<ǧ\ lUͮ6r oVµ֗= ؂7`v ˓ꖑ@"l榇%C݇N2i0ήaW9JhAxwVNhpюZ-q&q][8]5-J;ZCm8' }b .0'# a oqL-U w7mQ6ߔR`Vp)[j 8 /ž )iڌ`B^'=UË_'ej2d4w2z~z4n OEDQȜGb"z.I}jj!GÃ] >LCLlAXU!`yK_$h~0TݴAF8Ɉgk{MvGPB{LAuAL%"9>IR.YrD᱌_Y۷'R@~ R?l%λ!8.MxȫyЩ;b\ICΎo\;n`BK)z)R72][Li"ʬ7“$5QX:*u_ r; WH<01/WiǾ槏,x0 5GJ&IPr2%V8[qwy[~lyBE94-{GNo:.#RJݑIZi.+<'cxoD 93Rl(FBY# '3Ҍ\O=f ,~Xɕnܝc2 WC#79Qeg5( b٨%K4P[u(lfi2sor)8:$sz)ٟ(9]Yc˟P8&VTjmWA!?8Eu|ҭ5 Mo!v`쯸|yG70&G]PxK^JO؆li,)[άxoާԯ)S -(nl;QozJ:V%ϛ;4z.jh+zinM׶OUw  ڜۋ6f>Tqxa!7sV~HS҈> ^ؿ|cenQE]Et889 4iMᭀL&Åx SB7R6ɰ " >;k }dYudsi=[DyUhܘ{f߷3k:H NTGvyvNs0`^&=] ”QTBgkD9yxǤ! 2B_\FvIA~mGDgmmz~/`ډiJ|pL`\1(rہFo>pLڽ::٘6\M*L߷O Sql-Y5Trj5z^m\l0Q~`=T#`|1&*Pz]XJq /3K=䩌n Gf8zsddpƋ SnO~6ށ;TY쳑_Tf:) 9~ R`7/g+krFU˰=znSw (tp㶰ik1QbJB5 #)kMV. +;P9A$TOݐZkBb-xv{. 
@!}~*:鲵UnУ ö_?ry]c~X*TӲ׿ nD6PmqlCp-"V-e QR qV#|pF[+M$/zU!>#]_) 89=υ* v~h>ˮn # SŋtlIP~D߲O!VU#E75h_lzλ?7_ Tac m*,9m-vZ7=\o;.VX蜆q3o^rryL'$ƄO~5pu?Йo_5TT"ZJ5IX،3̤vy=EQe$do9D= |]Xlo4 *w4Ny]>Gw.&rZvJ |>S2Ѡޠ d,ub cAsnJBJR_ "!D,pYSi CېBrBrCits.Xbr*-A MyDK>qm_dJMX8{0Xx&<Sl- T,-5o2Hz 7_`6+,2У1P{txJ˃| I}1; Q!A`H[Y2eħ1.d8+x ĖMpjKT0*hJ0jJ85T` ~^]\>[e=:Dk/MkdTWYzi,b2ِPݏEcc?!5;MDd-5:ϹXv'g"nDFtw5!S'DK#b6Ke/(£6{ZT@fڲlMM)jG?q+Qh^kKVr?iȸ7h 0}O[ÄlѲu [wng.m{{h8.-e _g~&ejgMYgQd JU.,v?J;& $bAǶqVT)ƍ3|^@<2|ud2ew&غĴnj FѾr(XfԵz\bjm DD]F,Z֐rTsBϨT蜲(,\(bw䟗Ǧ~M@kֈbP iuI3W8i) nwoJDZ$iO3vM מ-_iݝ7h2"JEYSb g پTZR|keB]slqN`dO}_h`%gN]^~p?ir^@@Jpf'X]XhUчɿu."ˆ:pMTsLY2!iq%!𤽿C3$-7X2&$6.Yd2폺lf +cJ c9|5RKk=( 4Ƭ!v!-R2- @.UZHsӫs/HKU+z 5pQkc(ӽX[j{_ ΢ՐZu5͇g4  ,ŽA}Uɧl܏l00J97|/*JD|]c\ݮ=*b=ĥZ Y wlT"X.V>r]3gvdQ?ê=%G,]&܊ĈQ ĥk`y6+H+-s\qgZ JZB̍._”|ruɭ6H~!Gۈ7JpAq,E8êsZo 1RSj ΂КVMx.V5Q:SA;lŞ5=ߊHk݆/!`D h螻@(ţr0vCD3Xߍ^$3L45X@kf[t|oiX|3Ma-uvي<2OәQCFmoNmbݬ8֜vbC>!<Kdri^ nq?I{# \Z22 1~ty+N+ur5ZIo94 H2#pjȝ'm2Qld:mR̄a['`u.Jל1BmL%c8p' ݃+T*r[~6F)K*EQ^?ci c*8 {(v0țK_443$u)$_X2*U%N%z:KW 2ސ;ah"ZpD 2KQ;6&Nr3n!=n1fqpFPPww0KnS,DS`n^_-wP~,؝Dι:@";߼t  ՈbRPi R;QM]oE@|Z4Hk]Aإ3C8>MpUCvz'^oSr~䂙h\؝c+jAETrPq?I8ceF)Ǡ;S|mIp )[E" P Ic?TxJ]zHȃ]u!F@Ώֶz k({|-S{>G`4 bנslc~@ [3_nOKGGک/Bfk,} xeF_i88;[ր_T "o /H̫QT;?" Y8g|2NbMt7. 
mZ+=8@qo#41q ߡsңޚq"y9 I)㪝<[ [1LI$Cn9f鰨B qo <4H1[|n?-GV>!"ulZц-;U!y@LG; a,Ht59Rße9mT ['9iB0s8:/rx)yaq},*n*g.IDi vixEWNNH3B`_lyTgrZ\Ɇ (-C\umQTvGA6ΠƦ |+ 9 %l0߼VIwVYp {F~Mo^%)K@_j;g蕙$s;GTT,||-2\gbX <0#vA9c9 1WJc( oBl+ùGuf09Z:i؏xI8V9vԦ H=XA~}:q |t5dt;TĐ@Q4&o^w.݉dи6 bPյ_ -DAn)1 Ҷ8/JNڠgT'ȷ,~WEREB }uٓ~u?[ifMc!zqKZ]HY]Y:PWӫ py63kf`Cן T\hw'6eTnt 6oMGLˤxu[DHlԿM q|;nY9"(vl"C{7w x"X\*uꛣaG$vgPӨalĦwQ([ڂmcy.# k_S1 D"L0.fz((TJn:5UU7 v/dOhhnY|B YNa1|o Nļq^C:Wz'A~I`sۄ`QI u{2 fΡ/9K`, :;_d+=+&d K %54ߠj(K i]g?.'05V@=4B;: o#wbғy1_QYPoF&ꉦjAv7{&0KmN>^hIOzd$ӐZuKq%ᚮ(hf8Hm~=Sd+|!19O}d`aq¯wiBй6=(?=/ި'[Jd9#ӅC1zL>){FZUM$E+$ǐx7=)'d 9%mKv) &9x]d(0JЕ+ !^)Qˋp"2[zrM͢`0p j[h&g%$"-j6ZMHosg d32 N0l09nx| ::ӧ]=n<pOw0V.1J \b޸P$l 7П]6Cc~j"iPEcOAC4+Tz8NqPO Ziz8ifT\SZ_tu?KJc sNql3 S[FrK7{>f^aǒ}N+LKI6Th[Y,^ܧ+S>dYE#bS /h#WzbM'Q#xlmX&ZjQ6L < R)̬s \dzpUU=]:EؒHL(q0pZŷq|!BF;Kυ_s4_2&/rs4a#:,_9l> h 6{iYV̢TI "Vn.ӎ`jΫ}Z)X2-Wb%bKg47Ѹ( e@ )%q1K< g vEBRa3|X j5Hke)DSkz^uD l{bHx^D` GhȜeLƥpiNC$iJjSleF!F7l|{XhD.w0NCPXd0jcN/V1.Xَ܆չnS3۟%v9,κd)+kl܃a*Om䦔B+Wf1 nLዑ/w.L},ӐDgGD5}iuﱏpmaubiXu"=O0]N5Ϋc"]2YIw_LI"-UjE)߽e3x#A A 2FCn(GO؇N "wT(ݷXQ*Q{2kC҈+ßdkp/\FNnUcpт*\8n'm(~{-AK@un )d t1Xc6/л|%矬}={X\`՟5r`szl@NK'_Zu^T7Y00&f^@^ yP,C la͡@GJܐkHwT ըDb:"=œPl4(\bïz@3F/*lU;g1պsγdw> jzpVP}y626u&|%>2 }2c7}Yl%"i bꑖy_%J2Tcm (rB(, q\MbE"S肽Q/񱡾2Ipo1v;^~|ylI]Ypd3b>ϱL?^S v博}o;;3[6C 5&XF[|~hwȍ'gVԾCOΧ-ZF3_ Br{D0k.ڙIo OA:&8ʉ@?Du;! Krbߜ@ /J]A$b/= dU@2z{|^߅rN[’9ޖ"<@G. 
f<&|v˞:N$bFoȲӥm'@6N$9SH,yP5UكH #dIea /oXUD ȡS@ p;%Ӧ<#I`a h劀$B6sї'.íY1?q(olOl8Q5D9H礫>/ah<$Ŧ \VʶTWyv^jӿD|ʘBį_1ZLCio%RxGJC 7U< [g_Bǰץ:lFlw XxpZR 5^H^tj&XS9o 5s|i39*;uIeuց1V6U^>s7ዠ{B1jSV郇{\T?BZҤdX)X50/,[_ڟT9@Aɠ~!U \kԽ٢/?RUHfG /$;C@!I' J D/`u*ýUӿlߚb<BHrۭV]= eV&t/xhE@~dB6#"EG@BA`O )3H'B`{H+CqʗbpjSݳLz8cӘ* XKʯz|cQ?t=ٳjv13GTs&3?}z7V\40aف6#NŲ)H6o'N^0:@_@eGbi/ ƞBat_9c#:.WbcZ+v `CSڹ s؍ējG~=h-3GyobuxMA$c)\;q5ޛddzEC49`6%`r3 qG#48h]("꼋8{ZG#w6VH`@ȏ-X)PK~N~4|˩9aq&{lXApƛK-C<<<1"eS6]Ѹ2*}AE_@Y Vp45x+#,.ZЫmv 0aODq' $<$#\EnWk3kqy=hŪzg C9+tEyT 0/_b8 Ze1ɀ,E ݒKq|/Op76 %{X8[`35ٴ+xzO JD-Nv @uj3IkhȗE1fٱZ`ޥ4'M;"f㳧%SiRG s5g؂\FѶMz-s^ {<`3BIyda0û\[2-Pv0#s ށkKo%?Pa P"i/}D}ЀŞ}q;^fVfVU\۵ByNRzж#XvMΪ$(&%ʤ+Lp+E-U?P2O%@s%)s1pVBU<KHS#WR~ێ5VF^~e L5v)4ge  _2`3D-ʀc>hM)p'xo7<8Fϯט{ )fA5Oc6ݽr:rN mmZp;aXX*w(,YI8WwD{fdJV`,Tf>^B#o9ŏdq3ÿ/P h l /RSJ!5 d\BS51­C9=F}?ɪh'@F9!o`P![WmI[ d /{h*>7{(bI$haFBΧMP'Y5==T;" -5d12a::<86쿻Rf &ج%[n5x q(GH'`RW3gv<ā Bob5[-I<%Y'S45)vHv]K|&`ZAl]f;H볪C=z y5~r%/ơ| y}|p񰧷ĸ^%cq3=@=KúOx7Bnv1z uD6vQɼV]c0\Fj=t1Jif8R_AD| FWX'Ry` =K^}j}xciؓyf4 _q5,"1=lgrcD:AwTP#doVI:,'UB>Uq&\Y\N?s>;̶ :PwvxTM<*?waɳ?<ܚtkUs0 H/~,DKL5n>`D`:{%;Mt n#5.AۂX ߺfj^K3mC9QYƶꔌOP[dH9AF2--'ט}L?ѥ S;SDhy I:Ƭe̐5Mo:zvN#V Lu48z$Ay[`5 )&gxyXX_ >׼BF [@FtE G#{1DwFP/DH.dIЎ,gNH3F4< K؁…qzߗ'N)ݢ}Sb^"Y"O6=R]nAQ߭< iŷ׎DSMʰ`y|5Rm#ݚ  ϴ̿V }zdFf{`K F4 iKfcLۻ넘k{4:a}~@ZZOAħ _OS*|DT0sԔڋɓFo1OƾkmJ1*]Sq7ύ<ȴfTQ ͍Osf& ?&lwa/|EJvEU&wFnsGIg2mJt^g`j w| ߂ G8Rae~ |p!׌[ҐAjS"e 'qPK=KeX;|I ̳@E(GפaX%>V { .9?M$hh*eR?8W}e\F1%6}t,j˾Ȗ'/S"ؿF}aQU ^E$؟ OeJP7R@\}"==Y8e hAQ[N GbByK=o8g؇@M+&` hz;n?ё$qͅ bYZSX)3.2Zݧa&W'1AR2ҙ1Oϳwt'B77*"!a=C{:>u{t^ NLA5$cׄG(m&~S?sgF<\؞2m <vC3͐Y.֐Y;|1< hlE/ &m ܂;V]B#R ˴gםLPxP29I o2IVL0?Of\N PZl /Uj _.S,݉v_k\7/&X^ p q{"]~;?&0iQoiw&9{^ǫb-y='0{8ڡUO c{+d2Ō5(AV)Z#R]O\W B;\ކ' KGS,+!+'n n\D|kȥ\Ϲ+x0GϚ?59YUxQ1& [=L^&?$Dt%C( @dկu52qs3"dQŹͳ^թ<)F n/}!9pU$6۝S"l>hʼnPkqaO>Ŝ(7o2gy\!@v |%KeӋ>[\$ZےQְ]H  p( :&1>9C0|i6ݱ2hlȯ :*J(Fhc&w5&] &o],> Ԋm0q(gQPb5piy{N?OhEZnzw؈j(?=-/H90χb,Yc׳KO&Wv&nhbXe-ozZЍαhtQ2Wk |𢮿GD~,C7✛Y]xZy@{>#;ح7ffb7bIH^Di]7"^!N_T+-ZfRDZffqv[hZ%t%A 
m923bJ6iےb>%J^>.ԁf)𸁪!+YO&a&^mBcYx:j93K-b2[ 5e7@ؤJɯ֤mf?ҕ1yX"3&}3Oe Ì 1w #Ѡ[FU}]M٩NP3ơԁ ̹kyR`yI3ih>aoN*DĮ \6/6./?jI&O< 2Kvo)SpЅzpruɽf@Gi{QdA8J{2;ء$ܓ/S~ʦ+{ 9sN;} nk `$Bd}40ʃRב&}Pml[;Ȓ2&X16'8x`z+0[ y:EB`uLs~A REṷ8v?&eo?!bc^煣G2-|QcDdlon 3cFDU*i={s58tiOusi?0!50jcWKZum/Z`{KI?1<㒂/^ $*dٱ]bdB\J V^S[3N7 .p쪳iG7-لa3xɏ4 (ӄV =sYU0/s/RkbY USl"^ωIc4ۋ@6 $ `X;]T+V<ީaxY\ |I[%3޲6'b$u/|0ܹ1Wɘ[6^Pu\PP M[CLDCO6iB+欅:pX !}3x;NT# `M-؆}[:^=s?`h"KG GgU [F> @(0AIW7Qo,=wpfZ#W/Zs n7Vzre[o#M5RH۶S܉~yNV-CRd.8okT2Ү)dg86I9Di#Y*&s܇d9kQ__7]=ZBRR^,"|۠*3= G&>L6ZN%Q""p> E)丗 =RuL=kL$H<(EʋPtDUDw2ΆKCEF):+HdgYL`Xvz63t<6^F}`Y@U&"S7Cu/ؗg1m_:QSMޣbP3!pYp;> aFjMbr s.2@HmӀ|Ib!`No$gjcBI_3dQ L2( &va 2  <(^jf1ӓG%x=!dA5԰(UΗ i@}`dl5UqK5ãZ}lT|[d!ےOJtQO-1|/GѴS$Gvg ބ:[n '%3.WTAuI(8IpO:&e%r27fY8Z- m3CbvkѝJ ukϴ{z2!u4 "DcVcVNa]M$RxrEКŷ4Qb8n{{E6cwΙlY > LXfhL"{n"A1O*7='ɇPj2ЦہZW}Jbv\p^o$:DjBuX&.)`gdҹ;-N$^d&$S.-lnsb SVF3 _!'i_?v5^WgcGR$4A<_BK؀vHͨh^<Հj?; 6ۈC#^̆6Xnn9Xlfd"'ځ@2(6M}dd#:.+|{ڇOb;'_L]/k:Q^OsEBWH6{-E79_-6dMB7NFcx"<ɨ.ő-(=bT|ȫm͋F9*>21a 7?-"=^ne/D3Ek^[dәRX"'C3_ݍjޞ#e='80Z!V>s)Y] ^T6~ú ٱZo5cό,Kp\\; LT˘[2ꊯLmq>'ЇUU= dnN)Df'BOdh'jE v~Z1>-LSa'ڏbIHY+Z؜:t|&F}T]ixCO5\r|dOT R; S |xqg)R~gЄ7NdW]jHRpݪ,<ؓ_o+clR~ު1Y͸`w+1VY<:z5D|UԈv BX,cz|OB4O4~TS3*A˵ɽ{ (K #63ozJwl_ *,TdRa"\cp"_wP%|#aYj(Mڡ ͙c]2+ p2=ٯs~wrOz,`YeiF:ؕ2 ڗbĠW|~6bXA $Uhf 2ŗa& O-b| ccDkxJ 28qlJTF f6]TdƤSKܡ^9 9ֈ/jA}#g&]S2']Ғ-d׀N*ݵoŻHI7Ξ}WJvi3pժ!x҅=Z$]`Gj9\a}pwB%oi4y~b`;VA'%kl9X)a\o (q_'QwYyPthhqDчy7eW5[?/ s7Jx/)g8V[wSOHGNltTI k8|HO) P͗d~ k?5=t)uábehC Ug- 9/f=@") m{@X:T tie40#gUP`2lv 8`k{/_L2T5QkvF3!ϸȏnngV lUSd(\$z@*J 2[$c=ekN&wZgٻC_J"^1}&_a*t;A:oT -'7ЇT~;9@n>Q:EUO[ ZӰ1UԜL =KyyԐkGPLb'u*c`cBs1 X ߘpr$lZ g0%uTT"P -BXj< $+ ؾ[&epqnFT&\T(C %6סćf}VЮ/pphIA1lֹO[wISF_?@طU}bi#Ae\UK&/% 35d]C"s 7/O tkvB) 7X(Ðaw`I¿t>SKbسmK ]؈"ؕL&%Mj/j|<Կȩ Z:=@w 1OXQc$F%FՅ,{Ϧ<- }nct>Y}UK[d OZHx,p _ha/{7M.rzO\ 7 )=kZxR5>}}ER(.a#7$ 2H? 
җs\;o[|6@V5)pi{>irŢYWwYP 6sBּ>71u)P(=#R-L} ctS$ $Wﻳd<_߂Q9<[ fDMK۔~K8Mi;T'V&d>&\_oY;nNnUP$yi_5~/X_,(K1pLOຯ1a Eq|fy3t+CQ?\$1Q~77N: ?яruWH5aZP}'o0 n㉡3<9!A sdI[ҵe8`R6{ 9)9n쵧;-i9X47V`ޞG#Es uN^t6i0p9䗅H6nM" 7';Xưny%<㶇FD\uTFp ;F)" e) G~Nʟ ΂S{Kt= { U%z SdCo :)9܂=%A}飃_5.EO5OLƭU^]};A,2#it^W_@es["$7CYr0^nׇ Q7ףwsOa޵zCIXc%穔8ήlR+sc.δY WVg䇴}2:՗ay_nԁt¼@柆70[;PG*%RazVNl7_:%PJs\Zb2hT^צ # rXe8&uNj`j__.Ԃ>h&5Ef]FPoHgoE{͏ֽ++L2o=;*M[\l܈@uȵD+Tuh+ j^rO:*겓\Fya]auOZ#U6˳FB \?2F{V}վn>ceqZ<-ʄES+pk<*nŎ;RHQhhbEUI$"XI%O4^ES"lی> VE3 ֢)[wVkv`@)Q+ sȚJwٳWv+bR݉ dvL<79B sѫx4)^7(# GGsY*SɷhEW "})pB7[ZfKN ;9|/PRY%z K LĀr _wz> 0- 4O#|4ʩ'4f0g'?|w$I }7 >Wwy+Ф}G[89 D a 4DC(͋5V6ř+-.=.UZ١O+I{)w|]raR{,liVޠb?GG\Z1gFȫFbdgf68)+|x bζs-1p[JP-\nEBC{Pi>C (Aר B%D$[#ZDC_k~w1v䠳@`GZ |ɊSᐅ}I727"efb'a?}>dL;l:*n^-$+#P( #m #0.ڋ"ZE;B(8 4U rC  {y9Ѽ29*fQݚ𫯡MryfDKֳc_%ip>o7$.uk=rZ!=OLM´ˇ\. yfFhUBq/JPŻ(ɧ-N{':E7ilxJSn7Ul6"A}c*F|\о0֋RRK6OF,ԍ@C⩔} \wW(zkaCI>QJ~WvgPbQ2ۖ)8buy8gWca4؉:ϱӷLc3Z%?pJ2,kG$OZ=Խ Sz%Esqr~B" fs Tmoccvr^1L%"PVCX5@@HU=j+Ԡ g4{1Q.q̊W.K,>:{ճożrK}-GK љf"W7}bԇmIj Ŏ^xeN>\շ͐'ͺ8ttVn;mhlyZB 1I!rܹUɟ OkvK\fvvJac2 B/;o}9*YHx s]dPJx,rxզ#ƪ8ȱ+O_ 3"U@&y@)䭽Cխ Ǵ~Z94Z*AY1N="m4랕sE1؆qEM5&/ %cacBGPy>^eSZ߀I6MJm\62 ${aٞL zf pn祖XIl昦6h|΄g!ﮥ lˍB@Ԙ+!p E[*[7#9E1CE Ssy /3ZCMZjor>?H,"=I3hI%.< rBshVlwK,6bcReb/3!;5oB ;d(aM7Mx`sF.Ys[UŠ@HAT'(GƢG^o׉fqXg@(8y R>6N{~B>G PMZoM-}I.¦|+K{ *7:y+mzH q)z4))Pc`v!|O٥Wv3]\6v@]t]!1?)5uLƍԛEA32-;"cwk2z,\Ex4=Q~7ZTR2 C>gpvs ~鯜MR9wo'/e]Dei&/c?\݃CгRֵZ-ϐߖ[P;/WAVzoqD<!ئ$rB̚#ײBgNL$z ]PWՓs(!662,x}&9;(7-ОWkn~?O q9k{gDRvDY35pT+X>Y ޥ0.wf>CґL EaGiKo0{ALTT!5b]YّO~3ņLsx79mN A6_u (DwƱeӛDEV.S~JA |?(R-stt`oж :6W:Dݘι:qT7L2EdS)OBb%JR>c2x*a!%zG'ٜsLãҒAW~IFK5,hc4M8U)VWiDž]LOс{}l2^S-.Y6;/>wsQeP ~l%ݰFA}Vc;B>_H{DVooyhyR9\~cv1-p-OQ-Pͦ}ƃ`QpSM5.&#J)6~>=\rmE;ȞΑ t XұoZܔ5 2L =nfj  hO귍Bu򴈸e{9}FC?)L8S `7А#ҿ$H`4s:['~,qYbGbV{(K0aepF`v0VAkuzv+c$þw(GAo \w"g`TDCrRKglOA*R{(RR1KbN܏^9 ھ|h!p-zZj|.#tnEٿ)r Ƴs98D5D+߽5|VζrZG;iO\6X<:UnX9`ٕ|]̅v34 دq ⹖&Y>OE (oo+>04Ec{VQ.?3+_dDy`oʥ=&>/9z⦣rكy;u# bނQ ߕbM3i_(u<իT#_Mx$Pϯ%YB0(s[q*Zέx5QuM.1 
W>h؀#=Ax;fJ^\G{/D8Krs䪨lb`C&z)EP5q=m-٢"5g\^s)4$)LQ7'kbLغƹGR;ڼtb B'd}ˁ s.1Gz+؇wշG[n'ٚ),j]b\R$4u5+lπ4G]G`ӓ&l#1@-/]rdjQ(.Lxx*nxE7\mдL,y·O5J~APRTNkm({LJG>4%fY|0[1b `џfל%kjl8X`j{K_ e%%2Q_om93d&6핤pdaU_T` ˝ȁC.^"ϽN]>o ^[I|0J?ÜN py4 eDHMi+QyVq"|Q Ⱛ+fT|]#oII(HntkjRe1pwyNFp_:_1u#yGd.V_J9 w%vMq'n=k7 ℹYbD }Hdp2wBҮ-7~/4Z8٨=x`/ zr_fG߮3!L1O=mz5QVwDJ{/q; d\KQ0?H{Êf6},^q6nASԼh,@Õ_Z\6q #~$ВM/dyj%jdeKd]ua pSKY5 /KcgSVH+R#}>|nq< 7p"+—/*4zu[G'I1@P*U-ӉEЬ2gje@'<:. x1\kwNj:2OMNؿ,xHbJGvOž3tF.ؗߘwO1xh9&d)6Y_ƚ `wݓ»8g- ;6N6)k`ל:"ɵoI=%+l)j=(BvZ1?PD7x\N9W<`PJ0/M]%>keaj045 v!IuDڲ(G<'oDdk>5&8ҖQ@=Rw0uŤ@p>㨴_TxtKt[?<sڢN4Ֆ@L;-}W`pT0-@tQAHD*,iW)& &aqoXЕ*_Z{Kxoo*&+z!V)s=vͅ.%#߃'8lޣ} GuEA>`pVFO3n;Z +s}jn@B `:BO9\0w\03$A5WEҧEdi܁}vM-dv1y9ɏ@eVr{{02%S~Qg+Eޫ _'y$֚#?f$-kPUU4):H!"vpuRPH1g˜>?T}$* gj g*vLht]Wr"0H^53i\{Pâ^"Q+2itZazbJ*Mw$Q7:n*#41lzE}n#Dp G5Mzev5m:H/zv|CҘ4P9;:Tt//PF#*6L؃ 7KIh|Z%, J2jTP@Ot{=l4eA@ޞ'W_;O;`IjXe h2ڸ99J]ѷRʝ.!K2oֆZ!ڑ"9CץMpoyUJkJ㻱xOi<`)hyaDAJUU\{8R7_A.vG#P(~A.`@S -&S˻y&e+WFYg-4W֔YoC&ՙ_W^~+O] x?N4 W1f)bjfr•yZVœ0oUNJcS _k{nXmߏASzFm ;#7W%W8jLODU˭JM@V./:z{KrDѫcr[$ɍ"_.˸(+Ј+Ǡ:gZك%NpE-Pu P"2+:%^fEI dzzx^I/hf"h`;,Xhtſ^, R$,w]ЭP~ry0R!V;wd#BDQ! vf~Uɦ1a_[*3\ GtIS Wb8v\li#VN7z.14X%vVę"DNF7j5ryR/&636̠Zjw?"h+*&k$ !hZDlDb;lK- B>:e:D< ` IV˞U`1]-]?=:C5_\GmWtr'@{TY4ʼ@1sGRͪ#?-e%U^ rqgsL}xs ?tSfɊ7AX4`c>יzae_҈&;a>2XE /-ސҗݿ ji:0= yIs,rĉѣޛmii0kdB1v*+up$Uf7p Z׮/΂ ?o#X(ǻJ"~K;#VkKH՝XNY}\LK:_WH3^IrO4vɌQ $u4m\_ ` je=Te8)Lz񅞊BiOǤDt6* os_jyp2nq'%z8+SYwNʒS$/Ǐ-ON^j&4']C;(DR#$ X5]pF xi@yV`-1K̋bp||Rؗ]@AsGsWF!_/{@o(z INOE-JYpl B1-A[)\; 97E̳#@|uxsF:ޤ!] 
Kd\Cƴ"߂;t}VY3˄M,p09SQrVw$ WC*hǪ/H(I hu{2e@!Pf.rtW.7UYo^pBA+[ ʥ=7V2l7^~`,>]~ӨX\M@}?#|*("35jb&W SF -43ي3&(flwE|  4x*m Oʓ uiK^M (;> R84OZM’}qAebl8QXu!o,D@@V0Vq:Rofˎ>)|d*紿HӞW !Gth`='@j;캵|Ci !Vsq76K&> }dw ?a֯k`1S;"0hvD L/PEmGre3φ'1ii  CI{s/)T ԿX&vzO/o?L\5TUަA֞暆rsA 1Vtʨ_"VSuI=h[;g%k8 m^xȨ: ؼ̢_zF)Nr@!B: mcu (#v'i#Uqdt9 VzNG~1OTђ*$T^!^Y]* S1~ՓpZIZaϟ%6x,#a\^[d'7 @52[c#I$)L)v3$7:DJ=2`T9guOcTQ RnZ}ޔ|H|$$*&J5bω?)ҋMrTT*] r,!ib)K&\[)g w; 0C'+܂t렛ɣry厑]%)d8#9_3ʈ7nZ:VӋԨĂK[+ݲ ) Q"'*յ2cZЕ*yݼ&8nK% Xa{f{d-Yѭ*N ȚbR\ V:t!98بlƿ)*zY|1T+g ua q]6Rù1h\q0V7g5"#-qtn* A*ɝXDA7Gs,v(`x8\4uڈzL1Ch{pxO< I蔈# 9iQc=-**7G0bDh0z䙀CĮ;(Ɵ|O@wJEx}:Hl)Nk++ǿiL @p7PC %Aܻ* DFݞ&}*Nf9WuXyo}>^p.ZCyuiBozrCr;y&:FSp. iY+jx]z9 #,y\ 0h!G:OuI0eWXw%)Evgmz@Yoj Х"'tJmFTU9H `n]XTQXZR)xS[xaǤ RiuF0dB?Pe##C|HA%#wbi26;s|>7%2$LPNe !g!zYzDR-w@uzGo_ .eexibгXB})81\kf7N)jFcL>`@|lMĵeQNΣ_UrbRi>aHA8,D#"W^b&+xtAFAp$B)H>s#7eq v=9`r[!Zۮ)Yݲ $نiM.p'7At]Kcc9-`1bZ&633lD|UN҈)2a&۫ g1o6b = arq.߮cOp[930m=*ӔOd_4 t%tWzLL ] 58Xsn ؤ"*mA\dɾy -X=g;L[58_k>]<{u0~gBlȺ|S꿪oJwpPd_1{=z̎ex0mN ܶ!Ǽ!қ,RMn7_߃KNHn0ҘB02X [ `?OÁpEE0-'v?m`I0xD} Ձ kumxMٲ'4AIϊJ٭D1pd{s2{ ߥ<Aij'8r\wb+.8b||ֹEf39h<&P=>6 HFR~ .MλN{?&ojGWfsf.H?[hE~hr +*p3E#Lh`)Epy`S~HK5ҌUboib@\ <@*ӻ 2Bh/M=p89YY(t<zk(e\/P5s%Nv/(&Ko䞛3*hV.lr)P7ϓOk_?َ2̹PXv2|B~A{o>T8śɗz{%ވhtZf3U[eG- ubBvA_/zx|vԇMojqj_~O+{1CPbB֘ euU % uB\`=V фV%X>EV }1A|PGPCDG$wD"5߷i$?NUH?CrШi qtKKl֡JUxsg]MUNb'PSsEǝڪ//6/Մ4.-4uڴؼ `ZFΙN3z"*-![J{cbTg8gj* ɩ 7{xrv5g.ܚok!ߝ|.KnyIcݘޱnj%YlMw lq:o;@\L~0Q){}RR]iXݔKڲ)}L|:Se:ağ{B,D2Hm m)2f:96;~yhlMf1z|YjQ $(A|В Dz5dH.I.힘` oub1ly% OC-PT4$LpiC ^)9' D/G6T 2!DfMW\s t46lEF7$TW3'B(]JpқlQfV 36r: iVA >E+^$G[h)Ϋ(<)Nyo-+KK 7TV*5Hڡ^Gn!I>KM#(ʮGA޼hML`ii :q@' \;ٮ;Br!^pi[km'&04.L>nOll/upO0W{YhIm:6@(ދV] $HD/y-ŘQS?10PGT}qJ$4`ڄf zZbp@>-b;'L {Y6flh|3PȎP8 p<6_'^F*hٞDW).ɝ_+Kͽ`e;my~ro瑺>.MSj>JhYIdLXbF#V@da# eEŏg vm=1pafjnKMa7`a~? &s4:`:F4Pl-]Ī<))$+]B裤;gG]^ӸO|і<UXћxSGc²ʂRa{)쵒'_{r]9k(uIX`#=/O$dSWj|8jh81G7YZcn ݧS Q~w:$@"AQЀ@}YSkV} qOү S ]/gi=Op)d /m%Rc 8I=wMCU vxTx[6|gU%rfDcc77. 
Mb rM֟@صvwd%4;hMGjJ5?U(|F:- B6WrEfFb m]cvsBiʘwٗ!rhLʝ)Oɺ|6Ӫ|CXߌ3y<Mc!(x- t(*̀%7Q4S0g+̋&!%GN Q\7G:?ˀΊ>0M(HxCeTU^} ~MBa`~ oL+0~m,>xƥQj?9~ D?3W!K[BP<PI0`) D>XmO&~rBXLf7_6fWAwx 2F/N5|ĸ,#˴v #5Rܿ}'2@^(G&j Px  (ji Eb96E +;6ڱu;jqY 3m0 6D2]De7ȝT{707G2yUMLhL#T,qkޖ?"Ek.\%Z JS0Ҫݣ հNn '쳭Ppp0 'מue l+zA8>z/J>" &;z{6O_en1' 7DF^.Ws o]d <ХU o+R yհ}˾=OR}+{׀Sm kQLz닓2n K`+R/c,SJ catїԝTO+jC(P} Ց{/x.N 3\B<(߆6? JS^hnj$I5T7&wCj1%pf)xijAhNbj`{-X=CծQ0P+T龁$%*0f=9nePd-c{_OG ?dh%$䑑+E}UN`lmֈ~Bp=:yǑ]ۂ+HT!HPH1cr{;Ӟk7,5+&}{ڠ6L$@q>yfk*|xT#ֳ([Cⅲ#M_( |'RV`A#FCg{[=^xRkee_v0&x}TvOO1 \4v7v<;\vC}4S&VׇȐ- %ɛ ^+ z=nX10nuUO"$d1jϮ<'<e4.r{P4֔c'0_GUᑖXPb+t!::*yZWLKzҳl!rW.3#@M=䳙yD&184vh{_5ρR ld"#~69 DA 0Y5᭼8Ʒ\ DfeW]? <׶.o? QZ,q\:fbZbYH4V[lp~E9[)BnDUO%D)A:IؓJ\aֿ +.Aw)gA3iQ/mףIRn"sc޹cO ;n]n7VŦz Qs$k V)7ܔؾU.-7k2c&w&k[i~k uSpqVʡ3lj%ophź4*q)ٽeg)[du(HսmNsQE!j؊'mj]i<.g0xo? xs!?y˥ {Ul1ޖd+Zy>}eMC)Yf^>H$^RA4nd4}+0R: <(~}B덆/b(}=uM frDq6oSpໄs5 tXYwFH2c~bƃ^Z|+9bi>Ap%:ޙ"LKR(DX'+jȶ ;W`7l"yx.4qȥU#;)tnH@C(Ft@1%Ugήq'ԉDɋSz9]=o>!7}{BmZI} /.ރNQ~2%aZdHhk Qb~%e<%DyA==81_$O)?q-ƈ1`mcp0Z$L!$G4lbqLhPu 6_)ܤvP oX0';oEa@Ef,+x9KˌLvIB3dKM5FAwnB[*DW} fsP}D怎Oi:μXD!9=>ҍr,զmsa2W-uڡvL3T gfBsGD]=|Yj((MRb;ь曷ݸZ{ia5xPmۋBs2q1*/*|+w7\5ƪk.&j p"}5|)JBKYb eY*6Fsbǹ 'V^Gȥ`J -t{mmaL@^<|&Yawz0IJ.>_)JmtN^y:jѕKWk\5M G67ϰ.r9.`*|VWל6bݨ+ծuC솝EtΰLRX؈ zE%o;m`5eW\2"ӛb+.ݟ{SXG\ʲP Rt7WTdcaboڬ$A5͊iM㛭tLJ*0H Nt:,νr[Rg|Ls  &dqT퇶c?idj\tv'9AZ(c;pPIs,ٺ5Z\Tfm땐vV{j]{„WQb 2£,NYQi(Hjxe +{wā؋rwTH5Hw^lV 0mND$*P+bp Z if I -с$KP݈9e?eC 1T2rr*9۾]}:nΜMPI6{.nΫ;kF_!Q#TP^ڵJղvˬYN?DF丏@YWgԗ4];h'%l4a Z3yU;Fj@[*<<į[EYe{r"Ozf[D0z n L5KEP+ $DF-xP; }5H"$5m z/_7P KnEZ2*S>T|#F͂WVfL#^ٟԪvyH P.(>6%ڗ'W0%ʤq9}.&;=-+F&:Zַ\$>{BH[LcGyx#Ljk6nz={ Xm3]D$3>:bh+7NVYfq"" ,P*ad_IX$50TEwWᅲ6LEJKpAv@>KqF (ÛB0Hȍp<B8Op7'YYD =3lA ]3&bloy K;L pWm/]27 c3à k%aF)Jdı0X㫌K '+s}vvcdz 72%12Qk!Y&uQ8i=yb-Rof}^zdMOv.05b $4t^gڱn }ESXm_lPcH.". ? 
4;y~cDajo%UPhF0^ w=hJ/lb쨤r)geyx;qtsoC*(Lƣ\R:~,R*/Sl&YD(UN-g])7l[]sɮ!¹ѻ~=6Rjv 9hq.lB^xC%䎱ydWlq U)=]Rg@|C53͠7 .p[DfriC;W j;>A?C.@ٻpئ\UIP Gmϕ.^Ts:uj']Eb1q@ EiH_!*i-Ġ|~(5q{"ׇӧD hɹ_C)@Y4=ָK'*|vHg7wTP ˖o\جؒOSEM~ҕ 7,dw<}e .kd$@Yu"k-$',Dig9y~;~ms#:aDBPӀ )4T&90\"#wi~\bege{CY3&{6 i*$lCdOKa\Y RV-U~ؾai+]K7=| ˓ JhL6P/U:k@(( |  Y/!Jt9uA UFNq(6\@aZ5br9ړ97 %OѴzZaqCL=R[X|/{7`a+ 4EJ$ S Us@iBF0;CcMb{FmYK1gHL lj|(G]I!f02:T36$NiC<bVMʩt7,-F[pLL/ 4KMd$R쮑>GLI|@7P,-NSp&U𤁚F}BQNf%6{qvkvB糠8#/R-4G7>]TsɈFfcPНJ>;fI8$D[?eOԬ'r:;sk~wv7Bs͸J k'4$]YɼJS_K!R筢d 4r7/-%pu#$m>=Q⛚"-fuJI;}Ljz<tznKxT#TP0jwʥgJ2I\MË>:;Ѷ^>7˽l,Tn/UUjR8`s_aQGu~@I'L1Cgb%A!K~ fA6`Z1Gd@2 ];Ik@cudIMNKƆ/% pWQk,n;S&o]݉"MO[cȃ %; 3~#DrE[e)Zz^'7Sע(p\kY,{ݳ*J"-"C#vYXј;F^ 'uiR-U )q8b/tZxEPMs vC+ŀ>&(0&%('ͩt'?~[@"Nܜ-&g=U$@ {(.ttX8UƝyUdbsNНʻiH%IDsơ^}A¬:JCuNg̓u#mMeo,"trY? ol7\?2^x3sADMO+BHO/3&$ ֊wj )R_4E-kpfh-;/$f=/_ۡ*ͦ5V}?]"-# } ޤllȖ &YGȋ%`EIgy{C;NG?pbIXXZٯBImP!2@?m9ܥ5Lx=*Yuqm)_"4@Y7, vJY:n׫,՞1_osqDI2P$qϚCÖƁOY_:SDⰩZT%U8h ִ݇LϫiQU`(3 ɷN3+ Ab~6'Ԥ7 sG<1Oߞ汁~sd.Tkt H^ͷ j$v5aEs{uh:g Bɀ 4opB m~}NRCBa>eBgǰ l镪,?GQ^kҀSir[sѽCiSNrg:Ա|i`ҿ5~}o4n+,(&Y&U*~x2}}[aqPeyF!b,>SXPA7IewJ/{} UG}9 \Qkm#D`KT%ui\EUx".xv{ bBB%޺*6z&?k(1.rʨw#%#`U.n8` '4 :@ݶ8wXx ^g1keuG8.9~vI0V5dELNE&Y\g;khSO_d:d\᭞Ӫ~!K]P <{"7jNn-2D 2?`À{U#6f_0F"wJk ; 2BZGǒ7 ߟBo} *g1&Pv,Ew%my_5_#*PH_8עlP6(. 
~j-^c TP$mAG7dd ɿ"iha ۈyrYz{ VϺo_ }yKE(PF,YȱsUjTǛfOSMB.{wGøx//'k]-ܐx]SUo uGR[0,/o*"⌊86_ c@&`WeMoz2e>ϊFC˘9}f/^^6kӦL/CJe\ddSc:y#hƔ*1uf8R61QGճ{%Hf853Od`ղw{fA=n,vSt]+C{{8ӎcZP{j* Uu]*&5f M7wSRO՞[34+5 6;J>(&#=BM5K +vS·Vbˤ4I Kyr1q ӂ+MS8YƏ/E&Χ-IL($?kB+S^(_'jW*R}<{e0>'kBҬVj>~ɝ!dZ(CG-/Þos)o!q(71w}ss z\1:iGb4 $.OTP-Yz2V)LW.C{@4QDwy˻tXR z//oZS\a][} Z ?R/N%ѸL4zCᩢe d(}Wq@.}~و,$Hx;LhsI'\H1{Tbg:K#_[k hS|j|wUwfz~3Xz:iePqӧ_%NǜMe:-%GL!Td,0H v(ua,xs !iK7E}Y5mx09JXvP;B$% -Hz S{qN@ 0ȰRcpߠwUE^ OM~Fח3pk#J.n/LdF)*'5bY^/J> =l8anl˿Qe RuF~{lU|psM|lSuuJdITM@5=iyY&, cp=n>0 YZspatstat/data/simplenet.rda0000644000176000001440000000235512252324050015546 0ustar ripleyusersWkLW}[1(R"jECc11ҘXdwIgwYVH[h(0MEM bM &.-t[Sb%e\ؑ6it3ws.53ѐi(JMiHCZQQZJ/zk9ˆ :{g~ 0Fq4Y4,?Q0F[XQa l2j z636KlXKysvlDXFP9f FcҿZhX3S BfE3gxhſ 6eʾ໤Y;Qdkg6mrԐxA$2mpaWxćIYGWBo@ d2qKpwcоcю$/A3KӢVK4*r~0/ACp= u9qxΨEq(RV漂p-{t(6աW FʌpX_E+.1{½ FYM]I+v߿$?spatstat/data/humberside.rda0000644000176000001440000000321312252324046015674 0ustar ripleyusersilTE_UaޛWEHQPP1"<L4 lܣRj0/?a@Ԩx`P Q*"PN"7-|Аnۙ7 һjH8N#T9QeՈx6;NDR)ev_v$gұ$C:^T_RxVuttM'0fXK=.(  yR2̩\wsq0F =#Nk = Ji<a0 eauWBTt9Kpd9q.'؀oʬǟ ?_7^ rR6 O޳~^_)szِ%H J5$d"-֦6iH'9+ U L𶧦Oѡu٦ÀE3F30X(ˤN=Q7|I|"m,4=J3HMR)$V_lx> @HgI>A D;& ѾY@<֊S\Y32) $<|ȷ,<=&Ag/LI<- IٰE` ?_D˿X ndܭ5Nbg`_jmS׈4$i7<~7za}ICyHb7IZ 'qJdelC6K9K_j;_-Y5Loødsҿ|kC3&$|RKDXLV5߅lB&Xce/֌;%jMt ߪhOH*"c*6^|g6l:1Z cvSUĴV8=&I"_)cJ2%1ϝ, ͸_"mqm|ijk66ɛM2G6^]WBduWD?lƈ.Ȯ[b_֍h׈ȦmG̲߸k<7w^?qNݽ:"&]tT7$[(s/ۮ͐>Kž}lPl@鲆qg~cf0WV@b]ւ`ȸ▉%>/W;_s>wo# eDVR)/Qq2OW˅t>IXX9,fI' ΍u>y*>>,MmnZNu mkv w>2>\鹖 *k{7WmsmS/f7crJ& ~EW6#Kn(7AN$_+JR+Kוu?6+%. s/ D?spatstat/data/heather.rda0000644000176000001440000014707312252324046015202 0ustar ripleyusersBZh91AY&SY iD @" E%R*(IT%UIRRRR *$) %@%$ U"UJDJJ$HJB)P}a@ 2J6mREJ&ƫZ٢ZZMUB5EP"U()B"@ʓSOiO?SS2=iS4FFxhF59l]㕉+`KdUq9Tn~\j;IꜬ'ʧKdƊ(d*Q/©8A%twaj'j|= 7_\I`Jwz.G:A%Q^UQx}*mGہ_k^[$Y#/T(WagCy=(w%h:_o_ڡأ*>Jf© /}pIwG:䗶ԝVZB.UR?ڗ WM=%oa.V>1ʩ9BrSbxIjV/T;,l 6e~wm/%sةz̐G9(p|MUTvJħw>Q6??Qɣ䏙pY{=U2;K1~Ѳ֝}ԽC.͉^n$rԯ(%/jt'_9-C+Qv_]P}YӌU]vjrv3SݣN)<. 
9ZƐvZ{IuqmGM$j}$=>~NSVGG>.RX7RUt_Y}S]Hlj=V*Z'uME|P:|J2vv!u(~_:wG֣jSeʕr/iS1xRz|:KJIdKeN JSd>S򰮳T ~FIuxEc}^W <.*>>S/7u'fT*>W:#x\dکp 3E+½ \6`mvWWȮTN/e>_xxY#>[}|iKU=w䧉/sW?Hq'[]^ZwcY%d@uEƑ:s]~)j UZ]}V ;U=|=^*? =+W<{BԞH}"\G>6q8Ih< Gk'Ci])<^euإ6~vTEd2+W;Ge< Ezt I=>KVWS<'{\E=P|=JW`q\]w)eOҮ> RO#kE+]K/`DzP]W%.@Jum99R B^VxTJs\!R}N"-?ƞ6v \1#`<9+9O#pԜS:> $^+h8)ShsJQz?§ Q:8aj&sM9=/'+H2?;:}' W&Wi;JznG>.uu ܾwG^V}]P+Irj$7/W(d;|4Nyz4+xO;TlT|ҾtПKKʟ}xBl)z$x{>&#Gw8+|&'&CѵW 踑|=ٹ8U\{MJ0>>dvKM"uy*Q5lqmOdKZ鰥x9e.6Z+;RΨr:+w'[#:Su)j ijUhSyJ=D)r'l:|OŽ ̪N^-.F|K:\U봮:G tɐ\]^p ~WuJySib+/iWf bN6DGu<OJC:Q{}juZ͸;7G|kv:N>?$;ܔgԸ*{}GrWy:.J]{jOkj>.õآZ\]O{pZs=,_qDiwqwEKj#. 9%ܲOcsmKN*: ^.hUj=fW^ ~}spWG"Tη<蝮TxI\0vڻ N[*I»C<^Sh~Y|̇3N,J+p]R]Usp a;Lڑ;mOm=>Bsu.J!9^$ZC.*},:Sz؎S_y_euO[Kʩ|.;//B2E\aI5.];O)MWù򷇉%js<+jg|3l|<#'9#jc~u&0Cxz]?y>s''uݲ; X{y۷Լk |3>.A5]OԭnЪX[6x:J "zvNG~'u}G8G6YOm'Wھ{Gul?O:̯csSu\:MI:E{Ca{}T9J;RO#yz^_b^P7It؝nWF''S2^WO;z?w),+}G&/Wsd='vXdvUvz]U`|D=6;MA#jGn5z~Rz.|a{E8Pd]<<1KdKVtl/YdW\Β\]w GI;A| )d+~6b+1Wt]ͥS&Ru3GğzÊr7*;TFp޼cwTBvx0Tv'$+yQ/ B^ENRlȥĽ>P]e>_KYx.LtFJ}fqdqu#K["xxhpJN&ҫDd'OHClTWkr<%*\DqmUwW'$|.';%'CMz Q{~rh=gx4c_Rc: R2I\ : hOSW]c>s(k}0ttR~W+(d겗YIYU\G:eGen~_;= h]Nfk|L'ԣ5mk.|!:W3rhΑhGOsu[E6]f7< }CilGw<vW鲫\\I|'nJ#m?ZtN]!:ܛRM<Jyxԝ&d/7WvډЫȝ n%^S֎t m* 0)ԩ9 /W[`jp2/.(yxVi^'u.)WPtu=N_W x8m#=&;It 0| Ajje}mMxE/)\l]^6'jT}M\x ^].]ⶣQ*_?u^)ʗSv.ce*}\rwYu:uym:u'\@qWtuWV.{U+*Wpl[${:}aEWcxqC^.E>}^q\J7RR{ܣ/n8ayK`Uz8U|'^nWI =//<7OMx?;Kzkbݱn2.zU4z x:]n; u'bI<<90W8Fgٗk;/C=}|.Guͪ[*.RڣrGqewztrjv}w{t]& N'ˍMGx,]U4/ys trWN %$>,HQdwڗ]F[+dgQt){/EzN/C˟^)rt2Gp㓢K*c&Kһ]'4ڥLs|!uZ:m! :IwPt?? 
:> u];l#RRUG+,BdN/rsp'q s;#SGC_ rIbQzi4 ?\|'ZeWuJ6)a*p)]2].#놔6.Q>c%?It5G+t= ]uqvyHRIE78KK[o/N UĎ(uTY < {=K:R:Aԇ>x8=v/guR΂b.Ht*tAke*MGgww'f=!=F]؞G|acW}Ɨmyr7R,MR|/;fW*=~y6=~.q8n *|peK{\ItCVlHGjYka%{Gӡiĸ.l''5GSa^vzÓjN; Wg]63 #N~98cyy&O+vHW ~QuZ.h+q}K}vWO>_e:lx4#3!]b2uSGatO/yRJɵ?\Q]8FKilE5]`u)0+ŽR/ s45H UPra~#0yZS_/.>-"u:F;_)^&*3!<}|-H45/R K?*G7T!U6>IQ;,f}y(2'Q4qy{겪^umw]W}]u9'ss|juQeUZ2.'Z'Wtiį<~B,'mzdA&x|m%NvDy/=/6[9KΟ$F+eT*Z.6TeGOz (}Nf:#+E~KN=KiGh59z ף/y;r3 tG:x49+!vUC0]? Ht)M'+N_O2Jz]I;Ltx]??.v'Y9څ[L|=]n?r`\Cp**eIh}x˟(q/'T*zn.0js4\94mUĝ x#? #PzHz(:HnOsNv:^[(Gv9ړ띸jCV_9z9WA5ICZq<~=>{DuNrk|JrW2W\ulr6J\qC]?z=WaOåhK<Wqz ^Wqp;xX]7OJ'eZKe'Ⱥsf(Z.Ut49;GCPrzKb O3 U=waŠwOuvԥj=]e<_} y~6T:Mx:/zPxWw1xyz?W(JއV'pai+rx%_<:MTgt;[wNJh9اpHᠹ\"r4HҟwhN䇽'/s}d~%_JPqp{~~>I|#SZũ_$?+^^m\quh똫$ڢl| Q'7!2#zN7=7T_?^Ò }?~~`Y.)mG&=ƕx=ƪ{<'C>x>'_ǥ|< |ai:9%X8g IW@GF9g)E-]ALyV9Jçf%F`T":ca5Us *ĸi =Ec1%Axm،"𻁊P9|W޼bx $oWi$=Gv8,a1pU,[.P5TvBe/F,ʅV"fFG UA};`DBΐEu]@6կO9gg0!::鲡Gր,%NCWE8ynrNjo*2%ia@szE#DGQq-CTBV  /?bM,F~٢π 7G0x4Ա}z%YJB_XŠ ,|(节mؙ"𘍽7ltO-$k7 A V% A1| 2Çp hi"@j/Ӈ88Jk2 ?+& S/㸲E+Ԡt&4 P܆Ш2FNtP ע{3k&ZS> 8{Zv_lxP&p%KA2PR{i\(@~}PJ :+m픲}a D2Sˉ B #*1<^dJ&eIC/e#3TiJc;HTjEmÅRV-sd˖ː}< 4}fNSPoKp :ߖ8QOyF@h~j5 \őPH=.u*1cMt` m:ÿ\]76G 9Eʼn =}bggx(";ΙJ7~:+K԰!Wn(Ra8_?$gUNLTfɃ'Sb#8H&4׻,RX B^ˑ5' uc0ԕnF7 4tvC)Aެ\V%6uά  bb!J D6ŇRsGU+f':رXwmmYUlc#mAƢYP`jAAC{[AR3~|Bpw#7Xy#CA*HC"Hy7͂C<5j6yE} 1+nު[FG> `~ˆ( Og[:`}:Ơ5q- iTx@0d{K gg{댦oB/8'=?AiA Ƕ;qwjY-D4MWW4" G*[4vj0sQJxu-<)lfk#ER22]Y<v/XɆNވPUt|jt;\}f.EbS'fE56ub1X7d|/lt4_,sAd,hlq٘*L2bД uɍ4_a&'Dos &~r "!֧ B8±E.K !况Ņ=O#!ෝq1&`?nk 7 l W}LuE7!Ob~luHR9N]u B}=5Ap B@fE4o5 R"a0+Ĝ u#ԁuAw <~j~\9`*ȁã]oH5_rr:ڃUl܍A}Ǖo#,khˮ9xCK:"2@PF3w7f\/eڏF'Sn ɲN|>bSW@LqO0!1.iXE~?OVW}h|bN!%}>E<:iJFТr.AC)Dkao^̏QtAbMy>=ziHjLA6Xmb}3L3^$zL&}5!I^'4F2>/Ei᳛ c’~NA%PJuMQv*2ΈY=Y!*-,T\)yjafwaWrIWԪ|[ IN@ґ?6R,lUN>*R5LkL2l[ª7-ҕb}svH3A*!9?GsFt9Z/ EX٥?*BҕZMa;n:?SPQDe⯿aVe]MR>S+kq\ElL1"u&zGI$RJ'{(Fq18g97nڲ!kjOn<,FԜ>Qv< e|b.HuA8g JkRQ3<3?17ԙܙc|Fy6&U*b=eDnxgN..0᭲ cFX5I(E#p{^ =W8;ԷOlB љJraRs'4adU7ef8l}̷$|hb=ihR='YN 
O0*$1JJuwOx aV߅}XS ?Z=EC'J44h4:} S>sMjlNAP2]+Ht"\JK՛MPe֪YEF k}VR95 Nݡ&4Y/~N(T&cT5W.lӍ6Obkt=Ql!2ϫ /N]w5cJ=4v|:;UVg@#lZ'{Vƒs P>8Jw*EEZkquVa^V `.?bk6-qcD٣":տ<FKDLa] NJ!_ybn/.Pu,_>Y kp :۩_ewї&XdXzb՟t{*Im jC@!eşM>{S\B=Od&gIrcdK ,n蟔8-igdM\Jbmnՠf@S0`=ҙO&5̟1ѫ+E˾Xr2<g>ݠ# &OH%] Ś\:8v3ʑI0)^icK_|7^sS}'@9Ej[;Tf1R@cL{?ՄyN]ꇻ&LҘFb$n&FC{'΂:wLp9$GdK÷$oݻX_^R m~u/<3n67ȮjBvv>#uQ>Mcccepӽ}|'ync˗NyYX9YVܔtt"dz;Dz%~o.MUJO>YW^u9E>SE_9\k_7w0w{"_3i'M/͓yo29P2Lk*,9-hFyWbeڼ̌EޗޱG-j}}\F51E\ҫ5H,Y4,7'e*,_bU[LIp ChN6j =hoS#)pl+s ؔ 1AB#ce0DΌkoQ5K̎R0ur)NPXʭKW{~(;>Oo. :㋣1'Y(s6kh]r*Mn}u.7z3_5.S\^ muy.eBNxD}UUKԕY_rms#lɵ'Z]$iJBR,>I4=qm+`co/M?]0~s3R+@Y˴~/{Sq0R\S|LmM4yjKeϵ5zkl;;NH!yQTr!eU}2c}UW!Wzr+oi^^" ouyLt|Dƫ %KDj8_)C5OsSlf/j]auuQ蒗)fS 0'ne ifE[C̲](auJ!m6p]q4My^ڻ&o?ɘzγMͯIi;J$p:ZKYtp'&GSobm.LM_KU]МSѾ'\$LiְźܿGqw &X~.U;J.)KFcog"5l؎~99_9~]6N'9]SMC} V W%Ϗǹp|X4O0ON W&U\*oVV|; Jϝ;999997776 DV배K.Tgdy l/Zx& aB!B!B!B5c!B!B!B# 0c>c!B!B!B!Ff88U^1$Hc1cH 1chZ`th{{bPC5:jȓTF2w*+֫oWWVa rhpKKKKXicƥKKKm㶑[VVmԦSpMқ+/`,YYR(.?`29ĢmD⃪'TQ<ڣܢ}A 0)p)ST`ѧ@=*q54~6|}C9bʒ{lx]h6N\RYrPf>yc6ֲ*tMXU[Oe(\ 3w߫m'nöj>t+^}EkLծwM~+ηRKVjfA8 )f珇-Iϋ~6ڝF٭Vf}!}‡-se2B:){n?lƲ0<zVdu> "Hb8c *,N$+#ZkpOK$G^ i];ËS9#L2 Xp|Z_ni >/(~rtJ@pO>ћȮ.)S0̩rOQLy[.~q)1Rvf_0VN-Nش kraa3'MD=fvr@S1@QA\HT+~x,v*v(EɖT^Rcc4zo^ᮦp3^#ZԢ5:gǍmߟ Ԗ#G#X.σ/4O ,]"3|nƒE)`!$? 
RĴ0XܸΖqFϋN9+^vZu y\(W!d1rM|DΥϷ.5S*D?CP8twr{R+P{,b[4wO<<35]zIZ_ˌUzrK%{eZfxyKSa(+PC/Nn'DZvыm[5l@j8K^ʲO7 o]:Tҟ~˲娤%ljK2~8Rn?NB>9my>}A<* t{y+/O®qv/_v5_#ݸ(%!Omu&s*A0:DZ(^=c>jɫ͘δY3pxylaO%g$t}aW.[፰EB*=w6m,fY7/0Y8c2 lxޣqN ]tiWop=5QIFaÜ8}:`/uG4!n<5 #CQEI]`ziUNsKrmv6՛`Y wP5NbyNY\GA_II.d0QH{>dC:[Y/biǻ#;JgPD$X βRQս#zx݂!mbÑrgnLW= BB%c/OYyu( F5"A#h4bFA3 vMgb8ݕȉ0k'AxmZݮ8k$3VJ~YdJuI// cd)C,26Nay=\w j 9]c>TdRТyهBD: %`‹Vgf;Ͼt,-{ca-JFɝ um': ;slS ]u,xx<.p8a%fs$3B rwh15oP_de:;Ѷ/9ØT|ob*Gm;vxI5k4AkwaClwRtOzyV]ԉG=N?u!rLo6 룼TsL1ˇi@PI~>%q4 a`(+T?H lT!lç N$͛hEDaT_{af`_\i:aY}YxywvKܮϧ'qF: 36 d-Tgc3<\s_;Pyn疭CvK_G"F4B/zZt*iNu'Ո}5[)Pv(Ք7$آ6jv Z6׫/\k&>%Ouú^i#T/jG{\ţ"7a@%&8rhVo7XeS|QxV37: R Q7 H V|[ji~ V 72kmQj Wm?p3fZ֯i{gqu [p"34k\z9mlRG^yxekk 06yY61x)UF1%~l6qA[uYAsB|pIVlw;׹K{^3uX렘0LbgUpmB*+zm}W WAFcWkپQUfËckei~U+EXaZjfr~GU_ j7:㨱x*f7N-s w{~+Kރ[ugNˎLߙL{2 <^ 'P뼸yZ8-}BQwʎ"`x*B.7Du=?c_+M:&sg4//>Mõ<=i*TS#3Sj*,k랔&޻+,1r YkWFrl/v̓m.,euYk(ܞ|@^uwM<:Qt/ AVG蒪TfͰ}By4hsM?[s<7C%yY/Lg~CA~qOEW |/lL"<-LW^tExȖd &\u}Ezyj^\;`MChIylxY/aRI;_ߠ{#;+Kf5YHLyHaiM͢wVs[DdTã5OqأmcgM{}LL"s]~D1;Akn ^AA@~<vV pm1Qkw;L4dR4F\RQw%c7F$B!Btoi{6 E#zzj?cd=ܴ}dX&>G{?GǓ޴;Xo&;{5`^QVPm%HntkJI_yU`X@pg<#P~G4c?[ˉ^\Z>zWcLOi^jŶ雭Z7 H4AVSQ {M>SZ$1 =nG) =\o'!G_yTyQ؟<pz2-,+mSsv8u{!]:SGst3WEt{Sǧ-kMNղ<W ;^V-XFUW}lV8KO]L f~#Dܚ"9k=ޓ:%ڎ ="xFVч9vjbs^ģNڋ9$YBsY0<"^h\HŰHN0?5)~EKA.3^g)}' 1~&2 CAʡd ,+xcL[k!<]K/2issw><85[G`}.7@ѿѠ=t^%{gjt7OƋPPG"*wOjK7{Ťڽ4lqf|_1 gs#h3Zqh>ِIH3Lfc#$x{Y1YX3i3P :O{ &AKɰ|q<>HdGT%^!NPi(xI΍_OE*wȚbA9#c}(=rR<9~V`tMcl1rXƤP?7=HgV9}!4|9#6H-GNulMWh'H7۴}f_!L:Y<ֽW| M15&'K ١^B%KYb\Klm46yM?m;R#(,2 r*05o޽.nʿuw&_P l[Fu]@˻A{ 셋EeZY.{p:;:z3cP%>ݢBw هf_FJzj{ (iO3j^,qp˘ ϸG>w`3l >`OϙDeU/BV^0`Dh?3)!;_;EƬ%k6.90vʨ¥1n*߾Jr QܦV;Kp57y_deg+y1!sF/u1k\::%(}BMjz_EٖEKiuW ~LYaYr%G4#ѷvf-163,"jtvU)1!9Ӈ B->Nibgآ'YҿơYB^e@|ć@ 1^zCCCkc-8gdHơ3G._\Y0:3h|Q+b3rޟ3~m\Cc^L,f˕_ES4 ժv_F3#`p=Ppp 81MtXL%v vFu{Oq8iܛ;? 
lYIcI5[h&E4_+wmQ)yEpw(cDB -m+6O䙇ʯ񾴌Z{`i{7̮Bm0~w􋣘x5"N:o.&E.J(8<'ArX8KH!6[+,sz65yd/-ur(5fzYt~jMon "6RGDoT:((3cɗ )<qbGr$G"ʌ7\ԌE`vY}%ɣAW%{3:^5.Adă+5*75P!@9mkol,;H<VO]U‘y]{ k3GcSP8^ܕc* qZQQŭK u&*=L~QAbUt9Yb՛cY ;x~ĥ iy}Zm2 v$/7e5kO`!sF8. 8'I'w h &ED܄- "VFf"#D#xGT W$6P$2.FWr铘=>`J'W)PlbؤAWhN%ߜeN Ův“\\2By?2f&{#jAr}\Gelm׊ax֌'$FUۈ -~α$脸KQo/y){j! I9olr3#C%y$ݑQ2vK'qBZY\:bO<ĵ$Ұ*+izH.p|~<[UϩM`Wz15,Z6_ ZNG)SZMZʨD GXԉ ߑKYV8d*d֪T7U<3ą9|HϚfJ9iߝs'5//yV>mؿaO[*|S&M,Y0p!{$EK߿F{OOB3.J+Q, u9T9-u'><C_4ju/?OcR>d:+s5(pb#{ۍuU* ytf>.f3ꚢbtzx{=;5:YPlH)0Q4b{<\*+|;ٷ?K& _%]I5^ gR̖`,"\w ꉆ/=08eKF&sPJLߦAIQ &Narf{]l嫭'e5+ G_X|=AVuԘ#yEoՈn_C6C1 rջV/*rXv}ShTg:mY_޿Mh9%ԎQ|l;!RO|p~;An>ytY LGȠRu3Ӣ˹g ~p >]e?);3Ef";l?`z"yP"۪b.%!xHԐ3Tebљ}Qj:"]b3j K~_WUo**JH 8woW닷 vEq m'L+&7d>D>/W ώoRR~pK61Y8.̓P?Kv=`*1*B#qkiPkd{.6CV Ꞹǽw1o eO:h;]@@(f:HsOT8 J$*X:Br1I ~[K!B_ڷZ>{Y Ymh.6z35NNvI j}$9 1Ϋ ='>4@wp!֍,~ȗF#RpVWY꫆N7*)aBjO2ʹ5AI 1s"S9*ڸa$?贪 l{;C W  oe)9QnɬHize9 c1$@f3at7tFAԐ5m5G@֗ӧ*v= zlu-rԳiT9 Ǚn~1 M\iAvFY6Gӿ∖43kۻRU- (&vZ"X[U#:*34w&Bl\&~pk!ͪ&,8 K*xJ%q((SxT@1KA9+c;LraYliۙt[Q&Q6TZh"\,^{)eb=ƹ?ɚhwQD>}1Tþy_i?eX?C_c^C=(@wu~e e!uß^# {;J4'}bfnn;Vá\ dZŅJ4:SW?VݳNݙߓ\DTDBmckpReLEejeI s?s+[ò>'б0;jXZ(Պ{0. :x{UA"G] SvWE]jG;rS#Y, ?+i$_L3?**3džPz. u( Hk_Gh.#scG}@шBqx^1lCm3Yul1g f0Z3S$5+Sƴbebӛ h!OרlS\r-y|&0Oa`@RJ+x?fw *V]tB a{XU.dr È}򀩥wTJ+@ ?P`l;r緭+I(]qI^Vs:Ը0BzFXfud6iDQo)atNx&;PN`B]:Ƨd$x0\i%">|#)셎_#G P:TCG>.[OIQ9';!t[׮Bb],E#ԙfX/0*g߰+tnDzݜT w;[P,8_i|0GiִЦ\x_i`-,j|:n5e5]sQ! 
EJI*RQJT J%HT@JP%T)%EH( UQE)!JT*IB"FF44h2@hbh24 ɐ@hh@&  CC@hM M@2  hd@dhhMi4C&A@M h4  i 414hd TiM&hbadjfS&= M؉(JzaM=zjhڀ=䆛Q=FMF"m &FңI a4i?P ѦI6ҞiM=MҚ=M4ڟ4ڞFjyMK;'J>Lr0:<8b*zxo #9GvI/i]FJ%7=*s0)W?(buNV~w/EHؾFWkڸ*= >C0 ﴢ$]zO jW{z1>'O+CԮ!QeO!=J*s KWu/[NR9z.&TmĮPt7*WmN6R 򢯗\ ^Pܪ]&RBd6 N>B++Au.GRF:LT!=f;_q(>N*ku\ 4wK>3^3QujgPu/uv"lTs)Cď2?^Atz'Чj{.{/㍡eSc㛥^w^?sN]#"}: }i^.It2W;Li_OJF4<\DmQɊkOGjqp[ɵ|{9VAjKqqW3'{>N#dq8dH.`<;d|t5']vx6%=!7WeQ"T0rtӰPa(8={4;^' RiU|Luev:\fK~&vvxWC į?$>n OzZt'QsT$y8f(pp']dCf!9/ԫr|M4wsASM\aê^@P4L;ܨ-hޢxݏtW9TJN;KWY/;ܨN'uA| H^$h^OUvw%tU`.H89SԮP>& %Uz tNJ<'Cx:<=H鲋'cM_]w]64Uy'ʎVjRL nyu^8ׇO*늸~ߒK'ȥjoC`=VҼ n?H*w91W:czmGsj쿇sl]pW KbR8<|~#_o ^."8KԴCȠz/?UʽN+ʞn#x Tmiv B/?D:LPQz<"=f?r=)ԋ\sp'&e#ĪuWv?+*</>Gz7U\MExxW_D;)wyDf&mT||Ub&xEԅQsr;^gW8.y%>!s"0ڒ(:+ K‡qtjy=JCx|Aa;T4rjxG?EN.^t{ =^KdVkMW}t98; yxG?8\b. WKʧ3 wZ%O7j]AtI9ڥCu)q? /G r.Lu|(]FW'!z)t0{(U4gj6^V@ڣKm%yNQ=^ | zz1)j['Syͱ.*OR4S~,ϩm|'/ү]:\*.{m]2o|]\.P-ʇY/U]zG.qT}*GyQȻLe*mI~V{辶ȹ#_R'o;= iU:Z\-++<|#:}Ev%Ƕ:}*U?sԥt \['3T~D 4•ux_o?￁vq|5P`l%iOH6?'?SӓI9O*:zW/ ]i.[|+Sb^>v{MK;=$dKu(v].'/I2V4:vZS6Yfcc±ةjƒ2G¥x"rpK)spIq}|tcxE;~+`u9%v۟~VZNvx:W.A~~rثWHįb: ~W{:KGGAdйJvJt2NN뜮'_^Tv"m]^nRuzbG*z̸duZC`tp\z|uZWNV?gW7s<@O#8תG?Tr4m"9OKvG7I;.T]g&ut_tp<R?]Kbn :Dzplywy/zIdW'R:wNn/ƒT< >_N"v̝?ߤ$hEh'8_3)w:#D{Oh.ۨ=x>TʧQrGeJ_xW3Ei6Yl'y+:;oet5/~/i]ew/7\m(U_ iW˨/~r@?w|Oqy*Cj'u.T/Dq]v]O+@/3S^Z9Lvwwlju+:6f>/7wZ/O Nv.R^~-p:eb+8^5B/KX?˿[*{R|NO#*/OS_^!bA9IyZ2?s|!uW/UuSOy:/ yInOy9RrB;ܸԿcSiN(ܾ#>f)x8? 
0 WIzTqqjC1>}U`zYx}Ρ^&q\1uxK d>I;O^ >V} _C8)i:\I.(N\I>~=F$t1" s:wd'K'7 6NVEc+$iG;]NUjvWW.Vڠ6_<<үwNCgWē1TIQGDt{?{UwsA)J}ޔY6 9ZsryW2^%uzu8}m}= r)eUdL|'jR8\E{Q:lJv_ |'&ȸ+'⏝8/p\2Ԋ4s?[fE}B|IПKxQ?~v?H2)5:%W/'*ஏ*oq#G]֜ ~^|%'\T.z_s1D2#1O{xoz麞I p{NOKH\"o|rW//DNEqtx4Ks[5tv^ng}7%Qwtģ}磉\jsyԶW7 E`-yWĥhγTV0+W7%ʏ/ma.R.*q6_Cd=W$m.(Hw{jh3+|U]# x:*|erP{ STrdAҗ+^W| tY\mP>ԏ䎟R:8ĽT?'{C%}BuXv;S1+xOh'Ib]^P'ID~v.ٲfCa\S0EtczZӝS.[-s஋E:-+RKU8Dqu../CpT^Jrd4y9y)WK{{VcB5xf~Or\ܥ>7XNAYKns4Zn|Exڕ6_?~><'J wUWp(W* ^>Ľ*=y{ ~:Q^ )NB;8"ZBw/E\zzIOqty?:.{}Qaн~Rr7Ay%9w~n/(uu[9[)^gSzᶼzađ{6gINNj8uTqSr\rh`:Mr/D66SC[BIQ90\.Vɐ9yuvIY~\{?jm%ɹ/G "iW+C94'+ejS_w?gj3UGuj'9MTmgT]#}?/_>Nr,#ixRؗ];m@*91.5C~τ>NyXOr>)GwpQr`;r*wqS19trp <=%4Ki8mtYM%BbLil`H */L (!BUQJ)T*@UPU)TP("QURRTBD(PJ(J PEP(U( Qh )U(P PT"JR5JR  hU4CA@4  22Q5OPSOSOF UPU&U3jO%6=)zOOT4ڌ6ԃ&h 44Mi&J hbbbd1&&OI=jObH2C)@h4 @h%R4ѠLFCI 2m A14xтe=GR{S$SyMjh!4='~!B `Z3'Z8pÇ8荀;0~D$$$$$$$$$$$$$$$$%#8x32 xӘ:zǏΩ_stʼns#)G Jz܂rhW\)uCX%)ʵP7 pv2؎2~cOSzn'Gk]M*b/^^ W6xBtuTw4!:<}]K½#j'ң⺘<(4Tdzxک{ړWg+T'Hz=&oEocb亖rdN|]qaŁ^ƨ{W.. Uu2'WW_GS\W&K_oCo !j\U|WkHqN,WgUd]yZsP+ =:ṕlIŅҧC*^ h? 
.L^ 4M\d;:+|*;:.֠WVsokd'Uqe SU9X/wKvG[)yN]MKK=}+7N.xZ\Ww]qST5Up׃#u3iśÆ=%w|'#S-AJsrz/ ]Rqhw2'kpGSOwOG jz|BV쓿2)O༭e];zG<;Z})]Oк:u5ӿb}6}Svt;2._WU<'wGgRܕzDžUy:twr</3%v,.f7Rw">عy$= IuqOzX+w4U_RNv ž~y<(atDq>U9wt5ɔKˣ.N~:8-'ӵu] K2|b>sMx2 %uu=]K(^ד݌{9SŢ;A<./OJ >H6w1t/' {x"xEҗK$k/Q9}]K\){yǵ/+1rk9x4|.:l\0t[Ki<,򹜒v1N%#ӷ!ɲG*Η]S>x?'kRrK}lWUZR_y\*]/*=]w4Nt&+eeNj'pByZr'C#{!=WoK]S}\&<91>[}-NLO|H`o6}}K|]e#t>^+:K%jE>OGR^vw; Xu;:`0wp+8<.ppzY> ^FE}M+g C.g==MAwrV?s֥ŏҹrcGOO_7o=WLt-O/R;BW<=EWQ:^vT< 8JWpW}/Rvy R} 'qtXzI^gM\ؼؗ&}M}TCUIW3?c>"WԾFKpy:'p=|O0Nv,Kp~Sp6 5Os qGF<=)=;^Y?+Bf_A+_' E+*4x0_*Ҟ S:'C\ |)ǡ\Ūpi8pCҝC(#// tӫxu'SMs'&l|L;[ҟWOH>k&$m*zxO/:G{F|My{3g)?m*zrhl/9ݞxG/pp|\ ǃG6̇N C^Nއoz.vW ]J|]/Ct?SUC!x2dGԻد}@VOltn|ISX^K},rz<(d^~>rhu\q=WcTtuQ>VCxZZuU[g(~]iq/ʊOMi'SKҟw28b>n-xm_#pp^fSjJ;ܜu=[DpԟO[Q>=$O+U{]/b_s({!]<¿ *=\*¾⋹&?q6=WG4+vU:8?G |{O_/GbWjᴵW"+%7jҿsU꧳Ngc>Ny8Wi_[ F| ̥ )|Gy:w5]T4[*o ;`[IpN/;&RMU;oRqbwt6'~GCɰJ%ƫgmhڋ]}O34W%?[^VyOؕʾ_W@F$t_hWa.+_wp;?H/>V K1</S_D-~E|K+Ce_&Gzy/{ 8>upMGSHO/ç8W|4y_{''.,S5 j>t(-_tȻ%bkB;:?g#d3|''{7 W w{q~H} !dWW*,إDx8?[$y^KםrFʞ %h^ R_'O"asSa\'sH~_=~^^b:?%o<7uii^>dOyj.y_C/zGG}*WfS^NRT~I.u2G~fSbk$>hxz}㓅^0𣩡;hU_mw:?U4^V5KO_#xXw_e],W cW'zz66ҾUu2*||(;Z#hwsƕsʾ>{Z}Nޥʝ:NfJ<[=c |x/ʝEzId|-GoWey/Eez}j<]GW(˪s+ =ȿizz%z_O>>)wUꗹuq<=*0.~>V j~OҪ7/?/*Y~>'֠r>%yw1O.!t7 j~F~??~pi+ZҟlƣI]zpI}@WBqX~)]\+#*xi.~p\EɓpŵG_3@wu90v2?N"#Ҷr #92aNH}[ r}x?Cg;q=T'n/GO:^;GWuvwON-?җԮ]|TxT|{GG/Z|U>$dOmS~6pGC=^C?Os9UpSn:CWԮ /[E_+(O\ Y~'J=MH˪uMƒW{T}ʓ t2|-_g%~Fڨ MM^檝v t{Kj)eAZN-N2}4x+m/NsOdz_iYSF:TtSeOôl.O:)Ζ^o"x1t?3+ux/̋d}x/?U^z__I;8W<:*.~(<½9u'. 
t|D=|+E¯_}tO/$j>{;:u_?TY'.?dGQ94:ZK/пe9_A=aNH] ڡ9vu+ tt9A/{C"0;v5:YEpĞ><]ҫWsHw5m!zEǒKq.ή^^oÓTEд+N~N?#E:V?ҎGQS/'wOOG_#(}wW`9ڿ>?K$xySyߟx_; 4oC‡x|wUs9%ou.C~N| 'ur⽝[Tݮ/O2[ʼ<~/wW:8?H->[W盏;Vϙ/KzJmNNz^6?ڛ]Y C*~/o::yW`\O"tO+o7C>֪}?C:8xU1K] lOuemBpps{?<3W|*CŰ/';ZsT=*g[Y<)\Rp| 948yRsIɩwl)q+)~V5'?'!~v-5 QKy 8-Ri%֑{ܤ[9 ri:X_.vEuRؗ9'#*q} 9؜sEɡłq~UǨ_-u7O+T`qU\rh.,T?3JQSP]-P:l/+W^G_tMzZ7i|y9>~<8'[t|9s\Bvus@w1S+h_WW_ v'>vc5UkWKIm-\WTWEJ}WoK/o['^ ~;]?|?SG*l}um)`t1iY,?K86cNp_2f?'~ h ajH D m8X?';6s&_jEx >j Xi$8 h27D_\AZb%rR\ 3S\'E8Nh[ޮ[N3;efJVDZ|nD&K)-:5yD+bj(AY)8=wKp/>'"\F})wyKq8{~^&Ie11='z\qOO."8|8=^2/|迮n5{86N1<ǎH*t!ɎHJB& JPA>IP$T* ) $,12cGPbI%p\XP$IHHYS5~ ۖBc&A2ZZ ]E1>Y+o8C]B$j!qŘ\SE`Esϭ+,(& -{y%@ d P76ᐦlMc/hLd 0_ ``٬l#"p" ¥JgkllFXѤb i2u{% 'aao*R9 e]]:sI͔,vOXI .nmhI aE'D^& $aRʰðB /)ܻ |FώX*XT8J&Yf2WdDhZZ:&!2&DrQ07D$~g@dq&'}q.\W.!˜nnn:/>nn.R3[{omnn.\OyܺGۈr9\_e_e9yw.ۊw3'}W8iwnC{Oi08BҙL8"eqqI[ >G\w.{Oi9r?)OqS^qKssssr>\q˹]/ =Qssr9r_?A9nn'/"{n799rr>\n7.s9Fqs'%..<3G{omN777.]J)q9q܇x?)i?nefTWwgFF|dFtdg$o nfi%(P ( )*%?]buFS8TPCrG3sHLB9K춄?"Y]˾Bw`3B}YB=!tĎ1@!>&![ IL9pỞJHr8$6$^W9l+%]0IEM# i*;H< oPd==n93ɴݻ G|&#|rhw` ]d4!JD:zA&.(tTwHbT"m*$Hvɴ㔥! <0As(8.]x暰:țev!L5M08%.[0BP&]:-!vmX*lBoBpx r:diL8` Ms ^CH c+(L9P'2)Tɥ/ +Du0QwJ C9OII{a< #_Xx.4`7aP'Ih(DCl B n&, zτ9JxI@xoph Ѿ3`x:ed'Y1ġ@R^I߆,:ǔg kЀ6rHyBB98\"<*Xw}H8@<;;08 },9@C߀I/JJy+Ct9Y< sk b2僖 t9|.[lixICi55+C@(|dN~hsokl j0o mJr#Hx|rNP*;YC&rN8]JC< !uHBc,1&m eH /"gI|`(YmkӦ H;d. J'C')N$l h[e3`RBnt}Ӝ!)HW'opm +B #/tI.5%*tœdp?H Щ6&U܇dx4 xʛE@+_wHHt4B ~߯,?>{zG[A4!(Ig& ;T 2N 4,zB'd'l>I\Kݖy~lMxˤl)_" XQ<$Oo`D"@p;3`t{xC`wút\MYhC(r{$NPC`])@g*|Љo@wP7rf;+x(4 N. M`tBH:|ryq˔r !ۅ 7R9)C 8 #) 9H)_݌E&ʧ|w^ucr(rǖ7a<(#%—(~yl>|!4T=`LΠ?R}iR/!_s} $ˮ CWs= >XPP~!:Bt(3*Hh@kmZ xFb `('[81@a;rkwρOL}KDoCI"S}{~ ku^H&z 2?[]G=q)&]%rG k:!}\2R(Sۄ4lyw$<3E+Fv|ʚ`:JMҺG!>"qABՕy/>',~~He#ByHSP㇭)@YT6JLhP7'y@arOtAP);ICr>^^f)>GdzNWBuY 8`;25> X۸@&E{2(^u`+ dmPi;^O(,oBrB>{An?!ՔvۮsϷJã4=d5RJkIO4}:{}nOvٰ$@:M<fFݑtɓ&L2d܀a jFFffddffff:tW*:&L2dBtӧN:tӡB!B! U$Gq)H@`<<$T w}-Q$ X(x }7⚘"( (yx/=|qZ.G!vz6 %#A. 
((P 8=g$/qc-V2-$ yA#N:#F  B " " ȍ< 080`?UecAAAAAAAB W("#9sS w DK[[J %-nPu{i%O! g4f#ĠE>T(NڠAoS* #q?%_O!+릓 jdP*AVE)%%=>Ig|v~osÝ 5 nzqA7~vo9NNgCcϤDBS'RԈ*Fujիc_> >mr3g# )R.m$Ha(g&M4֓M 'ϟ> ,i6|‘H`Ma/ +bT@I$H#F5kZֵm4T8P@۪* li FFFFih@$HATȰ`1c1 L,q * 4ѡr  !/KRRRRRRRS.p ]%spatstat/data/cells.rda0000644000176000001440000000107612252324035014652 0ustar ripleyusers r0b```b`b&f H020piԜbfaJ"΢ļT lp dF34/M$V 5assS:řy9EP>[ANiQbǕ[SYZb\QʂTME`*y0@0G%<3b /XA w-ki``_sW$KWqڿos]x@0%{oYϧ6շ_J&8v_sJ?oՉE߅ҏ%w`Ca}yN4PPߣT== z8 3yOPLaʇ&n Ȟsuhq)ԾD8 pd? 5Խ7{u,aX8/a㣋?,_KWv%T5>XʿŚ@R\?SuAnۑ6(H(RZ=nI. mLՓ6ݮ6-Ċ!shv0i|0OtveysfӸQHT9FN2e;[tq77n\7z7 #bJJNl쉢; j7Qjg^|u۾/c޽:'Fcѓ ΪFR3.`A̙f1VL>aM Җۆm- 08(]4*P:ąTdS_ȴɄ0hK~#,π }v@ Zh%c-P[zQQ0БSL`#@yM?GbY0D AIΆ i0{5]FT~T5Tv)EkΆeJj\6k+—-1ToM4S:":2b& :TMХF!Q-9lH"Q8dcaD6: hŖCj-i;σ UY5Xq|mo6':/=rnOӉBo"oٙͶ,A2rkY$2dVV1cj5UUovGWm}| H{Ev &"#~!MS*eH4\,"YpIJ(3|3UV] {)ͺ@vҡi"EhyĊi`N'@TLP=ߛ=?vVBXlj<p],s=/ tO]9, .*[kɺjrǃo dd2:'#\%n'0lYUS lN GN&gka]Dn2tgQs$Y2.ܶ]SIꨀtmi 9\(:*AugjS9mٞrBn;9tm !=$z-&&LqJSk65pGP< P=%M=L֘i&r$2D;Dd&Y6bE1!dQjLJͱ&cr f{FdmNأ-OZB]eBɇi糴 Q6İp]ge]E˜5\+“m({]![tl\Nrӳ,  h<9"Nj^L5ܔdB*!QsݮDbs~wGqu'69?q|]?uWwƽeӮyxQ;|󇝸n]sO䍱[9;;E٥_/VZIIp%i|("r9Ub5i5J&BMj{ p% HYhok.c5oU3Y9T-FȓggXtK *W= ǃ'83th_?GWDg^ &@On`#L&.RVYv3I$$sЗSpoVbbzHm7TDҪ`( *djJ Tfb[- :t-ڙ!E4)R( 2ICIQKq[V5vɶb[y£vmclsmk|}yn7*kh2Κ i*B s"q{$ 08#<"{/Nt%td<* v9nō lÓ\O{,ݶVG4cMUyyQxU^9TGyx;;xxSAXm l@M&$)G[ Ģ!kk{b/;(|Ry_ Xhr}svѺ;bi瞴Q93n|bԲqWe CH a(C5T`b のy5p4pxMKo*FFf\oUUF$Nq%o5M(uӉ7s=h -ڭn[mYʫhf}x1 ~%,eUpVQ\M0Q, 5J7"IwC;,B# `B@M!; fȱ߱D;_"sP C h:0;33ePR2STT*.ȫE:HI%4(fjRT'ESUm U6 F[H\-R2%$aU{ qh^P\:z:mnk^8+qZ屮u/:mme×c>&[:ָ^sg[qﺶr:՜Ϭ_;ni]v=Һ;]v9aG;k;0!j%:bɋldl%_bj Xm i0xDy9Q9EUN9USUT8^^NE`\. o"Gp@;B.[uB8nإaBeZs:nmlqmj垺9[&=zMjRЕj5p^Rk}ejiiT=Ӡ[4J”h1mke֠=stG*]<uc?|+x zl{J*'dr̳ , g{95,nt[w/!\ </ A9< 8@%Y } bOz0uH5<  lg}瑩/2h\w:+B!m6?`g\Ot7,g8B_t~0C`'ppg pe +PݴayҩH1w_"? 
G0AR&@*`z6xR=}:d$_lǁCn` .ngO~>K} s-xVo%s{k'_- Fϐ ~UwXV/PjJ>XժNZ5̖6!^:`Ѣrlh)澒a)5iI=*׬[?LG떦H]>$('FF"!F,7Q}AWMY5io >BB47ÛrVOjuzm %~X=y:;Ş(ę(xu5ƑnslzRpyϮy03Mh2q P4xx<9ŽHmtkӟY kÚ{gh5-mqVf<;/& EWujִZhW4s+<:;ghDyDq$<pfҙo @#Woe#EzZ0{`yŇ-¾]|WnzɦbfW+(A`yS'YgEhN8")$N|=$cN#!7NFKFvY+)lo?"Svhά>bu^b싐VDUNc<fe!V>1v+)LZjYbcޅIhtƈ 6QAte,|Hn{2'#hVOG$#LG#4<8:yer$KXa&XAB3`>؃prTkZt+l<+^~Z7R/ezXlrE%ÝoƍOdИgIWxV@{Qr `cȩ=j~,\,NfM?aZ9¼8qBoWnhBݛv6RYSy iYj&_)(ѵ*3 !ZpFrU$T_͚>HWrXnA;UMLMw9;w~n^uFFsS7~O2v8Rp 1ß!6ގEp> NǨL|_"((9v,wYDըGUpzyr1Oٰ̬kPrnIT*db97r3?џ#01NŌ=05*:1R*/F&;{c[֓o2/k6{yeLx'85V4{><>sa`nҭބO0XԝF|`jsv 3 wK#^`8uwf96\nB#H58фO F/; g) +tV(q =#I'޻1&_= a\xCOЙhcvn,=aƒ\KbV\5UЛkb_^l$5,Vۖ v7ot>%. L #JFAǦD_`^0 nj@q|7 FZJnn;xI2 Fo}]oį!<`l[![(01}ADFE_f>SC_ȼ8)89BS'IT9%߁~ȏIt)77u` a<d+ ~dGq~^׾6_}qa*<f^jAbO1<"/u_ޜ'9E9_6#BE c~_t)3uI`ۻ36 nvq7> R? ڗ;wj}:޾`Q |/o,69>*g!_lf}^ߢXط Y'9ۺ됧Y?S:<䧊yg-|QWMԆhyqw\gf?}7qxh< rWJ~\Z~=o3MmJƏ[;{c+VDO;rm۪Nh\c71ֳvKOk);rKg\e@u./ g0o:42o9*pM49n̏[t}wzʻjߦniѲۏ5Gl3W|lgvʞV?_+F ?t'# }^.;ɑcž ;EgIMP|_=~7qKiO⿾7I_{g#M k3JNГ%y+@7?K,[=%y?lG+a?>ǽj xQss#q4츜.\t9o]pwv?ǎ]Bo ?juߥAY'*j|_Z*W?>FEcm\St~ +=&^kZt>s+ē_ZYXE_kO+. 3~7?E0'$k/OZ}_Eޚ~c?g_EO+G{^w. KuW"e꼌~%zɯ^򴗼s[0yf?{Ƀ)f^1t_1?}C&?];w:"q䩓!Ӽ/b}-Ϻ#pnh6zlFAꤸ1z^kz>GÎw{گ ?K }#pоs$=k)~~O_nݨ._(c$w[M˯M?  _~WzsFT Vg'W%zW΁邝c]5̯~I{}N@wg8C{C[OՌ7[ٿ yC սȽȼ/QOPBp 60xy;P֧ypJ l^kODo~p7}^v_.T2C] F"*%*e+/JC)]JKԕMUȻ%XZK/KG/A]C?tgpԏ:;28yGg`OǣNV?ɳ2yQ/y]$;K譜 bb@-ߥ_z,bOv#d>T P%g{:qm-}cuRЎaQi]_Lkg6I)G\ΟR^@w\*r׊jW34Gs\rޙI^跅Qǹ8d? O>"e>~龱ڬGu*E r{o%oCğ2~yRBz,w{H0yY&'ˬ[+cb~Qa* J=u^9*mOw8W%ϫ˫xZrU;C_z"ӆGg^8`O<#{Tp;Cc?`f:x̡/ʼ(S}!qewk1d`iN/Gm^+@o$'c\Pߣ \F6<]w n.o?m6gof~+yjgǜ:4^1wAE>cҧG?hMsV@ q;j}V@ŤXĭy0㚯ݬCOHyun3Fɹ s~߽=A>x=qr~ :L?S{!]ďNC`(~9O8K0/μCc`6O. 
S7b.TOt?15X 3Q q+~1QgzA~DXr1Mƿ$t|<>tao[ n߸ƅ?i0C#i;.d=e^l_y PuE>J~~.9Wp>ߏi, oğq+rKyWaO'z /lWc_ѳAA2pp_o }- ~Њ{}--~{mkegz=_W1~:aL<ļ-z[o OX=fp t9ѳ &-؛ƼvnC|j^>+K;;W<܈]oÎC~F]={gIr,?d7y.(0/WyZ;ףrW|~{y??5`€SM(?n/` ~C/q v]]DoQG?G>#g3|F>#]ec^,eQp:wcҮ˧]GZ`3N%J|P ۙx΅?SoGspatstat/data/chorley.rda0000644000176000001440000001453112252324036015216 0ustar ripleyusersBZh91AY&SY [Po}o%~|)"A@J(I 4b@id 4@2 @ 24 a4&@hAF &4d@h 1 @42z&LCC@ $4&Pi40@ CM i'h4iOSLA25Oԑiv5rz;\ѽţɹ[^FLjVF5s|^ ttc֯MMZoFGZ1Z:4ovwhF8<յձ>R~MfF8{},n{:'OGGwkf9;<>7?vz>uoorc} +Z7Ž xjFwqqc7wz<:O{oF..燓Ս<zuHղO~o[XrO:|[F6l<:1, 2>Owx32^MۜX!cu\\s0.q!ȋ%$qdIRyS VSM Դ5ZĊVMb M󺶞^6I'ђy.Ǻyd]*m[:lu*!U~1)9*^K2-FV1,bK&$2*12,c*RdUUbĕccUV1XUcc"1UdbX1+IUIc&*c+XFFF1XIbdcXUTU2EcdbȱXVc+,c&F$*&E1UT2*#I,UUV1cbV$+ŌbK"Rc&,XH2,dbXdbJbdU1RTV$ȫV+#b#c,c#db+0FF$+VF$Ȭc+bY&*Ɉ,&I UV22,IUFRdXV1qf1UY%EVEVEVEV,V01*Rdbc**LUUV,UUbXX*cUI1Uc+UU1b*2$$1cXcF1cc#bdŊőbF1UU%Xb1"$UbVEc"2,$XUU1UȫV1XV2EXYȬbIIUdV1Y2,V+&1&F,V2,b%ɋ"bTVFFHcŌY2*Jȫ1TX*K$Yc}mI&Mbn%!TUe#3b5,Xޒ,b++*RXX&XI2IvKu`\Pq R0M Hҍ]mR6j+?>O<~8R[ʹi&*Wi64ul-jZ#EZI5KSFS\VXF-G}aDZ=髓FmmunmslqonsqV?Zэ:64vyiz6&6չ 4's'ؓj[&^ϻW8oc'KG>GVmqOgËG'h7gGU|ZWŽ>7{<8=4kݝ\5ttny14vj{NoɫscűW7Wű\8rxmpW'cdnvz9=X{|GWkW\CƷ\ ̋ 2,YVXUՊz|!oq_-_(}5ߞh?z3G6 &]C.c-XXcnRj4dV+#<բdbLmh4D²9Ɍƌj1#Y4j*XŋƊƭc21&5c4TKbTbUY*1dbhѢM1FFFF*UV1YXc$h+* :ɫEVՉ1bŋ#bhѣF$c_{#dX0b,1U%XbIXŌmj-,\-[\G`cqnc&jի{d#V$Ս#DJUUX*hi>ccVHbJ U1bLdcIXX,hdb1dcEdƌh4IVFIHĘȰdTՒ=d4jĕbX&#HdV*1X%hŋ"h1dbU[,[,EheU&"U+Œ1[$V,dQQRXUXU%c"Jy7Ib2+dTU' ra&ɵUjK%|h&֫v`:MX8WdwFv >8:\]e]}Te~>'8rA-FYV,X] ;'>T?vNy&ke,,)y_ƒ`^[ *=㞠ȟDzў/G8׻fLJkc7qztz1w6fm\[ڴunr|cG1 |\]]nccFmy5o}o76͹ūE|Uy<skWgvV%r|=]4orn(t\nfnNA~^úiwo#y<;=sZ8]N\k)5U68s5V];9i>Imz6Il9: ѿG[&Ĝj$~r&whu(:sXz;k{Wq|3Mpj.;=_j=U4}]7_M-Oѫ$[O'H*&/tI:CS5(M*ۭ[Io6˺ѵٷRH O?zy{NBuq^*Œ}FʽS|<ٟ~|G(8sA,22p8Rz#Ҟlq\4tEA104QJ?|;_ׂo/ڸ_ƭXɾ;csy6L&mȷcVMy9$qlc4|O&Ԝ^=]߽jszެhrn{777+89=LcGwwɵ[X]͊ f׳cգz>Okqz{6/ 7skѱMOC_Luqsy=ksɵ]Xy=ݝ^nlqV[Z88:nrWth^ѣk\orocV+Up|[ѵѽ9rupmoqqohwlu}>:O *I*#,FYVYT &-5bEV1& ꁣɿ =)}Q%qVy[ 6-64aV[ FHeVzC⟨h霥Bt)Z0ѵ64c[VtXjSF)aaFѸR@zs 
NJz+楇9bFFAj%ƣb4l`ղey%_ý>!8z)j FcF0[ 4X%+K-,cޑR斯s|[ǿr_{uurpcG_xcGwn \XGnNm./VƑՍ^9<8f=sk;?8x{6:878?z\n-\_lpqouV?q&h897}kF 5xhد6=WΎ.LvhY?'ݿqr}.w679+GF78<7r~N/ٲ7Gű{6GV764zrlsqll nWVu}g'j 㚪0GTeղ0jѩ4`ţQjʷY& )O *qHXMWe늶=   /&8Ujц*ѱݞx>L<|Nss'Ble]lDj;x٪1 jL{S zv[/lիmn}s8@仄9v~c{\] "(H\-spatstat/data/nbfires.rda0000644000176000001440000050702412252324050015201 0ustar ripleyusers7zXZi"6!XZ])TW"nRʟ)'dz$&}T}o<9=4U5z*hD<ޡ6dk*m7$MN`ZZQʨ9/tfm ^qhO@b*upJ: q7%.Rڹuc#$wYKC6e>Tp,]XV_ }- Eq+W/b6 Uq&MBΒT&sq6-]2Rj K/"6AYAa:k9[QZUH)zA튰f `Bur#3av4;Vig`C9`9#u{N]oN$sXy|8z2t]BIz".޿b'p _Vx|e[j$/p<&MM>hcѐLibXLͩuGJ:"e/A( _mr@##T7-RrHɀ+ RߎKfVXBoćn`,bs~'д nfiȀScYHܦ=y&ިPďYi،"C /z;:b/9¢lB70g֩ԛ6sU%Ew>s#ɮ.6ʅ ˧̗mc/\޺%O=yr͜wSX:q{nbeSJ̫}{p?&.k_ J#̄/g"GjwWe>S"eImF%x$` r!E7b 6 RITquP>@ZFP^S? Zq7nn3Oزc7;ar֒Qᨢ݊ļ ͘/%7flb慩O2E q-YdK>qCA1gl:lg1єNoo#HaedN 4<b >Yӭ)U\0ȚjƿCjz[ 7+Z' f8Ml+ț?oȁ!;թb1VJo7ؚ Qв4ФB!-:XLļn&r+e}z{} F)$ Tj5_m)z'<6?׾,"AVIw`:mK. Hy;Ek QKt vT{%)i ; U*/̥wQwZ.(!ǒX{{GjINed UJ82 ln<",2%Lg1x,5㊃(_{dMI=AP $ϼsG2+fu)S( 㧲%hlo%NTo039Xu0j ;%\W<BRTTwǛ{<B:d:~&5*%Hi5#oLGF_bkW3((R\I[Joiriv33V=|44Cb=9@P4M|A9tbNlH qe$sn̉V80j%Knx{Մ.D(gy.fXhbU3 jkcm fٙ6Jjߚ92vdCRqp"%l Kp$qpX'D}gr7Z̟,p,7]#6ޣ*zhj dD)A6&F%Fdgqq$l&BJs:X }Ծ-8/?OT֐&Mr3{ي D}~7J7"$wc ҁ[$A(~g%8pkέv/aeiG }}GfZ<.qf g0_%.sqo&_}/X|%;ǂКGD﫱6Y1\A3DĶ~N!=,^<U={?W_[+&$U 5${ 2'>fCt%'(6}HpZ;r "s*1Lds ( 0 B6](j?V,&޷9 4': 6W A4|`Z~ٸq~O!aU l~J4C4?s!JV&FaEE➰e{nqhFIK[ҦM)>5' ttaC  yףcy4|O 3KY"D_D*,Bgt蚙9w%C-/AFUo\!Zg] Ѳb?Ljt@mnִ_;/W%o#}"@`\~-=MߗF#YAT=x1G,ş C?o {cv\)bjɻj n)?.~'q(knfT#FFi6iSkRTM|y{Hʄ:. 
Dn?<`KLȟJ<6ORcĆa+ì 7>YCFlHkw}ʰϼVtFXv u`aN&a?f;j0\)0 Ū-z@D$ectyIjϛmBHV I7v-8HhLMFXB ˚ps Z)5.7!Qu W_DmĞz ]QM d2@0arb>萢 @*\&:a @Pn܄//(|N:*0l|;=]ToI׊[f3A|ڢp#9%wwקF ¼7i(m*SbP(iseE]~mc!?)V77 kGqzWucͳrU[.;ӲDszT( Oxˊ+Ai`q?0A.} omS(I:&G;̽A[!غR?4pw֋1ze/F2.o3ez.{yS;̀;ufmj.p0Riz[=xmBiQm=QFieF#5*^ɡZMdVyFk<%D:Eb>{,؜.y4FOXI=lʸ5#?Bngf~lԪnoS}_U8Od.4^$"1#wgAIEi~~Dl0!]lon x3;lg9AZbwz] tQ GbNbt{"a\%!FjUd-ƴ=יp?&uC::M}sbk2H@!%^߹)/eiMf)TG !/Ќff=?4$HTDUpzŊ@T-lM!|lcƣ4jAVKQ"[LjmUW;If]m.3 &lgu6s\1>^ױ҆h6[Ip5̅..+"B&_o/h4\z+bqR['0PSY5Ǵ<{&7>M6G:d\P^15:ŨaN fX4~Z%G$12;?A[.ydo|#_P.∀~ꯍ5}mS|)6I^*zNG^|v.%N{Qo:B~N [<ѷŁuY\ӠtMx++d-ɬ@@i֑uyqOk H(1s /!?7~fQES(4;6mua$0wtvf  5};5$!H%ik@GfH(%\08,O%C/ER'ş `] ڵP?VWRT<H'6rBj9]k=3I3%@GXQ>?_¶ͳqt:;sgmdބƍ#`,:Ƀw$r2MYQxfJYxkPzwPqM;DO{wMIw  ܮu!.Am :$jf|lH8ѲӛFm9앇G*5 T daLJ~ Aʷ7K mRSRY7"X#&}QsxIX+?u݊3bH(DIJ3[i(16!O>t=tIMI,AOĎ4ȜLׇ@.`sTL஢BTx~.MM9R%"HU[G :jm ,sԈt]$#0߼Qs5k0Q>xfAŊ:@W;| C{\l4rUUq(x)N:烊#9L,Uq2M.+Hig淒w5קJ0"ZFڧF^WxP^t̓@;ktD8:zf}#e&\ m{u*OEmμHkCq]G,<W'wnOhU )$vL4ri~)v1IqntNED쐧E,Te7l}(Á>O~-|#E(rG kM \Mfe)۵~n=&pGzB{<OsHL3]NއϽ肚aV.~r6g}PZi߾b~Gr<[Sm}HI^!MQ7"|Y$Bw} ;}/hJQq |t*k)^ ,#J?Y&j^d z8jlԩ,^yb#?ƫ>Μ"'kn#FbP'WNW;:-k]om,,䮸Iz,h4lt8틪GsAL&Bه@ Fi$Zww){bz&VUupJA0c^?檜!T;fY+.ZiHV,UM9X+EjmɼHzi馝A絩wdfƮ B?ŝ\mL.fi\hkg' v眠L˒qoT0WARX?LvLc״mgѯC*U{V#%esefN=G._c4'i9Qvh V918څ7R3`;%FYh$:AWD?1(AJt__)CL5=΂#s?C4'>Lw~ԣ Zh?:N/\t<-2f s;p~_?óGrE8<U4썻O$R;ʭZ%Frn*Ihz,j6u?=3u6!e{bNt{wJ"srP įguH>XK C!Ҧ k$ti:#Czy|k=q/kZR2ωɹ Ց8/h9ADFJtyVFVJ+7(U'7G TÔp;x@%g$!\bB/ǰ&4!E2jBVb=_ܰX' >1q8^M| Bֵ0ٮ4d{:]هh+Ót?!/8mN@$GGFzxƘ_ SW]f:&+6@+ԃ,ѿǸ&Oΐ؟DoU$i8]VLN#Ȁ;ƻk Ply`W.dT~=luQZXKFvEf\RD`Ыxq٢>2@BA'*0 ވH~jɑsx1OY&'Y6;̪ϻc# QX7xW߼-KUL+D ! 
'X[ 90ۆtO 7xk`BDjq UCP*琸ŜFP; \N0^~_(Xr@3A!23kPƏ1s<7XގCŮpN/'KZC3Ȥ| mG?g $^[J H_&}1;7my0 .yJ7 GaJWSE1s'S*Z"%xa-a ٷq -()ouXTJXb')gdJwpA,Jg| qG$w +T6<4rFt.b4@[_%nʈ ܟƑտ^=n:Xǃ2-ꨥ 1 kJ i__B7`~]k3#򀡶ljM@;vҚMn#8'&^dB } ^9> $ 3CGϥ?2y5P35|"iWsSd'2#wۨFDx:*5 mCd_"< Ky'C£N?q o[u$ڜHsgĠC+zg*Jbٸ̇Jhgh _OS=On%KF6m)*ٮ5sxl[Nx2;W;FâJ5f(y%n.7`>:uT\Q1a T72- Dn\BA!Å!%M߭sf^|wYx] *NP&pkLq 9ysL~ՃPsWo/HsZVE]SɃ5]*@AaBd!lpJ2X~a&XB qj ϩW\k9y5Dm V=*Av~yC!pEmO%gBx)ĭ[2+.UyֲІK ,- p3D i5ILAhT٣\ƙ(Φ]/nr//i:tZiG:HGgv|Tj3̲oBnigX7‚,W|52TG'b(%::Ze#Pz}Y/TkQ `:ߘSv &UɎF{?>'*Nk&&;Y![s{`:@(w}B*lv2@.Ne=U'd!'DD2=o#$(%_D'XTB@׀R.%kqqrl$/j8Uו^[!F,Les/4! Z3͋ʛ_?"Ag߂N#8]R[|nTtVbiwmTiocK~U:gAt+MRUgiN's><-7#T<7N 'lvAr-ggȉI:!(%"* N/Vy6fmiW}ZkҿLRHwhZ~=/O(6$\80MoejB7屶T8QnB@> dS3otJL#oFʯ La.µ|FfsR1uzO7LME&t;]H!a NurJ F=r'K/p82OSZ?iy9pon}6&\üށ2,TH[FZMOL?mE_T65{E.} I6ϐ5=DɤQ{"ʅz%[gKYV>QoZ˻T ;MNN_Z"ɢ¨х`*&N[‰ϝ49s]kLIM.y],PP J%V/fɏ"l5Q0|~4넥U`~@5xVoD"*GL{\%AA$+u"?}=(iK5XWg~>YƼJ"߼{ZxdB.Q5<DީӭEn>wmBa$R)b?= ָPy}x8=X_%ہz{A莘u6`,nGtIy9S9h/{|K}P`ꤽ@uʂɨOq<j\7KCP2;Yy[r}dza8ҏlftHy&҈V=&cCD|:qUBrdDBȢO`;R$Ua;Ȳ'G;G&WґfG\ZյPUL`a#k3Xz\lf |#,\w͏"=c\nԔLOUR.dd^ꥵ`hM:H bnqOH7ԒFtS(zLn[R%?ׄ6}n2#!Q ap;;2쵊g֋g3ͨ`K2ٕpXr%xj?,^6(B4.Mhgdaܣ0B12, D0%uͼ &j1ڞ7׆"RD?Sz2Fce4=$ 1G^g1.,6l4'QQ~p\讞j0T[VpPϧ0/sY([]jUG@rxe77ɪ6%FԺ;Cs":c_ҢczdNNҽoaӴaafU+j3Du01c6(#Pas%=(ڿtD9XJ/윭Q;{K-_ܤ3]>v-q+,_>y񪁩nIx%@zDad{DQe(hbFm'Dt<}B4'WU$Bb&r3mGc}֙P5Y(4fF/ƊlY7%T_fʮk2|Q]x2w125= LLttL` q nh4ݽ@Jg.(MhUD)%ʞ/@ #H%.Zq Izm̻$Fucq_`%>AaVp]e,ΫC1=l ]SKxMh`W HP!J{/;FEC2d 0,(%Y)=-p2,)ݫ^ D/cS }[o 1fnDk㰔 )h6y"}q~Q?VSVMKیbpAf^`sD2t约-+dfePݩnKҫҬ2A:*2.Zd1l036=~|b#qv)ɵs{|3imOmXM-,Q(d2Jۖtcɠ$Zm~1?a1WCT͑Ek ̵jmh/miSGeRg1.-u8 \ hqQW#Fպ A[-Py8@ IAX~ivI|Kgctͷm ˍ4N"d%=K!3}TO5v\-A6Dff{'RM>'`ix͖CKNbd5.+m]ܖ=/5Μj2͡͹hwC_V4QE6snK`˯VvFt++j&+y:OxL qٞaܲASoʱ/.GgmYUrْ5w胝uW^ƵrĿ;R5d8HO+SO5#7d4*O 8H-$uCnJI@Ў*BsrmNʜA˅ھf^p7uS#44DW 63l`E%ϰ؆sM~7rTzoӂ^Wʌ'п}5_ J.@/h| ؠvE¹p—r%XUJ&XhMv]LS$$=mwCу6WݼDObƅ&s.U:K ~_F||'ޭm\1KmЄaXy{w`X|]MPi^$mTGTmGW_Bl9vF|@~S-G`[ݠkdp(N07l"<"( e8ݶ /{eg텍cGBft7eR5{/İ-@o(RϾ@.5dXIu&wuvVi1矠cڃl%[@lJӽdP93,a,s웸8i ^DM 
$dsn@Y0 },uh篥\?5K;L3"T\TI:@$kKBS p(a(\*DRJb5wnYLd#77{-/ψqf qy8YV]6rX?c(R(5K/5>reu"pϜ@RNp$n%ة䈌]mB\ڌy/ NlnD/x^N)LdbZ3725epaTOE䘗D7:dGB»JZ16nYP{Of_fK.~~st9O5\e1$# J=o -^W) ,ct bs;Q7*@ <& Qi|ǁV–qhИ 0dž~|Mu̹{e<%R \-s ȖyB|""E83g\YQ? oQ'e&V g*|,\/KL:5$6-4?7 @z:%FARҦ2[khV:\/Uʩ.ZKqi4Z,}#FdkK_25ܦƛ#>? twS%UP_-ďEH ChS[[)2a,:ã3@oRKMmpK Q{[Xi̓!Q0X檷;6卲I8DUˮGJ"v nP|݀|ӘKj0:O?:\!v939Zc4-G/blH46d_ ]"re=G\V_jfs;.mB`dߚ(dž0mi}BG#@:NGw/qAJx~LN_ }Tכ#^JmܱLHicKDIigZ9]%r{'ۗΖ"Ё*30~ }T4A)_nR1E:9s'/tC{sPq!z YTvA,'1YeUlFIjos`~&X)2OG7 ![H𓒌qIŎ^u戼Nsd`< ;ݽ^_{8AN<1/xp UVu4r]7I!*u;οدX@ 4QA}6Gyg"PUZp)7 a->^^wUf&A2bӍ`rZ2,bz$n&K$6"j͝D7ǀ%BwU΁2Eɛ7nIכ¿A7 +2Ü:Hŝgu"/o b Vy bPSY%ϹfK++t[r/W D<ͭEgb i{ʊ?,V JV; [JO@Vܝ;!myJ[ԧuɺ6't*Ϲ|=۴9\}}9?B erF Ϭw4]#ڐZJ WQ J\F]Co= 3Aie:x6xWV^ぴQXpaDJ#fmZd4+>Q阞#R-( {mYKkrz9[l(x* "%?["Zxx$FD:xf#&BƂ1^HaZ9=F ub6J\/*Q}eP%5߫&J+v7l p X q;s?xk IB0›pp,Y8`vW=2◫ݪaeUSp=v| ff`;9]O~0rKAP?P!/ Cfy ^Bp91ķK;St=\_P͢?OlE7q? :am{ErtˇtE4?P5V+3R")g+5Y;|;;jƔVp-NߪB/e:dp ϗ* *NC~ 3.e2]`|'f^\fB:ާ"}{XQ=]}e׃vb; dvYX fñ Sq'; cq KKI =7Cp!98@t2 }/'ொ0=Rj6?g@|!Jv@x9-N=-$~:(]78 OX8 ';% ({HAśHt5 ׋AHJLW̚&gR>^諆H,\#|5uA'}XRYgdu?5j\LFyX玾I|gJM!F"T\iZg_P%f= b*$jqoBf/ך[90 /|MAZ]_S}(\j2㰀G^Ӫ97?L} 37Pܼ~FsfZC !kTM4G<#}Hn*2ZP^L9I֝+aoznp^k}o=F{ 1fטe{ cv8k/" #l6nZ9/r %ToHfwN}4M )js%_XTEteʹj>JS3iŖ{;k#[l8Sb/-5|q5\0pIcÏ%9R& Q A.xE~Z$xQsZŨ,g}QdbB_|2)2P W! -Mʘ$ڶ}F{Z *+ux|ok>;TtK"?2tfB~noh? xWg )`Du6zS1;=-iڑFP6p<٘n%0*PSPOPgaeu. >EyFs\ҙd4E7\ѱih|4Y„>g h|D_iB츏YLj>|}<&ud,>:!A=n:Ĕ|09c+8Zlw3[` {fjGJVEWs-c9T2%(C3 gbNoVfs7ֳIM}Blמ"PUrJS+Lm".fPmBt\T%L0At!6/ʾY']N产įȩF 5-Lhx?+#&uDi{p3Vjd#zvsR8OWy 0ˢy}Ve<,7N1]iƩ } of01j}>;ڬF˛O[x &|>ht3~K/ ,8O-)^6 ۊ'fvL5(\'4`.ڰjDY!Vv\֍|*TPͮ35Bp:`$Aw=fk M/ks#ӷ=Cqۦ<&c&:٠<P=K|F._l^^5X֌ur6cG qq`U*ܸceϿ'*;FJgDVup8,n b\apL8"\u/; -NUD002;LѾ\v}S i$vchNgL7HbH@u>-iEM;H`S/xЗSPp%qS@V/ s6:q~,Rz8X. 8,e>=mt}xrej_Gt,W3C3Tu8]vl/x;&IJH_Js -1m`.*L[| oE~))~7lhljc+`σQhbNUo \Hjjd뛕TSLK_ \gPU]Ƞ*5" ;Tgw,r{cI=bsT"YH? 
z3?{+B@V`ܘ>m'glo5.x&~v֗Co_8S_+lӮRmjr=ո'EGC5wNK ||KYZgy+<+@ҋ W{9;>*CSJ*rueg@G@[9<9 QSgmqd$$cUΚ϶bpLjn"19K49Yib(YQƑc˿Vp}Szi!vq7HEl$ BN{C;a_y i!rt2LycS^4bĄdUӚ JPյ쩕c#~AGs!$TDhRTӘ}$$*z|y+܍.=c}CSN£`g$TS0;;U7D rݾJ')u}oI*Yw{_͑$ʾN-ҪքQ* fIz|Ljҿ/0ijam)xw1"6u&*TVbRo@FMqs܊fK.ꗉZKzɿ~GOцwcC6!V jȖJ6W @- ^Ej~N҈9Fsh8H-)+Zrvռm0n510ǒNE3 4sdK_RuN=E7IQβAA hyhH p~-5ծv)hӪ lY 6ډcFw77 ^Ӊ' FT,[G5['nHm .{'(mg$8枕mDdm:uuiգP2K~| XvAdg9+`+ uTBc%m*1!L: "?* &wv}k$]8- 6ŷ)M@g6*FI@$`άr{@%qQ+؀+AZ4$yƬ`O/Pjg#_hȯ:dnơ ߊBno^*e*bvMwNJĐQvGL7ӻ\Dzޣ!,"3E!_]j'a3t*g2Ca)4n0NȲ҇B݀ ,<6Dg;~YZۿu0o7R9O 2o>94ҥZŴreCCAےͤ!=֓D"UQWnqϖmhpoaܼRJnF)v9`qK^K-p ?\,RN`[K@ZI>c肜1isҫgV B[4 q PK]m(:!(7\⛲G"`V6Cqw`9^d _t8?[ٞ]ZrX}EY^IgjC';\g9-$V.dOle=CZbTݼՓ9~ jNJuy7{X*M(?5 xDso1_e;F:'";9vwyFwVߏb( u˨/9’W񠬧nul.# /1& ػq. =]c{;7yԠ}?/aCӂ! 61-[3:{JzF|,+ u&,w~PnAt9mT`s̳3i2v!}IZV`?һ/ cF" >4)~)J Ð^N+z1gݚM4?,B';F:E֘xx #[$:}^B{RxS2yD Cy\ơsQNz(Ǵd&Jkf0М`cO ,x40 snjdR'NqIopiS*Ѿ$DoWMM}Džhyeݥr#9S/T"4ވ3Fu}6bSS()Ie_Od?iFɉ Z.4! j3VHz!uZOΝjEW?)OY+P0^93NfXb}| Ր,4J+ô=Aa n6ZXS|Xي%ٮF!چ tYqO|B0{V$/8x\VX7T}CLw<"ztwdqB.B/8EV 툵#,p@o !̉߭uejx-"O>y>_q[P>!XǧfpX4roN@3( ol#rhϪ7>h<zarXg,G]^a-Q.I(\փK՞J[1+}^YTA}_⿫S f)vz)q )FTQ ,^eUxs2؄-^#l$fYVa~LE|γ+EA!aN^ '_tolAT9$>эkN`ĩI}zGEt)[9Zlm?yЏGeǸz<'.u-[ז-ڄ7%׬3_D2L'?0#YtS"t|)kV;"{r+۶?89$dHa.4+-0`ZneϖZ{ Y#';b+E=L u{4.~J'%xgEQ+㾉CdE{޽N  on׶n7}^<?zf [1`g4TK3g+,(ݝˌTݳBu>7ӵNnז]4Î}{6k `Rc_6?bd~dLx'HdT[6v[ƐaDkn&Au^5͏ -.,x/V_;0@0Q 鳗 GJ=m‰uSoJ "+erMm}ߡH5 K!3߻Ve$e}ZQ/kO9o*4}޺9kX번~/ Y aưqD9psC-, :Z2{W2eq퉭 ׃df,ǰ1ARv*B8[>.%&dG~SxDWcQ"pr&Z $!{ -fyR{mx :[#x-Π<2A)*N]DJcj2G!.H%kDlb!ұt-7wdq6~Ql&1&1̗!dBgRɤ  t1&hU &}1*YCP `p5Ъ&tٕ ިvxu{)ejcl5 [? 
`q,E-3.t|qҟ' w|V=%ZTKK!9gF ".9l_XU+]}Ռ BTqb3w({ ѥ&ꊖo3"0sL/ǰ$i<{c%# x)ͯswؿ1Zy 5sEaG3͛nh%I˫oy6-!V쨾vBq%|k96#~gF{FDw$Ga $(tj,=a6a)چpWE!}zUtFOE\xZad(e;nhЇ(?Å\ \%ftŒm*V#c{תOa[,x͙sV}JGUOZ} W^DxwQud$k61~`L{Vo7X42l7^[ٍ^x]&#aTw ؘ&O}WBF ^}#g*i+ S=MMKKʚ@#]|J.Ej+;+K!jҽJGn-Ԩ77HM7wkB M-OJLH9[/Rn2tDЪW_x;Z;.Š7b=麇J`[ԤA0 G a}Aj/hl}Ǡsy lo jZOɉ+բ ?Ƒ;qDPIGڜr'Fܧ?jF >Դ}v4\>2^/9zSVЩ =$e yzd_6.L9V$H@(CM\ q.8dϸI8O߂&OVKwOSQ0 ;&ka,&K19;H3JlbD{W|1jhBCЦl#HnJ$R;zGp{c]ꅙ6g%EEPN3Ⱥ{ɂ/.Enkx$wx@Wo!xaܪ*(lX$D+f::o/@{HZV]F|kw ʼ}dsw8aN\ѰCq8GvlD:|j0C ?% 1 ~yEXfL)?%ѧ.y?_eb;iLϞ0@)!#С# ֞CSB" Wt-3˅>ὄjlkz8E4Oj0DRRd>b۷)!ڭExy#_Rih>Kxl_%T?F,7v)Qb2Wt^>M6c4z۝`m֨1{Y*tOT0n?F\,ʼ*#1~:}ġc, ~i@ioMV]Ÿm!Q՜b\](rs G(T+>V҂43Dߔ]"qԊ){>їf0O(ò➣8T IU$J%겯Nrp0~a\sk>Ҷ۳Vq]Fz:eVgRD2ٮΒp78#g͠@+VW>N?ʤ+8 qKC-.Ep {*j,^҇>/q>6\36kXbֱ MѕEɼ$/"=琊s"tgQkR I<4潫Jy"񲺥 gV}Bg jǤEWu4ѦN=RSUYJ2AſO7t|j;KY38s8zWkŸt_hJ68o;6ܜ/bTݩMu.(F{̓0Є yIi^ . A\{6y5[T@7B|C!i%@i4;*ix/[ '3y;x:ְ IXkg^*  \)2ԽMW>#x3T>E}g?G]Ђ,"(RfU;F)Gs'k *nZScmZQ|x2:z% bBoe@4ut+]U-+}bF0ʣS ±1ґ`"e_A*3 % \!)76JJMվDq!1{i@R0ǯU9V:n[)nj 0d'kgJr08Tlڅ4ц0/i^dj;Y9^O_v:,|yWHQ;-RfEj !"xC83C/U5#`jflr ecJ(bQ7 SϨ=p`W(b֥eF?A{yMCqpf; *vo.I݉!.­EvSD Ul,kRA5?8'ExEǃ:\姪qu>6×ibAU3 ł@ZpPk !rricfO 6 ~Gl>i);]M3K_p`˲ulQ6뽲*֡ T'~zq;Rr9q>n([M$ZޜSbu]46]Ўk1M?80l"U тqŗNgA x GRєZ'e}:_V3<>yT`P|\k?`eL?rĖ[2t;@ܱVr&ztjNyfO:hB-OŎަSŹ2kJ*w9}(mٿeZjμ\4ˀv,=4iGpNqJ>Ԃr ^?3EWt_ S༫(LuєZ%P^F Gu@PP&tEKta?0 Y;4#-<qvv\ Z1{`zj S{R69xYmRxāյPUȳouO+9ܛWhAb.HV`3@5WkƲ±He');Es՗nXք>qdNc~t1 xڝjaPy:-ա#oAg%dX& Z'U;-^}~нʌ HǬ ę$Z h~>C^sBuFx̑n0&uƭCL}aQ8Yz)Scmb?{(j(QJ:gn0AՌESٯMh ?1jĎC)M'8ƛj9& ~1pMV8h#/f`v+m\E;x UeGr!g:+Z C+hQD7TtIMlOFf)$cIFdKkupG WJt -5y?/X:ysSV0&8{T!K/J~H|9 ˻G6,{h~x*jg=,su'@&;s&9ג]$=|gsJxlK"|Pg [/ӨIMC.<~ fQi#zGs *89b>Ae4M0P@<ha0jη5]FJ:gtyGCMisԿ:.Y 0iAf<:Lzʵ 9T6MXV@KƽN F?$1O1 -l|EoK4 *heܝb8B0SQ4 PwO.GO$*b' }-% 1UJtXJo/|;KhnpUZ)G^#Y쾣hJ=ҲhW3ۊ-f޳z#3u<:k ENOṭU&(SxRdgQdux$2sDd'fhvTcYxrp WgBuO6o|w9PɁRP8!dgkX&_Jfj|4[;pl{`'2#z@:HA #Qu6h.SRY{,m?:ctL˻h9Yi z'}U*D@#[Ļ="^P8(T.{uƢ&5v߲>K0fUv: U59'B-E1ؽsCaʮf+=}ki  Bޭϸ̊Ab e|\n , T1D Tx:tbO[cr?]FH CT"|bec 
dYm%Wv\i2eX+) H.|/(y`.>sGc]1*^D,dTOy_hP+h&q0~?=Q' ť ϯF\?V8 &ZԌXyGr4 -wD\OYⵒ($ ji{V>p #Qy]XuIIE;WwJ(AP.gϏ?1*Qsv~2aV`΄oY˦nm5)eݥ[>wQ*Ddy3wTMsdf&4՟q8ObVXKSA vht_Ն6L}"~%G2<6&{ lr u;&Quq όÀI>uz :i{bO*V 'boqGHq/r~G^]MY[xg > ĹS֖AËO NOx>g ⯶MFB"cD[v! ƇB吢9<]ƓѬ];ul:Pg[T$K$r ۚ V,y 94s6,`f(Sbۙ°!73ςJ Cș:lf8D@0(qoJ ʍNt*s7x_ TVuKlFF~n% E8dTMn:E&W&+/eD62d-\̔6nfݜCoJKb|e|ͧ?iXY*dY*zG-? p J%esXEy~VkvȦknJԿoN*[wdI34Dll:4wSUIA0IIט >&eF?U41'mvPMe^Mmlnl u8@m~LWc@ySIgϊpchQLa9mW[g狃vi9nM !Xt@Ml*tb eoLPǓfZ"YUGއ쭶&0ղiVliw5]fLe@7'w{Ǩ:aP균JSOfX Ѵ_)e"l&բ,q7>>[ɑgO Dj68e !*&QW[PEb H[&B]vqaM(6apr"8jh"( Q ]dQrKN1[C*9mw]xxa(!֕zp%1o+ f( z@Bܬ{C"h9h^.sPSԁU[y챝#L G2U/2.zԞuvDm* WdTYJs~rOsu'=:mq"œr9|6YcIedD1/n@ĭjo9dˆd&0$ Ot:?kb8Tl<+9h>~Ɂ~様skޖ_SrQN'Xz͑ vnEP PGSvni C"{J-*^-T * $z]t -COל| sT\ep\i% =U3I[U$(\"XUE{0{9sw$SC;< @K*_[C:yhl}tu,ܣrԮbS+Wec4cip Ir-}b n=%>}kY* B-}gvٲ_Mjj +=p -`(|#OC J_"< Fqr.w!;Kh|X5Ur֐8JӜmv619M$P]0K`c&" /#釤O926JZdbyD|Y..1Fo tn Y@N48qCe#>Ī)Gb}SĘJўfgQԛȃzyR/"\-*W&E'J7GۮC۹ptv_[2a O4 U PǚrĽ KG tNvV/@gNQ[~G;]5=,oH8JهT풲1d/% հ"Ƃ]HPuIf27 ; cEqAF-XVE|0;%cէJ{$c1$( 2AyK%k'e0]yjNiAΧ"ʒV@ ڰ^)$jLfvc:0j>(F1]nq_eY h.`}M{W=ӯK _5SK4p5;uvCLR^u*iJG5"y@*F8 ?o8/POnQ;r_ {ɑ&f&:wނeDMX8"eV)u<5^%^WZu7"*U(z\{R'V#W/%&@[0 n fܸ:UkC' Hj\=#gF<26+hʉbsb:i@~߄&kmn O뭠ԬR0p_LX_$Y Vn&5 moBTjٛA1 ycǶm&Y8!d=puwaPlP\oH,-T=>n=;",Ll\>j(h7(s3N*v.yeݸEE)hp&As%s7/Dä =GO_7K?G fE8(~޹>8] n-2$!}0"`$}t*db潘}! 5L;9EGG{`gbSOyc# N,4,ZLQF! 
ļ ۍ_$!C\P&_R+WnUx (} a FY¨0T[ B3D6@4jܲ ,GqL$J8poz> I^fUt]o_a>Y󀴧i",# Gm%AΖ !CBQdfI/Zm~Aku6S+8[S[ cw77y$"r+Lv8ZdMAHnL5 1+U@c#;ɧ6PvQw w /Xf0S'63ӥl!!jl6,=EbJ2"| 1^ Z{X?(,{r>&./W.)m <8 憭^ӺcNVl,)V0kc^M)ZFCS_ ԥ0 FˀEC bvC]ӛnƭv$?[eѻLXo/tYk:~y-y(X5~HOކ bkRnAZ.ԫ^pA'j.i9wc.#I3qGKn>5{'NˉE_fkNPCA/~<"T|X-W,nlӲ -:|r 1f/G^(nh0- t>OobyS ]]ULNe/eTV򁨯 C `qoBڰ<8!MYN9`+k@ {4eA ?Z^$aGOaq.!H+i  tj`cjഃ#o:5|qRS35vq-hT zF-î‰߿I#k&}C̩٪ IRaSŒ XjmZ Wj-5X5$0]}/RV*"XX &`r M7cG9-65z,wM0@1WibW/pMӣKzqǿLv!^"/gV6+lsKLR]70Ϋ9xj)h5+pv̲UNJހ4bAjܱ@,/52AσF_f{,8#~;([+k[C]!ngSz+-"\3·i]jII9ev"GMY7Kt>4^-:@ʒA ̴:T̹?/BGd$6{C~LsXK?i cF2%LU DVV՝Y_',=Ě_)KoH:6.O.KT\{UaNTa4'%r=id"ҦuJ7⎒<G//AG3Gaq.GSV~z,!=nE$M`54<_ 7F6cT&_("6a ^ S3lsI[e4TBܓV¸͓ zR!p5o=fL3]me9q^_+:(0Ɂ\b:0\/GQLlR=IB8^dœn1 {Xjf v :e5Av#vxlĴ7Q}_(4'l^TDiΥh OgP7` bp/@Y:C4+nKlq{ 9;(Boq{X/⥒$EaRőd$m@O4jgKƟ5I+Q7}ޯga,؟nk(qfh~c=ͷ)ͻL:_R\z5t.Coƈm& r fDXe.\C-Ek/6^;Z % -P޷ׂUq ,5Cy\vX0~~a=P Uu*-CGy2)|v,HqRޤ[r5W+Nf8ݕE\H%\DR ʜ0w+AI'~!M/`0$b -2d_,ߕ}&}FD},4K$:)gpńR_mgtӈM @]%TzG,:.`J:%(}ʤTN׳r:nY@O:~~nn10ߗwLR;K!2r& > y ŕd<Pd͖UEش-_n1*,!qd1m^藄\BG؂ 3 4Ю84g!&׋x#F5W _m!^̬7ݰ19ȼ./4?cIn|JГ4@Lq^,] 8CD6iOD:z?EGu: qˁ$#X*}hGшv/vxǼj flֿ@Re~AƗ &hx\y`> /-[ q+3ي'Q)T.^w]FljB 3MҏUO@2 !hS +y"P wPw"`\2Lňb'bK„%֛ ˙Nid0+([ Ka&SIk+Ɍ=6tߴK^D1l:Xf2/ۨ0(޲t:Rd43]PFt(F_arkz++Q9{f,l\S8yH?-ÄK!{NT.DfՕWn9X-eq :EPmV@3iw~1^yg4rDFjeJ6f^ePRwH7t$ۉ oNv权NtdެleIbnnN ^{/p-r>n+؍}Oy0kjJ ͿCbF^8ꙥ0C_؎ 7jx7F]yKX 9gC[ GD~qr"a9I:`ö r"оU);ﱱ{ѾĎ[lީMsE8T6 PhI;;.NxY\Oa>>ބT 8 :]߶;*(;C=V #Qvv( :cd /qH^ L֐tB1C\%hhI:ä4S[-P*C%*Ro24#ゖZbksd2.`-5φ0a>yźHDcv>DFk4VhUpL䩊)1p=UH2Ѽu,eqFgɑo29V`FekN> aIm$r%{{ẚMځB[zgCRWBYdv۾1:wi#\a_LYXߡ^s~SB dbX4luWZoWtl1y__2UQb<Uu=N.+f;B1~K?tۥʶ4-JfL>D5+?ִ*-UzW [KP)"]>X<3@4HEZ~'ՠpX5baKKN%(.Wn倻z[E?.3R$cSZ?·wMM}>~m܋J]> FU i.4`$"NW5jcE`E8v 8аL*eX^ֲ{hnc5G!!W#<^ 7?>n"cp:T̪&YnNq֠)[R8Q̲2aa kZ#IoKHbV_~U%5`"65ڿep72?XXXq-3tdfA = Ւ$נoX;OGҲ!Mro1q*hf4CGpYP7@ o!QMU[׏ڧ'^vbI0BHnC9Q]*-ݹ>%J*Iy~&nn`P-]|7=t37ĸD_0"h@f,< s}Y7c-ql&$W1P̡:*'_g"wsy]F*^twK] e%mFQ^"L9JQH"yzb۹0a| A'~7ZYCq?Y,;) |ot!PU3, ']-4s1Y3ē(:4*E rAV4l=B.FiJ M-~dK(Bz~ 
p_xVSQ7qS\7Kc|bty*<'u.J}l2ˆ V>Pb$5g&Q>Mwb){1XpKF`&Uԫjv$ӹBaoZQ^Բ?sAo7G<Gk5g|}MzG3YyaHsqSLxkܨtv Cy@w*{X­.2K"o'*ig6ZWbt=-hv7^MTFa}E?&6!-ՀM#G{37q$_$!]YhЃ@}zG -] , o[ODĤjEi7x@"%@ w1oq^Sv>[6==JxUg"".j($ݏ6_q{c5via[=F;rڽ -h~šaj32F<+ͭL-qgPfN}3N~"Fu|a$6Ѡ1iaq N퉹 %<C6Xq.>_ƽ"0HG! "{4.4ϙ&Nm$R?ch?| ]Tn O7|G(;8kƛVRӂ]d5pӤg[R`_o}+Oӟ 7pD`Vo©;x`E*0~%Z#R 0^srLsM(f11 HE'ܼCE6i>rf96 tx.oYkz#u#<^aQ5aS}HD^_]h(j (- ^ 7$B,1v&J vis}mƒ c3}r;/8su[C1= VЪR\nv5bnX )cFRPGu C*\ȡ]yJ1g4 nxRCi;K/F!3 {럛t| d)n'n]/+Z]rFrg%mBc^ Q "VeA6Q9))Qf(K73D/ ?/C׹RO,< 9=* $Y2D8&<?Dkyg»>xbBx= I;e疈Q`J'j $kL*1~$@*i<r'T # ΉTXI%_vCy 5;|nF㶆kΤ$|o :h_PF߮A$}|3͉-K_Un uAI_fx ?CM :55"٧1Qc7hP ђa e|B'.E)dn~è5W۶W ntrJ <TO6>G n%fmqp̌޴r ̓QWƇin)\U@wV{uAŧPAN"PCLSb,'5 ߓgX;RAKԤܖ)oر)`Gڋ>yEh{) mT?)Qv2Wh!")utK-P$>WmZd@QsDAs܁z_ RCK7~ᖨu) -˝2^g):>T`in?!fPpyjU)8(~xzue7o~鸴I; |qiwr{(2kmksJVr+B [P\8#K Ka?zcKg47s i4ց D1 KM,tWn'T5e7ڛ DG71QHq lyq:>S M =̠t8D:E -q"'`q0?u|(]Sqq=rٚ¨ UÅaJ$DE>II:k;2 l%~t"e"oBY΁fp$<97NiMp]D=q{NY[o;3 c, p^VK+P=Ht۴>OΨ A(m)#u >ԃ}M \"bzw OvlQ ыo˽z'͗v_D*SN>fPwJjR,X&^.9 G3GCU'FyfAɳ!g1g rZ}я uݢ:[t9J}$p8hf#hn(Z/d5&I)eΡ!4V.w>F}˙%(#OUYy%=CH#cX$]@ӁlXc8C Q&>⚈~j|HSeho0p=I$iܵbg~E[#{i?,snH)tt/W(S ( 7`#&Ih ,iΟw#҆[M ApBMUX[Jlƪ݌L*ۢF ?8a#́,2ءPazrSZ9K;3\gW 3P=[61Y˴/Y0hB;¿Jyr -aOcMJ&jxAޢz̢DdrCy歋'vj^⁾B&lr*Hlw̝I# '&a*ryL/T*j@^nzl3>x/i7' smUNb";X3' X~  a0Јؕt] ju*Զ53dghZ!˸._T7JT"g;ZAB>(0u΂R[M ^e"BbiKO:ǞOknF)pm6G6k-Gej7R$oՕ*r#+ZtɄYl?_qa鉎V[$鳲ˢPaVrTB>OIzVfvAa! 
&T\:_ IyXePa;"gV0?30+N=aJ[_D4^wqS΃ XiZ1™"S;k!:nF) >UG3w.%WVCc^LpfO9֞^dC,2(;O| !""r=7"CʝO'l ,dءؕuT,V{Z-Q%7V.NTp-nen9aUS c9jVFW:҃b|-Gr&g$= 8-$-5i't 2p>S3Q)Lp)p=JRrcYIuԥqPNh (0(O7:$kV %G-aoO%%O}a>5[1:RfW,>d k6`Н%L_~]i?+qh΃6ǎtlYNt[p;I5@A ;r{1MuD 9=~etݜd0-̰xъu]"B-6ί2Qyw@eXW1sƒߤ`гL %Zٶ\ %Q/?a/3#g*nFh2PTz<*G8j=n5] ?ܫ'ux1, m*uL&Be:`U2B>M'hlz>Ur=,?WN]ËS!o}fJmQ٭p;[roˁ{jU58#Frlh< xrMT Rw3lQA,Vb M~(*9ý?:Bc>gUy/hC ΂"u#.o>SiWdS?AV>V.݄tpan#U >FqsO1NȢ@48h@fmÌ qӠ\!*ؔT bK8 3Eρ,{aHrP 9Z֠~e{GpN"Z[s98ƹ2:Ո#G,6=mz"@+0!VêK.Y,M%JS7R# UzNdzhU5JbYf]QAX&JXw^":H¼>{> JʦvfH>_8;̙NQy+xFx"x&s4.%z_}{"ϛ,Փbć;`{fN IדX+)^%UM'ҥѺtlZf{,EQvzԖrFq>ʤIQ54F g1㟟yKq-ߌe꺑4oYD)\(!@:l7O5 ;\ HǠh{aʊ .~,ᮐ9g>83av#r$ƖȠou.8~M02I MMmU"NBn{dR`#1 lX-&DCTަJoAԛ5 =]VC t1(cjLzRCqjs^TsJf5R#B6tcjPt98mhղld.&I*=) %Mp̣7ó X9xCΰO6[SD@ ^,CH6x ԕd2&e:Τ@S!oqI>˄vNg[z|B%:Ig"4kwBѷ9 />\}x!fc74~7sFI~/.^5bw=xAXpKFU"Jf9LGZGNP o~ݴe/OatqPlN㕫٘LRz+XTn>(|B}̐ږ˺DlՔg'mˏwdoyLκT#h_RgmzE{!6muTNJX=!| ւ!M}'b3*'A^Ta*@};U/VvO`Ҹߙj׹(S2F)&$nwxGne(x݆QMAa-c44 4NU V0wIUmDyFZ}_Ubs5nSfkqI7ŁL~A? 
\97m*NtXp{^&mfE٣_&%ͻ.$$fHl(B潌hzo5[{BMtxie.-X 3ad| OV:3&`UCwYG{%"U*'w-<BX߁J$l{U?wC5_DCN!1e>Ն#8U?oL=d{4ZO+.(ʄi&MB4/s.Cvzl2IX|mr ɋC2qX>Qy9i5$&@Ʀi{dޒ; 3gFּKT}Hvoّ>˻(}'å3Jhw5ѱ [mP]7p I:)J}LoXh6{t t [3B1 ʑ %_@c(*{ߗj +pũu_[|Pa%RﳚwBͤP ȆI|yƙ"Wv>#w{pR:M Ѩ e-Z7k I}>jcҁYK`!@X‹yFqL^l~: F#kl?XSDC\w/^BdU hİz}LL(y/y@8ŶoV9nO+CMǼ:d5 !T!3ݼutcb d1&M Oa*C&%%i׵4I%/h6B͎mS>FaRZ-?{v7gFyTE!>5[ 0YdR3 @$acD T,pRS3_  g<@/mUͨGۍG7+t%Fį=Y:ذVɢߜvy]#Gj,DN7XfGȢuIWR\I8/̥utq 3$7%«vaKgǧgZ 9up6'^SUfU~_!W9 +>f>J)nKKhaiJ0;,Bne;wdB,PCixN̍;5a@S"4o߶W~39;qݵ\a<%Yi(Ud~b[:Kgk\&4w3#zPO`)=JXP7T=ԗ*a} 34(׎Ѣpi>˅_w sg܊縴@SrLm0- c8 `+QI/R&>B]ˆV_/idž'K> kŔzF i`sOBJY29$'|zB8 i n׺ͱni 4VD3jRޟaH,Of1mյP eZbgH=5K")G+%yXV&9B rY]2HX-]jnBdA,l wqUxAcFd,Ww2}7/1]IN na˧;W4;a]u9/75%!e_  E;<>A෡q=E$'%2ާ~w`Ĥ- {K su &O;$/uJH樼."UMA񦷣6?7%yۂb ϺG|:x{͢I}CN}|S%)2FdS/4ufYS,QUN+v[ѩxr^^Lvƙ_"dһݖOȔ}E}̲+( gTR2I&hQ=?0%?G4B\6Q{ Z 冐8L{re;Lw{Už 5AX'P 7SrFa*TNaٞݵJ Mj852?ɩRCV̱.儔^_O2iF98 wXw=Z-J-GRp5LOI}ߍvȺ9(d2768jN4"f`zh"z-O:;"67*^PV}_=:2b *ʄ(3 ݊6E.I*yDRy>:0ojJk؈ >fegb Y(Hq7L6-Ž_NYJU1Q0^v*WM\B{/Q9<{IzZY!tki^Y'9;ĉ\ìeQK,Uv}3;+.VD%="QN[ WA'YzyymS{TAiF|cvCIَz"P|Yb}k0 냅e4قns3|>.kFӚ6v\Ů_ A$MQEBSsNpǐx"/І p'M0?(A_i,'@] 3S,]Y&Eb}yIyg4E(̩HGb ]USjgW@V.ӿFYȜ0 cQgv L3r G/j83SsJ^|)fZM>=! qnl ylB17RJ5'9ƁlxXA럊']7gt }ÆGQ%=.Ooٲj1oћ5-솳p[##t>fcRj ;>P>B/@h3aV%V1mK6'|QEʟj+ :As43=R&m8LIFf{0f$!PaN%f?fo^Ǩ]mG `6";tE|) W G!#.wl{2Hei %12HSW;9,ͥqpoaS }n>h$,V$Em:cM-\e- $3\WAMQ#vb;Л$_kLwW(R ̐dg F6 -^Zlm8&:/z!KDMLQ ZI;6)5S? 
!׉:g*!dWklolǤ78e؅^vy P@nJ}fgcc3KwYm*ۜ ~ 89c5MP؄P:"8n PPm[k<&h=HbnlPNN&LU0vTU1q 9_raMnNE.b⟢ט"Il_{ .>6æӀEuEP$ȓ|:w~6m=-)rȏOM|SsSZ 9 ܝjqTyP6y˞= IX-A"$鈚v@H ǫ<'JR2h[#L`mz lvjHcWK <"Zm[zI.GK5 a@Ui|32cpmWa:gGa&xJzbH -y$%[t L,-x\PC>~$ 2|d@dn.A{!ѓtXX 5C Jq| K k@$l,sޟXYQG͐PJ]+n$ @L23R³<+ !:zPWM`uwa@떑#{mNJfL&'/h%ټI*`4~s(jޡѯք2na8e@m,D%?7rʹ|.ALqv1 !Qݠ @մBѬG!^ y  CA' 6<}9Y{MѨU j]W3̔|O%r߅rk G V nИpV %h7QKafV)"_ZU>Uӫ2ӿ΁œ|7|mϞE8ah ;5$gQ N `EdM6: bX}&,$slڍ79>Ï+#q])f!`w?sq4ihŶnꄴ8Q}@!|d$jO䒻{487+0MV")kCI6t.W5=N Q(mO"DU lp"%ʥcm˱HwyftL*{Ơ K4[S'U4#kPKOƛq@Dzc !񡞢8f}}²@irj"o:_v=~O0W(N$ͶFUmwUh1|EC tpN:ɝq!iQ7A}/~K;#΋B0ТwF;dVO 8A o]mk3(ihid"면t,^wZҳ[PIe{1oPwIT;d 3uFP.ıߟ+pC@!M㘶0T5$,Ř0fXO40m&('ﮚ2kwD$W^}cj~%筸, 6`5%6+8f0ׂs; D$BTS'ӯ>b9OMxc?X7w\ {Be#r3٘7L ¡n-↿S|j{~t'g8O/=| s)3DjeAhF!ޖS}wɘG{mV5D(NC, dN, i2"96!L6_ m䢁Rb +YeQ 4C95Ek* oi"v2ͩ^;`LJ,>ّXPNzJ- Ω}Nm@%7sldIKmwv@ P#&$R{)_E\tB w47 j @ߕY"@12h+E^|~4)5:hώ,埰GקEO LFhS~ $'g BEq$>W8XN/EKeZ3? Uج*ANa /S=7lD#SX>c[Xr#ndo>[Ԃ`cX_?&= %Bv둛Z8j<土hqD_t8oj]0K|s*pm-R߆^ |C}Ǎn5PnEv %JY.Zs ~3[ DbSiT1%bZf8|-wee8އ{}w2ֵd 1Q*3:ύŻ6//O.`UQj}c[8xьr#MӞ.!~- ʑoVUcb(tCe`VZlA, "a[m:qtQLpۚ8 ?xs)臸Sc𭙵,0S,g#x3vA԰Q/9} 'KvE~my]Ʊ [ U0{n~`·b)You0bA%NޒKB$MEZ _w#M>{y?2lN $@DYR&PoPX *]w\|TH6O+?X˒V7Sx l m/[D8#OWLHz"ǓF_U62V0^ x҉#~kTSɌºP-ܬI"-o.zc"J\p< Ijp:#yiF[,V v;)RDL*M?h\BQɵkA͐~HTkvhERL @k@ ɽO=&-(EF EM9Dq[\۱u2g]^C䜱NB{\yi%6%q0`S5U5sF(K%_\'ev\`*iN][y!x7Pw{x}vnൕ=g͎O IQ&Gʐ#{VHB"HHD{C]gTуtw]& C%M e5%o| ה(+wrqO%1֘*e`13ҚXS"BY^LRջN@սzFm+!;猧)hO5FI0e a-ҾgiKU9 PCwʰx#8ob5Nfl8sJUV_P*Ty?:R#}59JYAئV`zSpań0DU ֟3*=X{*Fٔ5ZJep^m j9&wbgÖwvEơf(RyaGwrp]_{'6m%؞_:퐭:ʂ} ƆLpyk`սȁcIZeo#I4LSjW"xCf3: =;r]eo(~(#@^b *KB(ȮƎ<5&ձzc̒-.QwVv'%ޘ{#'oY͍ 'P`~w!?m+Z`_ou $>Txۭ|C .ՏovjQ2stUIMٰdM0w|g@ZElMlbg)iIkޗ Kp*nqY0kAqQ}&\aRȥjpȯ0ED4t iv c ˋq:/lӀGO,3krr9"<$:4u.54U(%MMk*j=JYl5F[bC?jlR6D}H,ME)),_IYg\9}4F&VYϧJ3s;M1m~ԹTX|'4akE3Q}bd-Y;9'(*qx9ZN/^kF"ONK>l sꊪ|ElbG]B'|nS]PAh@ХTkjvi"k @l@{a Wmh#!etxޖ9`0q.<&%AzVe,%S4 wXEio?ԸyLpXegeapS /NuRl0?Z-Ȫi>0t.4]PB5!]K&$hoV HDQtKࣖޖȞh Xs( >=,M4ԉ  
ɣ𭱍9V}Vcp5i'>=*&ObG0xR-=QwJl.Q"6=>Rx-,E&W$#'(B_mp0|p@!GE$dz5=yW7^]9l!.$H+0SIQ,.7t8oݢM6?u%P1}`['0D:B¾XlT  R5\%"y| EA1=؟ -ְYEH/2 2D_YVf'jԐ; JKt*]ZK|RAF1iX+5x fO2 ),%<)<&hyvb[$ 51}1,^`RFFMŢHYa[§ń¸G ˅j 3_΍[>G(|}Gvwִ9pұK^+U<րr o0JN:!h!iB^)):{g9s1``\s_6KU;F\]ߤh B߮&at-. A%0r-ÓJ/1@+w ۀp%Ȗ]ۉBrcw" G"ה8mړS/7P\#B 5GVyj]4b4jiMK>U;{̹ O6dQ0IZx ,Oδe=)\gPj.Geu"Sve*ݟeksB} aXF6Sf?Z@Z\etp*ݽX+ TΗK2@&v/+l2LgϠB5ZJ 6pX<-AcdΠ\{ wM*ܾ$7bcRzWQL*Zt݀CϫU((z ŲL >EN4PhLjdLeJ斔!F@|\ &S/$Pp ql*7Cf{9CCji$l$P+F 7yeu␚g -tMJ9kk{c^e kFDbh>!)MlU7[v,%4TU|)؜wd߰3dtseļ\-zў4ąE / >Ke\Ú Z}9( EUe(\N(xIJ]XAVX++6m3T G`xdR)C;yt!+dOjO$x涤Y>T ڠ.ȜKL[7'6ʿh(]vn$-?࿬lLzVH2Zԥٖ颹N q_A?u2OwnQ2)_&4bq ~je! N Fm!\pd[]an3%ɷd3APA镂EsPl )<u=c{V%EQeF U O;9Q$$$aRS _tPs!ݵpS˾ӈ}R'yIlA y'r>?n­#!!VkOM qASDb/"Ы-rY(1뜹6blc9\3IU>S'OG9-:we"'h~&(Ln2PϐN >d6OmqM2w*ϳVJ?wٛ &Qrؤe^ MpZ]Hc{Բk*P JSU,Lp& HKCe5ot?!q8ET,]aMCbaPr_qCuqUA.z䦴· |PJZ?N7u7 &5^'V)֦M aLR3L gj%Ƅz,SC2QуӺV~Y t{!s0 n*{XqH^ٝ30 Lj[iBiHtۃd.ꉢ0*2&Lz㬉#uy~QZ>{p _;̀7~u Fvjb E[UYgR9x̡J^E#ǭ}eUUI"~ゔnB13LG-M  ?k Yj[W5,(~ >1k@f@s-s5"Jxؾy @x5} GuM&fNBQƮ'v1Gs f~d/z+#y6jqԳӵP&M1\h&bԁKZrkh~\$lF0iOP w+/C ղpMV詆i#8Ptzbo4ju0RT'4E^lޗ)fF}6>MD GWeb$_Ͽ$(k.?u}jhN /?$ǦBγW?6*C+,3@$Tt;E @Ӑ`Ob|G^6@Oͷ [@iszk^)`F"}k/b:PH`߳1l_422+"i,?'B!I!k>8s0x-JA!gb%JHPf$:83썖~!,g+0"K!9f$3c!m[#XN- A7YVv]8e!)Ə~Ah?bUm4smg[N9Xc0<(]7!9UL5`Q;b3&BqL1ܽtw?A( GmoY{^o^Π*^;,D9Uy!))U:haB9,=0˞ 3 Q΁a"X]bW(+r\ڮ7 qUV67e`3قE#5:?~9pMK `'o)?c O q;p}2.slZ5z f(]n\Z;u@8isͫ!]W`m!DG`e!H̐HFsAJcjݚRqCkm * }Ԇʾt2[+tgQ.'4ǐ|p.K{rٗM1aٛ]*^ߤ+1Sxޠ܌ KiƇ$:_ftүp*GڽE24i˞ކ4qtڪBI p 1X]ًI6N*ndtl^!̥blgm8.+-e8b^iġexJdR]^QHKCZz7}t ccƀSed5%JB\HdϩVŕ(AWD濪Ѵ>? 
JŅ臵چ; TlJGىv:ej[R@}M2z͙kȱ4ӥU!hk\.{SfۤRE|ɇ!u|o OxARĪ|5 [ήkR9VCҘ oM{[ qx懘-,^.j!oN=]:UqWDw>Ȧe.U0NSG2#̛Aa`i;gD4hv+.ClWxI ԏ XȗDmzJlu l^75-jlc!iH^b@3Au ܢ]bJIYEwdOΗ"3A&-F A*2[SPwq=[c_B;`KP:4 S@ԂG&Cr9^͜ϲoG3?H Dyyw/ ~wp܍\.6|1Z^f49/'HE=OdZ &/hρ VvXk he7-4jDRܱ[ .L?spQq+׾"a9mh.;Ԇ`_D34?z&:=̡m(޶KKgC %J0{@Pw@[9ٮvÔBpljzL6K=ǮNeq5 aG4eM6o1P"b { Q Gƞs̕a{  "XwY{qσgN$_5um& fVl*Vh+D!6SRX%L |8Qt!.>(DHL_М!dӉk(1JF b[Y;MQF$fЗiu/n>=2..अ5S=,<Uk \OuAEजGv._t^*ddbEJI RR[ÕZh%V~kP|르X#' G͗5"" *CZ,Ѯ΅Q ,z#hr>3Jbg C5 p<̞}ÉɁ%e^Ypیm :U>f{) /b:AnDaJ޴/|>,4tۢul KVSBtO G6 %R_Nܬ@pɑp.l=zƁNlmм3ዋ:kq_B ޸Δz` A|8*%?nYi3Mp[ V|JuAeŇ^ i}\FgHU+L!`+ܢuKr3hC3K~Զ.̷9 ړyF!oQiS{Dp))o[No"dԦ3뤶Z\c4@F6(lG$mV{36GVKZ4Q<U5l.P TLkom=oU2+%lu3đ/ t<'adLB/Y+@qeL"?YZ%H3+韶ݐeW&wl4y%^虜 yvr:X-0651C14c $%6򪘈go%qH{pDb9X?iQ+m"S[GN}Q!Т7K'dֆoȎeۋ0yU3#(l*3g;qUE՗b *ƶl=DޚMI͎L;\C8^oi-rwtzޅһ1~gvzKtQM@?n0 3NK8[(ŚZq?G-3`v kЮɮ'RHl3)koNCFTyyG(j6bI"AmE{hwYg[c'Y2zX8 _$2-rZf"Γqk~]fKߙ1PUuFal \/Xtbaj`4BrR';\X=/Ѱ'y~k^-6B| ԣnrR=+sHNɨ!7@`@dXw>sP$m޲eOOݖRzVdwkXFv WYt#iO|ڽK,Q-T\BK{#{] )(-}*pBM(0+-CR.4ZGEmy8j@0r$A^Y %KFdGP`XBƘPFVٽ U>0PCu>+o-Ap ,mr +8)8]08yMd> @Ih: `#7b#H8s e"e৓I [EMGzu UE(CpΩӵB}'q?Vi y%6x/<'0sv ǙO՚`0S mܥ7oDs|B@?(w ªKa rs5zcFդ$nƥM E Kg|HVWxb,C19I+nV׬k gv;ok/(5?ut lÐzޗ^9z|L[ u:aFX#O2L 採2F# #DJv0='M!i:H{Ċ<9y 9NLbd!. (0ahe.iEoBL#()ӂ%=_Và~``e ˃CxȄDLъ]ѸcssGR֪etoݞ/Vu]ƻ)ghZ{;ih,ּ +Cc AFh{J)F.O. (^.' 1Dnm*-eM ~INbL&D⠰5,ar!|}E3Q!It)oMs/(^@Re%oG9UY .OZPAʆ?֯"RD(#S^@\O޴vɯc %sВsy?fXyoNX:\aۿZrMN0yJEpm:7{"'L&U+|V iIΝiSY)G#Jfxvar80$+pZKǔ93w Evj#vda %s0'KGKrT9KPx֌- O5 K+p5"V@IyzNdvR5%"hjPMS[lR&='aq^m5p1~ѝ].uCWdKN$G ޶=b:ιC+ߓ_@ ?Xc[%K`QSmu\'ʏt{'`( 52D/W#6Q23Z>Y4]igYBe׆Ң7IT/=P0"$a;fkN.,A~fZL9& "yQ."%l%j=S9E^,T"[>l5W4h$1ݴ3jz˜zep^?l+`߹C:~%䱨jWX󗯓(S jJvN(k0^ ݜIuPO%4c@5r7T_8v<'#! #;r  ]yהv_en5<5#1Z}TT (^#Fp5wTi$,M +"qE%.d I"}N7g-cq dR=/~0`{ [s ujٸGPE4`іd5U1Op΂mwqdD(S@@Ƚ3XS9:Y|M I.?Q{>s*Dȑq>jtTߛ;}'6YޙN嚑aܜ ?q:g0ǵ+! d. YE4D N@=A*ͣX| 6b{8xK"@:v_kq%-7I8i `e*k ^o`=^9Jg{÷hshCq~uzk@ꙘNq0j rNi;QeCdf]?^>R0 PyM5pcM|*XQPcJ\\۳ۤ'Ԧnon芌΀c h+<  .`` ^rz$d˫ ęu! 
pqw3r@l'9-)aĿ/"Tgr"4Ntr5U7NdL mʏx tTr’rѹz4琟E]^^%hZ\c{1 Xg! w19z"X-a3PmK ˸$YB):?vt>U^ם3)[=;PS]z51GqF"'%U*͑*t#)l֣zZ}#.V}?Pz_oy& v}Fi3^>ivG ~?vǃ(1ZG9qyA" OW-8]Bo D1[Q)J@?K21!'|ɡ',9(ee<4a9$hl/d@~sEPPLAGKWSH8b=Un\/n? >gL@x Q@=~fHLbZ26v!tn^)H EH`b4<[es%P xyc[p =Wc Vnu]Q^伝O]*!d`؛dҹ `|kҢdР8_҃|60x1WOAAAgӪJ{ÊQI E4_CfPp/ˮk4Xc;Z0smRlRd\~^rHS;_Ru`m#x5cA5| UABۧ|nȠu_db/:y8"AOF[n#L H8~82X|\@J|#V*<;z*%ENӅ[O׸և0ȶ ՝*/獆 `b?:^?]S̶M4}ƺhA"C! p$50+_2+kr تvE7N g=^*L_A'gkk;CdK!o"J$ɢ8ZG4c=l\Nu_ Yo.p֋?~PTQ<[#& }@a{#l|w[@Zyk(9>dwkA{%pU\}:vCD*KRHel ~4ie @ꆍ,W(V,__Y֪(޺fzX yg=K_3 :MwEiA;(m; _^ڲ Ɩu3ʻ lY8Fh %GUHwZ%&~2 d/;D;_lRy'!]z1ċʼn4 ̜ JjHK`灚81#S .yE8~BWzzz+[~Uj,hqZ|G^dufF&,(ZbTH/_~þ=Fv9RGR.Pz;tF-y1l6gї,8Re Dۂ|g"ecfپϳF>6jY{h1Umh]m=uN=beݯW 6>l!qӲ$a. t{ O$ I 4!}a77;65|T̻$l*ȽL-Z_ =2 $zK0Q]k}ϽʉrѶD_I>G]suM{=yCYK?QoHw_Fq+! 6fv}`,Yi:{8rXLJjdxqW&6Q_@]B9u.V~'^hDTJS8EJky:wVje'?QbF#pApz;H2">I3S wg4t.@)7?ywC"*J\Jc2RqȈa:\&rR중6V>+(U: 5'`+'r[ٮ,DcseK 9DQ$f`S-_)hh YdzksWEVŜ?ehvϖĉ& 5}d#$+0?T-0w\P{JvtmskH\T+| l ေ_ I XQr"0P[ ZȲT`Ls)Bqt 8J]3%4!e^Wr2 @Pwgg'kv5$c=0.k:D֕p;٩.iZGB:^W Q<!$y}&=34W7!j/. GfJ>mJaη4mR2׻p TKLt-qTYjwѩH{\c.3) $?{ zE8a@Nh2ghH1y>ptYTnn9zW_ix%gOu ~˵+#ߺJ~= ZQG3u;A蓖RcOfu6.~9vs` MtUաk+;β];ݎT3\JhDJ7RIU>[Gg:Eۜn&}7und_Iz[@=f ~/rm Pn 7w W3CBS۵F@&V[aao\&MWY7yN1 7OߑigK-H]z*a?5؞57=ЋE9ܯH8k9D0|vG ɽ ICאw[$& u-A(/e7ܭqO+Tګ7 r͒+ט^NW#g'jCk t&v%{OrBI!&63DYW>8z*ux5XZ7Y?-mG"tfA^$'4zO$L';ճfl`+)~ϝEP 2'ۯkJ3?W:h]t% ߴn”/VssIy>u_A{8 ce @,EƖ'ڦJ,9g :M[plPˌ=Y-@>.Xmq{ܾ%3/MԌXfvV+I=զ-F)s]x\ù>?wݶ~lwQ"ٜBw^GSMaq1aG.׼:w5u%Zx n?Y&t! 
9|?T| aH 1 /E*>Gae!Na N)^L*?Ui)lQO"QUl&m"RpzZѿR:mU&8igGNE TdLY^v;In{o)i-J h,)g ޯzeW (e{͋WfAc wJ$p#MdHdS_9ȀbS1> ۈ6ݽƥmǫtTW{utPzv4{W"b`(A}a@)#TC<֊RuF.i>nc8Z\fЭ^viN-7Mb_ a-pV^!u"%lD0AycYzˍPmOHlgY)iq^訿 5Ub%5XFfoFB&?MDR!/$ݻ?q T1b˪.0 ;=IB?ȶꢝ- ;-9dl2"e@~8eVK2Zڭl*,aTB'+#Ua`]?|;BBѥϪ,T3ɔMˁB E d$/Jmq{oWNj=mribLKv-rd)>K?q~:MsLn#)/a317uA@#h&3^TuU5i,b!&jͤUvPPFf{ׇ*i2t+ǀffpP^pԼfdX:ͶJBTO/gAx`1\Mńs"fL``,EeV{~Y:՞9Jɝ i8$0 #āo`=`#Yk|4kvݴbÊk$O-8#۷ӏs*ƅ8lЪ0ARzf67 ۲c 5{2%^ ݌$(Z8 鮀DZK3pK 6^# zd:S wt~^"+I.zƇ=+`O][_ ԭ}h4N5I* dz9z> {YJtVB\/H;/*bQ9؁!PGL%Om72._-!-= 0ப@ы**:p0Tm(&ЊPHrnǛde" ]%CSc5]4⾗v F^׍[td{z%..y)%7wxǙ(R!,Jg#9kǨxP9|GAY$丂05uX`]]^XY"j~YWga# 5V$0X^vM?5!Y5IeQ^0Fu~i)>! io1ṵ@AҐcdD5Dv1r@Jhw( Ǭ? $Qo{yAHM_u&1aYӁJo_\h0鶴{58T5^t{riK?1ӥP['\b#WݫLk5px kUaq|..+3[+F/~U= 7ܼ_U큊}g@SIt%9l_HFҫ?e}6PMરZ/qoI]Ev:d[)||Y%|QC#(h3 T]ZHMv Jf~W}߽٘N[_M'N:Ѣ8(}\TSZL(mQ¿ in xg%TdF(WS`qCM< Dc eλ:^1)+Hnx,DDz-hǢϲ!wWVk])z7d#jhX 8-`p-14{a 5r .HK\U(@ ryh~Z)^x(ԣ}Jעcjmژ`T\4"6U_A唒NVy|S:8 \"rH_%8G9Oi@~8.22$7ԙV N \Y~#;Op 以Jd eV2"H?hg',<_R+1ZSKR}] ۞梣U]?SuǶħ $e6)(>11> ~Dc}+ NiHqCe8x@7hԾ' 2rx+xj)>p?0`솁s7X=׍ 9%[6/(8SCi2f<+ꘪ9t=Yfgt=Ud ƐJrE,n `Aal-ao:8O YdŬQфj&\jbzAyL,IbS޴ ?tST)Q'y>p2tݧmh?:\ٙF)֐aVFv~v,cC,!;e2PK͇%I ]B\T}|iBɮaz>ۦʤYb8B~#hb9a=F@$\ [`-_iٮ; G&hAG#{/3{vj7'ݎ;Q=x6׌l0-ݷi҇,O- Gy{٣abq,ZsJ<>=}hOmP n9{\,7K0.j6CDP_ч diJzh99WsӰј>JEAnQ{0|,!ĽJ3#^!a =%ENYI8@T1|QǬ/|<vbӷ^ \dZX{F^?ם1Ff PW!ډ}68|?YYPjw̃Εº_/9ZJA0JŋqE::tLF"F̍| /X#( qeeH7PQ b$b[Z c Ad}G2DEm%EBxaU4e~Ҽg"&'kkBw( Ŏ%| 9s?o彡4JśMY>jϺb+]8`6}rG][(4 EN^<CWWU]0}[h9e^m0[K?? 
Oр DCZQ N!8}2A"y{CMc5.R8a~ή[pj-m)CFxjmO]tn=VqGQ0k?ȝJ)3Zb2KkDx.4 X _ H!a%{sa[5V?.D *G4kQ?]F?\=SU\wOĹql1sl;QAj9.A,E` 8_>Q3̔~ވV{zW G}D3 M6}Uť,y #J=n[ M4g^9駞v.Q 8Fӊ(CFr35\IkSE?X7"ĥڸ7G^йJ_k!:&&2rޟj i B3{"{]I/\qդuGXW3^z7Mo|'PhsE6}W2`ݱ41R칞kU:dm8"Ǜm{o bpF8–R߇˛"!Lb@V s1V'`bjK/Z6ϢDDFx~zg8;E"&@PnUdc}VQp5%548Xmiu1`f) ŸOn9=}\LDЪ3I:/"FUt&`nj-%D [k& RXwb+aV rk xo+u5/6U+xe5MrPqmKlQ3Z3n}M pfnidi5C-Dڌ˘ePKR^3[rc>*4]EjR=BM?_G/x4+[KS!s<]M9X0U7{| T)e#DN'4QSPY6(T)7%N,0t :tmܝ;[Eb 6F3XÌ 8&0e)bF)&]MSKyL1d$>ΠϴœՖCpq$QသrItBX #ŵvߟǎcU}^/i7Otl>:BG7=nn $, E;"+}hVEyl)?d0 OV3MZ1CQ(A:@ owѕtZ<e rGc6Ið>}\{YV*jε?#Ԙ:w-F G,\ywF377F?2;+.S<ʗ},A]+B=X ׉`":jV0e vBM #.3vS 7?^_TYpP=*1Jm&kuGu;ф=w'nMzU7lc⨑ L[7\^HM?em TM(@M(mOc[YIN}"s@0v^6N{F6N^~|H؍NYw׉~O<-9 ݻ\@.&ŲC$PR]rbR̙}ŢR$;S{v5"|Ç'UbrQ:*X9PΧ5U3fY.C'md#7f >x9]TْҬfT<(h}z&q8YipB{w{R"#_wdP JM[~w)]oe!v'nd|X}/tz E)VP7w\ʑmB Yt=EulDC Jx;Do+iba0J\Jm#:3ܚ +S6zFc< R8BYIk[f"hDLUb*~u7L1[ɵ Y\b ,I#u nc2 fC?'δ}܁.nn"1X%;1Q*K%+ %_Mfb:q:ˤe oT%KĤ怵S*FxJYROcDOX&&Ti&`S~Aj^" .^*U=5$Ot])\ޭ>9m[rT;3J##'/ h-zqdJQ<$h8d k^;? 
X% m ܚ h~`'Q.*<ꌪ* LnBnvHrnG`4~7P΅07oNq.)p * ݋#9A)/N<>̖d$dH^KFC~% =eeu-۸G%ٽ"ՃA%UCSR9g{.\"l(޶VH' ߚ.ٶRtfb5sphs5ǜKɈ *Ȇ!a*tsr=^|n\c)%Ŷ;1Q!R;;~Y ĈKA(E</ByϢ9E>ia /< r75Y(>Y-JpybaࠧGfpY3o\T=if)l?DxD6V瓦DWI'q$~vDm^((UKR@OS!1۵#{3mP"NK r8X7^*50BُkQ ֻc?ڛ池U[>4J s{Bs !(dZh 3G3"0]OHhk\u~k+G3Zl;?;;f&c \|h ۜd+EհB+P}fT_fJq *'8h ~BŔ=D2Q(AMJ\+{ޮK,8މ7-1 8ޭ㦥ԃ&xb G4K;#3t钵 ̱6-jTK]iB|IYFz;59 j}5g~:N0TК/V6Ω1FTz܄5&,#٦5ͶOѿ@%Oho4S^zl%z9yj*T8[=%"b$wM{XFO QUYv.+%2(4[?('\ r%!UM>wL弔@ĝ^sWoT5޶]( #Cv}[' IbPRHG\~4ـ#>T-PT 4.vzg"z={N!B5;IOfS4Db>Zl)f1 ]C#yΫgogm#=?y .J4'@, 3Z Wҧڻ Kpt 5u>s-ǝKvJ;v>Q (8-KϡZi!L %I-N5z:*6|⪠W(ޱ6/qG= ϧ*$gF!EMBYycPI[h%G \28S!Z2cܔx {sFӃfoVʑw>Icy >u$nOwb vmHr74ijGSQUlŽi;פ!#^魡և؇RN*_2rEoYIHqqQQԳO7*@:e't=G{0BX2$2ƐE$3W*J]>wrw6lDBd̎b"0ܐAN:쬟F1‘=Vf Y ̿M>hҁ0+YMSi~l;4, xZ( -Aլq<`c9!ׇ7ed8}aSqQ6GЦі[?j9-A=/]%wQaK(wS5fzk9LC!p߆W=R;+rP s𜘱ر"^(tYTj\H<{u9 twN^=r޷|^1u3V=)`I(-^[(l@%pU'JJ88eBD?Z yfz@#A}8#J3YWف;J[čYl 0Ti}ߏT_/~ 52GwUP!>WarVJWc0ד4>Ogd(O;+OUZ_{7GHZ\1?!z"J x.\ TH"ƜyvwMV&:AyڶY1M-[bZ'yvBDJxj8r437u;밊s'4vab}"۳Hiʓdc01i}k$>ǫwM & pwWcs)KyBsA'aU7SY_!wRDb!oO[%3fA]Gx4$˸v2&邲 wS╠oKlB μ/{Md4T6l72~w⇶a'cJH6đ >!1X Ѐj޽QfcvƤ1[18͖yL<}f{n߳ =u]BnPNت*[\PmsMhrTie86 S&3|ucSnxW:`X=Š@*WU=[?F6%tWC%.EqT-\(rٹSQtѾw1Bcyj d-AG_*Jϙ lŖ[}Czǘk);iKfM_0 *]o[f{ (pU4āt+tѻVPXn^6N#Cg1zILq,'ӝ+TsDyn5: 2V361 \8(%~Ȅ\Y3{F]DlǎL56iؽ~ܒ3qE(:;#-")b!~-dE[/CP#ݙ-c(:ɖ1OFd],Ĉm͸z])0cAVȴ.?MredmOZb1β(a{!''.6&NqX>tʛY_{n~>&l&UZa8I҃'*dXYgcٕa|->Ӟ_ޠbQFS.su, >ݶGuU_p${X}iKF*R`-G }Y AGS&Ta4u Yi>?Xfgh(9AB,qhv}ҥ>#?v [T9]dyYZq ni3f'cP&eY.4}Ğu(%2WH2Fu 31  8EJ_1ɻMї <?GyۄksWAщ?ac{d8X]^^k|i٪'rT0R#>xY(TxϨ.&n&XH  >rQKff Bjt) dhhCGI-cM'$իy L > \hJ߉ߌ(c#2A} g~5Sݤ&F{S9ec3K+-F\삯½V D:\X֙ 5ֹsMH@ c%x )BZ7>pqȽ-'J):jYmլq0~2wʛT`A{Y`i-r$rU/BpEC :-dcKl<:Hܑ#'3ta\9 k;''mGeۦFY ^|+F`~a^c?j5S6/.!3.:K_LcdA[-X(s֔\'2ogf-z@& i:F J绳yׂR܅ x4QE2>%12C,d3OQ'sSpʻe5o[i-ުLwE9QEle?ŵ)\ 쟊cHOvGMǪBMNߓ 8 Np0UJ2rÈXY$0v' K hu{\f#3MGj! 
'8I޾PIq 5c&wKGd{,g33D 51[{:kg͇&FSq F)uM&tNsʐ}lLH7BwpX.+vV1þ,VlMۖ=*@F4f?} O/Tjkiތ>Jx]2xqS97K3V؃b p+K9G` 2"7^_|GQٲ;QےMfzoNU'NJ$zbAmK4-ITƸ)ݻ N@1ߔ?'_p%]=-7k8Tqq(w㬘o~CLS:9m0wY3!{յZNY!iؾB2=`c+Mj\')>KwI!=- (M=H{?#S$XV4{kqaHC/:RO-++X1VX''Gtp+RЖ S'zrR2dVJV18Y$kޑIaaglIzŕ9yUg{םPW>oϬ"9$3Ma En[|2*m^m[{pT4:1G\p%})U!yG`t ߇{I^+hѷ\͍!(ʊ߿Ti  N2)wLORq[XK|]ϵ,IZUXugS-ܪBUtviԐD/vRH1Aq |;3 ߵ[ۇEw DT) ?hS6W3_E #(;PEݒHYO?hh^Z;ĨB-3NAk&NU5b$`W<ڂ=? bU_V a=!Ch>և{tMd❌(ӗ%9\3 Ko!6V/I!VY:w x_w^BF.v~}To L #&i[ JV̷8 'AKњ:0ur;~|$_ "냽3)z y;e/?Y|:)r"SHcg$;:G#,ȉ2$Ckoi)x {J3w^ z0+ߒ¦Oa0MX`*63S$)mѪ2tTԝJYk+uQ F"ͣWn8OR4C@$ڟN\n/%A܇P ōx U1?O8ւ]}.EaI:.1y!KFߑPq|lz$J=i9L +@l$||%/6SV,KVf;O()Ch>ԸDu5ME䃮x7U8sr|NlK A4y\8F3;Wbff[ΏHa6჌9eچ ]_\TL.7 2o+Oxݷо?@_A) /kZa-@|Wp 軟TKggo/oyV<#n,WMYY$l5!/Ę@yAlzBU=B iqUg #OX}*Z|bcGQ6Lu\dx~A7 2J:JL ,F|ט 4{e}p%C~C<:j(8W %v&oC =Ba޴?R12k g AQF ""^ESN_F}>}3}r.@b1K̒"$/uZ!cnA9TyT!}BnH$@@^7 Ė(.1oPtagIewiY[UtEyٞ =rIs? I1_b|&I{~sg=j^xeɊP,\eWbew^(ěX|# ]Fs͛Q tj}+ưMl&ƭ5Klmd6F<\ ۈ I@˨,7\i'LQM,#cÙugijQM:|N5m`uӥM-[J惺:*RQɴ ӌ\ڬК9n:U3+<&'NƫSܿ%#M:MTX,6`,ЄC?+kowSw/詥 } l0^hi%k:Dh9?!XrGM\yϊ"= 8LJ:y@L^_֪Saz)XEɴj1jIwtvK*3 @af6}d~ƉY%E'kPD4:Zv?V/>(o Ãn\l^Č1lo-?Qeʠ@0|U~ؾ1%:Y϶WSsg6TTrAcJ'Hw'f#vGKtģA79ҕnrH_ʼ^GKPO;/G15  XrQ f}W{}ukbe GJLGIIj0Z3% oI< T>ꈻ!IC"wo&0tܖOKLQo&lZGI}Gv'<fhִdMJ96I$kC8j_-U^ѝDz=PvF}E; +-Z 6[=ӞT&fsӥ'3_,oc8['1/{g.&[At;ch2_{}<l<Ͼ:jJQ^2CogqjUF%/-Y} {c^RUuWKG^ Uz?ކde\K>w k#Oc?MN:k-p'Ũ+yNdow· ZjrOx:I6js/Y'%`7ig9Zc:JBS}m3n߮w=cXv|"**,BZD?Cʯ27&2i"Z1|ӱ?( =;&ݫ4uX>x<5hʥ;y~@30Z xx<\zY |y|_`( !2E2i.ՁAoy_QMǚjk]`G8K}eUyu=Mk GD'cOk=\`Bn ű6pP(Vn*3H{ |'1!rnұɊ7l捩%OUH3$72擮]m)+]B3I[>x+<,P9 .yMCXMihWd5%?Z.-ӕ~ 7kŕй2/輩#={ǍQ; #GJZz1}z^Mm;0K_2/ e5;ou6h2G@JTTpvl#,H<IX$(&+g;|oI|V(t]\Vi<(*`'=m)6Q- e:RM/ᬎR'/le/8S)ΑUCO HaZ='`e P`P_VC1'"V䢢Ww8{hp֞YX+(9mgNĎ QRs6xDZ0jE-2a*VwǙ8_VI4J٫}H+]?s.ޢkQX5ԔV5 9OĂw[ kp,|:We%(g:e "~g:`w ])$8?lt4 Ŝ>p5P/ޑ/8)YE0x 46 iettf[n2pɲU>69N뜝%/NoWL }z#ǎ[+%e~'VEeD׊32RGiujiE+&& :%MXC4~"b:#ӫSvǬC{)&SIWga. 
RHֹnۜ84GV,r+FZ;?2UĜZu6={J e¡k,Ba= lǠXf Kʏn-ac>y5Ҍ;VQD NKPvG%j&:b3)U6J^2)o"ig&u!jĩؙebveZ* ZPbmLxә;#5,Tڂ?/eRZ[b9B2c,/s`٪yb\D kI\? a]wrDBIܾ@"n{x~co"J &YճU[k9xcކ61$i̠b>tdXKZR q,"GS c4)|`=ҢOJ kX[jC'l3Y)>LDel 20_k54ӏ uJbo1$]jM0?O o7Wqam'()+M%.0DקǗQ?KWGl;o8 hDiR@YnHge0r6{R4}K2|҅1hoMk gBAd7CeOR^ R8eɪߕc_/'  xr>įHSnW4Z ӅDw6YF:vSqd1WIj@τxiD2SM(.}";}f&໚-d/b]ƈt-p=u@\sgX R kQg"Lmbke1*H{Geˮ(P\×g1Οa3vX }5kʉR\'+FV{ ~\9GY2֨d uoQ_wMvm9ҵd}DM2:C^ :8S,+F叩T?f}s|g0OrDNP?l^޺bozbfJzp[M~OԖ6@޼ݳDP%6sAo'$s5; ?x%tx `k"a1F [߯g `jP^p'eW93@B~ \NIG2=}yJxq/l93و벼)w6Wֽ!Nw (|AϹjgeݡХVvXEu0q܇;Ӊ:&?CܤJiV]̯=䡠{ث4.~􉐥 N˚=B`|Y[e*4N,h)W&|: w^6y!N_ʵT' 0HE~E=b^L)zF̳؀!KMWC6XnsRp[R<ȓ!6V7jfяG])ˋn&Vb<6GꗑgNH%>^0{Ͽʙ%>j+`(!SFKVCtW!|?Rf<{~vKK|F~͜vuA=~7o`|eF)ppsicw wi7Y ̫V+>~LA: Q;tn׀;򯷟ScɳuH@-OD Uӷd^Ta$Z0AMcJfqϻ6ϖI݀3`]kW E7T77 *Txn1k*7[u aE/agSQ➳"%;|A#MSXgҵI4Iep Bb/slfhom"5sὭ^kxğn'm0`d;v%n9KOm)4oP~ȏx\xH/#EDD#lO> Lr j@ǣ.^kTjk#3UX)+^Uwe{r(rtXP̡^/6(}*B.1rBMiDp\0K(iPB 顽#z7 ҼAp[]ॡlj7گOd]P 4$ʷrd25IdDbF6yzci'SlOD~?z,59GykIOJlps6;^^| )@QЊS#c6Mdw0:l"eEP/e=yi!b]q|}ÊXCR1 EoqxGo StD&qWp;<豆?V%d,&bjSnWiD Xڱh(. 
i=hT׵# 0^bPܔV2_IϦPHpL7۪Qv0&w B~`'S_gѳfXhi#ϖH#:wFJc9:iWPa2=.ELq:3bP`} Y} ڒi[Jtd;'~v3[${5l/K]ĩan̼IꘞAo3i6)ATC_Ni,Rqy&ʩ;@>3H!p-hwS8R}ѳi%C,sכ_*ǃ$@P}UKbQޱ\o2ఃfjV@&h6fC@ytCH=iR\- Ju ]6 =T@PMoe*K}.}qG"HRa.,68XbG<>O'Űp dKj۔;3SF¤CZ•$0>޳uu} ,GlĻmHF4,};ܸ0aK1tmr/iG%c>F.S\ 6 t6vx]Yg"7R&EO+dctd7 C {3*Q3`O6ޔFu{GK۸sqo M f\+S /¥wR S0yl&K\<*,tv{`TV*)Ŝ*n}-~d3~b0>DM 5I5W]ȺPIcSf0}Jdj[NF#OHU0A+j̘9G_<^ Hd@Ij@TT8eq˞ȴzc,&b5 0w2Y(Q rcciҴkOh*1LBb@+hsD>Wq naBѩa: 65e^an"mu=$-8?SC[׬^W5vZ7}t1oIK)öWs pQb(*(Y#C:޽4Vuk᮵/}5Xg@FB%X6ɈJ(D2D!U \QęW}+$Ҁ[Er`no}¦jeL[4P&Ja(p)ag$9i zl2syxwZ{BLX{y׫KË[:X\^uNo%ⶁCn>1,jte0eѮ~1ĚVa6@kӺd%Z$[|"v;^Cʁ֎Pj^xȺy1ikVQ>!-؈ n&:醍C,JH,X]CTVWchp똼;1%NPߚ@Dq\uAmFOftQomv~M~B-SuE).*/eDTɱL ;(C4F5!MI2e*j{3\~aB#5KkR#x/]G|V0x'5Y\jTķ'/KKRj!0 bt25r1Ù/ud.bcti ]cͫY^UL;{zBLLG_aAM8N}`rT <ًf9=6T5˒ݦ5u% {jN=s;sju`n^ځu*Κ$X8:"U'9XX]r +*rb^ke84Q#IC־j>Z8Tc-bcg6[p,˱Є 93^ӆOdJj Vwqllg*M 7q4jG45(J(V"uʭ+14j=i8.M5oh7 +lzP-EO09 2aQ띁~qizCA}NGRB?qMŶBK{W ŝZS㳸Vԍaq⨶VM>|1zZ,s7kEFqz=][3Į,OJ+pjhyB?N瘌.$ @Nu"TP(5 JG4QM,evpmFI}T<:uU;blFް`1qM /X^ͳMB%,PEXnX z]`Q~gQN~Vxa3$ Kcn<ؿJs4KX2 eEU#K_ztMbgEՔz{!FQY1&F j ,߈q}`mr fK@V[F)y4W4  t!üt4Gh=msGTrIRS~><詮pG؏G'~IojA&Opφ+_{(=܉,6y1Mx>d[eKKYaV2c ;,@x1Sb.Pl A6:(\^)2TCU|/ٷ,tbX:`aܟqV;E0 1Sɩ{ZsVxg1Bv)^]ܓ34` ̂B[SR85<]aKՃ^PNv$RO24ԁ%Lk|?ht|-u9qI!$ |j?gV?;;z 1W&Tc ?.BXܩ-0x|q׶Kc?Ƣ97 -o"RsGBڿ ySn-\H3_ 7,Sf15? dAdowe{RAz0M>/惑EiqϮ; ϯ3ZYna={HkD˫wMѠ6!p_'yR*?J>,lswN-E0_r(]x~!R78G3MmVhX}hG< :79, .%zWskyl%ZA(84Tle nc'bxG[k26Hçv/ͧRۛ,Ô8~=L -ڰڔmJRp}=6i0MbgyrZM!d?6Ug>Ql3`b 52ݮԢ. Dw_cBwpG~Ȗ<ғRv%t [ 9[IrC_QG^7u_iۗ:(nUڴ%kϷ9Q6X~~Zܒ]S<>Ma6܇Cڼ%Cm%:2VaDRqƝfko?8nC59A)]gܙnMunV|i_yY-.0S XǎiZ(D4Ѳz /f=7:(l=x eы{V 4iB >lɔ$9񢂜fgI&v *5f909KҜ0ƣ̈́E+ɲ2,h45ݙ?c8v~@TQ,O hL~%l @@m$xTSiH{!X'O[ R$ɘB\1Z0Cl ɨ0/C|nH0G`g2ӡЪ(AH`lx| +׻ZDIc`6m/+^s@R Y'!RCǹ[,ȓB+mĝ&{{_i=? :'yR( no`ӷ}Gf~)`=s<҇ V~)NJ+CWk'hzK{[3])x5? 
@T`g6uMK"'sQdJU`-*W4VȭNZvQT#NUD9AJp"\qUɹv}TMO3N@9y?ٓ&@#g ;5 cb$-'pNWfG~sp-Ė"Ey2̤=/NPi5Zdo< `#e]:٬x}q1\&b/}" -϶:'Bу垬7TVBVg)fhRjl55.xw[SH|XbP|*䬕,6ZlT^n\$7O0zqޑu97RlEE;cӾ;hַN0zVX =C/8޴Lej"Pŭ_uABW( D\O:c f!AI;) mCw%k5׽i|O%&_9PH\EqKxr8@N 7o/zU|e2*)ie`W_עDy}6)4ωNl$f,ŶDrVkOEzv$gܑS~[.#;PZUs-4IS²=.]a,CΈ껱%?{xuг=xO/Mhryx_ F>\_ܸ kbD9>1>09Gt NA[MaT S YП$&J精>4`ea!;9uJmy jg@;cl:[X;JfZd|A lB.n?x?[ e%)OT-ٍi3:;h܌.߫d0Lgp.ySkv,@孎_wc`kl֒[@1MWP_7M{ \ރSRbǓ>xݻ}]]I5c<ެP҄q-0Qnmyh qSLsHftGUe+׍Wr ySq7K+tc35*79e|S!-\Ac>6ڔ`q)Q=U::y!brVxS -JɾQM'{L.392 фl#_ИBv!xIVZU^ gz;RU\Ek|=a.+"zV1HqJ{@17`8|&~UKHgbԆ6fçRYoZZ0MXN,j38U`6tJ&V{`Fn qs[t5Na3[`{kVG2 ɄVЍMY$irC6EB?8E J pD'`g@D;'aʺ@"[@ e\0hfZ/h놸 7"߆4^gPKS{3dXnD7 fN4W]~gvc/9aiL2ax8ҢD{F!C/!Ҡ;6(=<#"œkfzM1)՗NԊ@·>'͊pM'{Aj Ւtl3 $ @#m DM9Ku#,U :!A' p.!!?3";`-V~J?LK\R4%b}x:|IU#ZJԒ?:ٖ+v/$VWnss!i_>=1PV/_VFz"6ln?1B޳ 8jY%L0d"7ye"69vzi)\C\]REsΘRqvGP-GԜfj _ }`^HKqM3̅v/4T3Ӭm!Iv4yUJzQLJ/l '(||%2T,]5ߖӱ;YE.fc&o }P Tk{aA9j<B^F# EX-rQN2vj4!1NR0\νG66RFp ʅ|=@yeVe ?X{)W'݌#rm-920Vu:;ٰ7q/( f 3x^4?!͋q塮 $͙ZIPV^u H㗊oylDTڷ0)3s*]OL擊k1ck\׍d찡2q=x[Q+amNˈ@N CunJcOojnZ1oq4CNv*9=F cԳ;PeRy8_^6V zՠa&@+At9Fr%#2-Z*qĬU3V[>)XBwN į\X>؂VheX˟y.:F F;LH]5.c"E("3kU0lȈݐ\DX;h%5j" w*V+tH`04kdP yU|nLcL2'˒RzB#eӝ<ɋ) %T}Z:^`? 
i aWj#~#,,.ns ^ R6EcZ|k:S̐;j{%.0R-qA)o*{aJjH}3lhl1.Z>>mT+oApBDA]|O ֲ ʻ) !M..ݵ^=(G߾'J~wő0Q^8g#CD&x5_7p0EoI_]#[!WHQ{Uz`Ty,a,GJ״K<>lh2J8]vU 5jT#{L8TFd*THQ?%7Mn۵bO&4i%k0F^JуEFk'yeZ[-ﺩ-^+qHZ^=.ǭb]V~ey's꒛!\Gx1v )}sc_9"H4 I@ŗEϔWwDcZxùnNKhw{qPޔم@7Q[5*}9I FJGmUCJy4]C/)r˪XMj..YHPA2VUm(;dAf!GҖ^:е?n~`<0~&lr%DwrmUsPA4|F)𔞳 9z[&:F{tn@dP:2@pfy-\8 X2w{)W0ɐ@]8T4߶Hco~š Luh3)# >?GG)45pΰ3BLգL( {n,FGi hpbɤv(E>9T}ClϾ70bZu{k &-ƛ<4LJA`a4Z//k~6+;Jg89s`zAm!j`d[Dd|cNj534JWbYJŢorxu>3.:'!fN Ol!F}Vc}gAE`0T3?6SZLJݝd|I珊Z@+9Px5\;`?G W$oy:aeljr( PQpʤ{Ћ@|Y]5 NR6G=b|Kf9DjsWL if:u3VfO->PFv*GddYU =Ef F;I޶:sn0;PcX;HF i1ic5Gk@nf3l@%6t U,2E]_!\e3N*tC *WW{gEb ?G o6*2gm%~BCܺu)d6 ZRalF(\ BVԸ/|&Y_`K6 ud^>w *n~J z3tCFF9izM-7obmh'Ġ;%UΥ XK]Uc 4Gpvd?Pz *:eDiRd$,wѪLn)/t,:R[oncӄ&}S Q Êc+0Ք*D Z%\ұ"LtpgŻ[ i,;(Fx%cq_Hcyw,v7VE *![5#{uWDb7=ye{6SK XYc(ڈ[F H/:!hq ل8%۪xp=.B-$*[AJp._Rw֘/>gR[efgS=Ϫ/eToQ1'ҙxTwӓƐҴG}hG<хGO`s eiNFQW[/O8YHf\O띶%Z(1@KT O󦨉hV[M7T"ͶqkB$PPk rm Lar *3j|\?LIlJymw=Eˎi @'4H uU{Ha~VKq!8]^J@Wa=`,^2j@{7^X7&Q U.!^gPB`$5Y[#7>s+e0&"Y7$$tuQ|žґH&l|[]nsYm..eFcmFCNVntiS+P}ޅڳm3E+J"nOL쌓/PlTh/RhT.s'e7 i>cpĩ]oݰU9lZɚcyip͆ɇe5A{N {$5`(0iDC&h4T}' dӓd[oSocuݬo03JҚE{϶h âd3r@sz &ӥ ͂ze54Y;zj c[UN*`tТabhm|>Gw`BĤ' y,/iWBK`k~ Roה_v YT;i} x[$XO 0:މUӊbX:ҞZ4 ~}qkLƪ~nN6}4(RVO tnP?#Ev"xQ}X p۞Zj!awh?3_տw0VzƊOFTn:- QbSP')K|y [G'W2~'sI_}jq{睔WIydacU%ӫ.`Z*Ȋ֛+їEz#2|~%388:H-1"$ɯEZ[+YvTRm=40pdo\ ez$tQ=ڂd%p(=t{ UOݦv[π:dzuֆnHL"CЩ0B%#w- D@(9ji" g βWg;?]W﯍h 2:68:3[/~_F9$Up8{Ea`3n7T謀^լ].(@ݵcI}QWryRMH5s ,|3vg) '{],T6ATfߍ7y*\랣 ۭ"n5&CiKsCƂIªpBzAP،Fb ,[aY _q<-!--.jsHsKmʬrT^+4Q4̤\x,y a7;g|(t4AZɣ/ʉ32>{zEy?9K /ib'Uj1 (>$3)ԋ"π''t,YAd?_;Pu(W  7? ;.tE?4$IMEdU ~f ޝiQNmV`AHҠTf_7hZC:B{=K Gߠ-9zP#.OU+U.Co}Lz&Mf h4xNaTr[:ڣi_ZO5Lʊۮ#W\@wt16ڈErJNj)%fo*\ȁ jE ϣ)2JqòbvJ{=(j7o)mq4lZh>hgUW]DN昘tD-vT{rVB)FEd߀y9Vu='+ѽ#9k\InXdvsiAe5'%O;*3Z-Wšr7$Բ̨PV˰qGkR1*X+\"~vJ;UM<"ڧ} \6pb*U;,azL;%Q.t&+IkJ# AtVJr"V:(g𒯇 'Gܣg;DlO4f(BuNN- w@G<} d^ɠPqX[x]P䢄Zr ФM__fΏ6i?|jMI. 
R) )LQ`%tdBis*\P9k;Ni7S[!iq4!z"ys\>Vd;ClC0,YB9ڱU:5.baА6e`J`rdRWFzdz(>P!sU6xjRyosSDdǚ9_2 6r)K:hpzPkOXNBp0~={P1Xo,vQ1۝墚Nʙ00wW/~-[sش8lܧ#?Q%*}q #wû#X;G@$( I4Z90fK9i`u?=kG TskHrc /6Zd0rf김CX3_fq/m}-eɵ 4Y VkΛ&27߸e.Eo? u*~ Nj djk!?1Lj ؐg0%S&cQp!>V;O` tj H=ZC>e6}#}왚}~4* 丼?z)3ѮsO3vkU6zuҭ]"zI}(j ؏㸏(9L]BGmdg>K+L[EҘ*\(^M8Qg'ǜB!iڅgRo-'H7#xBqa8^L룸Dd[9-PQ?V4~۩Ru\j:&PmQS ӓb*B.W}YiI >. )N).# ˴i1vIb7|>GW S 5Ғlay&j%8 Zw1^FtX3͹'*$[g-) 2P03'LR+$eRdD]  ֥([Qޕ='E }jk㟷] s [%Xך1g2.#|nʇ)'ŠPe<7፳,_?Y#Y& k$)uü --۠Z;TW E2ɘ0bX1nb1^ eU1my<~jZ4/͠~@c[h$C9=,qmCgD2޵σ\< w:\ɖ5<5F*!qmI<.bLExfrv\ τ1땱)Φ>QRflu[=SYv`n6!ڍ5J;; L(&/GZ/΀5W->Ҕ"H_"Vq.h%鶯jb=k/{GJaQdj@ڑOx:} islqA n#ap[@q0 /kM(CgGLnj^iy2"d| gJ#?"SF~HFS Z=/Hau#πݠᡙmEnܮ ׭Ņ8;VQܶfd撎c2 y;uO/ 8W2ƙhM4']IƧV q1]2O!YZ:o\4USRqo|uMZE Lq#?LլQ6z0P,!N(xA&QO2zZY^u/dw}%7%o=ُ]Dz9*Y^4:Z¨LEe"4+)/l$&{1;$)pstJ!} >iroT) -1 .hRvI<]P\ E8[Bi*HRr2pF@LoG59jJoFlKJVH@sdP@M"q Yr٘X125E_|1X*M{H.5ß:㛼'DFU(^тuy-*HW$zR(ib;m- a/FŹ뇗?퍝%fSocU\f9%!wXDO,7% LZ#C|A:P=]L{຦]wڵ{fC.gu{pד2-9•-oq.+`GUc`߈J6TO: 0x㏙K-0>%a)/\Л )#$l&3(q Ƴ6Wwԩy R:&2äw y +&T]ƛg`UL`#1m!n[+3o4ĝO@+# I&ethP@V=VHi@h ~JjQ;E'Oqz]v_n/FG!1_y^\*ڣF@aלPjH6 n#Kṡjx~N'm~AԑFO_ {IϮ[!)'8_jAK8JA*8P8Q>(ɁfjIG s{<le$|#bVJ:^-z F75HP eO0Փ5VnJZM,%۩[\pKy< ͺt#R[!a]wsu2Fm T~׳ò{W,a453kFL q%MP+ek$qUw}m1`!C$wC)b8W%@&Z(Ɵw>m jSv,S 09"Ù0u6(Wec>7`O(qbh Kk9;kz 9&=:6GG#yQ- NQ>4~Ň@Bgu|ʑ3j͠'u,o¶ŧoa7Wb3M9Ԍ6LMlg#*H6n\,{ebN[ISw?tR3;FھlB<9DD!u+aEpk06$ DAS՝,טsRuipo 5Ei;Ho1Uڥl~~_VqlqJN %tٲ,Gw !de&}r ߝMgeNc3hb=]1K7HXn'b>nwa'+HswF+1IMM29% n Z N [԰\ R?"W H*;GH.8|<_'lYᭁeҫC{';6ddžx_2_47)f?k>⮟/z&KϮ6)xS:4nscSƉVcv={fzHſHjF@t@@BxG  ~̵Q[337A,Š5I5k%G$G1;ewȫI.eO!N[sP܆ ҵOpshUgJaŕ ̀ͦ_,'B_c)흹kSw.iܧh76C7As^Ev͊zLĒKz> BGc j^q4OgZ7-{.=`M1m/ku4l1wVE]DޫI:ڨk izW]W8L`.Na$D^PH7Lsc/·?*EKd ܫd&8imC[E=&i;YY/mڑnM|I^fFS96/@a 8~(Ji?culR Z|si/ѝԩ+mTtpٓ^2r匼UˏB~0P-v&`h̰fޡkئVjWJf8_R*H4#>(ǮIPG^9IXOl3그mLL޲I)0 0\5 "ˮ0ozVei a0_'Y6]dLܗZEb9m Lt& O8Ojal̼oMpPyYRPqgD]t`:XcXz2'?0ye%RxŠ.iCimBs=iҶ..Tw ?h5hW8M:4m0B9X) `1tg8+\jO'hODKn头U|#jUtZ'SٷyCo|h{^ ރWEQNs8CcD; v nAsoe0 i< ukKѳTӤj۹w#mShZ=d/EQQ<ЪPm<_G{ 
DUTv1L9!TOw^3BETFҕhj1 URM ͊  ^͠&XJԥiB{*ֻ^ƒ-R7SVxVb^ٰԦ~NoJ&=x{y32)½Ҙ=ĺ" 6Go.钖kBJu2IJ.CIKU$2P0(ȧR߈7 E'o =Y]nS~4MlNŮi^\w@3=KzFSܰZPoxF{HD,X;hD0Uیw G} N Z` PGhb$:tfnpnRxu{Ӟ({M7 qrQv8p0orh8Q {@7^~[xϕ {&$z0%1`&e,UrmzZMd-3?[": /9 No/$\puμD;p^XW$,{.cx<)4xawvt~FЎ'zdĐvӟ򛨯[OD@ G5OK;I^BC" uc[Gg4 vç?d g\Cf7oo<p< 9z!AP"꟫m?Y\gT֠3j#@طR@,ƬY%~ q\kFtW!"yg~~pwiiJCCr,?wл~bqo P*B,lEYl"& ZtUT1Uk1O@>AG]]oyC $9n# qx8;.2k*[ ͭZ G5矄 }X}}*q[:J3Z'mSqdusp]+ u[$£Z:_(*H v=b|F cFu+=d]z1KEF Iēg $Kޖ{G"zv,ўhN'i@Țui olD5xT~LwD"0k-aGQ @ZQ OpM-YE\o!ޑ#a7LO.,K[\\026`Ûo2؛wZ!>23:Ьzr_?Lh1Y i56s.ԅjAi肞Qk[S6i,hUk)̯70U@Mp2@%l.tyت[pZ&W56(|YJ7>+QoiOj6AUE5:AɑX$똹-=+֒=ikjI`-p ԎۼB3#3lz;8V r-oq)2jE}>#HJ9pE<41.1J1u==+8Pebj0(j`퉩=TGٞ"HwkJgz `,/YJ7UB<<KihPd~C Ē2bW@C`HYmIŢ&Njm?ZBIA@D/S2Gᾗ/ RZytش(Wx^vD8V߸Lu JA(}i_"F3=g04.fCF oWg3h̏#~6tx#T TjbbaG؃m^| 1L1#ZK+'sפBMLXX@G}m`]ڪfu\?CmzQh峈V Fl+9joeLMzQls䈩N/ι,1OZ#S*zXE,fah9Y9əb)r{T$wo@ajEae9SBHpw\AYx#K R=&_OrPHCTTIJ:xX;G4Rl " 'f2K2}+\Hn¬27'!VD)fDYԐM‚x%gB#[InzQaT’흈 /beJ'?L˽t0&85{~;KUQ6~Q ZRfp:#*m@*Sza2nƸRz{kTxRO%\_ёߗeDz,'iv-.Y͟ Q! Mv˭Y6Ȅin!t2dֳ?:-Zg_o+."SXÑbx,xQiIEM}衋L$PMi+~V-NNyZA<+VLDJd|lĠKO>վ)qWO~,AtQ5Qk _cNjp"3JxKAN?0N N6** ~b)xt?jAda0D \vo dӝv}^4_SG8 Ox]#Odڇ M%RAyX0%tyo=նw{%O7ѓkӼ ɒP {H `zr"ÇS;mg2i~; DbրQmft<Ӟ:iR?j)нu%T A\ zY7?Gvjvy<)9[-9F|C5aV|嫋AX/7&ЏyGgi( 3j)'`sW"S;"Nn`סi3uχL QXL2B5Sƌ_'߸r gSĩs'Pd׼QCǮ)]G|G[9{} &e<81F ѽ$l7`$k6 g&f9KC>˦ha4 rtfV$9|_`/\@CdnbI͖mJ6 Iovv2ʄ@G)P֦@i{/dqU.RC3VGktVK hM9W̜F*ll Qr/7㵣a+v5ZTBi%֘JaTڕkxQgHgBA]YfQnDKoṘh/oEp`i'82>UD(]!Rޜ@ W6;uH c'IEk\WRdiϷ#@k>HK9/z*N+k ƿWoF4ȑdhv]弖<`8\"nʻ8:kI~ 6* gzȨ 0ASDmFi!(SXBMd> {*@zS\Mh3jڳo6^^/>Qhm.࡙RBuK%<2T90ҫp!6D ؠ ,Ȅonq ګfl#,gg߃uKzRRpm6cǞѽRa)fZNw<8fQ5x1ԋwx52${*$@t$8(K(.iz1w7>B:?a6Xy6u cb͛۝aHoJ 31G'bG1MUV_̘lFeIR'F?{mƟ#-s1$rWr3(΀zKh})CM? 
ks3-E{xiȣz+ p8OBVnH"oȚ"]+!Ohudmae% %qkߛ.hT 3T!RAEflGvAid_}<-cg.Bӳ$7D}l{gOY4Elc:[b)5xsNIz y$Ӹ!YD~c7ْ/]G8˫-\ǓK5YD H9mn sKɕNA-cx2rIx$qBؼ3*uml/4 -C H ^kY'383fdO^*OC#~dD/`S}ʊoŮبMJ%QِP:X PTda)իhиNWp\)$mk}kJVw鋴tD` '{h_:s$KgjxDT`UpJ/",פ: KTe2B U.MպCDvɦoXk(B:3D',V;ۭFh @8O~r1o:#S?!$mtn䷣UP2wCU-ߟrH QvO($ -394m$sh ϼ$Pf~϶nOՏk.Ծ1)%xB V$r4r!'{ X$'n2nYaQz漫.#NR{" |6!gf=LC_C\;b?;= M6О֙4uQ].~w\%Q_j WGRο=T^T)sVQhV-,n\Yli ^/$_/=x-`m9@:?,HQG1WwHiG[@VӍ xf :5ۢAc%L /|&xln#mB9%$PaϺaFS#/CH:Bwp?=ш4T~T_cYhs/?ѣg{Ce_)"鶟Xk#_c <P.{PWl̂[ ܂C`(6ݶ E޳q[ TTy<S-.M]gT7\—w)d_ &`_>UrֻA!oO(FZ^foxv# PIg[0k<ֶALm٥3B~^D0_7 ?J:q%:-"BkTuc(tWM{2Y2?,ڇr=L#k6Mz|ٷhd_]LqTX"Q[bGغT?Ƥ>[PG/~;ƖE+dh /tbp̸AH^ܙ:hr'Cj(8}aQYr2'B g%a'uƎ+|ATLv :4;6| AY,E.FƁf<ш fe}[ϾTR`?` (Bxۙy@φ!Af6wΜr$o^ z/a ʻytX;n}Ԏ[%NP*LJON!0`*4Z%zwzBUk)̪ M@tmv˱Yqؾ_Ap geCm@=ee=\-Ik&}XCo:|'YӉM7ϓQ@ xV;4SMKI&<Fi5WnY.QLW(eDhJ=\<4'{R$?`}k~7$8 Η꾬$h޷waUyR`gnNcedŭw+V53dxt4j͐k\F"UՙTu sdٚq[5ma"1G 34ڴ/&}fpa)a,poxc̎ If[ZCXQ vXL-~׸ F~>wcM@S7p$ º `JT.zznBjQ|[Pg#0` zs'`683 pgnk8܀ z=eLg cn" ͣ߃uN`J:ntGz2=tU,<ꤶ4YgTvEVԼd5^fm.4lL/Y.Y:P:1Lq! ?JY}i\=Ij7>P۪ xA ,c+% l. l;rƕ޼W~u$ 0փ̥ψu[r UuK< 0E j0Ӡ"i_h5$Bmc7o>cDw~"Ճv|(AYk±rQE"[VJKE)o.9qs׃nG~Ca!=No O(T[;ң^rMl(5X#2wWT-lCDeASf)M,CS>c~X{qp.r_C@)ze>iePԴP'"9"_ "mɏxi/tě)XqhsMpl:]!Z,(&)2r Gn@| t{8 mTښq$tٟ WH.o4j^\<[ ]z7/ym`/Эf.k9Wrнu\ME1++lo~UΏ }g@-Z7:#!^-cbڔux 9H*YL\Uo=snGP*^chxk f! 
e= ž;9e.>ymbڙO'Q|mV^:t%ၞ5Uq2Xr:K_\I)|f.[:G[ήRDD?p7d*{Ge9Pk$l]H1ecsG)]ېwr E|Ò>(wl]M F=[{snza5#u{%#@~b算cҴ¯ovv1f%HZý.e|}%)h}'u>ŸDي@(y VJf(Iuǃo,i4>f6=SWFH<-vlE]l7_7M\swt !l"e!uZ;5{8qN˶oO@5Co^ KKB$" s sEt\ʷyq>HU89RV`AgǪB6xn6!}NX^˦`kԖ׀$wdfˠ.;^ןvncLA0qH u8Q<8lقf=fD8jHdDMHig֧:-nC{X:W= Bj:oӫL ҁzI3_g}fصd\c{6~K%E'F†Q{-B{h=mnd+*Z4 FPބ>e"28vVOe;3H^6bWej9w?ӆjewA9$A#ʣRUY8a@Vj8\ fu]<6]ky衖!E!(XhtqQs9}2X&,Y1#LCRLD0Jܮ"徃B\*$`Q%LZNU"h4Ղ)~n˞TiCPS[MՎc'Sc/eԤO;'0nTN:!."ߕomroL~&tV7NIfӉU33Dxsٝ Fk։!9~\N}1ЎǾꊅ2މ/@T0=hCZhp(NT [^Kg[fap~\ #Ig|ߕ?m)Zst8@{Kw6Nԣ&kZy^P-X!ǰMIXU#v[Z/fAcyRmJ{!GM3 Dr;P59@bPi rdDMe /;ҫX.)2#^uyH2=6+ώ~W!^"ASh%m\ܭ5dV;%1%讴n%8,J@ܝArgNkYY!uD@H)E+0c3ڪ_z O1RŞr፹įt> .е?"+?ڶʌuu0nW Eفq1p\-2 \6q(iZjs|, S⇾dl 5y`:[Ŗt=H nM !ZzPϬ"%s]ieqN*Xv"#2#(wRz#DIӀ,ʎvI^x_q@a\&IƻQ>$ܪBIu Z4:Fs8Xrj.y;X+sNo՝9 XE!9枨shz\vw~|x{$/·L*;vH+Yrϝڇ[;^gc6\EO,usmv(K\60 e 8X۲1A5}FZ ߚKpdorkdUMm70C0CK}Ҫ!ju{6XeΣdhbŒz9I3Ti슕Dް8<ݰ_ڑl-jp/(~(ݞ Nր- ]A$XB(M)8_G̈h ]/A^!9GU$B gx,<:3Td3QH Q;RfNhlaG+$y g-(ǵ6"՟;b">1/O[JZ4['ʛls֩=E Xəpt0&O'#Q V}17K˝dg2ƨZ|;(qPyKg}c2ieP ԰m5R`N!]?]*ͩ-YճV$]$J˚\M7#wYz;0.烙\o//.&Ј0d]TqO< ت/ V)6ة{$n +/g璦`TeZO3^:tYte3N=7^P7#+_:}<ʰk۞Jddo6''5#ꇡh7\n c`JZDGS벖'KGEYjb+5$E/Hp,'2+۞ ^R<5)d4]X/ BǍG/j /~rZ2Ҹrԧ7d~ZU75E>=;H`UQek2}逆dۦi"ؘŹydV6 H7cu*Q[]]mfMG"Ľ{L[VVTyl-Yܵ{Xr},@$/ϠQ9ECcīt&S3P-F"-R?\\݀`ډsQ O sוw8:^;a|N WŶɘ@E9o rYspM =_,FPf7y?qP2bJ?͸]w>qU&9y&tlR m}cbTRw*RVv|Nsb:MZ-[%B{eˁ&mί߀1k>;4oiGekyl6X5-2zV5H]¨pgGC U0 -uX*V #ڀƐnf8e bU+L$Niٮ&H4#K|.x0mJK.1U."WC4Aˎ3.܄UKqѦS_ L)w:]NDUt.ߘ*mT|:0{G4 ϶4XdVd*16CN&HhG]w#A]?`/hPߟ3R#IkI{[vy|4<9y{vۣΦF0#B@CSǢ=hK);l #a5d䓵(QT;O ;NJc\0gW3j"Q;ly9aO3̅8Эj %NYI)Bv1k ҲI@Q5w6O)m ҊoůȚ7%rUw9t!YB'̂g~c#4OOCٿ2t.9}0a^=?/'|U!;Eg[vN#Bb~hp׌bCԳ{At~Scvgu0@yO8Pibճ`Qye5H㰪=8R5ڃsP 'IvհYG'A~ jr(!,/I}3np!=1zHMs>!ڪQS[j 9<} 6"I{}W΃-6Z ?\<='WoAKf%\ ɭ8b8qa'X+0Ѧu . 
b1iV=JxQ iT 5_Ca{4{A$H>C%[yrAgfI$TzvRbu8z\"KQ^* ĤG#M|/7mF&izRq|56&h5 2<}kᆗvDo-P4l k10E}St<@sCH щ0j4q|!Tf&Gy[m̺^x[*d+sD;LmB8}0>en[}#J3шڌj'&duYHZ6Τ}F|14qZYIgc [ѭk>S0(]@pDzʾexg/aް>硽BzF(Ft;`j 2G(.Ϥ;8F‚.39':10`*Aԃ o *oAԅXĎH/ϋ"JD=,җ dj(I)&/7Yq2jw4^Q^7XԘ.Gtyll,#V)'?}]~{`mo7KzR-c-b$jӖiu8u|(T/(t2k7Fs{7_/lw܍и(TeW15R lmyl3;H_Ё?MkxH6RH}?-QhVR]`TŒ3>3*'Dض|\ۀ$U 17`B<4gLp#)|"tG=jfg ƱP|yVtlZ mn` U9HL 1oC_2hg6FeIwEXٓiBM`*tȀ^%C_JG7aQXdtkCϴ}EUi( C-T ,:dz~vXXNWL8q@WJ[X@Wg wE(FcdBrF'Ɇz@02)=P:Y"Wn צD';!P%7w|EɢR6Gf,2I~?wk;^f}ybhpVYt&, >G|~kyL%+) ^J0IA'(_8VD'[Լ=R7K.(xޘUj)CF>T&PdIXc=Rv(F7 A9K<ė.] o O QB$J_f͡A%#/+_ ;CP^C(f[%9(7uPtKk%L6ڧJ755Vuww[x~n8w')Oje+xza pv gTb yMSOK`*XlK\bI#i#u~H(aD%{4:-@JlZ8@ɦp K ~ /D-5T7΄I} J+Èw>Wg8jb:.jv_݌(OxGO>J a*|Z$+e_.YBQN$v"caë=2ɻJhFl=)I=$+^ņh,c[~Uw5 F13 #SHz;z%.2|10)T+fFډ'A "[mC2iۊbЄǬ2h<<(+\,>i{zq9ڐ^'%a:JDF#tc<}e$Đ| Ҭ'c&|hDa/&j̠VcÞ} vz:s3z%N򫬆HM^N1A3ܚyJM֙j%fW %#EX[$w 34 l* _3t=-Rc,4%UTan),<&uzKa|OGv ^44lj&#/9c(eb]68)wf0Q^/].*?UܢbDҖfn|Y0\Yx^8SڧiD_㴿}{b\4՜%"[6@9 E>RAe. XeRacRFE):׭;o&h7ڍ- _:+zG\zqr5N\ y ӓZ=3W"uTOg"lA{&nc;~k{y}=l'Az,]%LɥCl?ĢBF+Aվ5٢y^Dfqu=Qfl)5zgj 0q MITg^XR]of?cVTq\np=d+oʃ5%vۀE Gx:nX-z3dX@oNGu WsH#b{?ZJm+|*` N >[wFR-'$D0- Ȧ W-]O,AHY^V=#}15^]Ħ=. f$P%'hgm"XۜWVVC&bRf/ kw?k.A#ghPGVGtkHEϼJ%FE*ĺǎ]9u2Jf `6W3rnޅI4a-^%MlWlr{Ӵ-ߘ8k|Rdw&茶m?lŒBAL s.!K6~`D~եN s9m KE6gͫg44|rd}׍J˘9/i/~;pˎţneu_nCc24f QӫFggG3[{VsP\=LѳN1\HSvR4`(6l bag8{79=Aj֚T>Nfƣ͖1MF75 I>lJ&vh窅ʢz~X{ ߄j\ ۆ> 5S@4DUgϡ^UȈS+bZ q,{B!;hūHu_}ah+Ydi]ܴ (rnՄS~_ bU>G˔jȐ #fMz?Թ8# )o=r+JɏƷ[5-]XRlC"sLS;DM?ȳ╔TFy#! 
allY_P]4"KuN KDFѮI0V_ z@A T= 1Ÿto:\&ŸÄI( # S}k3 gΟ|d5 8'JGkWcb~)9*|69La#'U00H55EӁa眙Q DE޽.GJbgm䗋U@ 6Q V,~ElKhbLn>YF",Yc^~e?/&EWĽs\߄'xE+VN;z;~Bbȡ>$ Nϴ/Ty= to0lHb51JƆOΰ4oKscPreS5[^=fdn5fH~/}-Bl5 w(dar bU,Z,6uފޘ2G9XWs5d@mtdxwBOZ;buo;ܵ"WSjã*qx{bXJ7U'τ[@xvv?@S7xPO3DgٟиjnN7xbg{t99E2i{"'c ,U+ZJT:їʪ[إQ_x_r8/HV>jGL=.|n|r<-ow \Ju3Y>D}@bAHh3Qe!|"EˆXC L ]X0߹zT;MpI^m>ζ?VO͎1dX{wQ~W_=|oGWWTqe{IW:Fx>6TbN P1t)G,'¯yִMא~qO%˺o )M&ά;Jg`ذ9v6P>i: Wyy.nyls*?Mg_yQa#đ: Qd3sSuB$͒X ,_w~I wВ#Mh< SЕuYnjC6]/5h$xu KPr Qgw bIK*I"Zgr7@t:pjNd(f#~!yr&cWc[7-v(1+ZEP>P`&|jO]B()GʳjPHVal8^95sɝjtۄBC/5{*S}브J\o!% Ooޞ?h> :d2: -Zc]ՉpOdnᾮz&j*Pl)ksS(o.Kxg7#hNDEYTwwmClcїF8vtC(Q'5`ܓKya4@zjo4u̿;}D,@KAEɸ>yH躺mG|ΐ?泌xY MPn %z OjaVwiFAʹup)XGiY) 5 \WN j~-eE5@w3ߐ%ۯwSQx1.0F0;0ׄy$^qјyE4(,YFh]ފ4~1,/@/b|W2|Y6Eu?VM+@T:=  sh V@+Bl_L1_u)zwQT+1|XgT_?Vct^թ-`6Oӆ4Ss5-h5b4tίJh}Loz=̗7ף7ʧjea!hhVw]?eٸ+38 Ԁ}NY/NKzRƖo\RUH|R `iC-}qh=voDLYbU Q%CtGF`@0ؖUI<ݗ؍;OY j _V~\XV%S5SըAn3Nvi Ɨn_ [W-@ UۚG2lVSﳀ.* QyM_v}czA@5l25I~mZcU oXaoBʯhqx2kid-@e"cG׏u%B1g}IÇ.|٩fgt-TUswcre]-%XTЁLJ 3.挛HEW> /B;Qؒ|_D\ ?T0[XP" mU7yM$W}Ap71;[\K):]߸Qۗ?}TkDIQ\ߥdӏՍ*ޅif6X"ڒ6L b(Lxg#2n{kFvixm H_ FRfVy+Z|k?҆gh 8qH.:\ܹ n'MOO,֮h\>|ս1>#.nvN-OM:xnjKGnnǬyZ )P5gf2F0K)H>hTÖ% [ nqz k)i".> CFF؛DE;Qd̍-$4G+4V} tEgz<;"T,\LuJ*l3 {E|^wH(!tZ.+c]` ϗBijCIeh0:yuai 3XHj*j1TD(2q^U c K?57!tZϓHĴG#/*q񬃡&]optάL(c(2r_P6cxI<]#yl1PN/99rCl\֑K(;7m7=Y܈m_W(fvAo˘3~?.LVt2)h7tڏ>lX}=  Ƀ9ELE\YHp @t3eP:bP.oc _k9gbƎA^.yk|luIYD9MD(Z/`V(UE,v鑦$IW\N(_ƢO|YSS}qRw\"ZoZ=w?#b<_bRpX (DyrP)S)<ST¿F-c aB+^RjÒ7ZN.?lg'gʹDu7D 2ӌK,֤>FCX b#g݆nX:UgcC*DTsybr'ǁd>w5(V65wv|O9}S*7%wto/m0N⅞kD(f0qLUN``IY7!JiJ9Ŗ{ 3JW^9M"52B# -vy^b*͟ X|,58(SJ'רI**Q0#YpSCӐ6k[7LAX՛?АמT4Bz-g8VUeXm^Ds+D/R^B.=t|_l'$ۃ&[ ŅAZlen)ibn_ F=33(׬ۜ l\C6yV\k{w[8um(sƌĥ#ɵ3lluL$_۞H={wwёJC!|XHGN<ـt!)Ë8'ƒb҂EGrJX}>nbe$?fI,*p Qs( R{W#aÖ4f9-2ri~xs)4*:8 r nQ+oS= >>sl[ 6gMB8^Pt HFUM C}o+;:JT]EAr#}Vx?Гm),F86kxNz<5W}|RnDuF*[3{xR4U_%yHPg(WrE3eahAL. /~2jnj>^lKC[\T(}%] o櫲G@sl' |d9;3gVʳK1DY3hԔ?4b^.l( Jt K[D7ųXͥ$~< ܄t6Z1 h qJyP'xN Mn1V7G-~mQn̄6 58>byG>ꦔ[>! iSn'HԒ.E7~r#hH<0k\j1|vq&3Z_;u-! 
qbt4>wimnGͷY䬎h-ѐ28*SO׏x.YY)ϗC ъzBpK^WKNԞo6D෋ F_P<}zYw[bChi{6{ew*AJ*HR=,J Gø/;7rEg){X>2nHC54XA/[_)mM(Jrh;X|8^vZ J[><\2n*%ܕFpKLX D8L Aq6-8tk9Vڎ)]h;.s`9YSTt5 >ꭧ/'}J2Ë]^=v hoY`KDfUc7>9ڐ~_Os7/^i{r׮ydf*}znǯ4w$;xIj|I"3dT.ivJHyFyڛqK* s[mX<+̯VXl4m+egoR0nir̼˧y)٬Z]W!l3]*9 C?<>k'67]/367sLόmbvӭ"ӽr`/Ox]BQ+IJjfUKBDAC.NXț֠7oc&e$XEF2Am"W[>4 ^!ҏ/pw[/8*8qC/va]WP&_]^*׌DGfؾǬP1X6ۮ>*Q۵~m]ޖ10KNZ=w}1:H}v`nV䳚,Pc> ouW+/RPWGFL.I%G ֻ F-Z#MFaf[UY9>9AFEzhp8_O(;yJ2 gi6VYYvNbAVF`;l?tvsHX&% lO7ϯjϚ?KToi;*c Ei~Et3v%J{aMd^xqrtSN>.gu?KQ C 4ނ.}&,`w_cLt?{g!4,Fd 囃{>])u- AthC' 5!H-oCϒjwp 3uM뉽ls8<0iWF&k &V/_P@(N49p/:Ws]NjLKQ %S{zAIL::zs]Ϝ _ģN.5%cЀPQ.&hwPCZCAޕ#e#*H4 >$ m"d?M-K6S=cկ ddճAok0}LJS63;Za1\F*DKN}Q xS7gfsY9A䅓ejI\;,)B@xtvϢözeDy b%*fQ:U'#3 n *jgoHenVNۖz&,(ڠFh*Vo;@LC]iH @sX7%mSFgJk/zKy3ƽ\Q&L)^{(:m`aW:/`OFŴfC8}@cB M`UsMU)"~N(APDK/]B׫1S1P3oLU $DȐrdj7 D^GCtA']LcD˪ >aa36@8-Ch0tO;?Ff11ujXn;6Y 70832OA؁@m~ ҵP9 V_Gw&0YsX+P[n J7Mra0@8Ԫ*P BLk}՝d;4Zp,({RHotrM zںs^JZ ©bةNLo+/;jD,uLС@34mO6W y,/;jZ?g~Rf(~KkwQ"@;i/-Z<\ߥ|& 7Es :)V/XQ(/%* m=[lvt(=yg :K> i'FM5 O,t(͡>٣x$ri8/GUȰxx;>!n #BA)JË>,I$jon /U39iJ17a 3i]ILqi z%0S xfIZ0 ɣ.g^3,;_`qR]]l^$ . 
ߎb*v p!>|`xtoNG~L]q5ɪ pj"5i\3&!ʶJ٧7qadҲxfl/7tM#{h%U)&"&(۝%sjj- ү$7ǘw8Z<,ê7-BYdB&^ @%'h94QHʌ@ V*G(71͹'_v6^3zw?ϑdȱA#o&Ewߜqs)Grt ۂbKZt]bJjpˊ䝉6\dMth1J=ZV] nݎ}>fs R7^/P#=1ļHғ\T=~h~*Q*ZYc%!tyFʭS -ilw;O)$'.Vo= >Z8JFk()/+5%o_xٿUO㍲x~Ac}231OwgwU%.:˸N7g ~xi)JhP!W2 TYޥwҺR]vgH=BzNB7rk/兕u.L ~:K`&dc(ɳڍ>{[|7+qܤ|Bp l+.gF:5躓bOlAeJwx-Xv^FZܛApSE!ҢUXh\XSXن6U_rٚaX!nҨh;(p"IAf_XnުSqU8*̣SPv*"u{Ny`IY3=vON &V&eQ _!m4(ƛ 4^DAN}7Ng*7B&U.5pt8lf2װ]yr|FPOos ZV!CžRY/ RE5WhJ'(ߟ&`pݏtvp/@3_ypg (RiKQ| 5e2/s"z:JG}[pS8Td\#3<(vK\Y](E v,Ђը AQ6wr "Àp<->FH(1l a%yBQmilIĴ_ 2a$l"5"8|t!Z㏛EV|cH7+=,hjLJڱ5[_>V=c6Ӷh46E|]1*}T W~< R`^ ś~D#ǯ@oo+k.@]zMޕɥGI]2Œ$bDZ5OgcE}hj{yZFҌLݰ\L67﮿WNg%)EE;h+zd3/$'4p> qU5\٢{\IEA;n>؆^\KF1kSAa끗7H{"pl-Ohu{nrX1Ic?+Q%l$Jr0no,0"œ \ JK aq\}ޠ ݟYf&v9E#[/mY|1Ԅ }:_ `kL$JcXTtuYTf =|o_bUm,+\%.CI U Ձ~*f TlYP&JYcNμDI79OLgҨdػdjON`wH.woEw_9g=m$dSp !qEyBE;U.Ev2 v=`AnfìqJf3WbMy"<ƘFrm4'I^kݶB-caʦkeEkԉIZ#;x\#>1{Pl!K]&Je8aX:Ef㎋YfE~D W@C৹V)p$ OE䩱 ~ɴB~O=n7Va$ ȱx|M(Z#_lftI`!|+ȉ%YOٖ1+%t bn*c] eWh\i=uE޻QЩbKD?iQ5re4~( }2XawBV^RRx`sVBM4Tgfh'yOIC'wS4ȸڠw S|aڬ$FR k5u'aؼ} -nW*)U5}m Ӹ)ڊ>^ܡ1Q΀@ HAݢg-$Y,@GEVa!h3 0X_{W )WY32XHEi '}.BQq|> mfұVzm.=4l' ܗ!VLA TQD=Q%aΆr$]y_ICBQw"0.1$Pۜӗ 7q{Cu]9=T9Ec&]s=5I2Ο"cQ֋aejTs ^ C ?]N1/ "K]7ݖL8`T޽l˼$ %anOF2\ЎŔj:\xF "Ĕ ?YwW +o&Oƍ, (5E$l^l@Ov>UEtR!˃`,OQ&:^ƴm5rT}<@AAҽDpMn B^[{4D`ҘbӬE)փGw~ W[iY53҆;}OWm:y/_a$oD @ ?_}YkҺUn!%_0jl?zQ|Ű*_xL=EvkqD:d1d;e.V:1? 
9Oe'%}+l8qiz{,Ksի5}m[v}z=B ?!@VrU(n;onj.Pcidg׸)5V*g^ȠN!H@V= Ȯ[w/\&c ЄSZ>y̲Fp>,dON 5b!g43z!޶v­F~]:_uG_0`Y:,6ϱ| o s-aܷΓ_!r"ܿd(%r3y56/㫮D#\Ȧ⭁Dk{Aw᫨َK,kC;"N@}Lc3$pcAW1Mͤ#Wj{V6˰F '憣Y5RPfyo :u(|,"Ӕ'vWp2*:$quXm3E!Ѭ̸6{Z[vv~R:=EE֗bS*(%D6t8o=$ܕ!"\Do%˘>qX"+:%}p0f~:GYOh^6]syC VhOrtBޙ4lf#.r'i@ip4cQ▌ª{`e\lM% !drs6&PGjo4^.83qkھzƜlR$a qYAwQyT^q;A*cm#*CPKi9z 064wk@2)H4饞?g4J#/5 Dz~LG vj'0(z $u7a`&abXPG_-eS?1?ҁEy-09- d-dS#@ /w!-zn~ejJXhKXL3, Hl4.LfGV>uZռB7iiB.las*Iٔ:'$u@M9`zdk{#t!28gwwP޷ޣ;QhY/|AJ𵍘\f>K/b$b~n)ğh|u^Ԇ"rD_#B8OYr+Kֹ~2#A&hgF^mTJ"fO+lMUᑺ^f%20cI'r=j:G~f2L Į-/}8uzIöCb r_ n&P[x41"RL=YZa\5_rB>+R5d%T72eO;v~&^tT'j8=5iZ¯WP/kBm?ԌrLHkdyb 2Q03qt@sKiVA @kU/Pg):z("T!Y-|Wv܆J!5F8Hn`Ezf&WUL78;ҰTy-1_१>|[^i[Zjo v"FۅCCAAR?zί@2Np2a8c+5#/#sZxM0$\~5vV!V#4wv&SBVM,qƙ"/HPoEhI0cy' zAitjŤ AUwݦ+2(9nd=vnǸg!bR}e8wj ,\Ei[#)[T7 w N]5;VVi߬4߻;ֽ{[`*2tuדEʉ*KXo˶#{ rSAʚbT0'?]ˬܓ!|47װ,kJL j>Om-;6j7EH}#qOvѩt-3W`c_Wa /YWjK,5n\ZYn* rʀБLqf=Xrk\5)SKAHĖ,kݎEyaӝU z0IHsLX+Q}]nwƑ;PD"p͎a5t5 kiieqU~D|K&y5uTΐ4n4@'o-6:Ͼ mPA mT@dnO}&\nnDc/_?ei脸]y*B!*fhU+~6`z,$>yz(x׆G6,Rdn\hDDL& U5M*0`$V-~zTz֡ dkx)lPB , i_ ]cYt=fا['!!}*{:߬6Vf[;UK d!0  %=󞽹'pHIKy?Nu5~Jca~ЭB rτq|\Խ,'ggWCӔ-OUm ~++vBl n@)|-(T+̈&eo00Q /1Tt2WNzŮ2H-^2nb~ijl%¨0&ʲ\ӱv76 B)b'J5X߁OVǕHX`T7)c*3FY?m&FS&xfEOM-oJ\Ҳ@cBNӋ2 U ,k(E񯚅#?pםwhXPMdk.Yb^٫HUMq*K9Q'-Qꤱ ?y`!‚c:]+U{ѩə^I-0? 
t$Ni,2Qe7|:pk'\~#GIE9;󥧥v >3NÖm~% l1PX/ U։&sl4jMw$Ӱ,{1١`XNiX$ITG- ȬpL밗9Y?hiJA 'f8{h";[g1JPMcԔ dy&8E;zM;SY~O$ ƴҘIymWYbLλ:v&ڐicK}u:hvtHqٶ x3Jۘak (`Gy?\C'4wPNRӔAqG2}GWص!}M %V-Pv flʖYu&|zUo+b]4hU%×QK7iLR0 98m_8Hs$?^4l9<6scu _R+V mAAmzכ%îeRE\CdmMV}O'My %S=,z:_P IJ a Mi 0\m(J=t$g M RʶÞ JuTY _˸=HC@hvcm=PWDMc"7諟\TJdeXcג3XN$YVھ7ozLV5H[mSE~V3 w>w%NݱRُs=$ZU71Y6#7(uͽ5=.0pVQE@e~KUȅE+4>L: 8 v|%$7ȁY8y/cI(Y675ϴ"eltߊ(n7a^p-72z20a.ZhH J^S*r/;mȋy1{5AfpUɦS V#<} h bAR!aOs*@:%~_5u.%F|_-^T$g ]H] W=^7EQ#7|c ρ눅؁1W\"% LA}BChlXU[@b4xq;y[qm#ss$BGK" ͆iYT??uf2$Mg0Xrރ6[{y54Јu_%bC5Z%:,Ytpw vZtJ}+cX̞GآsMShec+ iP*3ReJʖg>ΗF8rkd46Sl* p:o_uXp#8[ 'dܧ ֿL2L'X[]FV/Bf}SeSkZbcBY &`o,!TB] sMW.#68*K ㄴnY0 u{d@<$vؙAvTJnAd&Q@K j#3I+{WD.I/PEԄN1 Ρpw mstダi&TU{ N*s` ejJAcl6 ,r&!@SNR)vVylrc&rNga$pӧwVƒk's]zb1_ԥ6aޜoؚ5;.sqM4isEɾ51OKQ$@o -R.pz |f@vmicaHz$#2I#oi=4y%d8fi˜a‸AjTw؟ٛjR7tf.uvSʽ.n<*K6lE`(\[LW0޽2x$on; Q_t%& o˸CدMeZ 2'Οƶ )j'1&Ñŷ?x #zO5)8?xHwR^-'R!qwil^t|Ud8‹Rw'"<\0WK?NeNAm1%[S;ŧkY0!3F0=ZB)Qq䏣JCj|$wp!_¾jN%z/68:,!2%[EyZ2^C?9уEA ?'"6WL .Wy*u3xRa$Яb^@|Qx_W&rca32$3;KK/T -[^(4 eQ噍MV|ddX HW}O?iU<.;1)9oFU'ة75 w5bR݂7]J>1 agʈG(p7[w ZmIy{sӝi7̙ YTG.s_y>²n=k |[-}To`&sވi{hs6 9z@Ķ`{Ճ@Nq_;|eS2/PY>A9'gOnQ”ȅ6:.TsBDRYT՝d]Sk_SzeEbmo9X5y#k<2KI 郿gﰐԾ#o* ׈$UbVsܛ O}R6y3U* 9WTcF:ޟ;5]a% oiVޗh۸ٔG4uʜuRk vΛLkm Ac<{+Kx 1 $fΡg{]LfKCJ1fFeO?yiDAQc23X:>qoBIe\NցwIWk㈢V>iYq(3Ɠ8w׉,eRY+3hb";6f`71 $^F5}syXwEDW|(!]T+A~<*7PT^1|(z/m:YMKz:)9c9x*R h/#%`Dqr9(:=эKZWe>gc e'[ꏓdug7'Hh+ݰ=$<ƖNI-T\ƀ4KyT}\$6mr6>[0 q18#l_9J__<@0@h]!Xrǣ=^M- @eFYt E];*9|kI*nu̗?b:7ABwΔߤf*݄-~3z>(;pc!()?o M| $ib JR`Ů+BdQ^S0FČB:.lFAyOE&pk(8 ݐ!JHS[VNBI϶VT 1^'pCc*c0|G^֜q6τ\fK5naځOl'Z.l@{^y] ̑4_c~hRlDXI>2fo~66cdPaqk`OݘO (n9XbdkTe0 hΌTD|^¤..`Z]O~ 'Eh0aCnǥu.(&\8VS'mF 2`׎*j˄> +ȣ_dm߷HnIkT}!P=-m.̓_·¼{%_/Kw2' -l>9pz"jEl2W>\ Z9(b'Ēd3@pTn^_ـ;SSEV=P`u"(ģF=hFuc뷬'`cgx NWn63#<5j H ‚r MI oq$Ňh]KGkll8yp )ۤ+4ܴ6u)5-<ݗXyb%ƕ֒ψYL<H{uBrHB0 aOC&--RSCqbXdfo[%D#% k^+UAjEV?|oT(Dwdp6ӔlbJ2btF4Q8K<ՔQ?=Tհ°,P,&Xi?na[#nyR/jB71z`+DBŷﮃ&~G5 QJO%gZT$Cp&ND@nT0l6O|M ޻qeyIaƖm;]A}J" ڪln-ëzdF:v/-AA%j*`58[87R hnhU9 
Kp=Xm=:tN8_Ɲk'TL:d͖iZZvWtH9#mW!xr)sh288_E|"Xp!Y[#H)IcpfeGMD/6Q!s<%') aQ+A(xb,7i^673u@]Wwp! X>[ڋ1j;89, ܜ^Rq/NxtRgN1M:3ZFwLS"\H#\xn!Cq#/asjE[Uis i%o< Qu>ZPx 8C _UAoץ  xǘ_ewX+kY| ɏ'?m-C&Tgz.=V ]~!x 3}|Aj%1HC#ٳnY08 s0Uۤd?EPrXr/l홶9GҧrXRJXмXK->8՘ j1=J r`6EuGd6L U>D8XvcvrUfWۻXy"?U91`>R)m*ӈZ}yg0y0!?KWu㭉gsuן RY|`1=1P龬DֹB*s0 y? G"zRJ@ f-BIǴ?q.niJEXBzT\z@ Ƚ=BРFK/lCHo$QW:nuI9џ8GfnWC19toTH|2t[(ONʏ$8qǎ1aY"?x Z 5CIU%uxDOĽ 8VX{e;\iRIspJsfL\hHoDXO+l8xh3.mt6C޽G(<٬sܩ+W5nuDӱ : Dw 6TjpHju44,CW1VOJ7^E7 huSwbo~?7'^CҫXbqTBkT C C9SL˃e_si0#G"em0DTuG bze󜥑ËOa2HP]>PC=05,x15`"MҚK>,Ż!6z`3&Չ90Q^ % *>0 YZspatstat/data/waka.rda0000644000176000001440000001041012252324051014461 0ustar ripleyusersu p\Wyז71NBb–,Jvܻ+j>ޅ4jPRE]'S0JʻGSCІ2-С(iBPi{3輾=4:{:{bXֿ}[ڹ]K;gWn["][šK6Wo>~VgoyB]}o_=;x R;ҏDWn.ץk:~뙵MX;FogNX;%vʩSQά8=sN߹RLػ{"MSv<{b]8o8\4qُ:l{_<~^粻{w9Wm9Gnugw!?{kNuI<;kC+6\oRsu&Zҹ1ۿWt_<.aڭӷXp}D!Ώ'y7÷h%~I=t?ΕO?UO#iuHrYWF?>itOw~891j+>U{@o>oN=k&G6֮>?߇IMb\a|h]l>k!9VwAp4t֏ {f(vx܉ȇ4\p v9$ rΠ)Jx`79:7q'~?}AvN{<I'uIT;ǮiAov^6g>b|OJ5w9l3웚1/p~`%I?Z8w=$).E]RĉIp1'pD?/f~K?7 /3w[ߦMI<f^i8ԫB?IMq<|;Oُ,gt~H/[\ۿ yypV,ٺy~| 9ݤUU .82e)Gm͗r.7s?1<\K]f*xw_a/2_D˒lrw[/'O{g /7}fRwWmxߖY[6z9h[iBm~mmUF':/"W黼hv3^/\T4\V΂*^K#}ы+nx]}/}F tGj^bQ.X!ȿOټ0W|o04M&vm^Sx軎ը/an~Tka}m9j1;|[xo5ߗw m.kdl_W_Mm(@OQz/3ר=Ow7i6~;G;x 4WW'/x朽\Ю bOzi٭}wagsOo}?lB.:hoCxAMZߏy/~Ɵ2iQh%_r68_\>`tWBĩ=؅8?fr{ k"#胺4C!~9j;1v~ٵ ~π?Ʈ{a6znSԕvylC/1z/3:W*X܈qO?b:'q{3@7@s_Z=ooFGQ'&_~8|MRF鿵vs9%efs2"O "龔F9|ͭS_"u_:LsB|.yKR?9L_.U>oy {n)r^=0O]e5gr4m}9K<}W*ek59&G|}0߉K7>OzM~kJ@Ĉ~#q~ў2>|!GgngL?A]s5y?x?bQ?za?SgXt9u%Wsca߅jfYY]um' yD~ve%WO>+WK]R2ov9}{z y Kk?M&ߘ>_@]z}:Y.zE2;棧6A<7ha!w[:ģ8Ҧuxk~Yv%a11Q{cc'$aW'8o/vإy|ySo4 ܫi|:[P:1<I$/R~蹼TɻURpyw{lEqsS to^q.69Q8 ?iGq7tK3?[zƝ_aݛ 9q=>3+C|^`įE"/TѺ.!.|-{;2L}_~.3~ ԉ޷s?{pYѤߤi# G|wxY0gdVOs'kL~[w9xտ0wcU m> ON}LJռ^W&8D91귣7H<^BP2i5 N)Eq}ċY$zw.9y&Q[@?EB>(S_AuEW*ux xx\^E^B7Wo>}r3o釈>h) \«v{Yl몾̓E*뇘OA? SdìKџƧcV.;-nG;øns9}-bqeXHC?#쟀}p)rr$gDu}uy}c~ƠsW+I?Z_|k@SKc:2O#q=9LC_>ע[?%= .pk:^fXsoY7*<,K.1{# D@>ӯ o}0/} G 8$<_b@DoΨ^|}&L.^=OB?Ї:Ax>?Hk vɠWEk EN1:8Zi4? 
Y+)qO!_Nz`^T\ѾԹ^="/E)nI>+ayGM|IO6ͼm:XY5+.ʏI$?)BG_gi>:c^ ߲~W%./QC+E)'%?}H 8i4"ڹzBޝo»˸/;v ҧQ[yE=“gXw;`e[Nn޾r:F.q1spatstat/data/murchison.rda0000644000176000001440000014662412252324047015573 0ustar ripleyusers7zXZi"6!XX])TW"nRʟ)'dz$&}[ |vM.jĄ4' ~:K",Y:h^dVp4 }K}ӃryA2D8ځx+iklwn". m:u* lq_D)N(%( ПIu3gRWKq$e^#HRj.}]cTjҤ kiakԚqQVz^eJ,:($ߛwOz .^IfkäBїSD=L޹RlR.Q<&*a:e@0{U'T]_a{oOiPK O8u΁M ?hA֌ZkhGR˱hnIT21 |pQ<,`x31GW綕 |dh׿8sGJ;bedR5n/_i鞪eu=o!U+C4]!a6O(dkm-..kw .49u!$H;Dqe+& FAd_GiC4JW9RSOE (t`O-TOFW"Eʵ:<}俸wJCLQ]ԛ(O ^3gAC}M\vfO=|DE+m!h6@aͫgHo}ݓn֥#8Id.wT#K{E/%/dNk"Uݏt^Qa5D}бe2sުx#/Gmҡo֘X&Ìdji˞a2qNw [tYg&Є*a=Fx<9 ۬YMb!-UjL?ar"oM&_4M khɸ(:;fvxP~*m,LѺ3q|'՝wqk?RlJP$ (~b^4J/k?2\) DΨ\x ^I-I 5oFWm,TJ0VO/2\6g9i WȡE|EA4[@MႬy#E7r޶!#%\x-Jn3燡JpuŐ?d>E+kqHax%Sɵo!BS xUBZM/pnq5*mD=`3>żly;[c'yh7bbd4ey.1,͚9jGGZK c~ll[|I?X[˙#AU* `wxf$aӺI|Ӎ1w) M tׂTҾ!Do'jVvP)DAV%<xw[lTX0Rhs&8ʔ H@b/,gf{ԟˆD L1THeAch%;0/FuiB.N@[ wO(3үBkgP&ý1׼^WITw;S.d ,`b8EC)M^|pc7oC*fCf}ކkxw[bu kgn쎢!N't5 j9w*`|}EB.-4^dNbp~2VXAWg"Ӧ|vmQF*t)b`I}dd+c"# "׃mV/D"edݗa47)hQ#Y%>~w*FVZQu^.5m,!6V,{DC"2C߻o'Of%K?褶rldT6.!tl:#!1yXElܪ&H~#2݉&UB i19"lހw_eu;Z5jZ}ȸsrR@ w#? jj ٥9swDŽ5ˍie)y]TukwPyeGTo cBƈ7  q=ē5p.f-څ %LR6E˙֚h A(NKjaqVQK^7D :%f)cP"dSO(ݼB*fH5 .L>AZ\&wSN -q5~u t7(9,)lYc9 -&mx\Ą+TalG{-*BW1p6y,h CQeN hR13tŪp&JӘb k*k¿c? \wg%پCS,a]D-F]VKb) pۮJD(rش;Wc}sAK`9~9%R"{zmeG7o|Me /#ýqD(< PD8Rc]RQނRS83h'u?4[n}Ϻ21aJ cj y2| S!+>(\Dv P  ZFYDCJJ|з,eOnVBLϬ +,e`jaU|O>Y"cpp)ܥKmvJ"=Q>*\ gXvOSmrGKlFvd)ch}7ikƝx}ݼ ׋V16c zV2c{9i@Me7}2c$h&ԀcRE3,~XZZ Ooΐ,CɗdUKD 8\z2g >MN8G'p{ZeOQ攧l`Id߼K:\K}yLOk  l|kSY&t/ OӃ1 Y ef ` !thӉhƵPn4Äm*٘%"RQ:mJT M `@{"ugpg jXb*lL?"*]t͙":,m"&R!k|NL|ZaaS1̉!Qsv NeUz~`v73Iy"e"&*)FzG-߱te|jeHKl0:ц.D kαb~IewDB#ϗ5yD=NO@.KD|aSlk$^]ʅ )Dz9G*` ]M粁^ÞpYқ܄0?w{\g*C9LlP'42c9,Dd=g6Ei1v.Q^IJCI:Mj-hZzyGF9ժ5~7|TcrPmYx!KZ2r!EbԋCEގ"&Uz$F@ hulAUC!bxʌuюvY}cUT8IʹajowR \ ʥWݟl?Z&婶&FW=C~bgdziW:,jxg@WYm_B.MbMT;RJ2̆h e"bcTr0awIn,;br=sbd`NӅMvxOR垐)?#W{oƗп1F*vٍ6_ԹCGsD )`ҚQ΢|7Mt| pv/9^ 4[trUXF<02lMZAzTB*5g0* iHNA`'5 7paq|_=iY![&pu*l>!|G..âqͪ,@F%N? 
%p *̦`SaM]՝;,l2:eM̌|0 e;ά|yU-H>|E9"sTGⶄ1R;cd)=k?xWgVVGD9&[&GC('^QehW#dHШ~Vb+w9֕ˈ2JgFH9WHEQqƻX{F=bWnyz8dp@wn' l7gT#9axFVEqO hBnCJ\e<-86}>ckYD}zB۸sLùDha -7:Z8cj>>ݠh.1tn^[oVle [Fˤa5n`,tw_o7'-*?09v{M"v$ӗ<8XfNVL/‚h(4O2x<#l1ˤZHW,'ٜilg!lw^3?0;:0s35/( ?ٯxG=o-o-yӵ ?Jd@INj,e:zUڋ-Ac+]7a_4/J`~"pw41MpfWZ2UDDC ,*6xW . 'Bz}˱L^b2v߉ p1ST۽t\՚ HV\ kY ⃑ 29Hs=p_l=2{N qXԡNd/9 d an-LQ_f,xӺ$%T7TNF 9R6 UCD`qno ?J?Ex'*$/H}!VlTvF(3F#5~}D] d C=VDTy>؊R3"e Km&hU{P)<3U#8j|t:_t[y&}+tyFt6Fqd`8o}jrg,+5ޙ/]mr'A,܀(O0.)I.JO?+ ]0Z2愈 @Pg[BlȤY`˼!<`zL7fr ޼2171i@lJu8ͨ7 F5˙X8l[k. ڶ`hwR4awYP6>aS6p JnL*Y3=fxCsA|ْ@Itn}t2^, J pvvSEXi@qp/\i!b#C+ pJ?iMN-/ne cH/݆*Xp#b6E4]#i{f?Mѡk1uaC↜Jt-"ܞ؛~b52'xXn@MA6R͆ex'C2[GvMP< sjQO5[9S U`( Oʮ Ȉ^ p?&h3n'TfX AW:Hqh.: ))b/4V-]+_K1.z8, OCݻܙ[Ko&j`? DHd3f78%WёXnhpc1 h ;/Ιr&4*ć ثgXQDc5WU-Ѽ)!( ]dvBiRs7k[]O[Q~t oP>e h"&G5xzDH(ARv8).Ys&d<R8w:]ǿ5r@88GQi;}zu̺KcӲ-9 nR ݾ2}>9zhpnb{$>bbaMo7!j)DM^ (5'I"(!cl\'gGgNFJ9[VXƶ=9wg^aZѻ{ݎ#UA`/.8bXW%bM_ I4oI {òӣô ^o}Yݭ,Ǥ?n+ -SuuC^FM0scp@sIS5HB`9\(.uJ00m͵8)Yb#-4" 0k-`%! tkNkǹ@FPL=HQ1 n+h=URJb*W s̢gxޔpA.`JI,>kҀeY P MWڡ&e큩Z.3{bP ت)`FGw*DU4>D=1_VQfน"?;DZVwq%uo.dWFЦ,w=-w:UuETL69x~xH=G Ԙl<<~"(ިV]AEa( ~򪫗P2}Tܭ_+Ŝe vD?ikd&f^4Zi%+9ȁuDd|= @Ԕ 2{u'qD~bRt x,+Z' 0ޏ{ֈG^jSl(TnڟݍrUl8NNA,~H4}]ZԫKh '2Z,%I#ЭonGV8: 0̟1/W3ϯr:03Fk&ŧʚ݀ab`@c3`jIϠX:!:dg Tn MPOc)ԵQ+e3mmqԽ/8=M kj3Xgjl|-Zɬ`kAOG\*A ?MwU$nQ$ C.%arلԎ mJxlQ1)Gj<4H*F6ž-|'aо>z\(F+7-muݩ5_ޛvh' e.|!-{V$%āZGH羒o &M5b@K*ƓORJa5&O%YYEZ7kYG~C2z Y\aad̍G9߬ 4r+h{oPf6]<=Jk(wFa>0?y#Q8)p!r~C'Flo"nT@ A泎9ֳfE!׍P%k՜kZr\dFݷC08JQhC%jT{V繇akķJPةn~ވa&hPzI 6Y6NPDŲ"/+x uk^X0V4ck2GnSYlZx{ :J]JAՐ*悽0[`0Lc)߅/%z,^,#60!B9\\xA5p|%\=C$]Sq)J|"fϩ~G)~G6ͫo#^%[NOչĝHg{k Kmo_v4:qR$;D {Xo>&А[ Hx,ekam:;.SCNGA7T=Fqn<~[*ЋVކ{b9`%0%2 +N_TfN!aMTl9Z %쫙C#O!!O_ =vlx̅CPxE"j8}o-H`I`EΘ-4"dbqarWRJC3D! 
H#SF\1/x$Uj ~!"Kp;) Q-9gfSCGؓM,@v &<y{H5lCo+0{i~"BNw8l$;C!DA&N}յ0M딾1?хTG{!"J!oTPg _3 1e`iJх_elb"'k Bl J)b3eXZ\w|Q|'ne[9D"aHwh)Æs~"ǩ'02L89v, $pwm7yí&f gbpe $u7},QB > /X+`2ɒVNvHYmIVElRZꖈ+pRb,b[).:(sBy\a`%|kY8qe;Wm+<]\6[h7[YGN(rM]b3"=;]9 k0s0OtHTqѶC1Ij$>zen&HO3@_Xѵ$yiV!ؘnhf _L!¿ K[/.>@YB&ib'3Fl0G Q44#:l)BΑ;{o?1ڐ }ŧ-Z f~˽tU Ů+(kʏ^C =Laq7?*866!i5Qj \4(@tHC]Kh|Elx"\БAr2Bg^Xtim Jڝ3e̿=Z!|~?/Bp3os]!hm6p/`ۜ}߉tQ'Q~\õFB+.vpn!We*B12n ixC +>\oC~)8}q< 8ore͙K`9$Iɨ.:r1NmNA5d8W4´}.D"0-fᴿ}H D;"ࡉPE hMxqtxQz~O4",;;uT)+pc2pNΌ'VnIZO}g6!"0Ҡv8~]*FC]iՠr_er0_W$3˘dkr\Ƕei:lSLu $ ~/#~+P# -coSݘ(~)t.dA54jԹ00LeuT.=Dz?9): 0J'mAgVfNoT8^{G| /|lOuN"NI4{J?{ ]꺭Efn) J@.U0W*rqS$ l4os[6W8+~zR**rx+ Q2Enmɮ-P> ;i-,"\u!H"!Mw@A/F{:(JIapmEnz"?XIKvu y38]vjtA;J>@7pY&J; ,9&]=DNISo6R OUah;k-HAl2#PkF (Ҹ'|BIztH%jb X'*ykGʾq}j;uҗҾCV~]L4 fv͞7vXgo@Ѝ\GlW y"Q#BOA c㟯y Lj z/m1Wx_$l"wj_1ű;- P.zh!Yֈs)vXZ?%> y1S3}j[h^hYķ$Y-#Ѿ: hr{eYIsI[{Yf#v{ѴI CmFϫ$!f'fϘ-w=8lJzg< P< f%^xs m8Yvvp?NZ,uix26F[@#qKp@ aҚ$v1%:Hyl Uj4HCŭ |\fJd]VgH?%(u1Ne- bY\4O*/H~: y 0:Pp_6 3)ߊ3U=N.|p D L/4Gf.CiQ,~RCDjPs6‹ A5n#^k #{v1$K͐s$p@2QHXLUդ]r#BQҮ2<3:bY5䛞4:3uv&-K&)(/^23˥S~,mKԢHޔڸdVB5+svLQtVS <'Xgd'lw.u + bAO;Y+UퟌVBlܱ2 첉+;(|ZQ9߃tOa2 mKxqEp)GsPnc ߘ1a`w>3\"< ?+mn 5-ږmDOwJe ]3o^h旨.<R$AY #6gdNECAvMn.PaD 3\=.붃1s蹛Q;׌JU~J?SWi<ĂX*5 ,yéR~)'"sb 1T) n6_]UWMIMv *eu03T*<Š7#[u0 "A\7c/kQ[tОam 榍^-#:;Ʒ MwnW4ūZPXjbm2*1!(֯eZp~]?ɇm8o$Bd"$ϊjc:JO[{50gr8,Om!q"5wC@?1K, 5`!F|mW֚r%BwR3#'8C d{]e9"5n`59դh-}*zH}3&+.FI3R*؞0t{&hX9HEZ.RDŀm̼EA_{RdPâ\:_3ٮ8 LmT N*mS[d&XH'%FU9aVZU+}cY@} aMC:"]llQe\P-d4EB% A(]UY%n4ZLPYM\I=/i`^{Q?UyBC3 G--iE/U j/bVuW֚AWPT8FCa* Db׫@Xm+W6B{$aRN/s3 kp>OӐ#rQgDZT5JT9 @a)˩yj 5iS8a/ͬ;qz@֦s 9rrDԤWǐs[m L /^GX"[gѲe'3py`Uci#M|}N ר_=O΃}SttL#e8ZV}rEنGr<)cZ1K{jΘVx⋮/ Ϳt9@O`yG XVv"wZz&E~kla"X3ktbW˵`O|:3 ZFeR" Mp 6`zyv>Ly/-+L_ (hnrZRu Ri.rmΒ5n3;#m8Mñn bG VoHgq:ewPA,+gNP;L@ĬiZW2X3cb'@}IA=ˉkՃF4&/t~G׳٪ :r6¯S?qju=W?77I[Y Ei =2Z^N[+;]PJ:B=ЀҳO{oKdZglDA\ ρh9e@hN"/m(F^eoT='?q^PM)n*_& A:NAz͌,1H8RDl!wSb7I3-퇫DV ]bK^nD"1Ac_rʵI9 #XʴxFcqH:%*Mk@;k1ճ2j+>˭w.D$6eEC)׃}s)H[a F7Dg4|%Gvq;r!3 
l9@x=UqX?@*+t @5j-r[|#(it*ҏH/#ZFP|9:׬.FڬMS;ˈ_UX2˯AFLHaosqje >:(^$~Sj~8o?ͫ`}?yv(k:gO Fӗ~PʇKԞOВK CY-Rv Mu>NJj6N*%a/ٙ g>Q0U mMm%@Fn,KGy3;;]+PP2t_j;'r.B)txDGn|vb0~%6LzUj4DZr=Lua3}M(G"El\ y0 gu$ȈHGG#}ADս8<$,6ddr Խԣ2A$yKݴXueTV 6[yu``DI> 1kV1F,uGR ɂ\1txAufR!G:*!A(xzEal[]%!I+k1 bg&%YWi%j$`zn80JlUICTm8:o&HF,ғHR+%I/҉0RWt8l.gR B*cJ>م CB"n\| B[?Ԫ(jR%HCcۘ|Äӫiٿzl?<BJ6= hv'5LW~> c`g WiT5)t)Ә9Ή`3Ӡj'7[{#++3`?f>:&T=B'^-8[%kQ|Ŕ4Cml:ӏIzCtd]zpG#OɕYh V͎Y̥{|;w#!XloZVW"VzHl[d Mwv`Ζ)bLՔ? o.{sUL\MK03HX =q@*5NWUު͗AE*_ *w4J|Gi(=jk64Ȋ\V[Ȣ.:) [~jgd٦R9( 1A4!F $o&AQJaJ'd- -jxHv\FaxQh5R=u:'u]̺#ԜDykf YFLx%"fk]##ǖH䥸1ӓ{Ϭ0x_RAgOUa,'gqIcE}xӉ̠v827P祌ubv~g1*@Ɯ:80n$-v8i˯(j&s99{! I([ֶ ZӦW_3#HXjV|==@>=DQS '|:ҫ@P¨{Yc"Ys1 O&56: {wEҨC9Q #︄%t N+ҹƓ{N BƘas*a .:6E` di4-ՏYiNˍ`+ mf_P4rSb,f4Ŧ[*1A5*0Dʫk45[EtW p,en2Biۣ\7zwѦeH?BW83X¼%yBи?W$Z`_J0r;MQ&ySI#W09\LN4mbY Dh7ЬЌW:4"~f:19-܁.n7Өpog9]Zu5)F5˧daGSq>lvix #iAhSlPi>\K:|' @fuqm]&M󔫣׵QYJ J4㢼,4){n)9ew#&t$ ډ: ߪ/tyT?hZKJ2Jxڃ}?cJZ))ospCh3xOCV,9U-˹UxLuiu |?BuGiozľ9nv!X?ĸj>۩-l+.(L^j5yJ6|JpS^+zoHtQ kqiK4o;א@{IjQ/DLTWo3x{').G)_e~)(A֎9 aFtOdunRQi@Kŝ 5 ) q Ip|iS!%ۃ'ޯ6 J}eyBwH0*lKp!>Ԁ!~ذ_ʴ,kʴ^hwU0~ n $UoM("tBQ%0 m,T;'~; 34N8(j삓k{YH~ ;#8K/QjiKg6 'X׀<G-?ƘMFx;xTRt CCF 9H Xj ؄]PkRR4`mab71^2Kmy5_}P2x\tھvOKӠݫ4'u oV^օy:4Y=C tT _'Pq/kgÅ]PIQ L}6QnRsx©Q5ɊCK1sTB>sʽmEB_Pˇ*Y]zeӞjĀ9`{oЁ*)Ц{+څa+%4~@Ux^wq^{Y 9fPVMH;lO,Dj(5#h`RcQ{$=pMBᖿYćQ TEsಉr\v/wwشLes/U !S-`jR5޵’ff(Q;f9d> s{ǂ SũeԦ.2!+z{iRC9S|kS^uW  }u]&e̝~lu#  l݃ J{(kya4_iu-dH9Rk u޼tWbZ*_yP:j!SfmĐDeF b \DQwC;rq?b!P m?.j1ϡ+ SEn]p꾝;vV1f&nq`G7U8慫29_(; tD 0 "* ڒ6R (Q%8GOJcyW * t-~xcjh(K14$,1'5Tzic}£zN;0-_YsAr  ('2} ue =w(/l9kVK@͕ٞsHL7`Fl L 1_$ e0>^9{IMmy]%̙[v?&w|QoO,I1~3ix ) cNhnA`jiZ /ْ=aizs#axE Eꓜ۹3+V?jWN_1M^NS5^ r4/X`Ms0p,fS.iΞf4N O3r0uZl|JS}2 g 1 j(RU2Rl+c"z9>'z-W~Pt7B5|?k#shGp(i=v$k6Fl Uu1UB{❫gӊ?}x ثHqOd 7b J{ Y0#"q`A|%+bƾZ.Hj4 W3 A=bAۊ5^aKRύ"=S %0܇}N?j9v! Lvx0T7${FTRSSy}FPa5?`7GAJX٪1z ڨOДIY/2Uㅞ֑!E7+v[:MoC+F|`xSK:ی2旷-Mk&_8@e?ZBʵwl_yOGBu$q8qŖex ?BzT /nzeեJM?tP! 
r!M6[fkS@jR*yHY ~h {?gѯ\͞]%T0p穮֚Iu >hY[7ங*D=Oo25_ kp^䣷/I"H_dv;ݬ s+DB*nOJb^ۅ5KYpZf o:ɋ@,7C1zԐP'͕(kRjVvYf!lKSNVޫc[2>*5]?gf-c;xJc94$3 >YULp8ciI~ :h!'SOy+us!>jYI[ʇA: cgR^'?3 x?H7UF[i/9nH:r+KYg1I1T[5pGX6HD,ulcڸDlօlMX*B%,+c S=~/RvNץnOjb~ǿg&rQ@zJ/nUJ0yͱFEe;lc[DYOqDȁ~ )n?=CFU 7rl4;vZ憌3 0gwu{nFzteDbO p$p MOiOb4XH1(:(8ﺞ{V{(wʊ Rsp dVPKDX׮6Ɉ{U8TQVQ}˓ QN?[QS#N9qL)k^I^P[[Hn,>S?b r|xm]SyM;m\znQL.~bY $ɮx>$1;'Ávq7W ̻2iϢ9ºk=(t[L}}g-}AKU̓JRTy=NLc .]ӨJ0$@ToY))ZC.v[$'q|)2ZMTw" $A^[fs5:{g|/{ mKW7X0EW_H0^΋:Rja[%7bCHPvrZx,{~"b1T܁ >"PNT\?yʰXwʿN`na!'Fe2{BD.2B0]ʕ&lE{Ϲfbs- I~sD"=*H{(G'P_vloR>TȈF%64Z?sޒBbYNI,D.V<ѝN)\Q-.0cFsHx33 2w3oаU*>y*۝v#:s|(^],(uKm7dԌ60kBl%NIU=L >\$=o5:Fk/,o]Tic\&I99qY}ct߇FMUW:UoSUfuHGُ){@4rX..cE2o?- KeHjk%cgVrJd^qc M̮J֙Ж\Ԗe[&)JK>mn#jgڥ]\P `t%YnVc.|xN]32)|Կ@.=fc XJY.$# ) C2J_9lwY@m{l4Rpl~nO@?VEwA `KD(Q`r-ǵ9??jޠ̡:laZ-|vlHR4ґ;7>"]Ltx{ #JޣanD鏭-xRM_ZTn-0h~)ˏY965z!(C߃s7.UwoT[6yᱰe0lwEBrjk#`OG›en}h2ұ΁=):{U KsR.+~妮A//cV) ޞζuX}J_@A>"$Ji"56HO Gq7~`iʢfRs@K6QbL)c p1yQ.Gx{m?_-G):V^m8f(i+$] fLT& <җRP$b{dq[緘 dx[Z`|qo>9M5WEpn/6|Ͳsi12% ty7c,'FksT'Eᱦ̿Si#iAZpE'lˤ&:bEaodO\SګhI^(ZS<{Sb-o%ooX ?|@Ǿb\dji s&Ӳ+gq:aD wRsTؼyR΢lj5%HeYF^7{NlKU]݃@Dh8y !^fgKA5Vz#~TC0Fͻ*"waY=|9mAmڵ2=]Tg Xm+ټZqu G"+P./D(.<:ΝnٚɬшyUzwq3|}AaRSip"0$k/`:,xpyt@dE2 by--bmrðЕE⣉6xAs$lя^cn}*`6:DR(fOTpGX#fMI#}1oS&Meg@sk\ʹRV0̣J83E'|uWu3ԀΓo7so?Wa%)zc\GjAȳ;RbpwGث90kTp\w~l@ 0|!hE&0>d<ۤ@lB~N1R$+o<@nD2IbWL٣_(*F,cCrqDT#\5^B|O?`Hl!G8,i[ɓ$U}R e7i<(w&KW[D&J2wqM(b=:&VT A#(3m NnBΨc|!Dw2YØ6nh}yy&'`ך?M-@:jބh|Bp[,#W3/padD@_ ;ZfC+QAgʳ||rWrAU+ŗ%-j[mv?'ֈ 'Xңكd)0@7+Șaȣ'kG2wBGQA-qQ:P)d2jZr T(8ۣMc?Ndƫ늇Q;&p;[YG\̖tꤓܦ/0S0w,~&?q bαN+ָz#F^{>+\@)a"`.-t2TVl0eջ*O3w*P0^T-ȇeAs;#7, Oa!(OhRrfVUЮFJ#}%ݻ":5v½_ho_qcC؏Z7Ov5PQ >6d"꡷0KP=:?yCG 6L ְ"$AjS{u }衿/$..*|qzl`qr$֒d2sUHf;&`y{Gʡ+QAՄ pc&h1<`S{} rwW% ~3m=Q'EG\s{@5VHFQ9@M~+V1*'Q3lLW4ɂ|`LU|Sp-Gzp-0WwOEHq>vuU`\;ʤg[½{@ k"\U +Lb,6'Keb{#`7CMG^SzKiKQ)MWZnDM\#'n2?/laYXI4xnWv4?:pW#~!\2Xh 1 V9YrFDTKax^y3Xg>4 B_BUjԗ.nNJ {fف>|IT j];^D7QkXO'ܵI& =#IVJҵ)mw̩ lM눿Ӻ7UR܁ovxӋE5\,ҩ?c!Mf.jΝmLl}5ɘmp%=7ܻCm O+/F= #"QK@f;&_3Uxa\h-:ekf|byزKhr 
F2Ŋ*,6^  % 2^o?<4'Y@J5piSSLF(/9/; .}50DDafb% 6(EPB/z/* .7qǘNeEVzj$or[%5jz升JEEhMšmts)@'S,>fKo#ڰUq5m-?u@05ya"~7I8ݨh o85,?<ϑFU^>7f8 "<_ Xu˱$W]HF0 _>3Y4ĢkW{8~[aݲyvcs98l$]FϾ\ֆNh;};ٹcF p6caly=ꍃäN}2&fXȊA`?YҫdfHN_V(FavYv7EWAe~R7'Y|n\B3-C<$/LXXT!֡Nʡɉ ?_U E-,pæ/i\+R *`=FiW8d)50O.=R?ȯ9wJVegaX1D;0R(_P"d]b }^fN 1 }8ıbBrb2pCJb0h6g1DLY4i^R#yN/'EJ{}ꍾ gcs|yw0Rl-%+ӭY$>oVF@K[~FX7_ &-%:X JQۤ?-yl:_u nE#{mޑ*I +wh#?tkc aS6K)?"\; Q lX$0J&PߔWis2O:;{=2Zp:?tifQW tt H5'Xt\w.f]WhxC=YHY"~(s Ү#!Ϗy8f  p0eI8iO?OW@hL~hB=3`A; q]!KD'qV T!{[ONó5{laN0OedM_H4סovZ0j&Kh5l|vœAT)K0eˌf:EZ=C iǣa vL)bdp:G婢XxVD"(YӸ'H\Ŏ5G#FPy /|xg?9X'-*5f|Q,%oF!bʋWP3NJ;u]nBy:|l tY3q}Xp)F^D:Ȗ&"5xB&Wb v'eh E5,Ej [oz~3A YOtQ\| f<RXZqĸ!&"YnLP[}PބLyT6w t,gynA)[ _A' `~E{4󦂘y&~=3VsDDKiQp";.$h&+^gBb S)j6.ArI i9QקUe%=A y,x󲬏'g_}HjM.AG}_] HʃU |T;쫔b8cN۪ Mv ѣAn6?^?PaתM!^+y^XQd²\YI/o9>'ûS1gX21Tccg˰1 BK䷩񰭷d3iT寫m^ qtȻ^vivQ3¢>AV&~JLVO4vD1mr(UK2rSƈn !,j`A ,L!u^)8~%Id[˧u[giQqg:C kHT\޸+1*9uцY{nutO)tBlpMI`Őnvz-JSv Ȋ4QE-=K`#٦ Hi;>lKVtbp,:D Yj2a<[ ¤M Bۧ.\c1Duikp2i ۉ#.b%;,k (>M6 ƈ \Q~%{y_RL•Uc%1)rK))+H=yy1`cB"1{ .VZt]M}O+V63#j3>:^tOfԟІ qd_W-Dԏ29im1@G=NBմcc?2X8&.L*V@ZmwW$q+;C}_ZkP=>j Vaa"(␌CSoL\48Ɓ&ky[UD@:K/}ȇK G~wޕ*He/?5 w!+n3gǣBuSbFނ`[xNT65Pw*Nۼ VڬԠsBpvB"#x^Utwv>B<1ļ`ENh{m{J~OA @娯DSfVYL6s1|zc]na`oC=E1ܬ3ֱdj5/hzAggXOctv&32+  .|I?-?QE]iVG=5u8- dtHk.ʚ|5-H^ɰ m4hq?x'44BD+ jsT @FE|=<*3H.%<8AaEךM [pLhW3lڰlE,>$<_Vͱf&7v"= حjU15!VHGNCbLw pೝXNI\B9 g&4踸# I)vy5ޭ;2 Hsim:7qDl ˾Inɦ0R2Qu$qaEԞ,˛l7F2JUr J$K CVfx-$Z7.%B iU=޷͘a?AtCuAII_aXY;(9ũTi+hQ _p*c=Vi8F߭Nb[om@и+ &DH$C]x-|VQq"(${-:j\q9=xv`!"OMsL&yHh37FdvfDd|&ȝ(nƢweA;Ux%Q"h6gs>;,&}ׁw!c\,iǸ~00cmth vx u.G:>8Vl9iP R׾f GHz_ tz()z1&T]g5:.aIw9c"٭r_Ƃ'a$[Ma:q^Sz{ a+Ȋ6{('+yAUn` sy#Z9z:HɬFWrTrh/]h B=;/0jJg#<mB>^KICM_9%UT͋xPqF83?*ΞhL54#=w9̏1 Q]}>:t>5w05LI  >Lw '<֭F=PSx+}'M1D5⿣Tu^ԋ8[jAyCq͐%}W~;Ad5(OKB[%Kp&gЌ= 05<'9P~#8$3鄝ĪB70g1"(Fa,)WYV4"lQ9u3yaг%g5:E01ē lrlY:*H7*QIC+G PWˬƌ^6f+GMY@j;oG 9?ēғԚ BxLL/52NG57d ۖBfϳ'e.6rGb]pZax-NrPs 2a <4B1 (|ǁ\-C1A9`v'RkI{XOg%c`Xq-{`wf'R* >[|n <xdpx K>,d#3~xҾwtL*ƅp/ 
=hXq}݌1Ҋ=ۂ|k)z/#4wBj<3Dp!=Rj0KrBװ#*jHUڙy$rI Vj J}[LٙAG ˂T=zT5]Bس)u5Wߡ o0DT W"`>!wʤ"}g[/iθ-ҬAz 46GwВ+j_8&zSL&5>:+J|/ " GI6aNOY+/MaX 9 x"L]ԯqἚ>&H"FNgVU6'.D If5(_xs'( AX,^coY9;\Z_'#h?V__% | HSm+tE&g2(Alo[y40&~F8rjM}Q0xƐdچu,N&#\"^HļnMs@?` V xbjgH|5G0lr-Lx-'pfh"_\#`9f?uYUcֹќFi,,J5 Q8S>)4RB?WHB//+]1S"A :ΚRᴶS`tvnDݩxRy];ue%xF]R\ qvjssӠmX?K墥#@D$p:YFrF;2Pi*-z# )s6^0 #t r6QBWHעڔlR*&gJv#vCC޲7*t;7sI,#d6&¶E[AD%Ofj~s롟m\0ä0Cu࠯+V- I^4weu, F|W*<-<\kHۻ!Lw*f*{Xh팻Dƌ|$@aZb^B5 )[•)@yejT_fB؇R{D^Ⱥ<h_?^ڐMr?+@RJB}H-^e]KsZrÈDVY|KFZ/Kq|ۦEp.4VwҙKȫΗn=HaQlb:dZ]PaM .u+:PY!I͇Fni6z9>:U ̒ a:AWiM^_T\6wڹ B2/کzFx0ɪBQ 1ՎPhSX3y] ! 0H7_Vlws\>0h<`_JpIqZgvfEܷg4VNBhXMeh<% j.OFwP;T7;,}5~3р,/n3 6OdlVA8'_8^wBߖZs'f 48ނo2Kc,g$A(Nr<%*&.HɤכD&rc m1wU oP܈y|nWEVfڦ-7WG)+%Y<*[&Wyߗ۪& FL-fY#vC񳷘>&!h{NiԉwbB2Ad{8}PԺD Jęa̍{Jl"gjLh+ȭ8hA`0)KQvYk*8{%ȷ2a4n]ÒV.wؾ`T HCV >eXc?s=%UETOHlIJwkI^:b>fcmH.*;'b-v#f|] */I4OiyP IxYlLCN2GzY&hU.~HSCY1r_òx;?7uJh`c?r^VűFYi0 ~m2v]12mYD sf;X'DõxU? gm:,^yNo4X$?Tچj]*3S=6_|$=)YӠpzVQ]J}QߒmƝV!O Dqt9 ֽDNhnP' ;Æ@u`IA'GhR?TLgψ&bC`qjM; ydMF+[q3!{vگ 2t,3L7;|5-=oSxпK gCfMXI$gZ 3sRvHǨ?u+/s8sIe `N9N:LѮbEQ"aF'?)pw}< 0ӥ#Q*Sh<Ɣ} c&A |z9(MmԻ]*CȔeE),FL"yj~#7\si5+/9f}ު|%/]8O 5)v`: G\yЈpb5ǢMͤE#3Pe qAvԫ?:+E{5TG LW~\~x@(f3s}\m~HJH*.ay1./77\4~sp냕X} +2,X_H@Զ;y\cT`eT `B—#lzk/L~D j|N:(=? GX~f. uؿ[@QZ+c23݈[G-7 Wwkn`0zqRp( 4hx,h^Xz5Jg )O,uoeWH4nk]DK7It9  ,T17Ѥ%0Q_/njYD q{n6‚?6)c.y$c9V3P +4pmlEaĐ TFO.5i+5 Ig)m]r tnfeOaT,,< J.sU:Ȏ]m.6#4>o֖ +ӯfG-A0<WػZБӵEA-+WGO±Z2-}gD&MH@!RZʙeL:Ľ9fֵ=Dw-ma\tTtt:tlHsr{5\UON HW%lPiDcCEb#.ˠg ˜c|K$ 8"CJ X5DHfuO;|үB g.h5K$&zߓNG?+COöKlΪm2/&Cbl!;.= /#Kq _@yjI|=rG7InqK7ÓYRMYv'hr>SP961Μɏ Q%QxiY?ڍТf*e*indԎ-ǵɛ`~lV?OL^\. 
S8\6KJP\7+  3?*N`gq6nZܮg .hf#^66 |3:~_*w0II|,<.QNNfB1h]> "nyߋi4аT?Y3|@1{JD3-Z1ͱb8bwٲT?L"ADogJ|[3ek$r]Jd0pΐSTv3*߄+hVROfw"%ϥ 4)yO޵9N<#ڐ8rq?6rh.]κ8EG DvF rpjD?}s5+S.ٛ^rʛ?g'"x FdaΚ yW:Cre;Y Fun֬/`*TNoXU΍P~"ri;+~م=YXB}* {!e裶Uֹ ^rDA~gnr%d}e+C]=V_ :{fK~;JGar9_/o>9v\ԁHLܪ !eTx|oM3%ƌP-S*a1J>91eZ`.1^iGpЖE뚀Qj7n:{?0bpPeJ=-Xyв%m刈pZXyIvM)vEP#C\;gQs;DLjnI  }ۣm)uh^:ʆ5-zkf(8v1<WuipH>xޥ*^|Fp#9%&cu"[1{J4]SՅW `z ;*d_:YLôlpL#3PHrh☟w-F_{-j8X3Tfoth&:f$t&/6^PQkܶGq= p?t ȻҌZ=YS^4sݷOrγR8.ށ;B- Hj \L6Nib3iQ+OFRS۫hz0>Vd ;ccXd$ lVn'&Z0)'2 0W,?_ X>^3!G9#<@1 / 2~@r R(?_͓_5 %RrNvh&bՄeW˧ԪRuanC$CrYw}ujh>)wArb,o8OQ$?J87R2%KjRsQ+'+ '"W_ӚIz@i^:9wXF<(Ž$[Epb `/AzRgď͙~ɳhiqJuS͂*[;iW;4o;uy߁K&]u"ADKB{%DYexFp^Z`- /dQ9Zg) Q-Q v7kkM FX@j4h5wV?%Ά Zޯ5XEmcTe/~I7Ef E/h-j,t9>P @ 8&EwKIYO¸\ >07~Ҝ5aBO4~#u1"{R徦$E*G:rZ-ڸ#2\ %0e6Yvx]Lh@iGh>SZ,}Yhm5j먈JA/ Pi MLQ0!K N_\d՗ pt`3n QX‡&gGJ0>1ƑdI r%B_{9"n9}C7(#NM[lGh3`* *Pő-˭aR?Qϙp771 /cg169O~y#S6YlW3͂nO_8|Qyq޸0+.Gu_aD00@ZGwiץ$z1(pL|ay;.)[kK=+҈TcK;I@E y}< B5e#{a,]7ݢO0+a`G;=GܹDc/K VNI:@!JZ b&Jοݭ_B4rqGQPL8hHkİi (R~ =$Fl')70;]G#ʨSNQ$>kKRX #KTP P^Ҧߠ"@#1Ն>ޘο f!6\| *?{Џ -&ǁ%Av4/YcSI*U[QZ&Ps)+fE6_FlH-w+(U\=ց0DB}QL[ [<^:%3>  0s?W4'+؂$U|~cbicI0 FjUp-9b5_vV;3=$ef"|imI>|:X=݄仡HS~H]=:+| XV{rQ9fFa{}OϨvZRZ)7gLic݉k5*Gɱ٧ņ㰉U&Ĉ0P*zݤd]mUB=[-[ǜ6K?+LGqi*An:'AؽMoQD:#ۣ=Of WhAa$)īߨbN3\D@<94b GU%29{se`]&ÔF9(;BivZ_>UvfxP71xNNt߶SjfQ/B͔zuD/uVEn콟yP譙sJ2p|G)GO7*uT`xBl-a13L]Rzt{9ҭ>y1mBg2zna9&h%rhD*&F8uV$lXr W*,}U?dKgl; OA ¿5'MTPj#?.E<`yӢXqI:`Q"%@f<ɑJig&Gdok]*VzhO#+0<Z ϼP,XdZPiG[373V-Zww|C(.$e.r0VG(E;7ӼV0%JBy9jG{^/aۼI^(:sQ3qcwx'UKŜQ/e5(۪]zQԠјO^b_Մ~)f3X19;weY-u=s`; s{`aD6 ӠǙR#Z&DA׎/WI}޶d{1^UW[2 ҒD1fL>MIo,'N/A6U>~`d: yӿ䩆2 qF/ ې4>Mzs\ L~&6^:) :PM5o k3دĢ1.0N--,OSckg*Zz׬TДèzN63C@Bo&SSױ&#&~O 4H[79,خT?dF!قQ͔,=8..-)'7P*;D=5z7_N瀡;n&o6Oݞnv٫x?a^a@y'ugPEqR\ں,k$q.B~B"Ƭ]$:鑍nDUڟs ۠ΧJ$I_)tt>Oq&EH \Z )k_,=#nnU?| _ŇhGɟyA!9=5Ǩ t oQo06ؠ.] 
pw>Cfθz>#~}xߔJUj)F9m0zo<ߓ8JW%/WȦ7/t/.tLӽrݬb8ڢ :.f -hJԠd`(9Kiu( .f |k~EUAz~r4%:)UH>NLQxO@ZO `?3zDab*=4d2dSJ|ib k6#$ȶ/"k#k'!di.N\mVm-]| *]/6XF_EEU<0(/BĽ6>rN'TfCEȝͯuY9Pfu`a<#rxK06c2W]Z sqz07"Oª֜qMohw`^@W5BGs- ϤAA@PPk7P6J8p6mwT*2-zn&*/,Mlq`Jʘof%Wd) zfl?o|s}֕Ql~p#NyQت'6u[rCvY틐< Y9߉V(jl텢8֠0u#!B",Am9+wZ"*x<P,@(x )VyF:<㈓!5Q~dFJ7/ռOI ߮ P,;q.FHiݾa ϋ_Tڔ=#^' T΍R&oz?}Omh$)KK iu捋@h`<A ؕbuѢ{â͗ pQu<쭤w_GKhR+u%?XEbA%>6\N7ű99Fз~D뗔ͭj~dkI(7Pdȭ=c E.8 ax A ؄6l(źr\iˍċ6BdZٳ<" qD֏.xG5"+sSP(rJS0YTD[r[Tp3T. MIh^ 9`z 4៖h^Z\d+7Uu.憿3'!5~6*D ,w+V@d>jMdƋGx.kӦ`2bW $b+|%slasH'әVtv`=djVATa,6w"`>Mo <Y&Wڕ+ntaԉaPmhxyζ?]T$Nh$@cΆ!'\s1A0}\gEPC3P#b˨2~ЈzWwnJw<6֩b~JG%A'Z&qĽ!4 =Roϟ>Wߟj*a8>}ͩCiiK*e.6C%`aē~yQVHtg>7D!%SQ1p zV>D"ҧ Cw%kJql&(m5w BTX6}JouKȯ~/0=L; |ͣ{.5T^MX|uvJz `Gi;Z) =sqg0ѣX$ց=&1qLg^0 Hx1$?<پ>9?{QeL'i nZ: DP5p͈ kkR:8seQќJ7rwO2ԬldN0#blC!S_,TGHЪ.a?c_gmt#xS`uj}S豋?XrE9*hXkw9F_0%;q^#2ɾfYȭ>=p-@|'n(WGzKhX_T$1 3\3Pʬ8XYA +f0E^En e?yǁg#eyy7f*Y"{}*QB^J| :pTp^n$HK0 Xi~,F> wX>]n}f$yML*ly5{tWeZ|vjk_Hz%b <^<3$K5tTu9/c7F{$57ʋLO07ֆwr&_Zy<5m|l?D'_?g;\v9 TJ*~8^b؆ӷ0h?x$ ]ik" )4j_%  ֘QZ? dݕE=sm_ &ql15cŪ#S$azD ۞~0$I{[B}@u VӞ߃t-Jf["p>LŽ~ԎZʗM"$iߩ%E(nlMrX(amh_ɩm4C(2 g脨PKdž u^r3!g&IHO]^ۦ1B8RoP}}6=HXGS ˙{ }bera6ZNN{D4cӧ_z*e-5 ǽe$)h@#hhƔ[F\kxuwfO> 6Xlj; c a\ 6Yz*\c=LgUiM&c@cFVS7fsp;Q7BI؜6ˏlՙôcLybwJnT`.sΠP{d/y'O!T\^0%XO$R$:ǣ >0 YZspatstat/data/waterstriders.rda0000644000176000001440000000202512252324051016443 0ustar ripleyusersVMhgOեIMkC҈lloh!i܄tv$!znxM"ۃ"C3>ϐlxwwy}vbc Ih\ټhTNθB2 ی8٪a͛Ewcvt04I$ʎȣP, Jɚ{Ys BͬlTtfMR!ZJUO%Cp ]%ǭH[޾#~U%R^,YDk֍ܻKEN=^w|柼jLϳ=imĒNsֵzȣy$/ğwF{XAW>CS+ɏՋzF=RlG$5s_Kׇ!Vlm]ϔkć` \Y0 Z?l۶tץ =2̮AGTlM ٿ^/S o>6Q4fvI@Ɓ7/MKp Be^r2Pr䒻$T?_N$qԃ!' 
|G!H'dH$E 9iM$@c\~@C䡐ߧKo ߕsmN}(7Hާ'2 s]Hk 䆜W )ǺN׬-u~Oeā/l[o4vѓYTsasmC spatstat/data/ganglia.rda0000644000176000001440000000231312252324041015142 0ustar ripleyusersV oTUeS)Ęb!`b!dcݺz{wGi14b1hBP !BZlK][۪Hps9f<>0 O)~ 1YЛ*-Y.XSx]#69XLU_Z,n0wBK7⮵Ir-Ɗ 3xcUbËűS(󾈝Z6sEP VܶbKWY 㵑NhZU)?Ę }_>݃emk/|E߯N.I^:us|j`=w,ԲYn?>YP&v|Ҽ4LJmlGoJعQRgg[lZO7%Y/z~z@sj5?LKAH* ezq3(r` ~="UO}zte2j=ʰX3T~~=*(k(K5/AG0fF+QF›G>YMYE|:3pyF{i_hz(:t?%/Hj < ]Rtd?N6:'"ԳAYNʀYN RL\p{4|hpո 0!| :*є٘]|{XFhX"~\$J(WNrȸqT9@TyAt|J耫 y?G]vtq~;ϱ_G |BzsCBx?t[ {B.RS9GMߨ*lR" j/9CЃQwg>ݰ5k{p_ޑ'u/>> ;Kf6pEDժuހCE<?\'տ(yܻn>Ћz8}9r_25Cg O'WMj7quTlL&z ag7 !& |VV`8Ze1˕FrO$k: spatstat/data/bramblecanes.rda0000644000176000001440000000725612252324035016174 0ustar ripleyusersBZh91AY&SYPw]U z--h ffO"R:iR~&ژQi)?T@OHL'䆁 U?i&1ƚL56MSHcDi3Bm)jҞi#@{SFILҟAFm4G~zje=5?R=SjmM@4hQFM4ihb`FLLL 2dC#&dM10F&b40&ji&P74~?LM)ɩ44 M  @)Hd2oRb{i馑$3<)%h̑zQe4d 6Ghhj=LiIaAN_+DA3Cj>$P DS&=@`z^dDr$ S@Tȭs<,rjCP@2a @"#"ILI xN>&`6XDAT@$I5%I'"I$$$j&< xA">b<7!2!]S.[|ᮉȃNPWh S(->:iP_}}#Lɷ( 0:^'O l4-U5o@Yq_rLpȊt՗k.l5/"Lk׃_$>P(}o752 +:㢘;mhn3 /JŒvPH\@neâIj g' l+*=23o@'*z1~|%D :B/2Zpt0I&iIms! 
*(a-۶@4|x*%4*;8 (DQSfgń0f̗Dki#Ƽ5F08c1 t ?~kP ,d*oftkDr'h= `]d$E fD8:6[(!ONEW+ ]{埶eYX ŻuG8W-B{Qìo1Lf[*#D эG*lR:j6'mTwki2 P.h)b&:hԮիdARz At%T FLDc%֌TZ^c~tM_zEʁ 2ʤn(ޤ_^в~s^tSERsa/h((Fx *B6$~"+RQ fH6 x4+O-UnMޥ4AJx%@\ 9Q L)`ιDY'tZzmA !z[AN额{H\ڳ,[oU2dٞqR9hůC )5膿5Xu($}-)VWP7ON5V"Ԑ5&1vcDFv:V)m֓/-ĦMwY`a1CZ`o-Y8w5VKF *N!{#8U8pn Ո)2ݏ%+Q=5baB!*$fEaXY;[{oޗ9`kMC-9&غ C4[9fo۾H~#O SK<1dYm٭0vfP"#h[;׍Zu[3o*KFsC{ێ\p) RJ}8-7% ickqy #(\W@&U~_w1&x] ;;?knP7" Τ)"゠Wpn-F*RtW1b%&לD|Y^DЉMBD+s ǐk{Cʩ,a+&l8'o psԎvH0*xH>rtAO ZW+t#.‘ۈ5* 'h.6Ch-*k 2~M%P)w`fJLjq3 ̻9fY9+e-PyH (Q ր|O00PĻDmrD (0xqC-I%pF8n[%U%aܚCBl j܈OIHXVrE8PPspatstat/data/finpines.rda0000644000176000001440000000530412252324040015355 0ustar ripleyusersuWXTފ.EWQ 6'"QAE)`EDZdq Hb%6ASFr5P|K Eo;ywܹ3u!ϕG4a0MCX4xK}=4SȚ~p\GZ2 hFoJ5aLA3PIQj +):&w v@A~2lJ$s#%$/W)s˩)G  `V`3BFKА3BC UCSp'%x'$ͰuE5+uʃTU>{r~8^*lC\ޞ #ǃaۈC[0*At/z#`ٴ ]Nb&VI_!7;͑uK f,SӒSwjP'uDek8V٧#K3ґfLFBxĮ#Dmːmik +\62ilČIHnoBR5MBGڋVrG<;H{Ɨu ZH'f܉H%R#> ԭ]Q*G츔^$*=ijBqʉS3󚉞 vɄ.hAnDËZO,0}{~O4;9=8;Dq9G]&14V)vrh>zg@rnIiLxidz4m']%+u+y-N?n}ik¥:P+ȬXj] u[%fY$?Ϯ+>*UA7^TDtjǞf97RLk fmkc/v YY}2dٱ{$A%ː_L/Q $ AܧoSAosDKI*#^o86.G/~ىV'̽lnl'DS@y``z4 zes$7.@&X yQhv=ԯq̜gv]NoF_.!# gXz-٠}g(4j;zZ1NXf)UIFb ]]O72IEԿ1dsiOmK$dmq2SG%,j<3]vF#7[z8d=);N0DeuhpA d3T"+\"^ۑVH='75G]hCƿ0dR2_~iECmM.:↌F7lAFwTg\E=%#J q޶ox;_%>LE/y]5FBOv{:IH4fdp[\W=a|TdP2_y|b+C"c,(upS$vWSn0:itGdj:ke[+U뻦!'Nɓ2FM!fGa]`Oc.z#7m.j[|7( (NQ緔D"-lb|\F",PYf-Zh懘Mf/lx`Z]hxM^p4\o.\jWZSvWRvh;/,7zJ9Cݡs~'~:T5MN9|l%4} оa @բrz%S_p{; 6s>JQzF̑qEܴQv 9X1,hl ?gPpLQm(~MܧE/4ع`Qt ]<8mT*#GSx`9#=?^0bG%e%)A/zO8ă$^k,?7$q"J\OXM?0(pdB>Y Iq8 Am~y%qx ~~GR <":RkOY7aQ_olP׉] ۀq?ɧDG38~>H="x%e/'/6Y(MHx8qc('{}ՇC& 7tXɘ+'C #\RThO{0:aݻ )q#7B4l~=P5 3R-mAspatstat/data/spruces.rda0000644000176000001440000000225712252324051015234 0ustar ripleyusersuOpSU_P%:B--h[Qۤm^4iYጛL L4I*7n` kdㆥ:ˆl7ŗwߕιsλᄒSy^, /}^':Jy;mVF-t'ut=n`>/Q(yO#^|So]oTۢj5^buѭZ%[iT;f ntCqF&p0xQ$Q7LK~)}&w zgO\`}?KC?(X_X>?>>:  W|77E|k+ N$|O OJq+zi򦉧}1K|?#.ԙ73QG9ֳ͢c >%<cy Sg ¡W_p McS_E,R"٘A9,~cKJc%(SW2n\/[fN%[eY-sW p'௠/ +YAQ <~ѹú΃cO1 
iyji܈s<.% <9'.ތ#q=MRxvΑc p΅sj :OK_st49KI:?G;:Au|PyޣMw(rS:#vCX=9$꽠ԠѷƾFzoyʹ̫_zdI0}Dϝݗyo|/) ;"zo`CW|+.*++_}P©>?rN7#xWW}}Kv:}B_֣4I_-UǮ}x9|ұ}uy5W]:}is!_uj]|A¹벪S=9[ys{9w|կ;'p4S%y^_SuaӪάW]FkZdsspatstat/data/demohyper.rda0000644000176000001440000006531412252324040015545 0ustar ripleyusers7zXZi"6!Xmj])TW"nRʟ)'dz$&}[ &Y c "Pԑ^N;M6t}4msPى%RO8HG}*\//$5J~9@08`#g t[DS&<ާ -w14^n yZ9Um>]t:?rrz5J~Jg6S.liImZ3ҡFft+C/w#۷!x8ű1L`y (7iƝ;MCLA!\Qr} P6 8fhXh1S_5A9~r9a͖b"IO "SX|z F`BQ}ÿNe ѩ+&4?*y@Oi,KɥgbjAY})qHV}b!F#hZ(}MV:q)opej I=[`iZ<6+ᅉXlH9;I]3g]DnC.ۯ>w~(2 g0?p9Ya\;E,_ jhbs黏=K%>pzTPTV5WY#&>LR[p&_lZ9c#Υ ?!4Xv|J _ٕU<'\Ns-1'=E>B`*`=qȯٖG.K&3.p{x`we_8j~C#U{`!:l)]X Nr}Hʒ8?vQ|.# `NM&=n]k^)E,t)-@& V-qt{D$^ z.AWLTwk.~_2Uī@B4LI2Xбiyڷh.i(]A6xJ8ʧm'5nuC]y }k~GtjO|,y[0P1n0oLLrQ-ް8jzKy3$իʸeV_$W/te8-Ez>-?² Z$d9 Qg)[t4ݢ9bLĩֺ&3Xd:s sΦtgZE|hn} 4 B/!Д|Fwouvu63AmcmB+jX c|cH!#aA8bCLm417@y"~LhJ~ԇkЖ"EΊ<9NQmgǬLOҗl*N xB!m%AW"e:S`qfL$ؘ^C ڽ vhM<_(sO-}or.n序xDM <\y)+G7o[$_Gm \K4bL#1x*[إ&MPL,SH.;ޞ0΄9 ~`FKOԥ(AD]ףE8. j F.+} SVF4g{GD c 8JAz@qE61h1x1cd/څAWb8kHg(_9 jvxa/ ~<]JI=o6w:?gEGSrԡm!P EX-&rvSOl]u5p͚?4ΌD>׺IC=yN4]9}eR^i^alM9N8ҞF*]etnpj#pye4eQh%S ⿣v?Ljz)ڹk#a@C$C\pY "IK0Pς _QJ7pb V|>꧙M()ԭ+c5LP2LO"BTra1)' NO8vL"RD&$wI ?PE1gS΅:2eMkS>(/cڲӼ)sl;c>J(ʙCam^! ?k֙tU S0X6l=u lku9.!kxfȋ)P{>E~Fox' Kmz %sȸRcQ?eǖܽD^÷_+t9sla6x8+P= }BWQ5\]p!@]{h˹E /"Tc{W }ک#"3FRw2-૰XŅʦXthlyb@8vګ*Ka&h^Chd:H/R>̃ND mVآpF@!/¢<=۟!'Hvb5SyMX\C:! D+7*t|Z vw,19h5iC d_or1:q-I(;5v[gi0iuw{i3;_jrVWk{qHc6l]cN@_Eq&m2ؼt|<@OzٺKlF>NHaQhEk oFzLz0>ע\" PUnR9u!PCS8R0 g0է%x8Zۿ|j$QNYHi}Lɤ-_՟6ê B.-J$gud95)0Ĉ mvr3qpFRTcR?iekBYYɮ};qHSz={y++gs+.[Ds}<n˓S uJg_׷ q"֓w.H75,"l% vLUn޿n!f*bK#p2pHԷz *6'fezai/[P]w,@EB?&rЯjоĝȖg!Q&Ҩ m}_pWwYY%HvX~Pw*m^ki\7C@WȒRUP||җ?Y-ّuEua4͓~o'X"i楇184;$H5n!Y G}1:[/;đŊ@4&ч%JY3yB| `!iAps,17# X Wv@Db$lաgSKٰk[fl: 3֙,8R+j twr<w"V퓟LjX2o:\1}MDXIzPD60vL؞b<4uh t:8Td[W  Jh?~G+sjE `U! C+e[! 
4 zƹȎKUex?}sF;FDX#>Iۺ&N޾ZHC͊j;_/I+{79<T0CBRTt2t qP8yCk؜(S#EI3iQ\ +-^X}\!ۑ`[\7S?+uJcuKrK_yF-8iOcM&Ip!9lnh3[er{v|jPU_җx)u'\#Uphܘ.4iYۖSv&v 4 ibOz};%Gf0 !ЬGgDR$4_@M(QA9; 7  3bR;BA螀Z fVnE]*d(3s6 ,`'OރљhK;74h4:NCO%NH[+08i ;ZcP;1m" 777'_a"ˮza?K$TjDN. 7Tfȿ۞V5#%u)](L,ԣJxuvr(3=CFǖ:lCd.c0IC+VPƎJ6XSq9x҈'p>]w,́ȃ;wW= =ﶹ9jM޺@}*-#fԆa)]aҋ2z<]RϸD$}*DNv8|- kJ`>r`n`jc}flv/7I+K'J`q h<2H/voby-6@60!o&F nM0Y]<ISXlT k #F/$ U#{EI1=jj;jF]+pqw rΨ~#+Vj ea\";ZCOw|52.FTr k*vc,H'6;юoYZhPMkq"J/Trm<@*\sbMlhaO8C= 6 dB)!7:xWĂ5jL߇`^Mg+˛ 4_Dvn5Z|ny @=Lhڟ \˥)OɅ PcC]Цhk`>-2g{m-E܌\A~P 0ʽ}*>LJ y!DinjnNd6S ;d0Gq("IDHEz+~ZHY&R^~ 0F~WWv]M 77+‘s^Zrc\oDЍP/v'Pda)ޯQӥXѰ]TфSʧHu$mwTM*`t]AȎf80KA "7L켱afWDNAP+rQ58#!anv|/!s[ѕowTt\@e=0qNnէlPh|ۆ gQ# ZB'/uT+f:qWAuklewڱބ '0vQפloqN` ,'y՝Nj@с`q/ |1)xI3Xy/TÉ݀1I!,B6ύ;@~?G \0<V"ZRцpp wOPikS+V\jii]% E?0үfaMw3 IR|&*<:;D17X`X.L9mƉ܂!/UUqC,\ג&AHgOXO"uN~ #^Z(CurUR?[<\?5DٖJ $]TDsO4i] 4C*):ϰötxG.D+&w!e[ʾB,|/G׊WԴ? ɔ1=7G4 d鎢pbÈç9>N䩘yx!\S!&;d\ CM) ;Eۂ-}a",.ꡟ$ƢS&5_O]HxOªWWFkD\DcDl_Ѻ(`YS~:QU|I(RhkM)~.L?4KlDp`;ClnPj .ϴ ䷆ͣ G;0.Gk7Q`y *ܥm*y!FO,R٢e.۴RaĕR/2^ QC"owE V/1DI_e2Q c^k) K4}fZCP&3=D6o=gPzl=C q]Kv*%}M:r`ÍKm2u{@2`RZ] Qn CmIS}!4pZCӍp|+ß'7-+`!5#i8s;H3FIҿsCȉkATj ٖK!xQG,5߫TҠ?*+K?izF򈢝6 f XV9?f %c jvV'斜z>čki(dӍ[f8IhSP?eBVIDΉ5w/bD>(/r]]UJ Βw3= Bi>3`lOu]5@+ ʰ~цI֙s>u0j4('FMw#1 >z[-^q U?e@?@a$:E hV/]ce^3$GL>\Aak#}͓}򵊃,îbHCY%Bp>S38!H).oRg1!*a)Ә;[Qᔹ6U`E *ꘁC-+d2)$[Qw6G'F]sySY '3M[ js3O,r~NM4Uz/fO@ cL ֢޲Ke^Ul;7mQlo`5Gt,Pį`7f0η+Ҳ"b9r^;&Js+ock@g K9*ScCYbo"zU.Aڬzܬce1Ϗ~rFf8d ]k]Un1u_$5&5NKp\{}$Q ^\k.wU/HD5űz]?Wu/lz" Xש+ bojM վtT8ey;23v uV=KodfYf5xc|:XW۩&: ]!cҵ Yo#FY ]֖&7ףT~nDfs]IU󋗙imrc`8[WJQq26lwr+>Շ9!,ῥT|OfuD:w=xAr4pڞw 5 njVǎCT$>#"˓?*j%4,j8@+y$updWmeĔ4P*߳$2*k6h&D#RzJ`zV[mIkcdA^JD;)lHU9 ' .ס\v`Ec^t\\Sg19\JК3Rt6K0!kEԁ ># lfl/Nza OPQfB`j>N6fEaE`56ݿ;}szh)5('f!\{X+j7*j> |y*jRdujԔ#Ї~ږʑJy&q)0倌=xANw?}%M&M˵Hv:|BJN^N^8s=*ɲ36PP"|#!dg '/oX.ގ0TTw3hs?j*S`K(gyPmm(g4;D}스ň/A-*0=2k>Pq <rțip+ޙH_s3|yQACE0}P8c{*L@C[(T"Y "JK2ޖKg@YA9@~+pӽЯCs]b>ְXqGa:?%r+ 9E2'etasNfGi>{ @iKC ;5 Pq4zqWw܉(wәY};)76$Ӆ^JGYg|}8эVgGZhb؝KrMDP ʾ>#@!_9( 
[sPO 8V˓!/@# I}|wuSrޥle)"iM z &Cy! PGnR6y>Ex`;(GW<')nj<($;r *$?I6RlJmH%}*Tib> "M-gָ(8 /gS29D PQ4 6/yegW*7{pQya+Mе auQD" W#S:\.v飰MbEG8 b+"srfrְ ~K$zF{_r:ވ4ҳ\~=*HKNpħ\Sn-SXx?lr}DCd!UK{v.PQ1QI 86xJ~*ƚ:0rEM>E#y&B7CNꋤT;_/}?舊 oj4m )W,:?o{FrX7OL|gQ_B[<ȏ8t*zDP$&Gh.tkYmF̏k-KO4OD˧C!%m{zzq+q8i<oF)Kh =?ccϥЄƻ;.lXNpXxp#c߭#G=ujNƴP`bP7 ]:[ļX`aUAM*<2IYJe1"*b\+IV%uVj*AY%{䗔B5hw<0xx0]6?FujޑviiS;]yZKbeʇe"/;u=Z u0bsĻͤ.`SI-$%uqu@j3MD'^me#LN(( opV-6$}_'ʲ'C0b+ 5ڣZO@AB8*Ggֹ?P_/: }RE*~0t$,?ڗ:F|%eزp8;lw03pi94اusI41&i{)LL5z:Ȑ@KG` |oQ 9ى"oGz\Ǖkb=mr%U_]f+Hяyq,.}zF{||wUZnP).zD!=G\N Oo*)v6B1d҇nj^ll{*g\eS:#'U;qX5l3kAsᡱ%.-59N)<lgÀʢcD!=4Lj{t4z$+I,£JɮH,%lD%At0i{!%pi9lB]s77,$ N.#U7E;MA.8{% B"ILJRTD1 e>M@sJ+bk'د6BT`2v'Z.)Y !IN"ق,Eg&k1_z =e ԾC‚vSٟҽEwgd#^]lbBKuy^:ONýc?؀~ɣx1:B>x/;*[Epk?]W`bQ9?6c<^FIgz:`# *40x59jNU\r-NB3 @#T@g_G=I_]cRxpd ̵5F[k~Y 8h"ڞw/4iH8?Q0 fv1?[I^QT{uflji;ΞF6pw鎥3"wUL 7ol T?wEgV~§ kx0. ,$`j%fxѭ ghgÅpR\irңZ7>Ĩ P ^uJYzuh%dA˺q,.rSOtssZQ\^FQVE 7;˷EQmK6>jWClrx9,%Nx{B+ UM(Q%L!彈oYybi|ۧVO0 RqV ȕn!%)tt"]]ĖrJRC醨{5Nҙ(p]_F~93 N+R71P(IFYȦGʛ9PԞg=gQ'ko/ة3tfެll >vŅ-6! ka%덬9#䚈B XNVe>zC8oTl5{Տ(ɦ ͫhnؔQGvo-;gjMY, ~&C}3BG@kCD);+DBg9FG;#M`|CE$Cov.:0+7&d{!B;2@`;,UEE͓& yHU,݌SZ5)rG`P3@@E>Y^15A u^v R[̷0+L ~H#=ۄa8-grJqUIj0`Ja2>kYb4`px\KO,NN%.I#wgJ53ZoB-@6_O=СGO:XK jQHL}C`"n d͏P]V^24jw{0!c[qQY)Ѫ/Q`%,yH htGB);66qN37v!!Ž)'V`ұQQPXІ +Ӊ1#X묨q'*x;ۻL E)_,> 7+t545yǚ(jJy_i__jHq/G_ԑ*жҦ `uNE:"idpJ6W tAyK8'kˊ{yGo1l?J$)[gDq#ۂI "ObD2sycŶ;f̨eCֳnlEǓjEMse 6+`gR%dLm*a,ߨE-*Y\_, [NmjơXW@xSK#mV#]O%#1^1Q.k1_w1tl3!Owқ 4YX/`2KFQfidM}j|m?-Iqߐ:LWH1d AQS8V UsB~Dl^ȯfCDwWh_\ce]/{b?Pe{}Kpie'IɤiqI:d6 93m@U.Jbr< ԹӴuF;d1eAd ixM"fO M mt特_'_vnڽ=M^ÌxEVmn"7. 
{2u:EPx|ٲh@Y!e%1;Hb(tŪ*WNw]Ontg+USb@VKtNV3֙Z9~_ff'Ӡr%Z-oؔo]Zy9tGMRi"J5@f1l`GKb_tϢnݨ6v#5Υ߹pIƑ2I>o` H0^%ֺ\qQ8]&tA H"@!3"wq"VaHP C?DŽG &]L{OάrnPMXGv| oMT7\vo EdZ!CKb?IMkhTq$!kObb=זn8b5eLnZDwUCfD/ ~cT#҇޽،񒿻t x9EB VĐXJ[,:sqdzF9Uŷyb}ղvvP;h ݦ}h&'LB;$`HF95K_D6{TIz\ہF͹t60"'8#KUb.$I+9:~8i-z*gL->E n*˺)7 _e gSH֧SGJaL|㷤Cΰ-%XI&=G\AS1]sVgh['&/I&|o% ţcd|-t@:%Y:!GD9k`?|r8.ܪ%] F]OU u#OnഀȯPk-;?Z1CPtlЎuyC;(4 p]Mo\Lߺ, >iP|d'4W֖BP:E{D:1niP֑ވw2)]Tph7Bg5M0ⷂzf0%'Wm&`zh}ȷ٨cdAU<"TӧB9 SFc W_K8s+j2)_W=6 ȱ: FwtA0+z_:tZeR Rč6j h|H(|3߀,Vx,V@Y~]ؾzRalhlr&ʡkBq= 42t6ypOޝ5Y]=LQ,5{aVU$D +ľؠ$K$nQ?@GdjLRzm*QWWUf? cE]y-|\~B! )y=:Q_8_"44Jg7X) r|WOD*f_ӶOi[> aBg"?+1u]DaIO$Wmp$9Խ,[tHo)pwz$9ɟ}kv\jò4J戀^`kF |vjjR0ׇB4xeb2.(y[>+rVUk1[2ՠ(ؐ,u3M㰮zU(kWs:Ct߻JS}@ ۓz[{Lb"x%4dH'AX >VZ<O7Hbl!_-` /CKj ήJ_O:$lJ ᅅ|wDQ2 nS>"|$.ԀAh>Y<8fxiݻ%FJD' ͬF7oKk:XnG@DY,0k& EiaqwAFPR;jyX.]C}pP^ekrOXi`ʾy0M$,Vʞka{N!?ݨ]%S\wiB6 EX&; 15:#ODdHu{m]N 7$Gl6/U;8r 0MᮨV6 I&6Y?xf=){p&%<WA ggQj+_!0̜?uUF2#5'Z±wyM _M|D3NSKZűSȉ5l>M4{YAw1MR cAA] `LkQZMF5qAE[4|8v B9F\[r0ӜU:O"zXX`a CۘwP$xH5bx"i4,Sv`92sxc!=:: R{@Dy‹z#hLIt)YKs4di! v;mDG= r",ԫUd+ ^_v%Fzes Qwh'Wb1 '\\ .B.>*8N4((6c 4_8D Ms+F Mm{=HA*?'ؒ` xW-QkA0d Ӽr>}W#a8Lcvs%lZj5~bló}KG̽tkK:8%%]E.q5YV[\7&5̗%R :BtˢLR 혙*DžX'2RXXX@V.' m^Kw3iފJWN\Y?ٯ@I t,ZnU?mqSEØP!&~𹗘oPJ![Q>ϦFn'ُ]HE>C}z*A/57prd'rsd!=K~"c .s9[|[v4 ևN1NvB֝b2U1.;M2uJaH8F.;r e!ϊ$}c/~Az.^4Г(C-v5S); pM&2K(KdЍ3k(vw۝Xd44L&|g 0rnD_;/HdTsʝ0g+!3 yU3&%߂&nEc]qE5 {OۀIpRYJތ32c|=ABk6&ہ:%*&-$re7])Pz8MW_k8 vPOTn9IWqzrI%}HryvӼY 5f=+:h5#}&[rPežL"xd!%a@/1W6}3__Y>n,QU}^ZVYV?R@IѺ:sSon*E8B]p>mE^m/0W$?+o{a7k5\>=t<$kM?R-W|t `6kfMeY;?LH:t# w>؉1zqqӯ2zT ,E;fmQ žds[6i xXEfݹKj:{Q)ąA~VnsTH4_v>U8z U{W]MT\8) Y|z{Q&( 6PK*0KI†$[b)w)n 7 o_/m2u1@ß좤 D $+Ş{kW C-Ev<5I|Z8l1t)rDžs;h傊FX*܇յ5PȈ+ql"ޝ;W𱑰v)!+ͦ->-ږt0 ,7K{p,u0-JL kЀC_?Z3H/77}a5ƽT;Yo apѯ( E0+5؈l4UEd>;up)9o3f(?.56 q\̈@*Y+YRBA 7BVlDWb B$UYm.Npc@xL8wȗ'~L Kk2vф{G\kX]鸤?@ vV,>6c9dI$<Ô{􂺼s R$ʲzL̀Ս>1?gYʦG&aMߑeDdT>_hSjI@b,EC]`"blT0)D2wňU!S^Pܰ? 
K֙/oOa30kWv=HlEnRH.uO^BZ&On'4J"akKBUw³ugEZN3NAk`Ss[^OxT`q0CJ;75Cx]׃؊G;俣pxa \(e0{Qdp͌<,I2] 8j|./K 0iϠ7bpi퍛 f׀ EvZ8/ɸ5s@rV# kkЧ S`]|,M1YF3}.ϊu*Z \q. ILހ]cQV2*0jmbC1_ Jq wtkjzJWLߏ@lmBEoiK-ý2e^FW4Lǖ78T!iKhSndQ33bN뮜 |iksJ@y攤9}]~Դ s7T>.^|qlND+A}lXY5ꁟ*>?[`8c5>?5$ifce$e[K\ y:.꧇WOx@êBJK&E)&](2^rh|0w=_בrܰ,U#@e۪Pi>F'^1̱RxQ[yIӾqlѡhs9dFZnqMbj+AdIG/:˜TgB‚Z,w߰7&X 50 (06oݱ5BYYmfҵU t!4IĢ7Ts)-TY1鱽I1N*V_ήt8t(^ѩ% 7/nWL<*v5V m4F 1]\1Cr`>DS^+~.Mѩ^zso##_ &hMr ִf?\-1n␕͛^(%J3kר1iT35M%WK1 w!-kV:rbddG\+nqkO,FضJ%Qxr8_I}ڦƐ2Ns GԅEh3L`x%wZy݀%<'$ dļҗʣh|NGA%=dza ,ƈTaژ h&Qbx]Ԑ$G;]DeCeIzŇ3{QTmܼeL8aN>O1㋙&zNY[DNfտ"22\Xr'V}zw'̜;R 6 oI$=E5d7uAd#NR(l\:qU 0$c̃-@Odo6rBNQU`.+%vőϛ]a˖Kz~K 4;m2Oۮ"R)ZS++CR{A/Jkd3Td5/}CYq$DYsF욛̪D3z@3YiG%hh:0wJDLwذYh8"1#zQVK BG4dpH$50+EU| .<Ox`ſySWa=N w#Ua@'g`󢈽D(_LPrؓtB|asԩJV*N? ,OB+fI:[*i~#t[2[gg>qr(a.n+^}<}@b ) ys WBWcyo5Vk&T@GP0K/0;$ 1J, 698KqBTM8ZR9T<`/tk,@EE|g?4+TT&4ն58F͖G0|zu bĿ7&)vU9;ߌYv8H>Y8@كԗAsIr2>7p̂ʆ0S؝\[y?M/p >noOKj.qvAA]qxpAl|g"!ɹlǂ_}Rӈܦk*DRY=߻g@zGdM릁f!X](W.$2_ P9w `o3m%D,ŋ2RM׭%^y].&Lm ҕGݫ{-zYPpkRQN&DVJ ,@kz::)Z܂_*ߵ%ʤ|dzZ3 Fd]E ?1ʢ)%6MfPu]kxc+dHIm]D/TxhʆgbM/fo@C4O|h㱬qoA+i1Wc힜5dxFtH z@q>d٥u^׭v5Yuꓚi<ͫW9j2̙X˕xC~qjr{Q1")8jgD$"7rר:}/QzƜ(7 úf#+O"$T!zClvzK;-dɸ r<.֋z# ᚤj.G9I mvgW)##. !ɉEW3/(՟uu~ wE6<Jß= aA nj4$zs'7{p>QЍ$̀ꆵ+ ĪJsluC0 5tGqfM)zzvw#^,ٲh Oks/bNr$uP D˶!Ũ@_8:Ed1l&_2;Ke"īk~uݍڛU*AZ 0έ^v;M;K^L`sXJlj9AES֔LG:!T]\_n"#' ,0R^Պz,ϽX[~4L̓rߊ% گ B&x7!.=77q~ Kʸ% NYzjcuEWL= JKD Lۓ[өbs6c3oY%R#IX&@Sknύ3|y奨}٭my $kD+,FyPY \Bl#[匏`@L}19 g5RrCcvd#q苋=?LvYD QSk|~|7,dՈy$ HP5wGec7M˽ !!p1)DvXk1{] ыoYvĮɻIM5X : V0P^] 8CQ]-0|L7nT |7Ў odL-h gՂ[BJ|(8ٱgm<>Z7E*w9/IfRF.z@mY4\ ^,NN\{ṕb ] &=iVmm,~Fӛ xĬG0yeCR`'8HނPաpyIN~q@%rx^@P0oDdr L$qO%_{7 X`NFA2Ԙ44P j7 5)˓6B.hn-3 S vL8jBs.ɦw.mRI|я-%~lVWnY'v rQ]GE̾ 6@ԳdXL@2/@GiD68wFJ|>H ӊ'p_5C0@a@2fN|N2uBd<:!;F9yN^ Dk1+-lVd \J6hKۏSf*6\~nzYMrsEM34,LA%gXtem! 
VBG+8tJd<-(lࣿSޚ3NT: 1EsurQ#jPK9^$}-;1Ћcrx68y0=&xIգ0` H}Riխ^S3 T!VKLlQ%mxȖoV*rjdE7*baձ%^g%Ffo}RrxO.Dͼ?Z~r ROt3w0S}ym98]qt=N 05/QyG=erC_[M*SE|/:Oy[yH=ҨriA!#(5Д]/ n챤6w߅PTr< ';(S^(B qS/ 7Z@ >0 YZspatstat/data/letterR.rda0000644000176000001440000000122012252324046015162 0ustar ripleyusers r0b```b`b&f H020piԒԢ fa 9 s*s>9 J&8v:,e[,qಌH Y$;ͽY-?w-`;u"U9s؁9H<Py'ɪP7n0ɥ< |Pq>[|^!|ai ?8?vf s8b/A̳qs7Bf Z?_`x4܁}9psɃu&r`] VX@ɁM宾& oj#㥏@pL 0gEN|B FH0'7D9-eĊaNsPřy9EP>[ANi0Bx\9%9E豛X\hœ[RY (1/ΫD$U 2?,y [spatstat/data/redwoodfull.rda0000644000176000001440000000554412252324050016077 0ustar ripleyusersZ XSWNH (j]gjVkVV{U[tqZD f)2VuvqvqAt(*"+ *@ԅ̻s_#ttf^wYs=:ffSI$?ɸSG*K>РVEfV"q$rkhA=äԅkEK\cg)CܬӘ4à 2Rm'eH7jtfҀMfRW"Z&JQfhFccj֍s9f8W1Wa"s ]I JHRTpf35p:v61"ּ*_&TX) ?ζt[[FnXU'v_KϦ26؆VE&w@E[T>e<-Qi(#^YCWZr˯p-/lg}\Ds`p˭ݲre0(JY1I_=8n,#NBy,Tبz8%\ˇd_T >NK1qoP\Dn+?ӽrBv0=v]aP4a  } 8Gޅ?A}?9I8 |VԷKҾ&ʘo'Qhl"TuD11X).rdN&5s|巉A;x)y$XҀB4Xu{HE} )ܺeSx9LyK_C:/ qC&r *BqӨW9!nD 1c^P5 oy052 9h7< `8x-yS gRwc 9nR0_ϕvNa0Ӹ_ȡPC_\Fr>Pv)MsB8tyy=0> (7 $"#7~E(lBV(0.~`(̹PhV Ⱪ5pՉU B=7?\>#pփ|9oߝrdAVQxۼZ8? g9-c!h1hS,}`>_Kj]2&nߟz}\喏AZOVlvN-zm=$?el\ )7N$-}Psa7aGbi6H8\Mݛ|:28o׮GLGg jaה>] OΙi?#o3ۈq!Z*1Q/p"O_zzԬ#5,/<~NHk\pr? ]Nn^RmGÞcPZ[9P}UsV1Tc<Gቯ'kmsVPCQەpi[ H9űK1^ߋpt&C '*'}sة|Ԟ1'9iwC-Oϡջ6kWntzDžrٽ [GdO2丳Zf/%gG}'R8zmOwr< p`Irh|ay_x*LU"xH(x5sax*F|/F<۾|P/@z5%R6b\yd; J} 6w1{-[ڗ' ~ђYa~nd{2! ?<xfž&p0i #`>XC!qd}:N-(bT>:v2ՓL!RijEEEy%ƫ쯞c2(95ȤY'~X2+ih4}^ p^"fDo5E$ 04r5T|Q=4zNs8n]yr<+zFc/cz&IY0*}W6I<. 
8c7(!&ʉ=ut)ZZs72>jt?pҞuD9rg|q̬t{:o-#&2J+iM1 $|ZxIouƃlz_?zҼJGH}zwKXҽIX^O30eFyW (umhb-})ևVi0$i8Y Ġ6 :w44FM|J@񻁸Uo-r|7`@_]` ]Jh)U-HgW/BjÞn协M/ٹuKKH,A'=Pwqo7Oܛ'{Ľyߟ z_OIJsu>`LBmDғНZU O!PR`JyqYL  l,%iyp<; d ~ OdSSSOh%ҧE:UкGTu.꺮WWW'zX'Oɉ}pN|Dȝ;{{6'fD&9?+q0N'`E]~ N=ĤU!IV$b;AI2,fe-%PL2PZUWw!kZ6iٛB`]r7MZZZ}U+aC'W:`EWQm['Zj23W `rH\uę@BXAZIPePfESC* F](d Q *UY8gS%0`UDwepd"AIX# mz2jL5FC+߯XL,XXL$$q?P$,eJ HʯLR/N'.B~S%w i#rJY3C&DuJ@$T`&mSp5CGU[UeL8|8yZ'(-eMuܦ}zmeZڦ0O7\c{L=:FPP@"zw$4m:6\'u#xm$dxwsllEFvؼ.#vK Fm3QlME U (r"vd+VX aEyڷFV_Q%"&*#Bc:EtоiS a"nLI"U89Ϊ*Аlmi1&{CK^tDs-6= eJ.EiHRL!q X!h{]0׹1'Dȋj0'̄ =l$-IQ)?w$S Pspatstat/data/simba.rda0000644000176000001440000001447412252324050014646 0ustar ripleyusers[T_AF4b$np1ҤCD[P ""(HYz]51}9N;7i޹s?;sVx<ϼyݟ>Qĺ3$}HuY 6ԬpvS Ύ>ΞE劕ݜ]\ 7;z$/7S;;x2F3+{yUk⳹)&^๕a0{cbb3Ջ7Ǚ:8Ό8y˸۹ݲzQ^}Wzԉp>>na 7q*gGCb СDa}oW'/E AQ)tr]Á폕[h@}W]h2u7b~1EJ}Ng8u=ԴTWm-W|jkdWVT=>ϜJrgF0G^Ha& JFN)V ״`_!Aͯ&#涡qyYğ\qqM4Q0wlFQUKli\}fOخs [!;Rn '9ɏ`ɝ ?CrK \`O݅^̙2qE%o 64G]f6{M`ͬħ{ Lĺ.='X~B7- Oi>IY/`WЪ)Z"xɩ%Zy $4!hY;yO $}꽖Zuڠ@£Ka'4w$٥x|MqUgnez!whCa= ΰ;.%JN3\I2.TL򲭶O^b>}M|Ǻk$T+2܏.*~=m}>+H`W-&@< zyncBx0#}#/h~Ϻ?7}<>`|o% fXLLg7_URH_tgXk"]I0 ΍jBGrw~K+?8O~(:b޽/qm"қm$W曰yK]-x+u 2 鋇W!!=cH^#iIanB "NZQJď5w9$ 6`.}p[ok/c1:x]W42覙nt~tDH"zy'r(r>+PdmVCκHe3kPi N J2pΌj.yvD/]%xfG/AV0 "(*M\yCr‘-?$\k5AD_=_eSD.!}7s#6%AqDv،'=G&vuxwƕ\ܰ%q#(yn4O qs˛:N E ,O;(>.E,hS+c 9t,^Л`5)W O6/Hz+on$;@g/,h\efހ~)ѕ9ng(^}Ox.џ#zl,|˳ܿyn1Ov- ^L"bg VQT4 GCym$")lRO[{ 7]ȞGsP#cwTJ'&xv+ *>?,_^cI^[ۄ/x΁MOj kW\,]<Ɵi/Bxq37l ɩ'ޒMT`d?$gyw#Xp֑\J1~OK<|+QelieK`ė33&җ!B7GH*ۈ?;hnWNb!U_ǗHOY+ȕ7ALO C[> nYi\;^0AMoI./I~WOhxG{&>nģ# .&>Qh.<٦J7 rH(w\;2aG|| |0 S)T;6T}u.5ypi)λ`"=?|`^[I z%_!X9AHtiC U \<En F澸`fDv П:2ٕ6ںӦ"~q- }ƋVMO. "\wĻ3=nFV"x#vu)#XMݤF!<}N)~tfO5h2j:]Ӥa->V7|{[=#{^Z44,Q߳`d D/:++A?IK#lͲ7vƣר1![$ Z^)f|H^oX;Tb=^ٗڶ)ne݌j͆dGvIٕM&~A[uv~W/up$]JI%T~9*_3h! 
p6d.d'*逛_yċh7,^t%%3_b]V`Y`(m%oH)us]⇿*C!z [}aJ2 ɥ8qu-UD~]迨/֩nڂs1b_,w"=_A| O>e݆O!><~Cfߚ>3h G3xjpt9_ /UE GVBtՁ%X3ۈ_3~v4ؔZ9PHx3u݂( :߭FWDzf=8[LvD#=}iGNJP-ih˃Hl) $C@C ?>]CP%{W4];Ϳ '@{{"{Ȏ'S:K=eʙ+[@3J Oe xeX/_[.ѝ0߂PSL$}" }a;hjd*sSxW~jK~F ;@sVkiLh<~ 7leɢ ' Ҋe7([wΪrOEW(u_98v2"қUExUtTS=b 'њ7Ւp^D;6G9⼩ҏyT+}6oB>dGkX߻ DM|,$trYIOjKYoHhAr{9^@x͏Af,SG[&MrR/ު}E~ۼM4/W|SCzĚ 3ėnyL%ﱍN&vEN^ԖJqGE+KK"tCJ<6-1"je|k>#n/'h% !{] |Y'5NΨIſUK;Mr~uey_bO,wc|xvҸ*N bMJvD+EG:h O2%jk 1ۑؿجXFzUu7J:QC #Х]Nķoo#)Xǧ{',쁓+x: MExr3?rC}JO̮%C^Q?Tu^tZXWbL ѥQ'%c>3BѵNRڥ0MU҇Vhs=M0]{ؙJ?LGb@9*9н,vCFv$ƴnzsަ;Jtah7~mGۺ.nJ޸%ۏݑI/kSfhy ѥ`o:n=Ǝ`zī#v.| ys%̞ zs]p|j:/翙[Crxo"+,As |D^t8l.3!'ٲKC'_>= P7Pߋɕ1#zd~E4T5ǸbU._4En)+<~Y0J󀺣~M6`>.q&z7AON?CN%G3p'Gvtp!`fߴ zc&O!= Ksl'w_ O~Ÿaۚ>3U$c'E m6՜ݰh%6߻ ֵb5ǀr?ҝ jn.or{.c\ֹjtr]h3N/ oj6 iMNʭWn>%Kq#vr^ yN9~ac؉-7K+?SyPx`G @Mn ׃?o;w>x"18a.p[mo'j~Ȋo EZXjs]Hax[<:Vg?vʀd7nxVY-/<(Ub5LkkvoxZЬfYeQR yZU ڙ{ paÍ0"]mn나f=A 1fHCF,<Ӯi74C2%ː%!1S^{}:HniDZy2}˴2;3Q>=hvD,>cFM/ϸ,̻2ۺ ]0ͬ-,ӲJӮiee\c5)Xeo31] oF0v9wdoŐ3jLl|nfN κ7!3K/}<0ñrv?v<ؓE(;Q$ƴۑl;wx 'E:Qj0t?XN}]yC`u9Xb5޻_.u}cۑ rznўۗ}u?X71~Z.~nYź.~n낰\wuAj׃u;X#?raz ;sgAm]=>~ayg'O/'xN~mgK]}<.wt48߇~nD~}s 掕l~o Oo|Ϩ@spatstat/data/datalist0000644000176000001440000000125712252324034014610 0ustar ripleyusersamacrine anemones ants: ants ants.extra bei: bei bei.extra betacells bramblecanes bronzefilter cells chicago chorley: chorley chorley.extra clmfires: clmcov100 clmcov200 clmfires copper demohyper demopat finpines flu ganglia gordon gorillas: gorillas gorillas.extra hamster heather humberside: humberside humberside.convex hyytiala japanesepines lansing letterR longleaf mucosa: mucosa mucosa.subwin murchison nbfires: nbfires nbfires.extra nbw.rect nztrees osteo paracou ponderosa: ponderosa ponderosa.extra pyramidal redwood redwoodfull: redwoodfull redwoodfull.extra residualspaper shapley: shapley shapley.extra simba simdat simplenet spruces swedishpines urkiola waka waterstriders spatstat/data/ponderosa.rda0000644000176000001440000000334212252324050015535 0ustar ripleyusers}V 
pSUR*( *-K+-ۓR $M06oZ"#*"Ee" 0Ⱦ1L߻{{p_#V4zРcZAӒ[}rwti4 J p\ Mdet/lP2U*a, Sڬˊ&AGy]Rl1AwB (RU%Yle%;6V&AupU^O Q(!Tw+ &rF*/Th$rBL߄_k0hhڰuѯʵIqІBk51OY=FF 4_`Ɍr$|k;vk57; 0r\ٻ["m6-,} M ΍Grz;;Eڢ.W|p ]mZ_rh?{5NavG6>‰Ķu('ގ:GqP;c2}6:,f,/Ms >8nΣ[!4Nè.68X`4QY,2vOޑSp*x(Ūbœ2(6p#ommiYKVmCͩ 0iq?ʼn>K.r>)`FQ &r`w^M~ 9ż {1)TȘp$a4,BJ"߂3[sH|8^kxz] ]9ݖ/CwtE|dJR,ݠ|wr{]{$O0 6Ă4jFd%e"\6_V!_-h" O]dRT:'ݠ$! q+kFQۃFS~8ޘӜU' Buj15# VxƋm$D[16A=50m(,0ˑFgG/?1<^@[13ƩH뛠%-t9< R$Q{Vea:-kFA]څ0b fMIQ~;س$Q+)qٞgrX?_R>b7I} (9tNIS0Ia .4]p ݃HO<^C-[a|W6;a;Z܆밟3Z_Sjw̎N_N x>N1O3>xpsi%6旼{4s3x2,,p_3xsXPVk#\'5&6t|{UWثF{RWMD7i?%spatstat/data/clmfires.rda0000644000176000001440000217114412252324040015356 0ustar ripleyusers7zXZi"6!X ])TW"nRʟ)'dz$&}[ Vl"7O[ FVx(WFZ'1)ZP?UF|%PZnQ >lں>zG7f9ǭf{6Gluv;@R+ދ;ޑTzgTXʡ^"t5|İn]uWG8ݳV&2؁X9/vM}SisyJ #D-e8Κ;~`< y4)q#P[W7?~ߝA.Ϥ6ڬ`b)תv[|n)kށ |pce\E~h?R.uXq<*ls{b7RTA>PJZalU%҇m(9>CAn{k>3IL'~ ZQ(8^S}0f$CZl uU?c:xd}t߁}4jSݹBWZ޵k^1b-%LbD;0ZN.6Q HـaZi &gٿtp[I0K76Yfޏ":  =o>t Gr6ѽG`9bxA֒vI!.λ'Ƿeg{IN\;hݶ%4~|STƢ6Zddzl)B%lAm:z:e\ƃ2#/eڭ@'~"3߭jG LJ2u8s=RGl?/7d\:ge$'~P;*6P$5\`; Na$B\p3{z ?dʀmkK_۔{N| >KxQ>b i 4Sy":hc_BlTH!?dBwu6(I h:p+Hn u!Ww"(_ dWuQ;.?g!|-.(]+}3R5ޏ `E@sHco-53փx`Ӗ>$AXbKdx5nrHKP`Ͼɣ[fsaV苴ᘄ+ ])˄vyp.Ұx͢\Q~[d=?&0sbzxp|~hɴΎFpY[ӶrS:F ˝[N{2Rᇄ]t%>#\xT?3X0EI\IAhDzYZ*Vx?!!qƍO4ʣ{(j x[<’AߒJ u]ῘVlslAF@MN%>+j$ӽ8@[x(< \ʽxA Onѝ _ff \כbdtՅ>qԸ|AY:GbФq=W[! Kx=pbx>1_Ls확2XF(%'aG31饀K*di%c)CHsAxQYвPpFRroإpRHWD[Jiʝ~W:յ$p/2PBKrqdO4`?CZ` wG Trj 7?i]`9@ Tv Y:Ep\yVc}ER,e4QѤ Y:PT1ԒTGː}VD/ v$ùuK3CQCaҔ E}kjq6γ3eׇ/QK ƴtg?&oW745z/b;R=GU{T*lOg,$qJd#N.As݂P{jUszk-YΊG~u!]5-ɢŤ 7nܟܐ`&. ǖ.|lͮj (%s٤`@.l>3%[0'}lr #?+H_=pm;q+S/ OR1kuxk=I+o!n QK)Z5ay:Dn+$}Y}%L،9LrD'ړlΏ'fyWjDQ|0# r _ц(SUc,KIn6r(kGL>4!\WVɮ,#gq ;P_@ϮH}\"]9NK,&M3'$=?%b=9[n;3.JG2_F̋'Ū'brF !Vb)CDMD~?yC= Q \ l9{x-Ũ^ȑlZ}QjBLaquq@_R{ƥ?^jgY~g`e~l!Ec:Qm[]d |k1 ɨH_tqr Ь~w@?[%; -oVa栗mӴRi:z_Nb8#4DBRSݗ[! 
A<8OX?Y3̤d2D^D?]cpOzUю<.8!DZy'5&'x}u\[PI,@`LGt'^vΰPH381s83G8ahQu(|qȏ9P>|#xp%`)K2*+Tl!wvE'=kF Ӂ"نSic ǚRžS.k\k왍m_D V"U[0 EZHtVUh\+H["X?;t0Z^巟mn\7;HF!MBgû~907zmoJ/Yj{Hj_M+Qoj%a:/ фd=Tqz pд8>|">8:_%ovy5&٠{MNϖ^s^b8ḇAPAVa{TyS\QhhI@E S" k/a\&&QLZ\J^'l 4rA v)# Bܔ,-pltz3@JFhR8/BM"=NkUR[WգOva_+] ھ~4!iD8D[t/Nf#yx0*<)B&;[ZsmwKACso7=<6:g]ѧ⹶zے䅌li|O5 H.tQp,s\ bm8?1ʥ3$pEd7 CRƏ H/a9v`3H-[QN1=`6/d%::5&e.:u!y+{!QD(\;NJe A/ ~\c_/(BLK>wEl90m0Jtˏ 26FOߩeS=)o6VFiu ǽzDZF$ٝH2|$sSIJ~ST? 0ꀮvr;͡5DM7>c}Np"P+IOG)][s,Juf*Z煭^5/X^V,mZd߷XfGG%9k-_s XA93oUIgcN_}Xy0HdTsEtEVQa|wetqFB(0T:$F*:0*u#uo޺%AY}VvhA"8҅Q4XúC~(J/%դEqEÛgm/~_iƚȅ9+3o?+1'AR&͒|"N="Ոq~vIkpŸ _sszP6cN ^/2K v,;aѾ(sv.aU a/{9*GCxk!dVi݀evŸ]0R%4+mŌ(V}!4V-x~M .)2W'pVtafHoˉ@_φPS\S-c;;)z=J|fQF8J= 6: i ݀ȳ~0LŽb`Էs&ȵE2&U>nӁ{_+e}Cq[|F /?o{\{U/&^2-*&E畃$R#io($)lF٬Bq 9g"ˎO~2\􄻦.}`hPۑZQ>.]dX'|18ebBDb@H3RjБy |KWo7 %fh ET Leui=\[檲Tɦo;iԂj!ɷNh3Ł$^E͖&H[aUG'syj(/MBIO[zٖo cM/ *#IkRɓ9AcrH $'٢3fŢuSM}L ,E[-7Lwꙙ`{i>|2}5tv3S(ť-5"u[6 /p"E.܊T8.*0:r9IC_'(_?Nh" {G:T_L/s}bDgAzyfE'a(EC'Zg䕍d涼$g)BjvR@8QK $dEN%vdi~S$+&ih8=k\UxyT!XI7v_@0|LDbނ?pq}.|[uߠ &3^ V̝m=-q10REfuX|ïuƧHwkq~5IrTDeauwBTlcy%"Qu6XjtٲW&i+ʂUDqLXυwA3uC%DBW>#)5jj|\D \ý:3F&vAJͼS< slma`4FTk0`67i@#^^aYlzڹ d.Y:c vZpIDu^s'6V]B 0UU궦Iw'x<ίI+Q^0͓h',BLR"`<Dp!:atҵK/Rng,aT/N7I (nhU}&1 Ik'zO^bfiꙂp#n?4ql%79N"sDIw :5pJ '1`dh!.52T_M~pR H7 %=89gN8} +pZBv>4bj]j8ac Uɔm;Ke`WCCĄwyׅ Г4SK҉woWX,kt1Tz<~J!u3Oݗ &SP¸{rG8~ƸM#}d35P(,S\svԤRTfEKABҭVUH^,yBiELƨ싒hD>75 bp$=V%V4 ݭԴk~=!A%aA$z~v88NnH( V{o%O 49!LtM~y-[,Q47`s '*"WX%,VƱxzKdHAтP`V"A Q$buTYTiY%SduAT $)Gmw"4KY[ʐRFcl;j᭟!ǀIESӯ_s`V9FӃW7NVUNv@Jz򟜍X䶒1qVtAG"u;%y;0.aJxsBݩf6aoؚgr a_śV7 6@OFA)Vclw@g]ve$S8GVtBU0ӵXL.4HXxnf>pV(!`O#K}&AaY4{r37E,#\r--MI%(THu_a3o5C3Kg50ִ)W{r]Whyoh/) U zEMTSٯ#{XWj^K$mw\ 3@RL̓y55\$b`{ )u5bE6[up;3qPY6=qň%vWDl6ٞ3OرD?Z'ț)rYm9)잵hϪC᫭%W't)]wgp,qM]>6͞we X'+4OcjTyn=LPG(\Gĸ b)p)(a !u*̲|w#< EdlCX<?!yBy(!*缣V)Mn6K-l*9` N^k:nU%f?aHK$t‡v yo=', tJ),\AfspL\Ͻ4$':laGڭ'M'Ň]l`.IV5:zkȚ2Eqʧ.XyNޓe"v-dP&lsUC4Qia>f)pj@8piEJ r6W>eO#<(3<S+E-j9\x֕~y@pt(n>ذbDi= I,Q 1DNcмlܼ3@95MݓSiCdDWǂ}ـ 
~"pdisr*eѝ6fV_?P׸,9:+8zF^+Jhu^R/N3^%2]-W TݪGd_#E SmKAeX/k0㧆iq=|R 8릨7jrR84*@a}o{,LyQ-+N\J?BO%.$ȤPwoCce}sP8}ː?fn}qdr!bъResӁRXK-#loP"zWꙩdQc2#ycL=^P1apӞ$ca1}"sַ꿐&pq;]u6 Acq@S_@gQ6jo^cCCoE隔舞E9?u%- HQt ~ l+}F7m} P.~SSo 0̉ J 9dBԦTܙY9`[} A "sM۝(v%f8>^@55@pj$Atm4xaG]M-2Y,B5{mq' )w1v6W!תAy@Q1䳽!BZr=}E)4xOCWALlUc}o j$ZHtͨO21Lk*PZk3 xbH[\A9=[새ɤ]l~FşdMhp<RBJKSr-loJ)F!kODcڔpZ\/g{ .ֳe|`Çb냷ț4C7K߂dc @l ŃW2K0H)3ג*~"qJ?@%R^LR eUae;9N V/&{\uF,G幥 "WlNٷ67/.̝/8IY~g(KԝOAXPb#߶P& ~H"EhIZ?ܟ-Vu4f(}ULY!_qgZ! @h>Z fGQ-?$uUO#}ۤ=I3'FH__`E7g[LgCהS'|x#S8,B^elSnLkE q#6$TK?%YiO%6' )L-:Mcwzb29t}%tSZT^i p,!Zٛ{0%vW)5 G͕hxZ6]+UZ)9nӅWlNMI"[&qdEe6{P~ܦ*i."w)tyOY!fd-N[]o87aðt"ϊqdW5)-UƊ iG\$r)#/=3q"4wk-bouk_*=LE~U>{ǿFs~{G(B03+;VSG'ZHҊXuQ0 H gn w`Z7R~-"Gm>HC\Ȇ\',xƋȊ!ֻ V!~e%W),HףYJek= Z%i"z@<Ug-80x)nu"au $9E5 Z> YXg Ժ֑Z/.pfw& bˠNcЦ36(w,hlㇷvE-k=?&FiL/)_Wvżc{9qNNtY`cIý?x !N|3DTazK=X}f*gǷ됉 0UyqBЏMk\* k>}3Wͅ4lJ4xʼomXʣ3M);K!Uwhz7*[e=K]tjivMxNu,AK5J:zMW[0Bĉv>JKyUsuO0U3  *tqY 7JS{8~x-z!#l>] _E9xf(Ia9WhtۦՌnrRbo;Jmu3܇ve-  (X9ssb M5c{$ noUm]dٺu MԼk2(KEUIΤ[X,+Tݡ4WIdʑmt<1>[uL:=;y8dDz,: Mg;0^V S=M[ ZeJt,@oK]18c}ۥ*cHkYK]{tꔭͦYz&{ows8x:*.¥' DԀ)1F h~~Nnl(M?) py#"X,Oe*u,ϒ45:b`(KA`3r!^96ʒVBS󄟛Kp頬2*)N)Hnt)g'oAްmqԲaE|+j=R_[tgE]S[}ILRR,,#M&uu U <_SOm&~}+[sXJruʺy~7ލPIgcJ#olJ{Hbx4WSE>.BIɲNu+^{F{?{W Kb]:Th-)|3DELK`ӣ,U׹6*[KG o/?<]l ./h&2&D~ݭkS ˾h^Sbjp%w 5Izz-DZZ! Q}Լ.msяZAץ@xԠx1Y' Jnhgt\l;~ 'ar0/ M۱>V+G_Ee-.P$+{[I}ѠPޫpn "N8a&4`lc{^&r$ARc6? Xo Kkw5ɳ4F2}zboJ;[8W/2PPe4C|8;&L䤸\ } *-Y%Z5r7fwRЂ!q߭dֹke 'VI,TQHUχn@vR77a1ȫ yvyZh9+}V '\ر*z c+$Heaqth ӯwl ]Z:@ȒkO4o2r'i?^s0Iↅ+ȉVB2&oaxʆ$9 ~|ϔH X"pҖ̝ipZCV`dptu3r_wޅZ jZ[T:7 @gC;EwDs?&7^w|d*fb~9a&7YZ7n6^sa[!#Zvnޫo|?9`f]gVxZj`jYQr]zzXn:+]i' r+w/LA2jF@ϷFB(Yv_G5R? 
GSq|ߥ&V x`o{l,^WpH-gbn'\,3TF?Q[wOc<SH"Gaa3[+YvdHV5<$3O\'Y ~>J ~ Zgs0K,R~Iq&dg+PN_ܵ܊7t puQ4:5 tHjS`Ǘ֧Ȫ;Ҕ٥v~Ap8/o3񮬔 ~ o֝#+LGT* qw,:Knt Pa1aA|цK?*읙eiOH!dnHJ FJQG|pÎ+i7'6ǜe=64@o+hlϒgң.SRMU2 -Il\JAyĞ4nmMyGq|c[A*5Uuh77EOo>PW~\*n+VWZ[@pgbO>j4x ktxi;dQ-.UjbeKÐ+*5n]arK{)ĝ^^rʁаrH (bΑAG k/ʸZS.:8ojY+{5TR YH8)W/() "$<& }Ўds$6Y:ۏ)A>QX z<716u=ʪtx<PN<$$ƿ<$>nN;sw7_.!S}zfȄ#m#|sj?~ ~MOk Gj35COPeջ$ˎ#⇎dP.15vYhΒEx;Bspy9G41E;MIߠ9)2doN z} 槩 *q1"> Ph!$`T$41A!TSCQ A:`Ǯe>GtWJ|{. ڐtJvz Bۭ`C}p[žLkS֪ڠDnupBFʁԖdFmvrc)c h7vlY۹#`Uio##)4kB̯M4V8$/Ke&'Q)6K41I J<^4ύG7JyPۊ8 H̯MģƗ+ظl {k%]?~vJ'SYץb{湳۔ [LnpK6G#DK7Z-N|V1%0H8d*MT0X"U6jAD 3-.dwHv@ MX%^ܺyW-E]/uuȂ)@ᆼ=So8,}RQޮ~ Iɕ%"̬Kz-,"BQa&8Y_}85t |).#R#gdW]!Xzԫ'(~DTuTQC5R ? F|^}$-n =NzFdb:3%wIKzG,ӄ]{cB9tDU ;^&@K3zh:0_+Q?}:EQ?ÄMFc,r9Z淏R$EȿG:bK{ $0K2 am`i%5􋛎rA0NU3:Y MՀ6Pιbvy T]dg`rN|ڔ [Fq dckPYw,+~7O#d;LCH ]WmB=rMq`0u- ZhY׌-53pOUNiNɌ_C?_d7܍#8ұENHv@MRTBR4/jn5#r]9́?2eARKØ|9e+LYs dYrQux A1hY ͱ{U7}eI냢sJX0 ^~U" y"y/w 8iUTw{c5E,vU^-/lq;y:c^s,b)ևj@MWYK9럑,nm-*j=ilF[ųa +ӴTZu Ft/7FBܧ  C!PAȔ{oҪm]R]oΘ ->S&T9[M:wq;E5p v &s /Ÿ/R\A6F%@r%U'8My u69F5v;*a$2V-፤nM̒zfDZʕȴ07Ԗ^`.`薫bCt;G=5Sy[[Zx?/@L5;_soK[B6t;*5d?قn*RCidZ =\ {t Щxp`)J}g FTXgZ&T،(y'Zs,.p c:Y]e-՝Ͻntu`թR;i^jsE1E7sUMKHI )zO>V6Q'Yo}fp ;|C.as(`n@ӱIFʁ=]JL s]3+P$V᪌: :?f]#!*GiDb(:#լsF6&Mb#edeS|y :P]pǷ΄M"o2ʐ?]觖mhЋc#ݔ:ӥa٬⬳נaKlr[&(,#/ujow')0Svw[:+^^::5oSThK*t:?!tK6 R?^ߖs$s/_]Vc|Vv)ԓa`Kpo1e6O\PgKXE<GUSA|+!y.;",flDQl Ğ?Y`['BV^nIlӗ~,A9/E= T9Z1$ez-z `@[HmJ<Tَە7jN4/Hl Qo>yITUh&z hj5,?,z0^םD( 9ߑ1+LL.o>(xcB]LCU6nۗ<e\EL!>2z+mK i v>fϔHG,F(9 eL$DkO'khBKW `/rYKFmLE9ԮɅnRSD|Pl"݅n"(m} +)+e;zX:ƣ%5]*6~IǼ\_'t>gq0v)t#;ȞRTt&Ʉ15'$PXҞɀ(9QͰd;,7P2e#f/I>VyFK)GP)װ DTϹ]X&؍f| r zUU1˜ GKzĞ~kFx^2=E|bOկ4LP: rxՍNv㚘DިĴP :w7ߏGA qHmڹA4C/!LVIZC? pw2z+Wf8Lf#;Cw״!g\N߽m"T 4Nn3ElLl^g zmFcjQfϝac^p}ĸz"Ts-|_I\_ bY IR$ 7=J;;uyXeWgm}a&݈Uxߎ&޽ )x)@3&Vfsk.*:ŀx #;XH%ĵC鳕Lѷx4#Gz- V* ~_wAFĵE1U 718K^įPf\!mfLW ? `hбoxI^ O| ؅2g[?sn<)n`]NցH8HFWR'4Z)v3bSs4'b:>_2 F+s j|%+im 5et0T\QT\ rRc!HOu`Pm-~Ir50|73>懈2DO?qAdo;"nʝ*A腍I| KlJoP|߳}᫥m ,ȹR 8`K)'F,ıӊId+igHpX(! 
:=H] NSHX~A0p7u7{d>DfMCѰ%0l_"75Bpt,rA!(A9 _z'ܥb}ˢj-: rȤ{0 N7)[0/W*`w ndپby$4H;c͈*J#%P'xuc'\o8UUkW`fzܸ&Ѝsf|,QPLEaNIk9=ZJmӄ`+ak+^H>i[rCT+y!~s{` Zhe}rUړ#ƦBD!y쮭*2jmoP \.H-sfڅhvoTE9T>V=˓ JpH>>x3\+Z_+aqW#<#YL.@ߞ ƢLe~w!n.P+^]!Hc%OcjHҊY ّ =:y" oyWCD#rf;lQhS/ A3dɭMgfٝd3|`(JЅ `Br{LP|J'/M"} =ߕ%qZJd7"lfBGpE⠠+SbTl1Bv-+RaE#8i=NUQI61^bW7Q[+مOh+el Ֆ /9RYC2tz|W9F~z::"ƚ,(2-(Tk5C%PV;ޱ~؋ Y>^N?ɏ-~/i=GIp] *V7-6Beqyg o`E}VHq8Y۱k9ztG;`%"ȅh)^L5a#H.3mْQ4sz^`kLMZhQXqI/{dPbDqE]5eu-_%'V̻zFRLk6Oؼq%8hsל>gdHPdR.F)\U:,Y" Be**j72D0ڇ.3nmdNeaVzz6*-?"IS,iTv &nsc0DNIgPXoIZja`iXu ;IN]~;i/ ٤ZgymB03GpL|~U0"|ʎ/AY+RM͆UG(m-1aטz_4p|Iգf,[4QG: ؤ1U/ >l.b45uu/Y's"8jf7jSB_𭯖,RE3A;Xin[A Q>~0ŷ6Tl':[yf`X'GTWy4(^PwjMT̖qrU)<:S5nh[]{uu$Ÿn_uVꎏ r2bG/aTU4 9lzb x& + x|" ,TڊZyAv4ڗwP%N&T<ɓ$=-A6,Qns&vS0WqD|V ٩UT7ZTa #|Zo6 vnW+ K.NdstEܷ,>ܭP۠v -;7\6hJA2&Iմ70?]: i3-?#f`2:f]e Wu٨ TPIEe)ZDi_$A3 #j ;΀NouHBs|[ BlViA,WKEhuΡ(P0'AHa=VSElt7j:$L3ˉO 1rA@72=k=4Zˏ ܿ {476kT^!\:Bo./(pI݀9f"quM?P *yZl[oۿqMNSO/5=(?-CK~ *OU@uWy9&G>3e]0^$: *ě03o,#31js-}6#?AP OxK25wX+6R+?:)3_K 9wEs}1oIBVĬB]^ʜFMy ?4 =6Vl}PՂNJenRqf!b}d˔́%um>S@V 4-wk`yꂥD1Ŗo:&$ Llv+)1UEnd!4Arl9Nu<.V?C:L:./"c*}p-!k$;|=ECAȿyճ?,X yן),mF}}}x:pg<0=vM[8/2dsP̂@)@vn<tGz(s0 WS4")X<["5qB'Gq]>.|=Py^ !w2>(59jkuR5 mEI + zGfl'i_ze^vX>Hcv?b7и:Zw(hl8ɾ.Uji0X)W_~nTԒjpxr.¤o$u֚X৲ nݰe4WÇ-gٍxT]}IBi&LE+_+ϥ̷W f,l!BD0qJٍz!% i#FCNv %\҉5Mp W"_N Cz?ntFioqkt"Hb0{5>5?gQgs"2ܱ4y3{a]/08S8l&ǔ:IJZGcx{Z@q_jf.yH_=JeZA=GŔTZ&A 8ve}@bMN0P1Lw>? 
cݍKZK덊@0#Ʃ.KL#BQӮ'͍(p,-ߑzN7֩Exكo6G>Q i&:N/^a!)HfC7I6AWx>kF4oȎ>0⸧V<R>jsNy { L4t ME٣?M( kGi5ar.qk!h[C"EnIј4㐶?fpvi&r5&mD[oAw#B[G)>K.ۓ`o12vkpTZ>I &aN$/>@4=W)`9l˭T!'ζbhdoK؈e˟1c4z[" Axpt'uFߡXoMұtx\A#,Tȹ(ď@UPTD%U< io rxl-&ZƉx'z< >sHEz?X*Wݪ9uGKqHe Z02 *.}-T_ySefG)LW\MtaF!YAG{8sSdQ!xYQG\["iCL(U kT,lж@=o'èzXisɉZGGM G=,+FF)/:F-3 ?WPjW/+8;GCI (KJ 'yۻ7ՕeB 8c0kJ7CCidlL; p񴠮gg&U^9u9 ְضhJ+GInO#0A& c>HggQ e̥m7=ԃoё᭶Ȧy a?·1$LӾ&]ā+Ff UU5=xZt=vϱEsyvEzAp6tl}6!PϰŚҋg+\ -[F~ ^rp7Ov-e5Sb*>4WӼ~_ծw2Mf5Mi0@5x*ު\Lr 63;~xw9># Ѽf^`>J+=@`y'HP9_@D4"u͑5`<ˊr ʓƈwWQDB'SQLĨ!#DQu1Q+/ZrWc,G:HH<߁(&`36+BW= {} 0ݽj4z44_+KLWd2̱P[^BکD8VHYܖ傞q^m"9HWX3ׅ[G_`TC{3SUNjHkY$6*7WVszɥvQ\N΅{%(SlxW҄\5g&ryb Eh*]n'_(~Kؘ2X/ێe"R|ZҩBvQ:Dgvѻr0 EjW# TבHЮg2I  Lf4;_OCfvonOXB") M|lhŏtv͂5VדY QǨSNpEs+h{8oރc=L KVoB}i].[wO_@9CRF SQZ ]Pd q2l?wjL&L rxqryv,x k*neWU]j lp$Jw~BէXWN7ty6~؄ȵ@DUyVR1u4R]ǚ|盭SwW9w#M5 Mɡ?:c eƉBh]Ÿ4oy9{{mfcVt=6FٱE V{3Fpt*=m'4a@;Cq4*$a ү% Jp5!5$"Žu :?JZP'OCL`F&C#|RtƎH"y"&Rbr~̕ Җ8g9n~,IlÖZ:&oYT!سD.tmZ+Ci}2DhKM<|Fiqf2`MQ,PENl:hQ FSF8]n .Mζ _rpw7y+:{w@_X֦<`W4@ udF6s;uTE`퀫Lt`Ȝ6!Z(@)mmX,]&Glv$zBɓt [o=%ϔc/oBpqg00=M|=Sz_$!OXL2`ELDmCA Ob-ˉ4(0riC9>?Ky<@G=), Mavز}e8x(NF"Z`ryE"ק*!X;>: x) ?i,ϿNS zjxĆX"˨v*~"kqIf1Ȏ3q!ЁɄD? DL$4j$E u4KiD6͋ٗUD aYiC:O5Pos"+1{V۩"ꗮ`c=T;WkF鶘ﮥcp)fGB*{9֣%fW:+/PkgB#Mʐ-#[p+\фJ38 :|ꉭ!b8.l $@ Qot1Hrvٞط v1t CMJS% SsB_+, F-}qF ɛH *ޖF[|jcq_YU Fao d& }ei:DܽHSbaMn(&>HWv(<2ܠG##*4RfawD t 6[ӥX[]T VU{w&"Ѷ1 ϰ+|{E'| v$d| *iy~# |4[mVXs1iz~ԤSIt~o6]@~smj lxf_mݢ,hw~1"T%Y,Oƣ:R&;ٕ\t?3H CμzYYhvO<ԝ4$}ϵhv˙gv Y?]ep6#?׆[z&G]mq:Wˁ@u?mYB5.7uyݷ_zZZq'J5 _` &2ԍT)Jt<+uPۿ119${jh9n6!@ζOo`7ғAwE>lj iiH $7$ѓu&+E9i2xηAfkf!NAmԬu81b\"Um(Nص^[ŋƂ޾Ňr4.guQӊH鋹g2)=1So}$Akob}ǁ#o k9qtrX23rQhÛD/j ҜPWL0㢧P!B]_U1#+^2U&PBaf΋PZ;͂vQZf~vZle4]#yAJU).a #9[c@hᡞ.ɷnɖd4':)=J33A7C4飵 ?+zH!Ʒ!<zD{jʃ.h=ku7({pͧv̴&mZ!:jT5gE9Q>P4>9]麖;*G4ccgg!|Q(x7*CŶ. 
qz_dOU#FD LR9aijgfG$|(Zuh4p[k XfYC Mrzڪъ=3Ҽc.\i%B2L=%haDž݌'(d F " t[SےS) 6k;c=,j/]@pK<_%ĮGFcϒ "Z:?A@S0|Vޱҿ,=· &P=c@$#,>vGchslٸDZPkc&vYV@]|gxÕ~H0ĥ HMB!4 ;fV&d=1*[7/ko* zi|\K[YNol&JΥ\Kdz eSMgZ }֮m5XZyP?XO쫛tfliG{Lٝ_z@ҡ+׾ML`h\]~W@`Ye LmW{LVEG>`;,X=jt@s.`^(0?<nwƨWLnZL.ZBcLݱ%p5A 1=B9WTMTk{ meZ*]0CpRG ҋdCsSΆeВ&BؘOH]Qb:7+cWtp1d LG4E.9{U gXЎӁه꯵ߴ{P8dҶ:2?s1yV:͙Cy%S"c{(ygQO=ʏN6imF Gpl fRmδxw=T\;X69C^9 ϴ~ՖTɘ‘zi@.o U͢w}:Ώ>kW}?5*>A'z߯EgFJ8H4 K^7^/|+9bO$? #ASy,xlW]NSk@;Ψ{XҒopOQrXJ\yKci8ءYYs FS+8H bj2rw5ZyuRne \?S"?`H'K( P;7>Aϼ~I:HiZ5_&Bf4EC3@ƚlR{[8?t1k)[:k.Rzbc'.(6u!tzje{<障#QE.3XAF%&%:66! Z9 G5 z'mߑBb\D ̂nA Sg؟R5|_8·ΰ+5A0[a)KpB9z&Ί x2Vagi'gkSE\7 _hMQt9w1;w-}Amh `[֙a-sNANJYfp49&P!G*hu ^F>igo×IVߗuHwQk;Y#Q5m@Pt_\OBJDIwcu/7"@ð_)y2bOB])lmZ)@A2, 4^YhYRicIM1:)· 1G+qq@rky 5ʰK\ ydE2FQ tXjDD<_@7vCVPȍA9f%ްbcgW-pE&U1VHg, dƱgNjO?Fc.pGZ@a]鈥Y7.Ů?߅@\ZA| ֕Ɩ^ncI4)ڧ! G@B%N)Ԅ6R0!\GI)1fkig%U5|^:VXC7,b=]i{㬼U!얈`вGcvNGCdS"-O/՜' Gb9]d(k5BoV_tIgYV38zCO8)BB4~ӭ95utp\_`klCA7{7p &@"ϱuuy7QLy7c\"=t^w3%ٛ\CEz d.2zN3 d?-劄C8n G<{EdAGZaDD:abgJz45d2y71ԈȨ9s h+!ez[X2#O>POBC,n6/|Ի0`np؅sD Ō^k9D(?TiK'CVSU)dW; 9.fh[Ai9"){Øiٺ;^KIJ(cə"`b$B9`Nfgpk9MWƝ\>)(C('RWtP+Latyc];YW*hr@nF=OCV/]Lߠvr PI{/p.[="_a]  xMc'ggV šsQžNפ)%𱁢?K3\VL "ek3T*NT nj*D_H|,_Na;GiD;]1*to')+]6&lIGpLV뢡XZ2.|w6 oLo!65YJ,M_~dAi5TDq-QOD(,kL~u+ $-rFQ3X _r]R ޒ&9)%okrFc\.iH%#\HPoLK|266>ÐųuFX$t)p ?P|+#^Z[;#1g7:Gx|Se9#y$y(]`dU)hVec :w)(9 ZX KYT@*M%\XOe >oUo!xH.)v)>z:sώ4r[wV].jI z9WJ _2THE:q4Zl]FfI,—ŋ61qM,gjɒ/r-w֎pH$tX )FRqJ0fP8wzB}uےD.%V_ԃs: qẽXlL5-h ]d'[{p v>`^3uRnN^]NǠCB*3;. _vXդI1.a,%[gHkNsQZ~]2kwXFj9'rqx(lPȊW&`a7v8pvvѾ2t!6v/K  qSKLr(kE8SKKtoN{I; J&dy7sj|$%c+#l_5x8K@p ISJto,XP6?ly fk{gP% -y"@MP x(=ew jl k|E , P|y.FfS|0a-Wd\믄$e;HKͼ rm3}+4ٶc>|;r]&@"\+1p f>^𠸉(M0'0¦ha5l! 
lr ꩇopZlXU"{yt9T\{ r5=b{%`h0q32 *+կꮉ[Gr&Y$ȑB.Z{OjK[TbG h9~J{Cq:Mc}OR'& *Wfd>c#z`4mWZNXOiGߒ5!,=GްrYJIԐD7j+24?7$I7Z} zVD^?lR`)9eq˷DP 6\ ]˥3kfr Du+ЬiȐN4wgX`f[Ҋ>r%h5ӘV+E:K+QJa.<6klcSODwfnP֕nْ| zI#x8_GMƷF}5z T6ǟC' _zvʷmLȧ50η9O"= >]_9K/w' :Ow_rz.F&ig z<'fQ!E& gg3{YԱtҍ$XۓkzNTR5ɉҙ#r ,,l94iHI,]6'-倍'@/0%@QQР.{]H!W^RgZ&DWR̲\ }zÚ{09#jC]%!̢APqOL43>eJSp-T1OVŻΒS %9 AmŅ⢰ov 9*ڗ LtX [D g<[_i;9D6[LiҔGj,q+3/xLm@Ь#ZIލ'\;GER2 2r,I[%Goj6rGp(=KqBg%IpfK;UmyLFF\V}מ@*7I2 -Os7뭋  ^Eoe5֩Q . pd:Φ:(:- Ęc_g3+u=qP}X1׊fjNj2~$}1OOI3jB *%RəEUc/A(6뼚 #U3FdH/+#a!7Es+ݝ[zUFEݩr8!~K_{h e1FQ=rRˠPDdGnE4#8M鎜Qv.d$- p|8 'U)V嬚vsZm)YnUncv?ѩcNON .6޵{R"'$zJ;*Fчvwh%|]ZjyVIgӆ쑦5b<,8L#ߚ ,z6oL?4Ԗ-ew&;ifa;KCM0~k"6gDyT)' U<*ӑ'4 G\IF`!C <;U],Xr)/&vߤT 2tܿVJ5?+ԍ D AHWO9DKHr1SFԺ?`lDym o+ŸKňyNϻx]meZy^/*i5D o- p> v"(lh+0!>V˶G1.c@V=~o vbc6qi_/qACt"LAPDD70&ۉp6v6a׼gWu긊PzouH; xjш$1&Uz3YA`< 75 kВP ]0EYWj&mâ];d_ia!9' SYA-"[MVά̨Z:?+au7^12#m h"m ^ȍO$=jtZ)AiADğ({)Zj(-bz ُnalceoMb'O~)IzE  յpum z@f1o˅Jצ_8Mk(7Yo6rdǿBo]TCZF6\#k׼̜ _Fv,H7x!-f. R%ǒ(xqe' jӕ^4Vbs u>(O5Swq.7B&W1ݐЍNTV) 7ws|X&L[hd(TXu;'o[Mǘ`/S1j txtbbA5^ی}1dt_ZZYv<&fYC|w#/5g>Cv j"pN!U0^ʶ Q%1=0^s. 
2mK~go9lC8/~OO257䠎o;ol0r(6z{z.+䓍ï}pn*Z@+)B>@6h7av 2N|:K#.ZR$G8j}`l sqɐ8*ҹ2ѵ#D2dm: j{ y€b-ۑ[+=5f4=T{Q'0v-}ِ`Ү EӶW^Ae2o?T]²"G"#аr C4 ȻvKYP=^1j de#]0&Y"2D4pz!wt}Z`~=@]e&pa=EЛHpd$v}9G)VwW-nK}#dI\FoY WIPڮȴr{9}] gzo8v@ y$(}k^?FQ>Iw*w5`QFT0s^CΨ^a~"p "w6'70PebUwN'a:%r`.;*kU!ߢM?F >p&3+ڋSgϥs:dN}| 2wm-)'$ J߶uPz44c)q GXRuHTǏ9&Zs$B ^=:JVKz`t#VCN4$-:wfMyC:왈ڰcMr6|`{:=JB~쁤x]9N&;vZr=QvgGE!brCD@۰^f5PwzC\8k}/Qڔjq\}يG onLw112xdebB-#H>Td`yTkkP=Cߖ֊9ehqa Nο0K?lz#iN4ѡr/ycc3;HRn_>,4֑}`壪bvN9\2 IgQYT$y{)h46Av##"-9nzzS3+x 4Ɖ?vZJr/fb--k^qyI;GjGMݨ/l|ī#>UA~r%h (\ r(5y No c@;\M ;{P7K{U_- ˏT'F sݗt8Y>$*[{An5Gzǚ7WN.p>$yL9}2N3Y%u<~?iJ=T3jME0M_@hw]9^,b6Pj_ x]6&@Y }N>"RnHWU:VGRVboW1h~ZT  L5}m`k4Iڇܷӷ\AC|sV֣R&q7%<(tk^Sk"]kf#ϓ K{-EH0*죗Ys?V}OU;Vb^ŏ sK+Ԉo]^pm C}L|P40 K* `p?> Mb$g!h֊ZyXInd'Zf52>A ?2SдnÿUȖ㥇5e T(pf 56Pa A *S^e.hAK3ofSD sMcl퍲Au91 ;h x Yii )BB^s@jA5;ޠFQܑ[z KR DTbϸ$JM#%PF> K>| iv=[sGOjB锁 |mT?*Wq{6yGR=cMl`cer}y'[*+7z皉x'`K9YEey1/bf"s;Vrέ5b2&$]v5nuI{@C..6,T+Ia)J5:Rk!<1fC٭a[G=9?̀ ftq hf=@Q2 V]]^xl7(~ bFz@' EVF,lj[9CTCZvVFW~l-A$>30]gm@)&ŬΟfL:_rWUqwB4%$mXW\I`UnPeߕЩ : bѻP]-{ե3K\c*upUfID0g*GFa;.M 2¤xhTgE4_ȹ3_U$@!Z[\j`J՞7d>y&M]۴HghNlr$<;qY= 4Q{T36*c9K;?L%}nۣ4I"Y+&Q<ʓY}HA,: vtA0,õwxv~ Z*$|2oD=Yj,cO潄=Ǐ k3\.Fݏ#qD<wśӜS2XK(="et(NZ$Ђyò% JO x& I1x$R0Rd*0@ C)zX6މO+DomnO J_~}VF$OyUv|ҎQ{:&i$`N1ԗ;=ژҸۙa[w$ xj:/ؕ"T(c+mԴx`+bHnAF<5H bqqmk'/~G7 >KX8׸|x(-{5#N ꩭ!%)Z[ U/K\M1 ܶpD֕4 2چ )z_ ܼ vך>n}o}BK!5fYq5Jёa;egVM\P& z xϫ]g^^V<#躌t5>JDoJFлu]o /l5Ī%}UZ);dyJ2%i}Tkˋ7`Mܖ]Ma^{?|$,bf"X3zӞ^Je# DSbO306+_R $2IrQ'}'qpQTӰVeF5+*"ÆV̖#5])@&`e*3zl9± {(;#a5>hX,B>_k}xj%}n>sP"oP@#þ™zXi+@4!ԸcKkSWL别Qu~<¸m7<*ptϷBE=;ٞ5ŭ~7EږqIyoY=F EifC k0M)uS ]'l}+Fo f)H k4Z}6N\eZJe#̈>\p{䜇GjT qn[NKtlhWR=0A/ [%p6|ä J;,'Wul#}Q`kYE_bLI!tWJYAw:=tuk[B5_ٸNst~i+B4]fv+qE|ѻM4J\  g~nՊ_Rۅ#%K_2vUR>}ᘽHK mʾ\rvAi% .{ʙJ"i?7&f- k=_;y' :,`)HįL)fJ0", ̽F}5oaYճoЮT; /ݼLԟ}qRMӑ^+O:R 2tE*3{cz6u88>S% Q!{NZb>5?'rX*|:8{բ섷ikm;r }wGq45To\XÃk(7 ~ڎt. 
EVLtz"Z* :vSQ)U^e@% |j1' RgeEv\+Y+X0I]ed粯>ZuNC,H]r+953# N9cqݺ@#'4iYCuڔ#"aZ}oLLi6 tejHj3 YFQ%I'94/cdz|/ E̻{UGt pڔ¿/Xgfj]MI ^̟OU$y[K~/Cm)c:<"^HcN3Ҷ!HYs'R^JbMۑ)~ס/wCBpZOih@@lwpiAros;a*ax͊.0|<ւb68sQmV^+nK{٠֭ Yawl\bk촾=I(P<^/Iwq)l}$`3.2˶321TR},4Qrf#OҠ]B8WSNZ:Sl# NjVv$m"mj&0\`> d6}ɶU8.n't2G]rwFDPP eԭʌZ2 w5i  w$Mnn(/ƮBtB^:i2P^tG)܂i+L6a1@ AcnRZQ*g )WB}ZWȉɂ֪7 K˴2;a{J.ۙjHٓZ"x( ӀGwoUel [{OR @Zxб<%aw{Ȁs#|_'^_J A @3A+Uo3UMF-um2s<B5P@ rE)jUQe&%aX(Qc5!dCa`=ݖn3 :2NG5?tXT;>Iv։]LzRϥ/Pbu3gA}tAoavZ BGJ /gYD`Wxϐg^f4 X{742a[ZLaҲ{sޫj?A8_^&ߢ}1ȍ;_ ?U VMtLA9)"\ #zXхBYŠCndH4Imh_>ʖF rSnB,))F9ұ/sa<2^ػ]l [WV('>ތPh)|4DԶ d\ ,9I0{BTnVxA|H#hl)Dsɧ6.:n5'G;rFg ̺hxր`N-KbR\&ǘM㹺$+r(%ըMȿ8N'+1ΉIׂ3)k՘dS;4Iz2U)Yks/&%ߍMnC75mv'7%" JkiMٿtrtVD{YfOvHj?;H-{k An!ֈ$D< eߙy7o;b)b E osx΄)`yTd<_qTz_i4H+]J ,]@'5PMdy7Z"amOv'H0_{~{jxK7qj;{|qJ.Tr&o"~ꉝŧÿ\ocDJixis"uu_=@s.ᵎ,`-o(fvCfph)DD>#Sv=CHO S\t7ٜ Ε!5<=j6+`6HXvBmBhm2Ԧ311vXZp7,gN̿V `Y+h: !QLJ__ J%o*F?As6NΉD-ltMr5f^L#XgWgإ[ <[?lѕ_T}PšTGJd253vM=r3v{M~,A$ YJ ",i3| jQnOE9U!ې~R<'J7WϥrqI@n& ?8S%e`{d1oڲC# fU Kߗ-|e Zq$TΔeG{ !sgj kr(hm`eRlM͔<63rBA߈j5-߿BцU)^dPvq;?,dPPOQOLloý),5/cYV;'U7q1WZ!D44K _ rt=b &N'1%f'|D]G`hL**F !9yhBFK ,4"W o>qbe_jHx8_dCNԦ`) ruM(VcN;5}[YA4*L]!@POHB}fqsI"}*iٔAn5[1YEw_@LBi/^ &u)͸<@ 'P#9]OYfiFfT)WMPMZ#_s'B$lokC({Ǥ$+`'#4S0l9X빇q';IڴȏfJqQPlC6"Q;Ti 1"p422L CH}=Q e&VwC#dZ,5X[$-W_L[l ri FƆϐ TY\Q^D֐|Ofh`yw;.> 2i6uC{uEffKO|祟{3^)U]-8Ӣ3o2d<'/=U:hI8MSjU}$*Ea8B]=˸dSūÀy~d]7rKQ[yXp8Ic˦ {+[mq%4,\NMCR`zD VjqNwY&g FB8iȔ䜲c#UɃxJ87<*N, 3#q/xtE~'Ł+Fg&O3}lhw2vpi;tq/]8Iο  %M_J4 \#Rv($@*zq9~E&1O4иUEbv.dGQ7Ze+;xݵd ~rB{M ۻr ?97RH,&72h#o+ǐƫtB cM4_k8&>xB 0$q1Yತe'TsJ+J%0ai٨՚Z Z|V9Xyt`BA#k>̟H܇rNjƍ`7ţD; 9щC_D`W#$gQp ,2[zcZ>I:z1gƈ7l~rIb_ZAkP1/ADXn2NNo$d]ĕQy:رB\`tE·y@ ]ݍZ33a%gْ[a}fXgeRa.1ş`e0eت߆u BjBzsQ(+`6R;BU7gLEjKC:79jV/T%A|o?^,yӡKWpnqvU X lӄ̺[O2Uq pIk, Ƹ&||!Y TpXKBdd0u [ wʔ:+}ʑӔSK)\/;~ =76!>y2g=(&8Jt)%F$/}v]Ų07IPadp+%ι ^1BF:r#_|j$NV%.xWљ{D}뉋7)d(cȋj&ʢyG ؍‰č{7hf _9P{,[m \:!@ҋ3 N:ӣx4.ލ`rr.Lюk^EbL0 d"~`1cqt&1C1 ^ o1Hn'󠹑0`Tv0۟E Eئ@-ȫpk`lFf٫udȤ[r>|)EՄuN Q/ѓ x+egmģutL8X; 1!z4#7OwcOB*l3p%)`pf3G 
=WW}4jfN}!kGVռӞk`BbU 7,Gq[dJDT[cy=ȧCLz.`p2qdn 67B yjc:_=ڴL(.FjCS4|ӳǢ d *Uen%6Ռ` `P.cX8MH59T,m6{C7 e#ocC p a@xbGcmK5oU8ɵAFB >O">p'R2',G.Cϰg7ec]#)wNfEo7N:hWa$Ͽi`(z& ptHmf W"JͧghQtUNte㯂I~fb c)&H{B4nXoJ 9fPPICB*q hÞ!1c,_K}!)D oyT~/.BxxWqv$] BdX^ik=4pJM}aeC/59 Y<C倹MB 1I<3 [6cu O '[q : 8$U5])-KQxڇU+9 o~G%àTApOv}g5bJa<$8unQr}9zNsi RRGaL!YA)zsz|؄pJz\$وn`&);\t`:֛f1 8,OS<HaxDC-ٌYP9=WZP$G}8>1myY 65tX>yύ(q;4{!@AP\] Z0<š*Zòe`:YvSr 7rTe:fߝ7"ߑTSJى1BW"^ІTk۞,.'yCTMd+#KL\N5R2mӺ6+Hnxqh@韉V\G#V-CRn8ԿuK& -]f V.QFK r ;4hBic 볯%Gs4VK}b$wED?''z邍7Wމr>qPBe)[tEuӄr2k?>ũAM?Kj fVG $%$\p.\y#b6.:l2)FֈJŠ1)}?-!P;ў5ʅ#DyA'[<; OQ*jG“9Kf&GӍCĒÚjx#|u C zkFf/2 A -.> 6ޔKW(n|8z_#v12&L@y5 /"54urt-S/QKӼpJ`~QrND$*zkC3h;+l@>k;@$%`n)Vvi_HџaIEvU`n}A$gފr e)l/'Ȉ]؎rTg%a)Coew>IYWiJoQc3poZ] }M 'F=Gl;q|0_bOC-\䶜Z<+\_u4(g2Xcz5 FN'w><;=RcE)G;<}Jyh;$嘓 >]MVqaȂL9kQ_yҋ>@׳GԤpTvϭ])U أ+{)3qvNMl\‚i /PU@B_U8RcMP4nj]$ :fY"/^L&E R3-(_zȜL_$Ҫ* zVmў{I`^ 廹ZIE_GU]KFhoQbIυ뢗9lsUJ MVLɗkO9пj l[V,dѝLN@t~.4ۡ6eBQ%].H%CBB9|I5/u`F\}T\m>Ұj-yǞl|ͦb0yd_G{|!t>L,!ʯ+x;Ao6I;L]Õwyr\8k:ONRo@]X틢 rR r-ٿ+_0HvlbrNz?%2x2Fߺq}:(n -z6C fG Ɵm# Hxe\ W?{owXR>}GWܱnN?=g[Ex+WUO] neo~ƾT+/kJ Lca+,Br `a$^#z6TcB#Lyǂ۵3%JP&鏑k:dYec'c5sC( [~ ,1qSΥo[)򪒕בw?= G.H iM/4j`Q 0|D>j@ W#0a>cq ȃp Js)3rt Kzc5A&ϕ ;^*ވcf9gXe0A| f(ktR+$מb#~!6UhVOG._Ԣ-ۇ"9sGRdL"G>"_ g5oŹs#u[>׮x8=*/D@ ݉ zsQRg-Ǎ,2(#,Po@C6ldžm+S$J+^<1W [˕Y=e-&kݹ|=<{^ۘ1s+wMS}.s7f|x-WdrWWז6`|j˳n(DBcKEAb4+)"Gy*CiWY-5rQsrٗ+j.Ԃ%F&U@(. ^/6I Ӕ2!Z6a!3B4,6'qUH>]$-0nd:7%hX |Ҵw`? ImAq:dKBpMA8_G t\r=!4/^\ysl'/Ӈ ñ8]93kA E]V3M4>TG;l_6.㥌dV)l%%&AO<S1|!g=f BPsvRMZm<-2N$e ƈhNJ祥 |̔A8s{Y&薠)@yt"s09aFKI!ocg}*%զ-AH_U BjnCmP1ߕ^=<B*AVoh>2Yb5 M-m|漄CU.z$kx=ڈ ^1"vYz4FeH&z)^=gۙpߢ5¢xz3L"j.c~,銇4Uct0f!4%|OV( P l LeaXB#ASM|>BIA.&)1q&Ϩ[Λ [V8Jϡj4Y;Ռ` Y!g3"rLaآ ԁcX{hm-EsJ: ϙאb sE\b g Z+έWq. 
xxWUgc pz!^ $Y?V%J(l*m ; 2%sS8Fvnإ4v("!6=g[n1{U,٠MӾR5P`>Xh'f͈m`Ar.r}PН1 muhzn'h_a]I14[>{.XnGyHQB5iwZdsd8`ԅr:[tۨ/U9 WOH\0PM+fBly<(/:.#EIb,4|5ZoKϹ_#˼ON .'Vf]oX@hsЖEofyZ^ZDp^]-WS!BogÖK?Ѕo&aÀB\qgcjBW>%JI_zQfީd:zTaq;D,%Ŗ-1@83_5(ܲi |ٰΓGY6HHy~TKUq`&6c3 ]w}9 vZ6oX%cϝAs'FA6njAyL>c4{3D2/t9E+o$N.N[gbFby}5qͿrb=E8FmVՎXڨL#6҆TVлH863o&h2=bF=vd5| HJQRRCң\ f~ 6\@Y9!ï`#HId0XJ5ejOIν!AyFr2/trT^bˋRn.,1V%:ُtI MCKűv[:3侂󍬭ڍ,}-2JMT$cHd= z옠vb/Y 3 [Tq{JEoW6":Ny]fY5FL'n9p<&]uhYqo։?T]O'}akypnHG\'R# .˼oOpRV,Jh~HJN df,g%!iۈ: ` ^v3adM tK*oIPhAF;˃ MpدԾTrj<=!^3;< ~7 Ιq VOqLBM!9ٚw|ߢ &\>`[$rhe;G+C^|%glQRoyLL$f55q҂ZhZb&VbkSY˸{`9NKxE&m|~ɿwrtgcF|Fj<8v}~,\7#E2m;7M"@"O@LqUϓawvƐ|u0) yL'&*`LWNta^1AQ8>vL>^H~(;޲Jt2 &Gk D>>4:ap1Lh8J$w);ׯ 6ͥLR HUlA1ŒxXyHbūӢ?BJyx{`F߾)tȘUO.D fIr8r [6r/%OLs{k &SIˉ;m80cRƦE~/?)z 9.:;qȬW oz+5Dν (#Lwy)HBЍDg]v_]}r@JpyJrIYq4c rN%M(BfBAwi4^? y=?93 j}T/U/:گEKJ%}@*'S͔zov,sO"݄,̓Xmiњ"{[[x e W\0]* vF-teEO:^A٦Deͻ~L,/(}WjިQ1diwo4'(Tw4~5_  \w4]]b4=?x1mXv 2փ8(8><<SJ!_S)?^ЌznozqU42dVaJ#wC:'gчt:_cި4=^R0, B&0?g{A-1MUA5zV&%W l _Dw ;Y]3%/XST #5O=X5P;NaQ_%#oӒб~(<'LU%t2~㗗5 |~BvnRAsb U׋Hx[JT')ŧl6oUY+"Ԡuӈj\<{ -PDnCtI.˱3(kXv/,\lAY,`I|\^7 lYf(kMRwB 3|13b:U9% aGX4͌wY}dCuL/v _-IDG䰆 9+g'v_ 73yFX-Ba{SN8o28HԍiCc:M˸.age'nі^J)mkjCfFNxDwj~k qjXk \j9hiX=zvFkނR}c8[ݔDHF\#&m| $FT*֒VU" c@"j!?':sk%EYC4^1Vx8ŒC67Ysc Z6 ŝDB| )ͨx!FW(́xJK4,j欢CEpÍd~]8 =]>g/Uqj!d 2EgT1} 7b͓h"4lXU[d SHu$+ \F١cu(Hge<Æ xù IyKrlG!Si!3MsYb(>EF<{T@Ct v׆U$뤂^Lr5t/}jRD].4(nXb.)1>mͰo"i+a w2y՞ Bt'Н&&B"Az0Ex.Kf@#u:_0MԨm6 dih\#j+@B(31">eI07̂O STh3Bc39\Pރwa(sJ63 kTmЊ+ b(d?ŋjg8v$2N48Vǫ|v \ҾP$k/ 2= cnɐ>\ƊI|_'*&LOT*Sf-U4a`ʶ/[s67V$udiAn|؆{-ZX.#,䬍Wd'Uo< :Yr1ը 0` szvG'/3)F #BW8a? 
$8Qg0nǝ׳̍܌F4-ƁA#o|Cpt9q/ivu NaOqd9Hw^"cy"ҵ uhT *' f9dM1֋q):J\'7 _@$^~cQ,VGc5$ Co̡RH;b.͖y53vO>(' ~=w W{T:Ci?~YO@&cu'sUW:%Vj'\O轐:":txF/˴=p [XoEVDZ} :|6I g>knzeYiGIs~WoG1b\L[<H vXd &ِ ?6r|mWQ=UFXlS}l C*Hpmw_Hz4¾j٢ [^R@^# k22N~:{(t|5#B>}}DzR5ܒ:Je"<͞to<\9dJ@FLC|*Q GIpX{<՚&&Ei-:LfYG,ݛ̺7 7g (j>ilLAJ@o> a_cj02ôXi/_(cOmp-쟿*-H2 5XdIsAFҥkAb!Om-H?]zk)Y mf'n' 4LE`<3B7;9ZUMZ>\& U=YϚ[ORyT;$hϯ\ìѴ˔W$ JugBVǭ}7CYc.qOS;Ɉ2 A\°-ܗ)dh^sC`ݧ59]ػs¤O`:얈MfP8JD%ASWlD\ 9{\Av5MlI?~ĖyRJ+f~viy7(k5tOAK!ͺaO] Еs۞`uXCANQ1)f/KF{J }q, 5jNVTͿx H#[A^&p&|hl_v밟uX`kpbl qV8#[R=n%8_]e D&X_0xB/C%M͕+Atrq=^,蔵H}JiF~ Гm~q=϶]ݚGA}%bF-J:brP\lY{d#X`T#aERDnв!ޠt,]gxTEP?1> _ZWra^fHqRKQUvo?TW1F19;r&[׼FeDWSwy |ٗDd:"lu!%Ҵg I87>[a>㧩k dYq~ 0̸VRwl> o-'(f)=2ޔ.laۉxbf'7*S3S? GV#=Bޮ2:Axu~J8 4I 6}g]^ {ㅛZAEqy,I @ G@H !z--7W߆(4cWmu+׼}auTA/-ٗ#eWNDvV,e|{7]e;2( .8p$>=ݎ0դGY"N-f,hLqh`SUKAHouHh/aK8cmG7Sl =!]"F{<M aNԸҵ' 5asFjug~謹XdZW Xln~\òC!ZMH}t#IH;ḅ7cQ".JaI1&y-Ȗ}9wMNy(T1ݟENZjFT 0'^X2 ]3(Z|hI~黡O Дfpyj60ΰm0: {#jxĻh+>x>6*tJ>$}R&P;|׀.̕8d!I$X/f  ]8G$}_XǪ\ b&M3z:bsl[kO "KB]4V};OvZN0&MBc ap82kT %1T2E9To^'+3O5ߐCC(L#w9= )>}z//ܢc<4k Ozk'S'Bs^k9nOnh0rokʐz;x\rl!Y$L dtB1$Yi>dܙ&dt,cÊ;u,#:gNzpԿ\gYt窺hg &o(T"QӃe3 яa"|ߊaʲ.T_շVPV8m dBm+RV_AaGgWe=^oRNdqUtqoV]/!T҆pCS?\oŜ^h -_`@cA_FrY_GK:urES=8Gk\q\enh5Eqa mt9;*`juGcL{.D0q;OFM~p`ha̾zc†RXZqA Bx sAiJ}9R-zb `t<0M-]#DtV mi<%~ Oނ)r_@3qZ-Nƿ֊IjBF࿗ Cfa)o##\iFAK,~|z c-W0XNwK-Hb{ ~͖ I!ʔ?x/Я1U̫2TplGCgUUn篥z&;:+ׂ,t$Q$q@PLoRRBN5Ȗ7ҫc{JB K.modql-L%Hv)cN@q?ii7Ůb$sF09?w7raO`tYz%ʳ?D%@N3;o]=/#*rQ4w-݌E*ퟹu?.Rd!|Sy޼ qѪX*ȆV DR7Be&,{N-6ETU#Z7Qc[ʯTT 5 tdhꓑ3 ݓU6; K?Uʆ֧}%h[.c B~.^@;GPsSWmYV.q^yyc~*)_8gwf/34Zeچq=s&s9R[4T.c >rnք7*>ZFҶmuNdGؒ5 3o#Zט7ֹ5/Ĕ?Ӻ~($eѣz5}9 јjhN +V}Li1i@2.cC=M,{$soOes$_}ņ#bnBvGv4~?+?O.|l~qS*Ql9ҏ9]"BO)Y^Lc ĪR렌џeςنt0fB O8ݻ1coǔCiBw&-.6$C HP/V6!.!UwSd{-&X%e Klk:\Wy-TW<OomXiVU t3A#$W~ȬҦ|?HʙezKIV}$ v1nlxDž0`]i,.Bee)VcόVU47zrLf!QP\8l\|JL7.jbN b?'NBz,6#?7pa˲B"/U : m)_C;{l~HDoh%TI V+U?D,m[ ($U8h,X?UF0,EShFᄾXL.mf 0BRiHp諺ÁWc"mps%ۃZ/k vxf,`cqw`/x9sҧAQֈm!~u*q ƟJ*AŃOHgD%$yf™ށm8G,A?%e'.DZ_B ~c`32w add0 %qkck _Y _=8f#3r 
tȃ=J0q.)'z.ixvWe9aS{lF+s#﭅]wC(Ő,xV*{]f5P$Dc*\{ETA2[b76fUo}3ߚ[M m'O[R-oL"GaIu]kZ,2PR$x9U\"Q!G9q{@HRgV] i(N{:$]؃6ω I; tPz&{Y 1!8i- 0SFXrG ӭF7\I G$4!XYɿB>MLrԐ?KKSȐn6Mz/(\Dxx^߳6RM} ك_= Rna7x2F? `,9Zs\xIkM&mb.)B`Vbx=e<8F_حw)K@0 clƣ^:59>|u P8apd{a>ۇy@|>logպ TPE KwuaQ=HN'O4m0C@hJMۿ^'呏_p$R).t+HqiẎUvOM7gzu.\/`βIAIβؾ<3Odek0Mk"oɾ'V9%9xud!m^:<r1MFcE%C[KtvJES*RƟ4E4Qը{>DXd|K]e| (=\GMsB1fGUπg87#5O'_k#ҘsO=rsFkP2iC1:7:_`˻B5XyBI0A;˵G*w9&Q}_VSibK9)OQeuʣ[?P&9f>_ڡLr]"8 "R%Ɓ _IO@Mۓ>qxy}d4c"s2 '2&z.9[<~o%'@2]vӫvw;ڊ*EQƢY '2-8{R+mj|?ZlO Ty'-]kXHBnc1`ΣnI Vo{X8h8jwkD% 5&bZ 9n"5R-;CW_6Caiت\!uYq:>M~UTŕNrzi?Iz8FfPpy_f9fdʹG$v`kCa_Q? TճjHGVܙr ѮQIsh*fkI}1I$. H>LsYף $$ȞܝX1[ wn(eU>.#$7yg(aX!s+M|.vg5L/Htr3=YDwRoԐ sq|- Hʪ6߮> s>J[7rǕY=ڹǙD- z87sܢ:]ZAx} R^UWіkTciw4q)݈uaN G,g]Khልb3nT&jcX]l<^ k U#Ψ)dV,2< kt @_wpU3:PRJk&>FߡM9da:"RkpLLgeLBDB*9c.m?t,GFtEâh|uVg#tw݉ ,nM miV(@ֈw[]fcm2ot<~W |0_'Ѡ9E򰸰(3Y@]:c? mNHY+eݨljM&oݧW%w[n JD ɯrG^fK=F ٮi1WoG< ;Y2Q~#Y I8Ua¡xV8j!sc'W\&4J?a}j Iy4Adz1{c~S^=3=/Rր_pp -Fλul;<c_m _l6W.t|n ri.t6PI\*ᩪw;)k!΢PT&>M|0kOE %:9qj/dcs=~`vZ]g̮-P %~ng|i f&`*:[+ޣ1rفˏP)M (Ôgu2@H-*c ! wPH> uz"lѫb$F:虶4᪤Id@\.!h2ﮐWJbɨd ʄ ")[T\>Bbө@x")͡@5͝&i! D 6Z1\\OBFL?AF&UY-l O mFɻgSTK@sL=?Gt,Me3LvRA |y$eYT͛*gk c$ԗZtOuxCuLk4s v7<% ꂭ$m"aأAR>4y%^irGiqHg㱼bidG ȤpR]~%uuZȤebڠ1#Ϡ'E (ԃJN&xEQܮ^ΈHi!%:;SH+~S[u"@Nau%FjuAtb)s2qE\jF~!H,2eGhXԎjg5FXL£[Oz1c}X6[Ɖ[?]l5,,2Zܖ[v2gh]&T=FEKK;r9˿wtW5j_Wj௘\4b}&ŗ Ws% _Ǜ-W2X`j' ֽh |_b=P*Bj%o^C*O_<+ Y  nb)8(V-"i-iX@>M9 AgmcF.eX-W0~嬷0d\V_n,20vqq^BC:nK0Ԉ @ '.KsFwD\f7&ևocn&;?wy"d#9PD)¡ fRDܾRt=Bs{:ZV\ɣh"HB"%D5t4{H606PC apLƀx eݵ(h^wJ؋\r EƤvSOk\Ej <4DQ&Xqq 1:k} ~e)iJY/SR`U?tJ!&YPQYFt䤩̴;Tw6C~tM8#`Dȏ _[\Yxe1[oG,|RQ=D8'*P12;p7/R25.<it+ ӍXUbv?R8( wqJ ;0@Pvګ:]d{I*^>!5:4`tM2>E6\IK3 {Υ=`xLw: r~oܜ7Lܛbz]l6ʮ"mJP2R鬝Mё Bĵ ldlM5U1t^!=mWfy+^umnMTtD$2p=f4򅛞uT#{$ptr0_M;>hF.v! b "jGnM=Yz"+'hwl8|`QWZ nrR.,w4F${oPC[21\ʚ[DqJv$>k>H7PE7  #;g2 0nDPb6. 
H{_V7&MC>?*zk0 us=|= lҜ#FK;tAlQ,ӿW/&[+:/YDńomiU!tg ռ$ /ՒwEkOwWYgp:CwRcqX:`NJm9Ϫ.Q66rÇƧl<@p7 ݿ|׾ "ږ\ZIDl, c{MuU!ԀW-&(am˜}#)ۘIƃb@ ȅ TV},$rh?`VXElweJEŨNB/ُh()wKp]2\A/G 58rm̟WT29s#"†pDWYؓ -,sm&+7PO$p:O?ּbEiFКBJE9)1]!>?'"BEKm;!w\گi## =B͵0y| S# ˏUWmB6ah0 GLQ+g~vk̔/ͳS6]oՉD++`N:NTѡqqj j23Is. E-Cf7)ÑKZO@^AmeKeLxvYSD JY (7V?ҬϯB.t9m yHq=nJW"MF!otJdv4_X/%r/@l)Z-U|^J|rS?& lvrxܖ Jx2Ayj)GHZgAZTHeؐ:=&SU)CŚ3KIxfR)h^k9Q  8G.36xlzf`H鵲#QIJJxk Ӳ.IT֖&9*z֍$.=,\=Oq;|eq9ͱC^eA򼕕fA;u2cׇ:d/-so>JWLSp uky1ؼv2Eu*0gI#M^p/' k=t$ czaҚFgnŕƿB9ln*EmE\\*-ɆR"__DK%ŏ^TDO4vAurO/G^2 ]HpC9t?)eहR<1 )H@io*({b煣EeK Yo`HY f4ӱ#~Ľ:qq먦*X5̇L Eq%I)f:45eنUv7V;&L! R{+1H2p'>Z)5]g՝ѨgfAE{5Ưٰ̌$:{EUwZW2~QJxjCǓ͌ȼ%I/r@&{WW5?`u`;H7O]1YNo@GAQHnHS6mh?{Lug$IF\:!M]#}U=Hb02FdT'foT.yhf=!c 3)ɐ /qUv@[P,{od*"nar5 24h{6 <3^CDo{cnq Zx`O|Gl WF01J<JC|KO;>9 fTʹ XNjǞ 5f"ҠW$,"XMt陛$4iZp bÛ't,݉@/,{R_-e<7'ңa,:5">Cbcs |q874k$Y} iRx18٪;ihjʹhjTZ^r`GSA a'w Jhm׋|4.Id2 <aR-.$ȟ~YO~u:ǧNJ¶h\j^*)G 5nk_G|ZhDJx:p_ HN u7' zOG.6(1"!=v"ac̬5/A4piI40l5_xI=1&9%=zKIE @ԲfxH _QitB_b>T.`298p$L-YH2Η"K{*:E|j|{+hEVrBMY k k  ZF4kpy@&!< EJVe@/$A yZٵѝ,yy T? V/SjV97Τ*4Ui. 6OY a\? 
U_6IRtV ϛZWF 0>o"O.L]J,.3D9L)'NyIũ1v7iY?d,?ʅ2l`WWȷIfT{༨11mKda ZTYezĐaR齳oJb$AWZDK(?ېI* yмSEX>p9?H0[@a-nXC'^1S4w̖(xv 6PW`e<WN-17;]ϟ86`1Uާ$c @0V^8mr/qc\63XqnC"Ј9&?əGhZ[]>`d*S]],!M0cЦ*%53g[DrJ XԀ_~}'Axdo{<#;I;*iK>q,a}mmV|Cnӳ£Au[cdhN Ƞ_p½7.Dãj۔K0+ms#Q!¡o3N[ 4]$<G8L ~ZQkȎ*| $[v}|9zqDڴݫ8)-;*89᪫;J&]C!b#*JmS{~20r-cBt]v)P9S=ZC7|/S{,dž2ݛ}Zߢxo-9$o)GuShaSr3&JQUm8͘L-/NZ(yzFJ3f jb.vo4}fa#g)np]9:a!` HSD,ma* v :b)< (W8S(6`]5smNq?ξ4ePcjG(HcjS&k`KJRR80q%J`8_oḯ;bpNw!)\j?8mZ'4)OTØ-7SO:r|ZrmT@0蹡E4\7lG$Dec p*:792n5e-Q{7ca^?u[s5jP-jApIк^]zOҎWDbbmCkȒJVHRݜws[j-B'> iA\ӗr2**g!N>Ƌu.PH.;~ye9#:#>r3OwgvGh#~ÀI_.AИ]Ӎ?ae_IʋIJjA)MD3u?AjP+7J+yN#ۈ_UeQo~\B5!᧍)׹ h(K F)v>!W<2g|ȯs^2&]j8>2ꊃR8\cHI:,AUp0" HͰ<3vAͷME)Ήa8\l)h*)rɮ3HX>= dFq|9 CjJLZZMzrрÒG_T}-;Ur'[ToO9'}#ȻzTW1N"S]W>.# Pka!ؐS`SbUk&oۖfu0a9ZB8̃K5AmNR S2RׁID87k$v^K lKZL90@]̩u]fO*g%NT_%h"GA>-Amw-ʔ &kQL14E^^v9a!\P؟ґ-q4 ȹOϒ0 +5X}]&0#/g[VَQSH luGsӮ)zRJӲ;H_X3;bL^;JTFe؈fbH G^~ ǎOԟz~J-)FOrΝJ 6k yzzBJɇ;:{FocM,ٞr6߾SUo& aȀuӡ2Cj#3%J:} 3]\tfҦ^K"$~}Oi\]ڈ&\uz)l̓hFZ: !a(zrU7le[v7hZa,iBM'W~5 ״/x{t盡 >V4 @JEl;1@.|R5ܐgB&ז)wV C@חEnA<{y=x.]tIIr榿3ertoA7|E^çɃnf"Z6׉0Π&=Ɇ;&E<"LHy6DOϮ65_|1 6X(y%1UtՂUڐ#@6ci}SaV}cji߽ PKǁ5af|!HII57hH:Q |Gf-|Aؿ֮ho8!?jWr.#G5b@>z\^g7wW&eLv N.0ÇO 0Ȼ2߉9!y8:`l0Dn̬r׿y} Zh*_x ˢ*j@b0OAJ_zr~TwQԪlEǝ g#&X͋p6Lh*Kּd>[V9fU,L%&EϨ)m3ܹYp㯾/CE14"PӦWͧaQȋϻ$^WׂU=jZEsC+Jy-Jp!fl*Y=R(ëVָwfb5D9 p"8`-j~#(7|us@K#q<;OJ//[:`/x3VMOS_W_/ H^o"w30q$x﫯;@f3CDBR,9&]AtTM扨o:pY~TNU2&p[,$~,pئ V"`ll\): [u]bYUto$$m#j bTq%NA9t`䋓޹>z[*DCDd$z%yYӁ%z{Ҁ b5z ]'-*d 6߱gz$,@;wM[ 俸CĈ[iRơ<"? i֝BQɲC|W"w ÅW7`NlXJ>i͔P B+eҩy;HNY :LVICҬ7 & M½qɍ}_9%a shQEh n(g*U1 ZUwd q"{ |iL$Y1/L 2\eWZI>V\6SyW bꀧj$_gWㄐN=hU3Z~16:>T르I6#0}Ad2ӠjXj6vg,V=X:C!t5:\P0CɮֲL7Y4hΡu[xP$l'OX>6^YUt Eib tH7˽f>ό^f&ɳstBфG$e+֒KAlU \ x.^cFwK;!$hNQu~nbPULLa?[I="F6K_gZ򪨥p/KgiwOt[|̭We̚ԦAq;Y38){"]L>*{-+ u[hT22 Χt ,h;JO |DzqO̖V3 _~,}xyg7/lR-p3H^trA`3"0BLL;JSHUz8waCQe* Pbɳc[Nw '`\:*@weSD/"WrW2ouk+ҸXOe Isf[@? 
Wҳ"R1N;e,C#R 'u"g tb3F]yd>;94-jgQ |D`򾲟{oe|p-b`@7pNQUU2}D䥦;Õl!޶%c*ˈsK͘‡48dT)kAƑPw(` ^ļ8@ΒNUJRb k&R Ć$Ωm*a7r\.[J^+Zzee'AXՊYVGUTՆK'Vo:IH (D5W6'8lﲷ9T𜒖Ј3!B(W01Z9% ?t*>s ud6aGNtW[4(GX*a!Q0x"ae| 7IUL"ʅǛs"ê4yyLK! 6Skfχs4LY+BZ'tj8vy24KrcyJ/x;您Ka\| /F t寴K@۸[BoBzj\vjz'^⩝ ]n}4z2Cl"nw8Ň3A9c:w|FLŌ&.s(ݲG3. H0x'?ea(z .*gf2ή*G{l8j,0z0,)Dq2N>?y1xGLJHbNJYʑ6tJeRC '܅Mtɲ&ޮ4>wXTuݸ0gbA6)CD6#p7kL++;Wi@i ZSWx RG6G7KϛE؈^ITAKSiEza-odis2Ad%O<~D\$< q̡\&%zD_ u)@2W0ul]̡@S)  h:?Nuq#.ɉU7FEg1Sˌ>=YmTgj4 yFP++K%zsL'0 )ʹ')A^5;K c,c5aX4C0  BbrJOVIֻͬU" jLgHQbiXTufqTЦ|9?fn2s% ~Y&X0KVl܌Oߥe(ߡ@.RVk_\Ӆ4v2 C҅"[X/n $]$LmN9!n/0ԍG[X q8.t3Y/L>j֌NǦ4Ґ䖛=]5!6Z Uߪ8UD H(2>U[zCkJ!;>W+j,WbF9WjlHs_2jtѹSy e@wPv&=N];[ SI5aPdj,^,Z@<͡B);D ;qQ=ЅխM>gmmu?a(+i|i͇/>lO)4Wry#/']9 yE `Bx 6Lh%$KR`v@Cx7˯")<:oh]3uK?$b*97}4T88OO 3PZFGGl:ʉљ&+1i):L#: ,J36.C{<-دG;d>Wi:vۢ(D ҆$Jp6ʹ֫vo03DUTܳVU:YaUcNdNN\ JH i+Jh B6Qu76C'Jw($[*R\\Sfͥ?A[~i:Fj;bc !G5fQ;SGگAq*p ,c/8/,dY#yĨǦDC8gӼMYsg 3>fXgg*eK%^)(6e9X2>k&g`!9`7~?zUF P#ޟ$gtpj"LD-@`6/S5+Fh mbn'ǃЇUHJ =BP<3 Ekt& e?fe~) !H ̜ke0e+=y5#a*O1$֨ E'\bx̀H>Lk * z;,csBq4'(@8D!v09yB矈OŃߥ@/r9b yYm>W+̛4OXC-v K]Tcn~9}tH%m~q;3WK~4Qթz3L$,X7=Rռ+/_~rnՌ]_YooU sF{e I! 1IK|>" 7V2l顓Z;)g^ RqF[6lÅ( lfuӬ91D O}nĆ^72DOG 5)I^f{ 9f aS. m$O? 4PN17ObVp ~7xDݟًfPqHt v($㫇,5L=GboI07$ ͢lS'sWq7!x|p5|:p="=ˠ} :doXZ/ ҁ P<3kA<oRC ,ދj]#8 $˞2'{RE1Vfib.e 6SaʧAQ#,ABT%̃d7 6)x8tӱ*H.DJ/咯-o`JKS30/iT` 'a@GL #*9TZQEMG6(Z]Y+gJzrCGsJX<3e:I~ z Π8`#=2; o]#@`Ah^`rNcws{fd{#ϙ܈K҄p/i4 I;]eS.,{z՛8N8 "~sqHL%4\BL2Y/mfH%X15hN@(1gуMqQWR$_\땖/Q[yl%kY:,`ڄ~t:Xq2v_׷[ e_)5d to'\]Oa5M$@ 9! 16B e!/ohlQpzb|S@BcF̰A?/`|$;4 p7H5ih :/P3V긄0ĊZh>FtmSL4Kb%; a6J|sm$kr!UPPlI7`,z[t!l bD?ϫz"qMh* `Fyځ# 3c"\Z/V{i0>q#ǩR A'#-NG-(|'ſ-2[gdn?UK2&B!Ut1 uԻ/ "hSX,r'U%6VUëz$-mX]9Jf2 :dޑ; VgU-Kۯ~!~ϕTA` TuR2;']n< ʥI ՒRdP;8/HA|VLUqpI2fluS?34dŁFEy\(&:f,ZSa ?6'0+sZ;:{|+kӑ4sAgƄS:@D.ά4gIKB3{v^(T0jI).+=!(ޣЗ4eb2J*3A3~4>#9=PvZ-Of. 
r/;g\J9L潼rdKX7' >dI^=8,¶\NF[,Q[;nn+ fyD{Ht BTMiia;ɰMUl6\tRyx ]UMrqS%ܪ/j,ez>4$.Pf}O@KbC'pq}.)6Y>; hǓ53^^ho`fV![lJ"?!IS@K;PxT o@Ôύ!k ?̏9onS4Quō!߶L6 !wUF bm'Զ޿MKK$ "^'dφM|SB1:uay.=EC3l|KO}t-gXtJ󨈪pX}jAmZ>f=7OǷ)]g1 FðC74 aFRF (A \B@?mw)[ޥҀkH G,`O}jU9kGU8{@7>|6¨$5F 6RY[' 徾,CS65߃ XAܡ9fmlL6 \T!ZDrv2M1|9ꭰ@IN6E +)n+{*SL}>$x}7*A͛辍צ_cz k0B1Ǘcڰ6vjA2g~ڇ֜as\l++b:i(lg+:Qi# -FkD"r8 W>Le;ŢC:E~Up[FxËL7"xep(;0\!+]u-8J%BMT^CHxz@b%Apo}{#}r,#b@o-3\dyqej䓟rqrsd$x4 #}-$eޅ˷|x357ByocvНx N ;òX,'=F86Ћ"Ntu\$k':<}@W+4"k+ں1ˡ%S{zxny_xBy*|-L%!l)V+] -f }B.n]9g*g]3w!R\Pťǚ't wCERgR\3 J>T}<;4 h+ hhѢ!]ё͒bp+Ҭ*ͣ$?[TrYl)y1@.3v쑥he2@VNTqM f,]sNŽ=.5 +5(n5mA^PQ(%n8XA?W=0nl%OM/G={xBܽ5PMb1|5CM%=YVjYu_ bԥ&e<9Yc'_w`_׀SiLƲvGK6Y8I Ɖy1ڜGdp6Zg' Z.U~JO89MAXtܧFc"@Q]NRo/ }71e+C.__DOWjq3ao*T ރYyԞlݙ`<+'{oLǀq t?o:S/ hmɓ_SN$&1`\yҨWzOYڌ/X{sEs hÿtV{/I<:<јlUFĖRE Y8M0{=$@WBaQH24;ƩeObdeSn3^o8vQp1*9c/;>5y wv,"IR}{lPKǖ8܇!eD:@ S/'5in\-\kW@?/0%smp?'[ثlEtTA"ᄘ\.= ~{e)VL}BZoX IY@PHߌ-Yۆ7Gޣk }=Y]꽿I3"d\-_ yK!j<7MNw;EFV?FLUh)WǢbPA@6\fp\JX:h]0\|@}qWC>PvK[}&G[=jfMj%D^@;+"1fkk9u8h+4<Lˋ&/%qŢ=ҋiY#ZC-; K||Ct/A'LEx9lvв;O$o1nL>rw>$(hy: gDXYٷNI&# XT1tp7"|sY/EyUwI9bÌ?:t.Y's꣈ڦW뢒 zlw1;sF`!yԪ|Le{~đ1CGN׳)6k5Co`BKgwPTI;%JNgEȴB<;Ң?qj%EHĆI}ƍOo1y؀IFkDsLf*sJGƙ=٧8!ؓ[@M]Li|\v֋( K]:3VRBEb g|No] [%"?kv5F) x`Y0aP8-I+ide q##`r6f}L`X<4FHVᰯ@ ak5mJaNY_5C)=G}FL(FΓCW%@PĂU +'4CGK_@]ՏX[U=1:UҐ?η#J$0yZʃ>хh3 _XTb)V/i1MH3fbBXƹNqHgGl#}{]2"O5Xe^cqXx0su!"]%Z~~Y|Zbȣ!j:8h/K#0P\i`.-NyY]]8';/24̓w?~!:~v:I{>Xs #@cvuT+s@bc\~OF bLϜwXoI|kS"WXT66j[a~fX؇cL :" CoNإ2@4:{[aV&9"e[DE{;gPMeC#:o.v&i?.-tĎH kX̭oۖ?Ysxu3od(/Q#T/j䑤j,1i"LF)bna|9U2,D%G'%['4+}wU&J2HNɷHV[&M}pa2*U^ (%屩 1>mUV7wM~)q2XŠՋĬˑAUt *2fHLL]5A?q>ߞŸǞì@fSG5)ׅ7*q)oN%R/ܱ6yQ,e-;\w Op[:1mZ[ FjcHN{fn'SQ ċV`q`|af:?8 _Yiö[ՍPes3]\ */2"lZo1š&`xY.mi(cS&iK ^p*7CH99F{C-UCt )xdRCГ+ lVIGr2>-6IPU&nZѱ jL.V?ekÞ63)Vڎ !+)b]Y5mixQw~ыt- O (|xX*PN0TFN, &u@ڄ<]{ w8V,6'#~X0O:6 @˜Z!qJ%%A9”^+^+>I2f/(?>LjFnȥ ?rf[/Qg:x@an!WTEg?a%1$( sT# BC3wp͸ByVqмW'z1s&[]*(5^WO55;o k3Ͷ?̩mdRaf:.Y?J2->@c;Fr8f,꿇{* []EL#רhn DU ۤQa;3!ArU']!ń6dNGski 
A'ukq+fqLъWgc&#%cv)ldNgQt/`r8\YGf^6Ūbir9oo7֘3fĻ1EH,:Pȯ;DF.&*'Rn$wHYszO_d]m@{*h]Z|C q ڡp,+f(H^e/W"LEY.I}ۻ bX]7r|TdIC/^4*7iz$'h\elZZxe)I*޷k̟- wbNJwO׫" :tBaUYy1Hl0|W>X)X{e:i:: : zb 4T2ӥ2:ċ>S 88WLL\8U7Y0C@hW3tZ">KQ(Vh,V( |>w@hj5kQSLXˏ1sd8T]݋fƵBxLZl1kUA94Jt2Yc^{3a=I~J>97; KMV-٣M)Ѹ?%Ԋ(gkJ<|՝-.'& 0KL8iuaZ-#}hJ^$84FtA fcX@ \zAԖ8 /Pey6ZXrV2}"HHU2( ƺ| kxJ]+O2wDisqYN40}@~L]ejnBɧ=N!beRWu~&6c`OMz_,jH[0aM=Rkd? _Xܠޛ (~c N|~Z :QB>_?P*)ѲoAbG n LmΈo$;*"c#Tc`@) OQJk@!s |3 :(ѻ{vǺl 7hh0!&Ȋp3[% CO;'s/amOI`T*oѸxK4 09QAV^ JQh3qqRxmf=B^rۙH5Mᨔ\P#JY9Yn|!XU|*%#țU,1i,>s16gg-?4D ԑD" 7%l#m܉V0ffh$aC)hiMx&-[`3j 9JX`O V1ֳHupWA)} `#eτ PrO\0o:< ;Noc`A҂6CzARKϴ 6h ?@LY%ȹWzufl/\dfrfԁ$A3Y3-A}o Qc 86F bO>KMtcDcoUa($1Z4t]DV'iy탖WAC'GEboc\vW*&9AEm55 eX:[[mQo_̸G d9pM%27] -85(fO=s(k ٸz ̖7d+A'k5K~oIfonKk$ I=)AT2.(j=w q.g1 ?[Mcq֠?+kKB{r$`1'i[@'UĐ3Jd,(wQ)Jga;Ӥq޲QP/x|VOU15JG-=s3)86Pöfҏ,K \0{qyKAB:012o6iGixG\J<[*w֎o[d?+oCq_F.,%(5J87#dĶ&1Rwj㵐^i@{cga;( I0ÇSqUWxCGi 8Ø^cNpaJ 8eҁs*B Z2lz0 ZG$`aH !sehH>tUZ1Cb sg jʥSi?0-ӊ#5p ɱ}K q|Q~Y}+ϱ0 Hk)U*cbaeO h(w|ɻK"`  e7=tӼ}db/#A-ڿ#@ *D@`Us#"DƩM]w1DE>mTMf?6gХk,q7gg(_ 6hJ4H|Z8y,_j>=T/W,`x[(7v%ܻ5) mbc촷:.D[Q" uڏ\L,: 1֧_2{NG\Ӝs":&/e?(8=xo*"DhNbc> {%bu}J6 /_ߞ}p"H{>/]\n ľM3lo2sGVo9=(G|< 9 ˕c1ۋ>W/+'/:czZߪ&_11St_Z|p,Ⱥx`ѷGn]jr*5ZpurJ-[z|т&Ԍ7N{V| Kޜ".{lnSU*|OIД!Dn˧C*ڑGA~4UBmx]`=\-% @Y7ibvJ>Ol&līZqH0.񆲓婣p9t0ݴ=JSܤ6] K>Cp;l&DrEcitk2haV7qQHU)eI=i虁qJ\K҂3ez8~JHьiJ&|ӀC#|Tc⭡2^} ›oSD[,wQL/[S@TH^Z@H"R`mZN.w:`}+!@pp``[/#s Q\`I?@WMA%)w5. 
~FsoIy篴ѵ>u%sbc_@('Z"O"DoE*߃x{f>mWd}ݵyxpp1^&2)騂!1j\?FAp8$Y I2]TWŎ"繋{ {y$QF _!Y8 u2#-B[3 k"C}$0$c>=ύ1QEt訬I( m7bN7͊\d;4 >~ơGwɲ!,k(dAG2#"G4J ] k&e?/I0mPbӉQfPC"ڱ <} LT+@|*f} P8gF;lѮ2CJ00׭iYWG#6Գ!LI84 kܬ{DG]z' rcz[LuV G'^ogo踓Ռ9p24,Lp$G ,%D-v`餐IJk+93ot &nMkl# Ζ~rD3U>sE GW<3V4L[AKRmc~^dED}[sGq4 <_O8XJba}-sel|0YXuu1=.Qm|K򔬒ocU&N!YH2o l?lbCQD.O'fߵεyX[S?)r(:ҧdcKbVN9×M4F^A iGA;WlC"ޏ`=tu%]jThcz'3ގMɰI稏JH%z,zIo-Pi!@~vɂ48vȱjW]饷_ -sKRXC'h F՜X^]cǟ^*Y "4C#ԎvBfZD^c <^ZOq _D ;W8SU^DhY|V.zVTgՃ)REk NpxDM~r&(@&]yl;P_G?.jwEIfO^G@T1?0P/WU[̋-}D*7f q缝cILu@'8Pi(ijsi*'!s Zͻ$` VjwtK3b9]ۓ >*8M_ZZ1,قQSM_ruKv)ʎq|5OAジo;",O ]ܭ\G^؟lzqʗ%w"y X ,pRG8׷0+YƞkC`) p mp8{GЧץ<ƒt]:oXqӜ‹2^Xؤ6\ѕ[OI<7Ő ;23eZ~r@hnPs[MS~ϰ@ y: \k>=.#l?F=9Tx..vuUNliV҃N蒴oɏ\yjYrGz)y&}Nx+dK?UD+Qe#n:*as -H7b3nkKWΓ})z?-)b/Qw:rSQyTMnL@@Iա/ˆN{zɸVoCN%OXn\CAu?Y4`L{zdzC6k?NbC3t<0G *=}7S4OB2RcX6qdY=^OTEPbL3R']c7 X>ou6yjUUit7Q w^l| h=t*iRx1!$9} ?qк7=Cmt=Q "Nj{60 LOq{vGr69ѷeN?m sV›㧥Bq,w)`^=@7p[Aw`.ԊZ d/q» CmT9N.;APʘn6vдs\nc {1GWC5)w99XgNv(i /*BLV˜`91RQd8ՀM %%lmד`%hkV>F;i}\vHkIه[N)'\yzO?T6:@VkCtAjفhgM*>9',D@n=Vo{K`{Xf܁ #[P9\eɾ]g#ld@>K&0KPG!ϖ(k աrb5ZLw*K;S/1|.&] 75W4Wh+@5İqv9 kK}1OVY8:א36Y!a!/{[ N-$2!B5J 2EzT5ZpPd2lnzuaP:umdb[+ibph{@W8Y`ډ:›5u &L ʳUT, ]xm+g`!V|$X䬜x9u g _t!#BB:Vw8^mpFn͜cfEXP;<]{5T>C`Ukj3J;bh64 Jq-̭"DMhbV^=M1$qLsaGos!ˀ=?J<]UM2]|No 'AnǼr, yrJhf*@K:>_ R'cd}gը-!߶ydx]m,@mˊ<6NУ'{;@"l d'Ut3N5>32{\?Z}6MKFTLBW5=vxHM.?4SXN(3LܨI0 (E/[ӉqXjlayRO<֖pU9ނu~ARr*zc/-U߻cQ`5O" ۓ}tb;j/`n - u"i[^1󬏦O-<םVYY,Ջ?}]0$Nǵ2A56iv`ׂ5wxոɁXv'YP|73%eF,`}i@+i/8&w ?wA+3Eƾqri,0=" PSF &1Ը>MssX `JLO80von䵴M_ =c1CL{[N}3Kz*kl2i@~ [uT+En=jg'9gd ms}m7 oFi汘NpE&Y؈1󐅑"9aVJmy4eDŽ7~N$ֺh?Vimj~7+CH`~3^rI5=Zo\ı<FM|o 6A885 ~MKKƦƳ^,߯Vf2zUiY]uuA[SAU A݀xNt.3Nek/4{E5o.жUՍ@ b1etL {V}dh^̢2Q.<@P ^/\ xZ5~Hhͅ Ԧr;3R%R*j%ﵛu#*i,AֽReYouOӰw#KR_xEF`ub'KB"2v[l-_ `/.>ۼ PP\_%wSH]9A?; {6J~Eq o{\!u:9<(!Vzx wWudX:k3f/iSa\I>u;QDRٝ=5V)vX1"Q`` uGF!yY˟jJ?[ixPr<rbpM6Hڥ=S?8.% GTrGvҮfZ0by^kc [sS'Zj}Ue͸]AxbV&!$Dr 9&Sov(<-`[ =m6R{Rk]`D]?+Cz"֮;kSX)HVPm @wyO&'xvy!KdkUIݴHgt{Uw(?ԍr=LYNG?UNT(ӳW1'`4 Qmkd;L3ѩIwPRZ J]ms:AJv e@" 
SJیp(b[{{cb6ׁP \_cC_f=Ql5V|։>HS0~. ['}LI퉊 COuʳ:t 9R5q mZ.V=bp_&i<\Mo`PuhFsnSM]cҤ}ڨʈ1=1*xpݗ\ד vj/:Ɖ4 *)ECYvb \Y.:Y@@0ɟS:9}l :ѱS5geNz[ւ,h;V B@hXw'« BɰA9GF&z5B/@۬NҴχGe*y_F-:y+\6em/]?5и&aOܲJ?Pb?)n :5T< Jxּ؄Hh_uL⑳N0_I#ӎ2Ts$߮l=$D}H T`NLYVOw eXIqr+KWN<Ts4H9ndn^a:CnŞ-dGsˊUBXb>VElwg]ARU/9ռn!q/[#bu_f>aC (*gTU㽿#c2u؉\ [YNeW> t f+\&dc F[<@(m!0 Xb]tA xu% JHnoF{c"=@^8!xH,}C7\UVcy*(qD;ʳG܃3jӛt+ 3 $벙 vY,0$}ޠ񗯫p2z,B y~ҿ0p-5A-6ofbO8x >S|jtSS]h6H!Z8*?wdM}{k6j [<[EI} F$$SrV{iРH W mHAPWSՆy\ԴS4!myo?jۈ—K R gz^G^ AE ܥۇ>՘-ڕg`i.R kXtbu 2k.[{]$yWvg$䢨^Ssscn|{^Q}\U`5Ew&q|EC e >ɶY40kQR.&6""~vŴU+y.S&YT,P1ikװy`v^y}kSnj7IϏʠ/pe__|i ?#j@? >ËP0?+nNCt*GN 5XZ#<ԭ =s!BPsZwӗ9!WeTƫxO _d|Z$a,;@u*WlZ=? V{촸!dk_]znA@u'̼+mq_&{LTѪaLN_Hxfc# |FuП72Jt˺ܜBUjzܑ5 dp ߁zh3l3Wi>μ:\ V&7oĒf#rÅ]WBͩ0`{؝`7V TG6EAG]UT9`¿7:.]C5v6&&]TihImw86=-C˲a}fsd .S Tl*BW^IhBc<\pQ OjG{2Ql$/9B WKu% ~ 86z0EX"}`в:"R1d<&_ոGh[XKOSK`RB :/hՓة(w:].T㓔U }9z (W5_&~]nsz7=A>+D"j{e1i^,~?;Cwni L\%mQdI'Y//gV "" rr`M-}M.Z. PԬ5-_I '/l]C̎ƅ3a,Jz&.BB~4PKPhoV!jdb~j+ibͳ2ltؕTLOɸm=(:-i}o bM:潲[(Wyܘ)b$~ v0 xg>)sjHthi˶0'[ ^f=p@q qNj>ɖOWf:#<{v,r!٩V4Qҩÿ &g'WrAuɹ^CDfE* |) Li1-Pb9VBqvYlVVi@^7" ޱU+YmI+@FNp2;#7KyS._:}dodԏ*F8K8+wC0oQ2n+;"VJ34Z Ov4˂bzp㕵 .2"rҎe\1~S15_*k¦)aOnᡚ_  'b9~ @xds1;u%l7?WcB86džZ hMJTR܁2hd,Vk)\$Ca8Y ]ݖMaAcJ;|T6D}U Sih k^Q;):lqKEsܹўFrCJWDml}ywu`E*L|/5[VG:nr/ 3Tc5rz-7IfQܞ/Ҵ5p qzkOz B][9}-@V 婟{p7GgTf5`\q2P( BHwԕ1b/,a"~&!C^_g]00J^ Y~KpL~,IovrLf>ZqhBo|dnȓ[-Epx,ffv@(IyW+KQs#[MHf*@IT"~O&x)`IQ8J`WMeGی T9Wx`:aט=`4>"!2oK0mշli'xnM~(/&Gp@q`*.ӊ˘1 ~et XD,-N}M(FFGaR#Yl&^EW &7PgT?5)(MZ1.GwEsP&#&]WdP3,ԉ}z@AY -r,[Lrgk_Uo!\8v U=(61eQS;$|+3sH\Y̳vub0)l_;xT`ɩ)^Ru =APPuXۼmam t(J`WkGV4W%oy3Kl?z(Xcϑ-$ nZ co>𽗪v-Ru+LLFrh)5*`3,*GRs"Hmӈl;_Q@(v>sIy}[lrRU0c4z#5TCVN{_P,y plҏ/):ٶ"on0*5. >$(q!Wq7h>k*22Z*M/wOb(. W^H㞦xY%/V.HCGqP92U}-+"a]P\Kys쏳'3Kd,o(烩mN`oxaMBcT܊@TͶtOִq]N#$ǗUR( q 8|Pڣ@HZi&g}Ngy_aVYu=_`ao\[fpT(.mB"#A)heY ֈ?+YSw 0uJA.=0iԓs3H#amP*C)7=K(w2axxTv q`!AJVB6Pj0Ҥ#[T6mߌϨq1 G"" V1to$Ţx`SĒ3oO<2k%*! 
8l)^4U2`導:Xo8Qq ӯI3a#sőխ?Qv6*!j}(䔕( +\rPGAҬ<%` ]tKFhnMiLR9/?;?c/NVj@| "Hϧo g>3']MU!Y׾ {\D<(Ŵq:n1BֈM/'ܐ[fȋJ sUr>!+/65o2uqwADi&Q6fu BώͰ?k-~IZH|iFo `]g=![ᭋR&m5DX[bLm1ӄ#E 5L0rDyaӍN {@W׃o1K;跆\I!0W@׎뺲lT"J ̒;'8O *njeY7G^-YKe =OM>L٣Y ؄IZJl36otcV>2*Eqej`"ry[\955/2f[oxK+X$g[ֵt`V3v@EVWܒ:#nPFqdX%#mΖ=əƎQ+9Iƚo#$%9@>VE1X1AL1҃g>&#>}$& jU:ZXXj6L7YIקzb; PA>Nڋh|~~l\TJ8Qm G?zE G'a JW͚ m*U u&0lV3htalvpp٭wʕS \ixxM.1CT}$d.݀19jօ7![US1yGp bhto/.+~T*'2ΒX0#ֵq,W8!_7lo]朜X_^7rQ 3%B?fQ|T'!C*k , \+2!QC殊6!9dHqܾ0k*L tWսd;ԢB~h ~ 5^ צ ?Ïsͱ3h%˽e5h۵p5ؽ[ y:JDJ@OM#[om#3 5/0rAl 9ѻSb33"Sg]Yʖ1m7\[J,&OyeF p~~3/7E4o+Qñ> ƨu(@=OeZM_cPIJƔwcD{긯&zW im56&xfXDsiAq@ܦ6sbR4F10<#E'a3΋d춎LnM2R:G?ZO z?4fait^;_e}/bׇߘ 7El`ᗀZi:׌U/ }6yS>'( ӚSrC!zejs8.tFDobA @:'<-az':OhnY{TSvw뮤R8'y6-79C&mz*٪_Ec 譑=L9֛z Hګ^㑾mNbwR[I &DJ&WwCrbө;Lć' Ĩ"?A*S":USR(D?e"!:3n.C/8)^I#/700Pf xn2 m7b9uP<3a8#ّk5T$,mcy{{2 f8R& +|wP<R)oéJ0:*EG|Q;[\>iVʮיVO~:RF]qHM|6ynv/#muJ|pZBS t0^+|[Q&J KB[PdbhCO22O16 k ?c[w^b\d=agQ[d*cOw "7%C)XܿaGXa2\ )\}43ߐ7H}8G ;4)= lSuY@Ds- OnE^C JP{uuH>.ό74[(J/vO].)Z`2bg:ͅϽ K,yKJ \T}k6`S9\ 8P=%C;}ypR]z6((WfBC^Ӥ+曒d`ֿb@"+6/xB-S.ކ:Š4qQ\)H]=䶕1Q`1wZ#7|dmG)V"hK bC$P2la-VZ1'^.-J ,|cE;IELZYrfόwE!YoZ; Fq/\:Rm5j!Rk؜cZx]r3IΧHs2C:7 lolʔom준7Ttb4#Lxq5=&F6~]&hmrAA4 ژ\>BɁ;ƨRX:ȶ,Z_Mn38*]ǝ?x|+& a#C ]e̞zlNa_eP#s^U~ 8dCbҾwL۵]i0NEE5*X(oVģH)ap =sKPV[j!hY-/=vD@,7e8!L7*ځ&PbOmYc51O\w1, /$(8N!X1CQw5N{=5t]ҳwWtZQ%:Pw0O+Y6t"ʟ em4f N6y{6qȕlHY Jc].ۓ|;NԧvQDhx@V4KɒK)^$9S\4zF@Sbcu"\,vHP3=1"Y"qMo/׺x{lf2Ǽ?$ʢZӿ9e^cﯻbVԊ :ϯ"ki>)Ƣ egg5c M $XN1T~aBPט=Aܵ&d,xN N(&pulR=QN/N4=R܇.dU}ѱ `rd#[?˔]o#]q\̓evcH!sĖ?A݅xhP#tJMXzlrHQ\ 9M[a98\\myEQ]fPJMM-tI i&Hq}9x3UOUTT'37 GFJg#gU\iB1l'v2`;2:}MQZM] 9:`* g? 
ONŖ>%…bL) X+6D{!c/)O.3*"<3a5$7{ cԱ:If{7Uc3Tcʾ,P ^`hF.Vަ9u K56eAʼn]5~"dO7"ƊyzUR'#s q23ECQZ ~"sb oۥ'vFrj߾Lҥ0)˥J :ROl/6"٬ MokEIxԚlFY?{tT *4{~ si$MI<b.rI<#3GfGx(~?n!-6w~\ޠ)]Y_V qOg *pH"K HtXT@Y\۔t-!",=cYPI"Rrz;a޽)DnXUEgD}T@ˤы:󩃣<ݼ.u9oN<,gr_Yͥx';X - !vᥬ0QX`+`o>3>(CLNK3^Laƶ7XjVs% 4 G{i>Z9GO/Sڼd4aq\V0PZMj 2ݗh1J!5 |9I\@"Ků`9Ok- ;R"|A\ONحr']I*m'$at)k>uBwj@:|vd1TOmn:(_ 8l7+A@I8gO ŋ:HǨ܎fŦ:I$/mEk~8.:ʑ\?{2C]I_üwd!23c&)-VfV !? .C!0&t̶i\ꚿES?8rTQqu%k`y Ĥ4>8vd!vGRmOun#X!Ӂt!45ؤWw>juP=x5걨}'{)v-) IN.#&hHCywoιo p髶ܧ1nDr,Zt}QSY( A~FuՄ3S%YQi\lA rr{I\|+ yDM!kwٶ8*-^(A#kA`gkPX-Ӱ}1}(jxF& eϟJV F$]D4EDanctn5gqsC9d7\^yV8V~G:Eo Uc+ !AS by ~rRic/COG`| %]iI|P}׊8' &J{'x028 jFGqQ]LrMwwv&aĪ=hx=~Jmu$٘n\ZLbi0'w,>,¡rӂ?TEࣕ{!8mi/>_vT84VM0ٵ8g >TZnW]nLn)` wP(<3pJA16Uv(TIgI;N,)HxG0V͍YZGؑT77kśN[ PWN8a:x"Hc=w8x{i/"]؂02ۼ6p/`zԅ.,8J$S?71˗{v |)-M]2sekV@%-XBVFNex%Z8ɳJV:cTh.+M:M*~6<6f]Z?mSҵ!i1Hhg]`Y(\o5 i(B$#gn{%-٥ KY,UljWXѷب^H+f^0eR- " 7UaAiolG@50=ׯΧ:\+m)ȕ>+G糴zef 1RSwm`h uXOVb^ _DXEFuŐ3A䥅r/u:1w>MD*]lm [JN>#s%`[i-PaNzd˳<G)u̲ T~PE-KiqQdm_Z:L9.^ j ~K)H 8h @:Tcl55KsZ/"{ IC6))<^>"?'L/դ;lKX<%01Z= a7 \h3ʩ$~Jק2ІSlB.mTd @KtC"Ee 8n{n|/4BXo^ awh`Liw#ظ&Qe'Ԟ&_HJ?_ M1m6CHت%򌼓y)̨ő K]iu,W۾=d[kL4^_fA{kIΰ]SL eLt/D\R瑯`SoIrQ5_ * >^41hixBE.XB_eixi*W*юsY-`2[^.Ll%IGX#^፿DOh?esFQ=Yzd@b3u|*N%x [aȲCz)Dlj"%1i$ g*:O=sTJYXh<}j eGgJޔkسwC2j@../lo6ש݃Y7ƑhY8oEgՇ{k\F@WilwnIӭ$O}\ ,D}nZ}1?hϹ-z$P١FVvZ,^XARl|r@\>Ȫ}^ k{ǁ#]ƨD${4+vWǷf <4(ߜ0(SK8s`fW"|[${'rsdO{ۨUdu0q`uB qij0B. o*D'^fؘP̔=H9IAq3/0 8D)ʺh8?s&[?{SHEѭ~Hz;dHj;>WI7h./𡓱[N[F9$5Fe(wxXz ,XC4@do;8**e)An1չ֏roP/HdglpT8\D_VW fSdmjSjr$O ?P[ǃ1B\{Ep^>tF(KR ~*_C"EJ>o+DT=794HL@se8Kmr)E$e, c<.زr`})Nf'ϻ^RN/5 O7AbXOAO>,AOg#Q w)5bb>`_ȒZAso 'f Oٽ D7*ϑ-C80+Ԗ_ ,FwQxfѺUją⤎[E!.h;([b`flψ-m03؇6Id/ 3;WKU!ó[p_ B|"3u%Iqf*k~(.Z.WC匞6 P\F{ϔH!S|भA(qX;9 z<7N,['^\W13dqꞀ%OZ|0үnH>_L7N[qg?4j_*8O^Ym*7l #aE9䯟oߨ ;'ڲzs8nUC cAʎ[ Edۄzn0Cͮ 3ǯd´pJg= EұmH̷]-˖$xQŷ4N4{pBFk Y.5[*HLD:˞dvNOWY_ 8!;gW,2e'k3z8ػlW JBU5|4:*}վ7 =ޛ]8-GzY,cmZI^͸bU*=~B)ǛuD qp!2N B?R9Ӹ`(( jj1KzcA&CF^*F.AHU6RsS;n9VQK-d6M?. 
@1ͱOk?B' g\{LdSyw@637難znezs!eQ|4,MM  r p ȤxkLKyNfi0@)-RGN8Y0<2Rv(wvފ)Fu%ĤL?3KF'%4%^i(:p9He1-&…_44B5W@:#^"xf-`%`M l7c뫹KzZ/M퍄Z\o=5@0e>&pj78=poCλICxq( "IQo= 5):I-dsgK*yҬcc`I5mjٺ 4e=o4tNĪZRjqBx0@H&UoE#:R>2(tI*R Cq:w%W?uΤkd΋oi{Ϣ0W,t[z8"H*AJXuveZ`"PL(bnΪ?'TћE17DXO|pXkG[qBK7˾5`9z`P9j-C+g8sRX aM_.i?E_eC&_v"ugWwJGRgޤe?u14)cZ,Z{bv_[W oz z[=Xd-I+A RJ(RT.HHˣC%dSDP` Kv`N& PjJNj̺7΂G%lPN]u 7 ` 7g]'$ð(PwW؃ _`%瓟nb^ChM#涌eQ%jdww.6dBo*~yhA福rM,dإe1A7ߠSVK$L&ΐ4^jFTdC\&KEnPq4 Rj݌ č Ww( &.~L{%HCK>MfbY\G#28>3[WȆEIux~76cn1qޢke:JM%3Pk,[;Z$%S(6pׇYGmnxmkkEq !QMJŧ=&"330"?_;ͮ2؀Q)Nȅ'+="L*~ k%p+p_82\A`פ[h9B"2+/dW1@XC9]Ԇ}jD94^.Й;\^ #+P*Ji&L5[^Yӧ̮/r2~ҵ} E1M*>ET&L;Sfn/L9i썫%CiFZ~L#pI|-J'&Ǔ6rM^q۝"xL鱦y}[`Q{ѠCx3it(Sǚ]rI؂^9:[FKI3Ţ*hF,C ~&5P xFd0(J 2'VJsۃA\v;`Lȑjd0s9u~~ڠrF?߽%T+@mwOIZ,*CG\*{?+Vs #U )^IbIVX;(bϓ&'P UGEꁒ*oh A~䛈S q{UXv,&]C x *`  ׮RQe%^p]O}Z uaxLo5-1q+>Q;p]?VRY.MHހC=V8,l^'0J+$~'sO]rK%]9?(` XR l\*ͥb^tqrrU4kQ?p ah*W%\J52I< Qdz9U9:u>ZNvLM0M̸(nL><^}ҝ$0Fv(iXZ6b$nQ ~!KȾG'\''֬)?[˖ɹE<? i,\PGU*#`8{ߏL$nd-fs0h=]ѵVBO@Ū !8tV"2Tjg+֥b˨ n)Lr%6 ̈́Cfޯ%C(DA<:(o2#~5L$! 7劂7OQ eMg8c?+޽^j tΪnj6,W}~hE*fPFK\y۱z:17+i忕XM܁(!Zz|HT o]L7V%dVQM˩Qɯ[,H>(bXJZ4?2\bk4Qt?9qf_RdcyO.esX45hz/9s^Jp@G8iLW=䑟qYnb=r hbtETR  WX` sT@=kJZ!yYåA9Lg0P iG\̉DU)s-p!g'B'v0b*BoP Czf"S9_(6/*Ig,1 }V\ok­A'kn3egCocUjRQ&sc1}R Bz7$>v12PYO|}.x#k$202! n90U2P]`87thK $xmif/fwl $*`Kb%]?Dp3pS$l>&ٙ" FP&J=PN>_C#!G ]!w4lT(L[X{A0q?  p͂_P{M֦zgBp][4 z;f`8N V]zp/ AZgM -/Vk߷B;\2T;Z2߬Pԭu~:)| <6۱+/)FdƂZ}${&Eh iSEQ^f'p 3 Y\[pNh=@pWYBRAD渂ڶ ܁ji9U)c^z2=O,7&<(GۍWs׵B$gPw`x_WX1|R5|$' 6 G_ f CYU rd>O}^B}f;`\rʮ= |QK^e0nddd$zR۵cQL!}1$ܽ!ouvLIˮst~N8!qi|{m%>ލk!0{88⯷.C:-_ڐI~"a7,ZlJ7 B]~@n2.:C3z"?J^E HΛ ~K#=L'ϟ`ee?#&McunP w>ʴI?i_@(\9IMeѥ>ule]hgA %o'qoV>zYF,vr%ҍYY$c(^z,Mp[S kL!l_@/b:ܚsL)-X 4<~V-?޲ik˴C z.*ynuʃk%bl.(MpHif6u8j7&7r yɧ #S`?i,;qUL${Om;ٰ OhJ5Uz. 
v;WX6Fz{  }}o]׆J۾j1Cy Gf \7sy]өٜMg EGlø;5ը 2q}s__\;*w*OmZ*IbֻL/:,H$ԃ(  I$"$4ZsEʱ~dS~}̃y,|pdW@Rn6&UK$EsݟGB+FD6_)nT t ji!̥MOp"M8?n1QּՂ*`ftprrbsR3PA9,76sʇk={9r ~m/B0?KeHNoxS,xs5@t~nhv␪ޣ7.es1->hmWC޴l%1~G|_@9%L nLj&>XAEu;۳$7+׀lsk*:?9FIB[M[ƞEH M7wn`+(+"k6" NI,yQo}z٧UW'θzEtʴhW\GVF:%Bw`prXDO 3Q2`Zx䆄e;ǰ+eUM`?˧e ='tF48?!]uneEqWZ7oTe 1GZSSfN]2mi#HߨȈG(8,/QGΊ)AYlԲni樕7vv5nGCӘ`_[Q2`uC2$2%*)HK"xepW}B (U3m.H ؛@Jhzd/skxqf#qm?=vm\|!%]tL /PI_ØOmYbzi4~M07΅E* ^Kg è[a=rQ8&L1! ֔YrJ |h̷eޅԟ*O‘fW]eq}'@I'P(?<ل(1L3Bs˗ tL7F%&B.j :_ s҅%_ܒPB@[ 爩{CMO=36}\׬W@3d0/k 80쫲JU] ~YL6zp /سOne=$}69vZʰnOUAy Hn|C]sA2j !7(FV{1B131鹞{w_0o{4O-{jϭʞ7 LX-x˅`1 5./+he#ݎUBrYkmePjs<{I?zz ffUqD ^L/Ҏ Cw5xI`*؁)mE\ȓb~+pQ5tSKR,D@d)Z)Z\ma\A[?kbO y8{t#`I=C1jf ΢.$B b "^Ì?eS؀Jӹt8J:A1뮨@ˣ8u^?zGB~M$e_xjqwoAy\$kS!l5 j籋vGs2=(exPY,˼ mBI]v0P]ztpdHy0>.=zd|R;:T4y鍊գKjMkqmǜ%LaQ<2d 5+9N/PKĕ/CXh,ێo9xxNyV"مTߪm[nS?TGo»x^ع/.^~`W;{[İjԦώ_F@vրqkg'UHh7a*=59ݦkE/5D_ri4UN38270 NiJb#܇wf=[yHbqm j3_\mܾbP| VAO.cS%:fDy1e3mqgS󿳤>1agR.U`-KΝ7&2TS0T2X帍xeHa o4"AXy)(U`|<˻W囱i3A TvC+4 Cm(q13Nݯ]M%W"[Ǘ#8: kc G93C JVXkcGRwLP=%LJb^wSCJ62f}O|4&c8#kOgɊ۾—ړv:~#su ]DBIyɤԡas%>OK_ɊėPL珄u\ ~*,c%u#^N#}0`χ;܅]Dqea1f,J|"' (5g6"牞\^>jbFaeNa jb`5PΒ4nUD'JͭM Qk7}nX8%'o]ZPNstFѪ2 #Z&ds)eugCIE+D76`;X¥M8HoL ZC,6]-BG䔂Z񏫮{ ox >NB| a꙰:T!ݏۥaQkKAt]ʌ^XzT(X[~FÅ!nsx3ŽU=tDBR4D,e5,B̌7)d|}0XwvG"Ӕ'q+h|޹N"s3 Pr\Og୥GM9-4񉖺+r`TgI5s57Uq,/*ʚY߉_>o.>n.& HZw3Fx"yM[%Cvd,52nl*ZQӷr9Q=W**%lkWtY^^Ug_d`|nt }Ȯ=|p8ڸAPqŖm(DKxіT~dN L3%+D$F2q:fQ~h*akOx0(+ h]apO#0ߨ==॓ťeO/s9<뉲la WZlAO 8![EwQX}RR܉p|/~]:[&FNUsO$3K*X/sOEU>/NC#i=2Qy%uԎy\S 'ˇd.vy.O? 
gsnvf<},ܭ.8e;ɔW11r:x<^_(oZU9{glSp[fI%AHw67B X?(gtȣH55P#jD} qZN>&$DU™ږMCT={;+{D]ld-)Ԅm ,8#u\@T+VH9#xfegI:ߴn."nͱ-PE)˲85r)m(2:X(<Ae9M4%znt0RHC,>L|a \!^2xtL`U݊&gz?fL!#K/!c?s#ESAM|C7s Q T= @RZOƄPmWȴXnȠe& lwV# rؗXFDj9U5vT<2v,ȹ/&zj[\!aVs3LC^xQQ}B$16^W)gܲ T  &|' J@M$9W.8¸Evzͺbe1 }1U$iۮhpV@^`TM (AdIti,h=6q,`d%;5!k'g _F>ׂ1G5 0׽؃pӘ-ZCk&(Yur&$$ie~Tlhd#y \>z])}Q0ׯ">$}]Sm8'B#K󣑂rcT9¤54.Zgv,L_QT+s]Jlo*Y@a`q=H8||/%ƣn2̅z`KX\^ y%rRB.E)o1+ ;xIZ: T7Z2&wW4)VdidkZLj^4:_ 玷!^ s`YXfP?: "9+ʯv}N|b!<%jRp 7v2ɴ-m7]=}HgDy'G)1)]5=9epʫWt.6ދ:{5ɀ4t_8"k˳݃(>7[/}eރ^92tmc}a^hl5*3 ef,I \IOjUYȣ"w"DKb27h ҷY!9rӃh) qatRRzqm5zow31O6^;ضc)kbPpƒ)_a1wwH++鎥P5l~r xH*VkEsns5dKYīZHxU*LI( ߝ3H/W5=+Ϯ0SQS?abQibsѴ;FWIV2Gdm[CwED{B볙OF*68m&"03Vo4uZ y ޑKd V" ne~ol]H> 5H7$-@rVGJKغh"p~c 'T=j /RHyuϑ# FhY`@6Il +%a ) v/:Z)ieȖ1ckzYKK1H`m vU2to,A iv/ЮSp?F-~,].0)ioq6bL/(%<`U2꣰X%_hGȎ2YB #aS*7][D_% "ea#UTۆׄ,?u*eٔOfl#9nP\3Fu+nؕd). DoI%}ًf|4 cGUwQlu5e?PUg/6A)؛@cJd ;ZX$P]Zdg7Zy^45#OTm˧8. dr9L{;_eS^4ߚFU#g"Lcy;6ؗo& U?qP- ;,:3{ ƞ{ x/2;Z8\^~u&[=X鸠F^@LyyV8r5+ocئ¦qF9LI}u*Wxqų} Uؼ!$^I뙑vB!dzVƬB$o@NI *lX^BI4 @m .:KB\9QxU-h)çz#baU .pif)v4JS/ʲߞth$T,1POKt凨 ]WCqMH}s/!BngIK.M B_}~_?uc˓!YhG@,[$*NM?ouAn#}ż$҉ @`+tH-*ql WH2@CM|c h0M4{"qnܸ3H9rWÇo㸛Yd,Aoc]\Wr9ն֠Bj+6 vWNp6^θfp߿.3Exλ|.{ُFrЏEu֬@'c#:ȋ"SApIfGx6l> Ԁ /YG;ZO3%tu_еL켷ؽD >,c7=F bTo{rEJ@/ָ:w=1Mȃ)YYfK#CU#n@Ezy$P&kH>E}%g?.fN3$ϛ,Ln[N*vU'dI =[ 2礰>F|x>*K kZƔud{@ FhX8'nJG-m~)0"J<`[4/4IxEZ%Rd qsL5剛+Y."AocgGbҝ0.: #K4H獴iDW_ qB,oHv D{^|5V65*%1t#[qžKJ+*Ǽ :~]Uc4I2Gt R_f8F\%/1qcZn8L$L#uak3,D*º`1BXA>lCZ^[-6[0meȣ"(9~1C0TLk[rrL}T1\/$T=/Zbmtc?bEҜwi=fmlbW#e5Wt@814ū,{pVxZp ޔ'n$" 5<7H~"j,n*̅fuauCoV<#r ~# K6BC10{ܻD1q.߉f$sz}U_7kK~ ND(y]%aVykpn(#A.#uaȠB!KdU{|;v*K;;0JIΩhD8?`Vn{It?@RYGYvCN7wGh QC/ЛyBR_݊Kbb9.9hM@ +P1(E'Ptv7_NY0ft$>l>Lh82q*2]6T[xd ܉\bo^^;m' xyKGVH{/Oz"DJV%2mfZr9=Dȫ*ذ)E1_|Sʼw!ezfvAclxoupDB(J֣o.*sbQ9?PUzFՊŠjwp״3v~+ gW鷊L'fvCLhWW>/hx:B *iI]SN]Kp*zs^Ϳ`P$ 5p'mg _9$qgfwJ#؆ ,擗$dJE*D@5ԗ cBn˷ EBR[<B֓@l}R1ks'#Kbߵ|_6Mz2i'AZ"9 n"_tmw+cON^v׉]c-V2 !kP~eP6܁gKY[ PNWil#w^o'&㢱 {ҭ\("[}6 (+kzghzX{*@k#N+01 iV[PD 2GxuNڐ}(,2PD 
Mxġj[T"ٓ_/kb\-[zKobP!gS rx+WA<zy3<^ߘT􏘙yԋn[uE*hq;`G3~(A`8CJՏfÏt/ک̄ҊST&ϡoܢAf11W+XV+k1OEXzu 4pj!N+dIkX.6`R ;B.3rZ+mYհacQ`׹ O/cP:M^[_,gOA6n z[Pf*1@殙B_"05?盏B\xq0 ' CnڽyJD$^*11,nkYzPH-Zea w8!m, C}T=1V4:Mq2E1=>`ٷ( x@u"(7v8y} shW L૙u#Kh\]AqxahyJj% Kt DVSr_A!a9>(|,7SdP>v)ifʞwS I_yIDq؛TSkןޟ珌Gujs~==RrN;JfퟮlPڡ<ɮ=ri`U+wqˇ%ޕM8u9.Ep9tBАذ~>}ldSաeL=.~H@~B H}U)W~ 8/p)ڱ]$`]gRY􉭻ј6Z5oVuNr F$:ݼͯ!:OĿўߠZy,2"͸N:Z=T%h8HZH:U,]j`ʈsAkG@.we- ~23w8ePkeP, Xvs!8ؚ'/L2˽R|YC6n u1ABx^v [DtTt*,YjënbgBz5&# ԷNjNlO_IKˤጌcc'b8°Ml?X?ӆTPX;=͕Kwʜ1:qᨏOה<8q^ZjNAn`'sURBl?I:%X#VD׃ ǀeނZZo kso0#P5ʐm~!+~jm9υHǐJ|h7jh ~&_`HՋˈrg('lѨl aJ$Kh+PFf H 0ЉKR~uͥ 9L|FI E-R݊5vGNQXIQI/*PBynzB}۪q':meJwn_R`lE^8T ShPع~P4ɻ~{A M:˯4?pzQPd^: !d 90erWFݘS^gX(Bj~ZF5;hV͊S<*S&xpEW0{.7cx+^-p>î_fvg~kv7,KhMx|AvIc0'_{/uݾuwrG{gw1,af7b>Q.=_{p.<1)o5v"d([ȤJGA: E_k:#. 64Q;2$9ϡEI ?QZvuh[^`=8=!Sօ)*ff1>٨<79lI95Nj e-Q}DNwþFf6`3Ws d1`<\[}aKL.٨ ,)\㲂Ji*$Oi+AQW淥C̘ ^ IB_ީ'桽P[wm ~-jsœ -;1Tyh|ٰۚGS\xNQ5oxZpY"hYivSU-щ_ld~)"+/mYЎ I9O6hGP+Agﻴ9m,c?&|&_01_ӦG[U3'yR}­SlؔY_}?gXw?r/eId2]oʀ>Y}O~6q0NJy=$ Fa#yh!\W_9K^z*a\'x 4F@~dW` *|lR0u* 춼XLdYSV4KKRtPƦ$_8%T{=tDʒ ?v.M~՞st5E 8[D4a ʅ X J*iqy4vs#nt#__?R:u؛LV^/͊&:.Q_եXJڀ.8&XQ_b3tWo0 N<4+_m6nvUHߪ-<|&'R 7i!k?(-Q'{rwEMd8ށұ 6$x4 aS2]~wE3;B_ Cת$;ȅ >2 ]4z',M1G%*XJߢbou5cDhZ9øzycbBueF5 a!L]C  C^}9-B0)s9]70/+<6=$9%ApE9]{1X?55s \ΘYxM'MvTW;s䯮|,]*x@9MO.+ˏO6Hx_IڷVVwE0:Ak^.C}ePԸ~ڠ!Ȓ`öV!AQRʵg/cA2L΀Qɮ1z|=! 
V@U>CS+t:B,=knClDd+RۤBjxASzK-0vCet 5l౔VT8}"_tD!uZ0,Y!e WQ)Y,|}9a'񹝰MбYۃwU',va>g^F f@T/^|wGAJq6{=I/8B%խc*bN 3HnɊ=F{ Uqš{R_{\~TtEwH asE$8$l/8Dn{%yH 4 7WYA!ɟQPWtQQD ;jئ忣vr]A OIv0&q@ʕ&=`}"gPm"j{+5_og+PyƜocgp)NM8l=tI(W")s!FZ3#B{6fVLpzlϟ8ؑW3eryzgu)[ D3ۜQBTr^KMDP7}J\>Y6Iof5Z#p (/LSGfyl-JUQګ{"K:@x\3͏Vflt+* KY!_SEl2SkpPBM4LQ2*yந WPrIv׃=+شr57cTeR N:@tKv @zC88GqfK~}M^jAqy 7O;J JQQ= IR6ZK̰qZqIoa+4~lVԯu|zx-G`&-FO%mnNR./Dǚ42i gO偉iNU|3MXB4Y) bmȟMٍ!28^ :u[!!t[Pse-HHad 5`)a6ٺ axi5i#& !nG LƙIsѿ?sd4>=2D,b.N#D|.Vn35jʷUdxdrG1ʇW{_F ͥWFBS:l&%D;PK[VAVw* H\MU s WÔ܍Nýʡ׵StF@_5mD~ryfiWheVXj@ޓ:!aCpTN9tU2puh!-.idKg%K*Xw!ȇ?H۬qg tIuM_8nJYFrzFwgostN檨i&2YnqIT&}ISaS!}d^|V{8=C]0[^ETVb) G̘R~ecMrS&,''7e>MW<>BХ8 UVk6$ij,te'1if:_K(D8OIh!; šD Ui8fg8FfAMOf Rllw[Wޯa;i@fvZuon5 UxL[9m!1&j| oxƌR#(^uқV0Y.Z ъ‚BGz4/KB"Zߴ&\W2QfXBny.1}ODGL. E \IrQc-<f XLFi`E5<8Ӎ;wg8N@tK~d :zp2K4l KH)Ql0,ڄl3V8msHܑ>,luN}!tl#3ϱʫtE^ObUCQYsA+vƼ7c˱{Ъ gիbn.갱0|OԿ=Ջ3lT| :Zijt3Pܸ3AQ~vrO͗eA#{.{#\+U.v䦈 I^A3qxdu h,joeg}pp!zE бYLNwvE M_o⨚3NZ xKr4 ?McJ9eϝ6oPQlFwu$v[Ї f4>\ʾ4 5ĕF*yFvVR=/Ssd\D]٩TjQd$L@x`K*/em{Vb1TXh9 ʾfSVR#1Y!o@VH e6V?koXZsk:iƙhZJ̲+X{/nMD2&[;pꐸCY~o^T* &Z7$f-QNA3Ykr1tk BUoEQNdh[ (2-rڣUѱw]o6,aTYhdnۖ)'$:T8a3v L)'\: ҷIs=%d)&9^MӰGKֽV[JuF/km;U@G?Tc9y.@` 3 YWrm<%$~rj: }PX+fu.qpoWt\T޴ WS^]vQ$AHYk.u~ B FuuwVEgQFJw^sLAHs&X;rH@>?'~|kw]D[mב97PsG= m8vŠmJBGپS̢I`Aq4eV] ~$V]t%&,5 yϟ83y*m:oX+zW͕& uJ>:%;GM1Kא]NPEbXb0_]cwUմZqAD1kxӵ+5@{)pĿYsnkzvCfB"wx)tQT h^Vpӆe7/OpzӪw\? X]9#m}9~'~?t.x&*cYFLV_cii:h(j;_-~ƿ {OΝBtNiE]G|MiAQ sL6l5YUq*e|sۗLF3:HI?k $,6^ Dsh[ǝyn<K WWΆf{1t2@XT1X"Je?v@cmЉ^q#A^VCUN܃+2<jn^A/ х` 5Ȟ9~;rt{sd}QbEB{SXdD6MoB'&u@g'`?hhDa3)q+jM^Cn4VLg:1CrASiC0zʷ>՝r&7z܁O0saD+VmsJ,W@pH v{|VnzB=G멌jXeCXP'u'zGLx* g(v ) VK >,ߋ zP^k/1ibf&qmRA: c,u]ӂr3M׆<*o ,y#R`@YH.j"?-9t_u`. 
{Ekr`0UO0YoVX r@k#穎β hejIKCսjxKN0{5A"|n’h!GqL$-Ҵ z>@V˥KA)..(j'e+W an1W%ͦ5oDJLRTZ8%$oqWfF_[#!Ž'8;wQ6T8G|#B"rn[2Hw$n,=,C}3x0^r*eLkr^v{ f?aYfF=Y>9/ȁ4CSu2>4 6HOhH:ȡ z7?aX?ɆZlM0xl;w6`}`g<4" fNCv|_TL7HX Ίr{ Xazj)@eB݂ZD/U$B=()e7"2~V}T=4N+/!p UAJbӠhsdDIR-0(*e L8 :LWa1 bfG}D0!kJ8;g=ee3LqCvj%^RVF@wen6isjh(6Pg1Y%Fam#j XD^РWs2yis=">^K®p)ZB}7S=9M U[57zuX![7\yegʄ=b޳ mT2mΓ}k5 ƃ@v-ܜڗڑ1V-8w8H]ƻ1QCS Jk{ 6'!ddp.R!4GָzAÛutΘNȻƔC舲:=j8sZBݕ1%ZR̐̕A~x5 Z0*Ha*k.+ae!zcR|Oz=@`@A3ZR)jbs5=S 'XXv%:)$,8 6ϗ&3tD44RiC7n%T^ϩ{"-lAûj2@##tXP~[*dul2 #Lܨ/Mu}.蜚٭7W$SOzJ_mOQ Kp0Z"T}CRΨUc޺a-= ؘb-(q>"R&fU4~uOݏ/ u]sqxQ%BG삛h\SnpZf_ꟲ* K=K40H5mY ,{)aIx~ܓPlS9a˞',qs~UP*Oz{H Ob^'IWG{͕~ ͧ53Xºc?9bA`ТFvW|ɮd0̺Yf2/C,-ssb8\ O濍ve >Ms079y;R 6^/mbcۢlbmFEUPZ d5aq|,r*פ_d3En[Rxb?+Wgy=󨹤Z_}̪=^J'dF0dzHnǭӧx6}"9`>؜Ix'/Vw$O _~m"wE_LJ7mzB͵ĬfAH**--k9~'w]z,9mW\~ںN.2 S1,sx1I4 9CBF;xpb[` F3cHIT p8Ӿl0FRfa}S/‹TKuKpƏS>T\˝bo*Yx{&޵w[}BdȈ`RAwNgVQW^1u m w427*bS(e=iU>Q*^ ]fķ.Arʂw t8BSJ,T$ؽ|hҍFőqV~6݂{}{#5.6B<&6*U9,yv+zᔚ%dEjנSVCh!A|^MsS]8G+m!5շ?"pb^@|G:\3Ijq NN߮mfM;B0' sgRJL,&G|Xkz{^M֮nQqLgFNmy[5N:D%TRWr!_UhŎ7Bi R>F3Y&1}:m6;u5A$^oZF.yU-:z(N4)mt}Thc Oq(-UWi(( 2> HjlgB~_-@4-b%ċPڵR~BN©WVHq;?-N\@J;~dpl}ӉbiB\j`+/s"L}S*_V#mkD$誦@6ƣ{~L _ok5 ~#;3Cmpt=d5<GcucIQZB[\|qd/.8cکbbuOqUii]KongAazX(^{oz:.7Q.n A̢{)Z߀@?1~CE@) K wВNpQ$^?4Tqw/6KWaBoI0)euD'3Th3i|!^~2xv jx蟋ˈ;[(IO0m~886mk38Y1^nfز3=ߕt'ƛa;[X#]9p0hH}OVL61Okm<\q+H:?u4 nm+:0[VDy)IzJD*Cvcbumq=]{>cx5TBNiukyGyv*gkKfzzsKo䣆WQ%:he2N;$nJ3FH9H u/RiuC)RRtM'@Գ綊g2th/ٓd!h[Eo|}|Dؒ[lc`rP&hf0k4kG9iKm 0>,GJ^,X_9D ;a#ʌ-o>?W~y~Cv k@3?Bٜ  *ئӖ+eb 3ղ|L&ѸeN#u } П٫ADf_VoH=؁]APe9r';[ӷ|_?X/3h'l(ĺLOt r5~'-Wn`AHgC`_=EvM|dGIl6jr2C,dҕJ%K*1vяCAS%ˎ+5o3̆"VMő:H;ƨ[?u-(BP߼0\l24M8;\Y+wZuB6JoP&U*JSjaA48/;Pc3E9>yjgJ#.2mAsͅ^U":zعTOǡ؛KZsbFu_po[  )U}< T᷇W x2_zdd<|1.m\VƘuS KbaImzp.M Q XbTMcMolz/[nmАCTwE+_3(i*BkoΎ5G^Cgsv ^)3 }ƅ~mrȄ4͆4܂~eݠmsLQ6a:_oЎXP33*%TX ѳ?sbB^&^ aj&]| GrHq m·8q67/h#C5۳*D%I:XfsRJ,&ٴ{ Dؕ+%x|21bq됰 {Uߓ8-UI .ԅM<٢W֗}AɫMv΋ J}Zm}A^VL4s\sr9CӠu+ 6x4KNV+룏AevMSȌ'.d9}b,PorCś*<҉X3*l6eW9| (tV %: AA:'v;`xw=o'HIʜ\?W>M"EZRʲX2| 
i#^8JUq*=2=$2TKl{~Wàb!Ghq!Y( C v!TM \nOOɕGaULcwcplĨnaEOuQ+ t偩%T$0Ս̒}doF6clW5CebVz8$`YmvK{ۊ;e OWé|jwzwG<60oUp6J8.PXlUU@!,rl,ŹY}rm(Z(fgz^OBԟβV@Z4fpRTMhޒ/T3ph Gc!uw¶ͬs]g) 4}sݜ@dw|y#5ǼpwhR^w|QBƺz&5\n CmޝuXAvG˛lY"J> .XXwH?w٘`eG'2m=z Su4Фޘ\_4Ug b)9VYbC_{40.-c E-Z<ڠWflXFeK^7`zTL)O>R{N#R d~ݤ :gQ>IcȊtr He@a='V~@i(o4w<,J @dddhxW/c^9@u{ LV.40}8 u c0/ d^B'}*(WhF $:Cd2ɎWZwK$C.v;5I- ne YXGC{:sx-]?vu|Eznsq<(;^`|ȧuQb:&P"^ݰx08,[y'A ̮ݿ-͆)eB/ e֧dz@X<q@q>7iUZB,75z_u9ߤW : QBS ԏ4-O:R_-u(%=I; \}EFa:DehP@dWَ 63|110۬arV_;=uH~ >s2+#y,z54h8}YP]0$Da%!tKGtTL3̔p$)x2|IV]Z7'[ m:&\[[JnEr"ti=X^.\yU% cP9P\wL'rVo^6P9"x0GSc8I}`UY912c y}`S)Gp77"!3Ǹ5*on%ȝZV_=gyP_3|Ճ-QbIpO@*(MdI)&^tǠ*&?(}VN]ɽ?#<aDF J$<0=US<44ML9"%Ir 3GDZ)E|o v6HT/0N3Ks^cm}ԧS2y@̉ºyۼ_LOJ!?/!U3*c攧n72&{΄:ʷD(D$L'>~qU,J}HL {#]G5?8Y8{K|P]F< )sʋSB xYphGzs.Jj_56;xB)/5$P1-># X雚4~fdHpiM D֪$ԘHY}sNV7Xd܁59#Bn44ƣ{pXO *_n8<Dud[CxsCM{d]'o\,ZCWE0uu_u2\A > 5g$eob, N )P) S_p`E/O_CVNmӛ`:e] o(%I֧$X3X+PgMnSlF9MMP}peIT6q_ vЯWa͇TI.Ӳ9U1A%M~WXрQq]K ǜ&A쌤_#@!N~NQ'rjG|! с'dY{"DzZ] E"rxI(uKDUsShȷL0Jבtԅ-6\-S+qZUB[50  @L?pܧvnZNCWMXobw-N6~U( dUK/i-^4]ڸ]Z-0vٿ e$j A LA7i0Q<9ZPa1Bm.0]U1F8mٱ>"'*j-^e;#ksD$ -⣰NGG|&w]C#3U9D`>q4lnjLNLhאe-5IOIFAlj~cb[^WVm6Ju#@t $a#2/uFp1bM mbM0N|:8TsAZ> ^Ѧ%V%Sdn =O53Ss@j! A 刖̔/&D n%.LݩPy)lFev|@mi5h q 72/rZ"Z'a5Zo*BO.Tfz8~25઻P+Z=K!dvwޑ!Շ胙j$μs=yo;+3*9@#C3Hi&bR?D&z^ZX"}e2p]9l? +5W%-vc,;̊Z1an='v0Twzj QSEb6*2h[Oy^ `!G~= ;AQ^ }>p)vͥPkŻ>Ul 8m0 VAMU6a. 
.DX> K1}@eѽFSΗE˚ſ!47.jF6xLLE r؄@&ii[ǀɎ3%Ggi)!_!8\N\#`.T9.-u4 E/慌< 1LvFؿՏƊۢ?14V܉"cmɮ9 8ss= {S~RbߔRsYǠä \=uXm<9^IHd t߾w0oLTـKre}[)uy0/8amWmI7L@[e8x2%+(5~ۄt:qYn.Fp<E֒%-hw M'?g{QMct47xSu ʄU¿@:4eO㚓yh,[]n(hU2#:Gwr0`#1zTHsHh chRr]OQ߶A=%;Nʘ «!Cv͡Bknm!*!rmJV֮!w<_9e0E% L +st3gxwlq9|/{#Gf6"r ]rzA> lƵy.USli=Ux>!)o2娻\M +e?U @G{sF[#*./"ދ(呕DHhLn AS ą[(F"4n݇qn)8} &$vPG6҉\iظ(MuJF:I/3 ɵ٤XO+y1ԿU,S@t44k@l{ gcN)D``N:AcBy [njI=eeּߦa`y)jn~8h5D)dNWҵ܊iV<`( X}_oKDC#޸}t0?"ٞBA~+ zpҢT/5癣Q80F\Qbj|` +;u>O5%O3 &8-tکCgK+`g+$@I;~g ej`$QM@z.rڊb)U$xC?oP\`M|Zh&\sF$>ŲAg4}h*XDY25,jx2F;44Zԍt3g^p ^P ٥0l_{ k3 44[EFGňlZgq{f.uhE5,P83I ?[?CZYO2"80ZE1?iSeЭÆ&k?<Ǿ91i?w}-"$`DLUex;O\ܖ-7^E";`AE^j'`LI ֆ:O:>vc%4|yq!H H(cUB_Vw=\z5eFΠRFKq-Z3HV#Ᶎ^QJ'?tyGdyn*ě.bсdĩ H^%aINw)[;ȃ &-yf#W*] kތN TMS<[ugH"y0 fY4!5ChجJPʴ +Er}0^4t]ݕ:7IyJD{+i-9xưJffDoSn.7n! P=mg!nt(!^zVh`'B~d]2:c B(>QI$ > + ""࿈B_vs; $7wQ*& YOW^HtHc?&P}lmh#?ZK*5-+y0Ew >.Fsɕ'yX%j)yOnH{$Wg2"`ÖD=h.`xv*I(`50 #O CZfc@ELZ:Y.]dBVO;zc;1=8qnti8ubxr\85 cի1CamH` TA݁{DSh*bU"*BL}a>OW".{W ^@=DMƭ|Puj4.H0"ՠ0K>%< a-t}Z: íE>$PyՎɅ@2Co ܎ӻPdtj H,f1lD ;FQȀ :$rD Ԝj C|\;2H⾜JX^"8[shN3U18l8t&I%ZT>z@ n^9.䯰*;:gUE?n%U~xXD\FQFbe#K36X<[tr[Ȭ}DUS,a>T`߄CA:{\ߩY% +zlsuSOaf1Ɩ*yyRm( +@\GkJ6P!M 2ttZ9S`rˆϘ\OiQ2ZqI,L׸2?2b/dJk}BJ0@,Ñ)F2ٮ|(B*k4Mu &lʧĐr+Ll<N.\ca)u` &&Q#X HAźmM|pp >kSիZkjPY5bYO{YW pD.aB+8pi(#֩J.r2fe_RxU>isnP&Ĥ5-np}<1Ce54>Hi5l #"o|0]Q~*/Ⓡj"imGp2] K,:9L5{[qrtuU-J[H r9  DNۦJtdUY>9oCD՘A nb~B@E2#i OBX 8)+W2[u9wӼ٧ujVAZBc2 \.FiLqjc}~mNRE{[) s9i-эsZ@DK5cfU Eo߉Dh,i:U86YG__gr>P ɜ^:>-.)vb RkYL3ѳ2 >ؕ@XB 0{8>3wDߊJ3{1".'IOӓskڭdys(Qz߀:su e/6CŮ2UeuD CЊlP޵z:DKAV٭Gn|woP 1Bu&Q!Mtv7by/Bi.XAb9JTCi(Gu|Dm3>CF?1iu]bO=UMd7`O2~AՋ9=' [rUKNМ[h˭$< ۖMt4px=;veYow65D/.䐥*͌#*QY,r/.U~ar1^'YS+2Lq72Гw8x',7gAkfBڱ<u04 L\ۿD -X1-..CCC:dgWk}I08z#! 
>yO8cLtp!i)txa}6;Z5'[sǕ.6Ͻ8"8ߨcxǴ^.as&7=\}CJk~qlq?~-mіL^pN{sٛF [D@ѐDmE#(l^*m!lA@: Pr(zs{FUuk Fe i{' _~G[d; {2Kcŏs,UCnnP|ˮS'6(Tq}oVґݰvi8i M X{J00qiL,eV+(1U,G7&2C3&JXW[ TU_rW~e ض,xM5[@KH1{ҿ{@ۢ-)vˎ{kSj""""HvMG?];#|7tq2uְ[2D d+GzH͇1GYIS.cXrj_v‰tۜGS!9^yE}4C(Y859)i~&Α9j2 /<`{}Lt!90u?-qI :up.ϊ 訟ޡz@s({vE߫gOd7"n[OMXzaI f Gɢ`r#X LxrU,L8AͩK^ H@mmEn=y@X/*zGk+ʹ;,֘Qo;|3I19B %&suI`)K]=T`# `r-*B'PsJܡELY=@J3ү|K"\ CС8!U݉dtB 8YL!"c5y@iS5wo /DG=׸V-3JKr7UA@ 1xmCWJ݀+eV?8Qwn;L5SϻUn,A01rXK)CJ UVE.pژghƍ+aT9YjCMis(6$k[Z!OzPoLJΎFfq3M!l ʂ80e8eELݫoI,0"tM;' }Z 9Pg!Y2&Bs^[&qe PDp()_пZ堄ᙢ1aB g, ?Q (Ն3*J r˓#]CS }A/=lF)`.ks>eIDH+#rf܌ι7u"^~{bhEA#0w?o#0I2pemVYصW0kXU6))ިEm\[};]lcKߴe GNN)cϔVwPNւI =brѰ{5D~ g$/'wNnCʓHnc'b vUM>ݴ}f;;*VUToϱl 5E>hbPl4=6¸'އ8QH'Vg݊ ]Tsؒ &p@ cV"OvLf0^.o sa[pn:3dg&Ǔ@#|ʓ l˃uw[uYe$3z8IgѨoQաa4[vOr17ɲUiVU(i`^ yo TСm݌a,cnEFz2<ٟL%+L&;]Icw ?ƸCa(  V}߼ ̹3s""["K~sK" #)]x7.: 0͂b;N椿\uӂK;l_ '<}GrVyzgH.A$CdwWFz" d;YJn3!5ZH?~Ĩ[:!K%$pqxM7¼c6oVWFJ>$Q[2H 6=fH6_܏l Ky|ܚ/tׇ;IW ۍ]1]3FW;W"z U غE%? {;+"Ӊ„M eȂlLp3pJQ`\MvH,җl4dhz/xˀKEN^EGI \Z$SYjuݓ`qw-qҜ֧h*c8wt'CE=wsF!0IqYĹya [q%ty܋2VT$=}5J>9z [qqWQD )Dw~rPyjgeЀgCLdGL>kD6Ud]ڷ--̀ .t> `k&6dd*fϡjJJ1,'t׈Y[]jg.4+@W5O[xY9ˤst/hۺu~h/VsS8yC@ym C_n!7R"WG''y=WL7&SlR XϮ[쓍 KDx=9˚< Oe&%dEoƭLfVVc9+_A|RXgGw4(qrgW P~os59PW\X[Kbj!:aKtf]?Ta"TJ53q/@rVA$Y3Vuzwgfŭ}^pZx=? %żngGR]!,OnM! 5)5/HRlcS oZ7@s\W}Y>aʷx dу%K~5VQ9n7["3|GΣIdg`|j308`n풆ȅǐBf7Dӹ1ˠfq;ؿiK)~<O^ASmi$pB! 
T# qF妗O{GU?D´E/{笚vh&T3&oUga~꒔Z:22`~ʾaG"pky|8v0f.xl{c}znAq7dcŭm_U|ZR.yi=% żdhaBFO{fZ,BTrk^jXZV译Ne[ޞ9Z'Г~i+"AKHY,5X~ptv#o453-|3B nfEsp{7IɝH?_y7҃@:iC ЦQ+k{fndS F}_5Pз'B)3v3+H#ńd!٨)SԴ-Ȧ`c˦&d(JC{tQ=xPH {jNk5s˓,퍛u9l[~$YPKH$uX@(jV V%Tr0k5X&}p -Vs$ 8Z?̓wZWLF|3W5yXkCzsbiAPg䐶PQRª5@P Z^_-x[[^FD7*gSA9]Eo+q۠ϳ)e(l=o_=4;V]R4݌+'ց9=)=OE-?*2`a;Nz4X|șd>8P rwHO*[[ܰ95f#8ݦB)dxהT9#Z^ʚ)8꼼oK@ncJٯj-MiwhX^(Z&ښדEl^;gѐ:Bq-+1(G3TZߣ鸯\W.sy8ۖi0Tș4S4vA3PvS-^tWh\ǹ@HcMP{|hR9TPtY> $MƖRQ?m]ϩ wڐ^?f:AaAg /m~m PD8Q3:ѦpZ-Rqb5:R|Rv"]q4~dÆgr$fw,6˭q~)~}md[3+h 93Džhaۊ j~_sqbS@>y>.v0vx"ohO"F_ P| s:iV)4`GEȵѢ {o'9@gܔ nag0Ə xEGAK?җ:hvn%ʒ&&]W'nr ]yoŘ]G쨴LpR}V4BbM|̼3!/OKk+uɜzU{IZFW&F/}L k#q+Ύc2'ePb./[^2>OQ 4yiw¾ҫ(aK7m>F(LM&3I@GP>ssB39IjQ)qRzɼG1ld0ߧy7uv=&<#1!XAXdsS68@#e{_tCYlxF*O $j74/icD&kJBᏙB㮒{6洵k 쿟te`ǥ,:@mE~9/.{N6e-Y\U~m?сmb&T'[oCįmCb'N߮ пҲ-z'%E(q'&دvY ;-e`i]Ur»jqT1ynC7PZ &qAט<_Z[zD[d9^ֹ&Rd5ٸ4+ (bQP/M`mA>\ S`F'"AWF'y"j3JRt?ɰ(}FkM0ğ$Su5sQ@e%2i ,Uj4}$N #`@yM##=3g n XC#}LƗkل>-p">˜pi!'EI'wÖ.DFEC|1=~ =uRdI~/@bh藲jV%f{kư7QVIRĘ*d>҅ʛߘ@{W['2zĀù׼ 1*Y7Zp5Eh)3hZ"}W%;质",gYϞ1Hi>Y*?d1_FyJO$-U'bF0S.~S89FBABW(,%<+ +͘7)sI7vceAx1JLb+e4 O',dn3'LBSsD}z)8TD(8sP.!u E$+]BqU/ߡ{;mO$(kM;A04+-'cNÝ*mnxp,f7f Z{Tjwhk=Rk=/` g2Q Cճ|K[v n6j F#,Ű9>*ߕw ˰EҶR-"kYΕvD3-՞w]e Ego)X$,Lt}X޶`XDIGϵ{a"EBqJHU_2K` n #^qD#w+_zi߿?MmHޓ©&&bKm:z-{+# ȍJt?Ӗ;~M~[eby.Z`߼wVboQ!MSRdVYq{ZoVLQD@9x*C/kYl,Z$mҮk3uIFUw7ꋯ=ubE$Mxf|fX6&S.)U2KO,W&\zo w=ME i:d>~fNWIZ7!Qs_)#:jHk@ a4#t);m@ 'u>diI)_ۭ%e}ޜw eZ*z>!٘x =,Y0W gmeQHX{JP hkJRSI3wz*dEYYFڷ .cnm}׫g cs1n7?(>BA)vceph"G Y3mVAj7)Q)Wx!pT /X]hUqkR ĦV~%w?%^bI*ݍrYhd K雰_M| Mƚ~!lO 25lRacSԽ3(MO(v^&˷7Ag`UZrپ[ kz49vjHn䙈*:u |peoRQ B%(K\'Kßb&N"zX]%.4?W ąZi*G:"#Yۊbؔ4Z3 XWzza+KIUqdBrhu:'`-І/zLXa7Io9*3ľBZI|9~@"f ".GՔآqijXyDΐ/X58WTb5:WNAsBf}ؠ">6dO2XH7XERѐ" kM<0fV-7:ډ JH`ZWp g ڒ S1QֵtAe{T)& Rd7hIqaET.1f ~ի,'R$3tQonYpUSŁ.~ 8_ɮgXP[p |t$)_M,s>x"%];] tj,g dGkg!-o! 
ON=Pۣ{ë.)v:4{QNyY :x;1EZNFA<̅ #E1V Ѐ芁ü!ʞ~'B 3!DP>-@ ,|I<ׄwL+ZH*)Z 2dqRb6K|u;j4dkm/i/y`(m4|#s^[]$!-pXA&n䎇Ί<{eA2剷BEi^k{ o:FH7Zo*ly^Z#g0чHŢ#ȗg /ЊЇ]s,Ld%2u2Mƙ\ma9BBWijkoc* D1<6~G*zJ~ܨ15̞2NHi"i__p,& z3$;p,rF}~sf"5!+_&e c5=se@'j A]SZ\+L~bm9BSIR(( ݢ6\3ZȉCqS@6?-;#ҜJGw NcPgs427L!^Eo7 )Y)AGcu|уo޲ΰ\Cd_= kϤ¶Q=@O-h}UMkdLН1LY. ,]?Gxm)Hwu?hVݓq[y' H.#n0gjRN%8aW`*C;a-vR ]wTf١ʕ;|K]kki%f&:jØjn+IyFA 2*uEޮ78K}s_ݓ!WHZqz;,Y4 2C Zq \ :MEH{b\p'5FpTWZev`@>Rs89*JHY3IAoy9R}T:e.R{d!g$D`)\d#NT+Op,T@{ u "Pip/]I Pa^YM`2 RW[&B=50wlAI]~0^A緳(E袽vw{В\nz`RΧ,ۛ teZbؘ`hz`!{M#rN[E$EӡY}ӣ l?ؕ{P,tܣMX)G+unක-Y]x$$g\rQfucTͨ\C3,NcO0)ДPadz43 _ea.vt{u;BT?~U"$w+gS$1|uMQO+nqkhLSԩuBӔ]M%4U639:KYx{ƳaHLPqİVczG[1aILg6sIH;aMnnWR4J9 QY?$J谞fMP"?ikks,܄KEk-. t W;\!e䶯7 =XR<6heY(qPg6i)[3â~#anNUI#|ط]}Z-D9](<\+UWfՊtrӈ8X< 3d28u 2KNCф&Gj%Ti^)C3FEXqƜ)ndD>`kS ?h7-DudzBӻ:I8\Z^ ZiX*LǢpL>>"xȞrS.̼E#ݜQ+O:@aP9,:͕Ow%J8p)ܙnҡͥܮRޥOY6q_ M p@i<8m!$2/s ~ a9ki+jDzgW?kNp#0lylPH7h>GTUEz!$qV]=2a;er0=`/BdʅdR6?AP9$ v yUƻ7rI@1A/[!D]BHPY#0ނItK@& MLzn<Q̺e$PV>hT*E;aG"|QA8kvx qxyN_Qj '}қZ$3epQ:KD dU4'hT+ޥRT\"x>7U߈Y k EIAϡJ\ЌC2Ѭ8^5.,o,o({}]msGd H -Z\b`/dLe`2+ I71Z珷m ~ 1OԀ|2)p&-fq$]W8= qF j=.!* 5E C7BQ &nӳ)ߧ k @5rDjN0Hjq=OmQ8[%g,1BMuճj+mڬFhy',b&CaжN>j:4kFټVg]!|[yvȍ+G{Ѐje:"?0Y.ۑ'2O~o&4&R-9rkyi'~pyyvj 6@ȱqv.~0, suy8g>&#R% ~[o&5q3Hx .0>ww4[ \5֍Z qcueOw\b= $2N) ;Xؿ2TvX ?~4ܧR[y@^+V(4ᓦ uF*S䶠ϼNmiTg"_g@/vU(=S)@Dٰ=ezt?W,Lr\ laf`>bu9)ͼ$q WB7{Йv;~';KڊD6<)&gU:XJn!gAHި#$G!ܳ48),M̦_CQRد('cV$lR0'g}SN_ŝ>jjKڅA;U*H4V.RPb@>x~5̩Ҟ?|֋',LNz={AW6 g=H{ՈQZi"!呵aT lċ 6a0.{ !иS2P϶ZPjvj޳;I,8O\>h~Zth4-3@{5蛹wQjq{K-oJOQ_~V{=2 sfL<Cs$(D.1Gf?STHj9( n^j}m GD9ċQ>AG*![w Pg[ _#? 
oLl(:(+ ^5*NF"2ZMkZH݊8t o w S#q_b`46Gq}[ Ԑr:e\!b4_S;j-ԃJ?MM.x+uDHZ$O#a07Hs$:Һü-9,EWtovG"|ynW;d]~WQ)Odl0]Dj.@"eE#Y̶"ݩLG/@1j, q%ћݖA :GGQb)@F"^39py"Q%]QgAOt#XFD=FΊ Nɤ˵ļbi: 忪TzZbl) !h3,#_@]U"ALX|7ZRԍh <1!ǮMmBZzM% @ \^eU^Y<*- \>AZa 16WwK^ەz'1#Efߤ(tca6 @߃7dS YIaR&*ip^k &Кy;%ҰEU0*PI 8%k]a-@8Rl4xD bG ~ wx3S&#7ÛzP Ti0ULcy&FbdҾ؉Ipݬnhca^ )KaشgEoN_AnjW[dN h}LRٹEs)4hUw.L&z}ZZ`(b{oL3,r(07'$X{a B!Fxq“C]܏pt}oPn,xkXʿi ڤ^ls& `.z ~R:V}Zs(i۔ɟgL`zJI+^@ҡ'^øEo(KEmp 3m<wUp^bz~$>>`K(LE/ꑐ)TP-V)|¢*9ΞU6G }MLz>LVjݘ-p<_RG˕ Hrh7t?qkvI//I ̑!K"yߜ&+JвN1D_лK}^@C k tO dQv;C kݝ6:.YA.:.e/G7L x hmQn}2L̨6Ax/xNDp4#*jAڴmP0$&Ax.$fT#&sHn8L?fkcgМ.wtMᅗHSVnZpķ^B~m[*5RBPK n%p&#"oӠ WukTgtCۏLY+!gqNua\MѕĢ8?2e޵[l'TEF%.\ DG.J1ј9 @uk9;'sη(5Xأ|k[<]gaC-9켫 ygO7ઝmS t4D3-=6qbzϝ2l(M\"|Bq>ʮN4ᛔCA/+F?C_P~,`&ooo>ukԻŗ#D5o^E+<~00۞#v.fFHNNВc(7HN>@\i' +*ƼB`7`n9L_@]J!犩l)ѱ8g#c8kKS)ANsꁰI.=Janxgri uU:Ǧ^{ r!C=od//'HOp-֒pUAi"&J*TX hW a2TԼU0L'7T^VŲ].5u/C6P]9 ,dj&|IL]~uѧGI.VV36ɋFֱ0>hcc^=Nv0"^Fx6gPaeK?VnwfI{.#ԕ*ECɐ _1LG5(B*`@H~oup}Y&!oŧ s1ۤV33+dT j7~1uRh3ٛܦ&~]i'p)kAE(HR ^W5X%#noHh`4c5j2/¤;\k rWt'gH>h=> ,*Ҷ=k0gb)(>Fu7tlWgWd͜rCSoGSU/hK9NJ2y{H - fYgD[Iz>onܮZfO_.:gM/0:@~DCykc<NrRу|- ﰼ cI6Fy9Sm!b ?8 f>wN 3A\xL}`%Q]$>m5hҜs cg|N-NzgF%}_l|bى]!0vFJ<ݰz>iCui 8-nxoPVeɖ1VJ z1Mkgm?c0k9^0e'O_[ZːԪ~.aNX W-GÒ֟j014ydfv7xXEj {bN҅W]F{*"FgeÀdesiQx@-JB[fcF6'-/ r/2\9 n'0gLs=]!ߛC | ŋ8bB, u-`d}a/xز"8` )/݅Zy$0}U|LUoyhMoh_{AH^5̫QyճB2q6W`1\ w3KeRn I0;.]EOߏҤg(vC"Օ_~">ak)kObԗzu0,/*ԎިAJLkSE|xuKW!Izsb݇4anBeҗoZ /9wbm+/W!K% c` %Db~Z|!}9'g"dz uW\< sPE$pqyN8xEaUkx 6xx5z;0.Nq )oE٪8V,07T/JSMkjqo!.]E_;O zmS2JZ +1NTMu*yG*;|{Y'^+/E~ [ |ʦl]tɬY"yzjeggYFWX(VNP0YBYGd| VԺI!{Ƕ 3R9Z=3iˆZg@E5\Qyoq|Sl_#z[<יKX3 &I;I9X ,hQh/khm[Jq6!=D`뷼oBT Cڍ;0菦 80 27QT=etNq+`K2^K!o3u2 %:8]ɞ-0cs:B%>J>9,7S)9CIqhg >Ö]֥8;?:) vPgUI摩.x]9ٕ/}PRHn kԉr`':u$Ik0o3=-%x8/3m1ytChf +&Hoť6˸X 8o h!WY^ }Q=2%V])*8$$x%:uniN%Dk ҈cVQKX,p͕gHH#<͗!qr g7 csVBb̶8 W, !|Kj?#nI5J42{fN܎Z%v#G's*U%t ]m邉lCmJ$xYLG;%)0-\눼B@,ۇK%,ao5xGکO8sbI$B8L!i {vLm@yBAƐW~*ZTA^O2E@Rb'!G}-pj|yjD\T qRۼ!Ir:g5e=X>TCjve[ #{ЃAd]I+'W[U݆ϫA5;A0!5iv0Cs 
񄈳;7iPfY3$m/QnK.B\FtFvUh 1i%!cӓELj1 $P-O7qX'08jgz̷/ r?~HXvH7dtV=ˡ8u5gC3MQx˝ڡX35i+Ÿ~dQ8I()͆RM}By F0G%^hLRփ1D~13ps|{}9 uyn2/> _Uj)Dv*jCMT}5i %=k -{Y ZO% Kp]4ǖ\`6:ʖ:TdRx+S9(q|r"6'F$3_o} k8ʅ{Vms $q# g%n7Mn/Qdif:D ؒh%7`Y')|#44\ZU[Cr3k*NGۉIfX&CTB|#u4Xm%S[EŚe(huq?6YӼɧXuF>MHг$Ţq3 ǛN[`ខ#uFe-C M4s .5gsNN^p ଘgJ݀Wz=X7:ZKb(rȶ+b52Y_2ڠ9SA{z=tsf?vkJROb}A5vg|ML+i?*N,Y)C#{ؗ]00kFɴ'x4Au}0(+~Wq&&sX&ɩ<u;~l>S4CdoAR` sI8Rg$:+yb:8&`2Wr H1m)OW7P~t{&ؔ |S)Ob'|xT>nF LQ%@a"8iSj!"ݔ7sM>0 שGENeB{iN&ļBʮ#>d Lat.#Nѱy 7BaǎfPfaQ (rJD5R7Ӧxm'Pe99l\WZpg-:_:{ 4pYtmʃmDbMМuqړ3O8(AL%ic]:u&9%eXrL"p?!D7"o.Qݺ%@& (IQ`J 2G0ERak¨,j:tg i%kJX+T*= oQt:GLd 4iO\MI(Dڌ+m19R! ft6>on^&ߟw[Χv6- s7[ѡ)CPZ+m clnn QZRhY+QL tfЙv ڨd4@Ѹ} [G*\ f3'DXw. qL}V@R SS S%dK|AI, C:0JF)q<ߐGD#QwS9Ef%Rj0 iEv6]lՂ_Woy߻b3KRYlݨ6p b:B+0JQlзH_EDaT>oR$l+Y4;GD JeI:/w%͘\L!Xg p;͑& ťvX }֍O[W#O+N~:_d@I16{ 9R.fԥS~!}L`Oܟz|U`3Z{Nl~.zڃx5cL툴vQ1湟3d٤Vݩi<Q,^Sm5g@GEFĐ+gEfNu vH@wkƭ3 5j K._ qprvOΰhRfI`Sk"X4/DΰyXmR×$a]D웒J~ۛ8ǣ=RЕ,GkvA>DJBQilBi^k.j/BoPŢTÎsţ ::-56bO"B3,KtV}p($UNkPu0VBrR1Zh'3-Cb(Q/3+Ul%ц'MMltyk CGAZA)190Šghi/ͧvD$r`2 Pb* Y'ضqʳ P'AZdǞno]<ӜYCS5+j#?dFĐ0U 0KэDTj8EyY`IѪW.xA~:oFQrY^H_RwOʰ]tk\Kd/5:>RU&|g|Rq)NV9vJ=yD^]$wc~J$vGtS0@,~ ]5Q@h; n3f5|ϝy+G|@!c)ꎌXS='U,BlvJwu֋SȟI 2aup*<;\B!i'R܀4nJ=@p"muFY,*7Vm+J[pM]V@l Yf=T ebevkB@+N=}d(qEi~>9*)5P9, ˃I*QWJ`pSF jv.XYwx(Lɤ3ͱÂM{UuW_ {T~S|'1 20.%F]w8M.mrU!Z} Ռc JXe_CD#"[z>\(-@t]s(S~dNz{H Gv6P5bIBu6'=5Mcoنj%P6ў X'E]`*/({lo!Y=:9֯{uÄߥ"䦿L{c~o&%_6q _\r7RAC,auc2?u1]Ôwq!oi@wiYz`Glmԏzőб8`KOufol\NPzU?d!/_c~+@ QgZ/ qުd PQN,4gY3`1 Bk&eLg̽ MFJ?`+I4l]W4Ss60dmIN^[/\,vH7,^gn8B [yz8M*[% Qaer!Lj:##YkT%mm%ƒG.*SB!ilBO!m%$m26nGڜeZ< E4(E9FEUEGui>**d+?ɂl,FNnJ.{ [*a]A9s:mDȩͷ&O= `@CXgʮI6f5bJKk^/LZ9t}ʩRsh$4Ô.ԘjfXx!qpwP`GL%*rQ/a+Bz<Z="4) Nh}C o"GY=_)CԴoDe#4IYWu8j8qqt#ᒭe+n4Ϩ5(ۻMɖc4#u)6U} |(KBsʌ8x?լ3ipBJ,:C5Hb.- 1H#]~]q8 ;?1Ra-$.z^(r M}cpfHÔQ|RкmMyT}6ي5pv OY ]ʹߖvѪa G`.fY.P&SS 5rm#)bZ!rg=-(˄*Qt`* fG|x][>X5+T!Ok?앪i\|"RK=K-e' ݑT8fv ZW.__n17ˆ:(K$:uݞ..}!]bDE9tAU.w#(;|}8xSz aׁ2ScFՋlQ4li"(TBCN֊!J%M my^Ho$4zY0Ti^IVc( B[Df/U)곊Kcx\ZC|[mܣZ;hZʼnUM b 
$J_#Mnb)(ksEy97Qͣ/w߯vW ?PaöX)H-`B!<QL|'A Vl\_4]Y tZ.(MH3 ZF0[4M◠GY*b7A` ֋e[$# F3 v&0I菦(wo 7ҩkR9t TxF*<g~A| 9fg>[2.'$Ep]ZQ81#@UAq6Å֪I(ai8c=xJ8EcVce D FH [qo9E'HCPc=^, 30[[٫JwyA.|xPXlژRzgY5'bHks^yѶ 3@2݌!{c+f,ڲxeZ[\(GF fI]Lw;TLUe֡=MeS݈?|Y%fTT-5X)~];UUͫcOVUD^p|佒4u8ۙQŒE"mcŧ)ݓݏ.2fiLzH҆_V&Sƌ-}u{0_0d7.+ lA-S 縼xCbKr"=0_N+S8>z,}2^89Znk]J` &RHkREwQ)Faⷋ%ojI7PFZscLfG Tn4\] ZԤ_I%84z 򶧀Ѣbl˯E#1$QwƫY|ׯ͹p(;vͬ7[O<8.t=PoP=/Ԍ/iG*_TCV!l'qoud~&=~IK۷:,ʍ$E~; |k8 .Yǥ&͒iGL1ZLlSlB M!GuB .ǟD(~-Xc)XQZY9;7J;ŝO_͒ZC±|6QzC[/=(WC5hP=Pu;+ y?Z tb_9?@[pk#lJ_"p .*DLjx)i0S^_XBt@TbXO Q|xmo52y,}2f0#!pr$$DI {AZx{ؤ,޹|^YՀSʬU{"~CX3ZEKuy}s̹`^fl9HcVwxTB c3Ec͹ȡ `z=xޮMa.Pk>FY"أ*MEJ߿ v()L>h~Ŗ);xrOFĠC"Zk#Ov^,BuϷwz`w QvtBbKz{R*}NWli^lY-kΐ>1QDTw>~ԍr$4P\laX f,KpWHqErw[pbI YuAH8R,wQ6cՒ~r7ІzTyq'$kMؤ}q3E!4iYqO^9b^rݓ#]Q(E͘x(dxMdĄL)[R[GpSL%#8dBugGO7-B r9wZ{)Uk~Pk&ODwtZ>}Z, H rD~$i8d/?OWq_Bq-Q؜-)aQw0Sg|)zb/Ӿ\W92J!mǍ'&r7˓" % BӬ"Tʃu!{I.dg;bTW|s=|X/LH̤Ǹ;gAĂH$SG|U9~xVBG02;AW.,Gɼ='GJv-gG0)3U 3g~ib otuL7-䫢GR.N3oP1\b B*(cUS9g&=3s ೸l+ ܉7U_}ʸЬ /ZنMVїrFP68]R. 9?Ȁ655K OU/,\^\ H*` &HOwQ6T9~'2'_zvC=>&H;HdOP(MQF]ȑȏۻQ2M+P؆׶b<'-Zͷ[JGk! @ ]0"= Ƕmѩr\g3=sTNSGyec>f#v.:~@9V7P=|ҴazAl&Q)6 Rٲ?!hqD?Li*MOefSQ~~um~nYi/ńfZs|@ z_nGjsT}m3; |Sףs_rɏ].bwa93"gcl›gZ]azYMlꍶ=!;Q5h`J\^!ڇ󔝫Ӵ.pf¥F8at.7lMr)hm7-[,6C=6RߘŒv4 nA閙j^+?o0_N7 U7S ?HF{wS^]8A\>۪,ա: 6.Yz˦Hj鸳hENx3ƍkh4/uj 2#tm&~oB3 Fν|}fJ6ƶ`}kwBb3_ny=XϜ$HWo6t  \ۑ! ֗WqCLNǿTϲҲl"E?I]T\=-0)R>\8 er]/_p%$`ϗ9?{r-!%}{U̝G\3r N,FP0&9~FyoJcq$Ӈ(f(XQmkA"5ss~U|bZ|rm_"a`8]wQ S5\ d)?RV;6jJǢB+G1Wʹ[g lɰ\8pd|`z }'֌pq'!@/gZ)e&2a0Ϻk+qR΃ޙL9422mxh/~[׷r&ƃ"¥偄 5)M1:<أR2u}/+i7cS蒴W5H]9ȱv"/w@}Yai.Jۜ{Zc# fJA'Nhb8B%l\^>;T6czgZܚZ-* Er_$SG s+3/("(z/[$ Lc\::l {#Px?_y >6г]̶`+k4tx51\V{^cZ?n(75)OGoX`(}(ݩ;w| dl"l ruPMC$T-/,[Ԕ3}hz}vR˜ɉ՛&w?}Ǥkk~T)~& vy(r8 A@O9 G$VA=xՁ9f:+82H cǤ>j\&ε^)o-8 \Ҍ ;Yat)@ͻhH]ȗQ #Ng"N4D[!~W?OB4H=g0{U `}! 
YBˇa L 3+ |B~1]+0M0ܠlZ Qň\QXƬm=1ZQH]L;]嘚;gzfzWMlQ,єxd˶S NgT {.&߅qq9@yUu5W4;de蜵,%=ƒO,g׈y<5TyOF65< NG>6f;w`qB g4}DZt':P9_.JoqX׆ !E˫OwI1hPmEbS} NlUG,Jk X2(PC |6& "3M"<č?ghDV 2l _ ؏ۚn 4-BlC)4Cs֩I 4AٛA_q26̟X @R(?!uڬjSv!ˬwj >&EJZ JE-^Z~kBkE2w[+f=Ry"Ζ]H)_JD L}9#jy΃9_j '䂰טq} *CԞV8PP]+LVvnLumآ;5RE*1]O5@Z+D$~2hD#JfjnvR S~$d"&ګDFc?y9q~O>|`YI7ov}ƵQRo$bڑ\4!U2%mҢ-Igܭ'ZE254ɺqPDd\$a΁{v0+:T?m obhyF1>n"B@i9&c1iK*6?ɿ;~^v^Rubc(L{JcRd6oPuf'/bOSWER!#ȇz0zzP v+P] Քg-1&iF¢'3yCZfx4sS,Z? lF594,le!nR.4ՇHIuTe.W!S2Qka+R' e++!,-?.GͧICYPN+&ZZm`B6lt5(^ѳpol5kZvM]Ir/t[-oniMsym|sKx{ 8q)("4]>ً(?l^"P)Rxf?x`&2tE ŅS^5Za>5X>&_!r:]r $Aj:Mt^,&xtN]" VKpu`Dd^2Hpqz7ɴZi8'U_277gW?Y|T27;8St U*'Xc8&Kid&"|0*G6"XkVݩҜ퓙pػ&bQ VU XTR\&g=C 8KO7 48ø߇nXQZ[Ҝ{z[1y5eǿPF՗WODiX @%Y_7v+yF]gQL.~ǫPDфTHL#A#7O _ƙD'8ʦLbO 4-QpoHkW eV%%9=dqBG#1LD7NK.sꦯiE^7qPcIg ɗdٕD-$;n\*|fN //,'}xtܯW2QqED,u @€<^6߫~u¤ ÊE(˗õ]ffϓ'R%$MӯѤ N,9OhѲd ƌ.;yMU&~ɫ0ۆRV4ZÉfge9Z~ml Qȉ$3%i»hB$1lZ#^H]+vsNT6MyQDZocTY/ĕbLm,eϮ'^js# q]o%GñA TRוBYT@^en%YPɵ.-0UtW>jP1.iP407,^Y=l`CD82Wi5fɱɽ `5O7˸ĭ(췓T٤^>8N$׃`:bE*W:~>)\t[)=e5>tS\tEů&%q'YVOt@A]Ld=vl42QVHStZnNqY 3W)=h  ]M=;y;tMOޜv3ֻݡVğ7Ç@aܥs",E3-1ͰppIaіUӨnv9*PGy%ĎEB!B2' p=e]fPr~|lUExE.ݤC-qL[t {`1FŌ Ÿ+_m$72K2tĻ'"U1t]ˮ-Kۭ?ׅ cn#Z7L^ؠ?$%nxHVNLDri`3qfE8ĮJVI ,C#5Qğ vLy,~@vX1|5iZ=eVt~9Txa2|xG 1jz],ـ[1:{aYe\} ME8U8w7%~ƔFVJ!,fTUm!=EhUϔ a~v i^-*Y/; aX8<c'g@հp6 ]N_e2}L=Vϛ?f.aȥdUAG-B,L&1%[JY'o젊R+-=1'I zp)CPP:|pi|Fo3a VvhyOHv{A>J</T Gq{ENWiBN9,!b^ʈEB{*c#'wE V3V?zԉr~ŃeZ6}s"naW$/KY!F*ѧ,ͧco,`\WPG./?ک cؓGVXx}ۍ^ilGsV+J :ih04ѱiCHڷgp]~ ѧB*=K0!w# irB`Ru??1GUɐ3UCbE:8RVn MEvh@ķ%{.}m۷t"r "ED?`2LB~՛@R yT/W' {4`k@p T҅U1vfe~t@Gh1A)5ѫe8/@Dي# 7l)?"&"!AQBA{9Z 9n#{:7^G31c"uCèȈb8ޛԒiVk^F$cs_i2uUoz!_vuG6A&)Ny!6F4o9a@yɅpoؾ7OǍ_=lMRl n pIw?{l=58QYZ%> &Zuӎ5vwmT!F&w0_7cH\ݎD O]}1Tɮ)0*.2| z{X.Ps6CՇqbTI.6< pP#b@)z[ԑA6AZYx>NµgOGe ` %AB7qVXv>0QV(BlsGG2{{g~2hȩן'i>im? 
@ʽr(l,;hU65eX M3*?99B~{rmBA,XN,ދd7\r1# ұ/ƟAyJmg {1U9x;s_;̺V&Ń`>rrX[':B{~#&u ew+-VAPCLFTzj6 z)ejo^(u;jS١I,Pzwx$9ܢӄecVG p\fl **tp;Lťo( {+JhO Qn6)pnbuf5QunrI<@ti)Q#p3h]ky)olX6DX!qo73NoK=fWdC>BV,JsFgIQ(AڪMr X-רYyp~8r(F-:T~EqR?n4uEF?k*eΤ=_~A +TzNXK^7G~VM*^{oų ~ڕQc  ;@>-'ΞqTXNkirנ  *axHhڈcx1uZ7՝UN1(*3}nvXA̋bWLJ>(̛U;tP0mŐAOVbԗ[GB&R3ڢ pW,nH_'5/NkYDz*DDanN"# 4JTλe4afۚɱD;Įj>9ݹ٢c[3o:_дiWR5Vׂ;LO3"/s@.MYCE]WLKZBQ}jaHkȉGa _dOTV1K,1o-F"hߴܜ`i eDv.?nfB][kБ+*vPElkqSfW2k2rWAGWΣ2<ng5aw5f|<'U+`)7\ҟg3F`EcBr|ڠ8¯pWE&Si[jˆߝ9D;M.YT![2,T>0A]4qPmZğ^γ R%lQsؽyjh]OT*%QBt Bpz %5URUj]܌=S6a@qqv߮:SC38)<_UzE w83Gb e'.Vpu5* }XXVs= iǡ@-7 c _O{^٭>u~ f|rR45׵6i*%GkK2,j@t5 MUy۽n\q\mMSB;p^"2QY亥iP3o@M+<'e`ʃ`tiыܱѱ':;& =,Xk2cPO.;R]^/?H#b[(̨$CT TJNOC1BD#t LuE(} jD2hez[ٝyX"Ek*HkWInYب(" LNaoH Y~jD.kVij얝V<]H Ģ #6ܴ:4 qV̋[j<%绮/e_G Q-.p&2<1*>;-ΊP6J>fpV}5٥h`a |W9j,:*hJ C4OAl_))PfgK h""2bKGT'M~n(,LCfÜB`PXsLjkUlc9C6ZF4x|k(!3ISB8'c-y3^ڱҁo{NB9n'KE"m <`'};lF9@eWo1;.UX1[ Arq6r,rEBװ]8L;@"n/eή 8~`Ci ^&( xo3fQ͗,ljfƠh4}+zLCx<}d ѿj+R֯=d p Ȝ-Z,I1O<ǎtsmS2H < CoՔ@9:nQb.y,A͙2B }F w#Ʃ XBb Y?mZPSVSX="Le<ފ"8&{Gxc$Wo\Hګ9H5Z,z)ϤJfcG7~ɫ8l- bXDqWą:'Lo骏/Y&qV} _Z5 -)׆MyR#a.;|k0l]8IŖvdXC =7dKR d<”܍>ɫ;pHE7s3Qt)Tf7`_,5vt\%6%>i=T,qoXq{迖_L;;\WY^oOM* l†? *lcn!rf9]L\cVityRgeil\y0!4!W%k*U DYso ߠݳR)mχH/9ӽIX$jUmrt/8LгC&L2}ٙDnqZ wTtEVZ֣ӛi"/d""}27Le;N!/7 nmpkp -QC)Z@/lӘSsT]l]6brnJcWdj~'^RI |AJk7&qҡ83e ö Ǧb3<{ t߬9! Vq6(Tdnxbu@ fz`xre/urem0Lk萛|UCB+% 1OF@ʛcQ,{Xfoa$D.bzGAQ4`A Oe qR} )Ϲu&`Nk3v Sj˵W:J=|/K3.@31 Cӡ6Ɛ4'{anoKvli$QNOֽ},A As|$9K_cX-sMi|, dsQ<|C q6|r;xI'K{OsI-WŊ۳ 2OU`Mh5?'78!*֗uTX*c}Y4) axiRCXj!賻1gJqI=3^=8}GtָXJ"-n9,Uie3Dh*;᳜~R #he ؖv$/;!:(?">Ծ z=)I9,5X8:PuZ' aa'afZ=Ѭx<zUfs*7-"Nub85 &I\cZg:B!*3rH!88J'jf$<[YCۮ:Ȯ 'BxDQaFNP@rwk0r[}V>̜'I'jٚtg>sUc:w|7 Zjy ;.}2Ȍgs٪7jC_6/ʊ"J0~C쑔FkpM3XԒ6oU'})ȁZs6g3?\jٟ$+(·QU |K퀒{h/2\'+P؂ ȸhMQ:.BJho=]Wtr"Rd~9&r@Ϩx1I-GNIt'l{:h!I [evm9-G}&1Cjh|܊!Y/f6?Bdօp+]Q샵J]S%( AtP ]U0C(Z2%+`*&~]!ݩkyӁy9"[C KiBdX~߫#Aq0lsG^Kiy q5:0_nfU)'/;+c^ G({fc,+6^jIA4%LԫD%3 \7ep:LY 0Jmbun%+4J|ѫ5 rv:&*~$-OlUX1wg<. 
[^VIO 'g_4JtTh:[G%q3U36d^Vg,ЂLp:O[|nQ;gTx @Ws_*o:'/ևXk[b kl #AiCu%XpmTT82mQ~/0(FtI)~RcRv_:2|׷[ GWz,{6t{GkXSZ99~bjWJqRNd߼'rzuOQ9Bn A I)ЌFD .8/?%C;kx(INՙ!/ MۊMRzid>֎}J7w'&4yiSDR1_UrAu;=F(#"#S™xkp_= Ն3^XV; \P5|E#u"\eu)kAEc]QƆP#bk?Ɖtd <6Dc+>N[3* qk];I=Ox3e(5Wi.b?oFfDwX8 co&!.0{a4 *-(@K$Q(%E^Y(0U_d·&^;GtHE F@苄B">y58ʱܷ2VATӞQ3z';oƴ{ătU Sʅշq]W> vٿElѥr0x(x܅$ČH`&dj)4~=fv%=iP>:xDl5'1P^!S޴ ~uOѯ(ϔ0%qQxpx{y %(QѻG+<.Y:~[\@Cx *Y}+ fМ$t|\Ogۇ("T1>m <I5!ƧѭI3s"(%H5^z"*,8d88#_Nr~HbXމ3Xm9/}(Q 1Ynx]]4V0˝Φ<=G's!HTF$&B.aA @Ml~QxNϊ)aW$`YH7KJ}aJ |}gw=F3'I'?AOE}.þ%V68ުi9G0/ ;fv%s1uѿXBˠ-=-݂tcfWy.V28Vh.Jx%x&xHyx YY-*7{2{/3K_iRB@ݵ ɦ98 xKJQ)P-.cJ!R کNL_r}ZM'Ʉ6d =Y\z,nyFNH8#}LFr|W>4SkvmvcTحbx`MFp90Af|߰3 @罷劀C^6|)Ƥ5"'kԎjӿ$/z<\<4+\N4TOף.0ÍxxܗıUc˶B>:14bz`.V&)5H1t?驕/vP_Djq @s'7&&a6GׁYk]7U-?ˏPB̔^a(h74!EmEv Lm? LAT1,QF'HeC;7+]l}r70G\&Q1XIUc΃~ Wn`STѷqS6J VÅ/ِTTvuns5u^HkVdӯI8ȗkIzА" %),tA֏RAzL/vdmW]X/w|GGy)z>W 2lc}qc{bNUh|p2DdI?K4IL*!,m.+?2qHl/QzrCFQFUB4Y0U4Kg4HQOMl #kA|t+: dk)-*4c@6(&X|}e1A$c qז\TzrpPA5xqle?xE$"ۋ.""؜ L_Z` J2Alfo`UO;fJ*819O$]'JCDކqije^;-W$sh.W!ҚTW|ʰܨx!(06NYMÀdؚ`b+/"vpζ"8aGi'=+Og_,ܷcF61M#?તFeol.fxhP$e"{Z$gkt K؝bGȠ~niw@o$ Y|[=knʡ؊՝ 2\q>+gWWἾKg=DsvB dR^hsR5! 
`, + E]UP"z8ت0kNb> -Z#)>UbPc@22ҥxIYki)iDO *%gqFRBmR^7g`(($PcIm֫5pObZ߹O.rg0I |(T uЗIZ%VdkthĒV;ntٿCTˠ\tjGDJv{N+<}$VVx%ٞ2.Db ,H|J.w31ּΗ .0+^GS$&>P3c hIe̺Uv_3qO@YjJApZ蹫C lmI1k =H|8zuXc4tR gQTaِ,@XS7N9Uj̋&p͗UWU5vλYr/2lSJ@W_z#FJϔqH.]8+c(> jP0'~!:)(cp6fR/5yT>py]NQ.Dh\PqVU4(LbKrjPUnIl5N4M|y^4'73hsv Z9.I '_LRu*WHY;C~Ouy=NnmnrC pJyCdߛx-"_M-3c_Q$vlaz> ?k5|T|ZoU %9ltk=c, PC0I4w9%էer#fIE+Ui>C.n'䡯:gPia&O=rs@HOٶB&T;nֶ "/TY~\ ޚ`mO:kڵ<3h ~ׯ|cH Vs'gJ_xJTKxd#_E,{[[fbS| P1fרF?K/Gz|ӊR' j&BQޑh'S NF<Ux'1&!L}nL2Pi*<% ͶN qFLIch̞0} &)6t084h"]?mٮj3P *fěeL${LUo-&6JWN@vF安xH&ӻk5ĩ6udVHQ DL1nшItX~F q<\/**N렏J}n]e:SXbϖz"'eRa߄ ͶqA.B0;p6Z7fhcm]p/I8' ϥ Ksٕ8SKXUYz?A,>JE#f)ȸPӤnRq$G t% 7Z^6,Lƃ^QJfܽf-rvy夠J203"dnӪ:7:jO >/u$aO Y&Pvh0{q^XU>mԔR C'}P9/9`r@"eɾ5$Hn2į*HEXk^$oGzSXx6,TT KIb|R'XӰh༝0)^d9NIV!WO4'ߝ[f\rHd!^g/j\!WL&=~H{EbШx"=J22]5͸=뿃*#|ǵ YcIb`=n~<)|[d&HO/&Pr3,$D9x,?#)'(gSaA-HU6HR+Qp9N&mn?A+%Ekbyk>&$cW\wnc M <#(M_ev2.R*x.]\,L_ZNpSMk85O sM6yp=֦ l|S9O-G$nu5+R3H.SpҀ_dԧDB7l.őz*/ՈbB=60u]4ḭ9݉N3R< ̂M\}Ssgݷebo&TkR S ^F Tu@J}zˀ,;-]?uk:"Q 3AX"E/s&zhjMlKQg챹%x 孄wCdRNaX3p`?-@* [6_R{yl߫N͆+Jec^5@^܏$'1|\Hkt(j~`Z駨b lCed.@z\xV oMG/0l$f- (pOF ptePi8)ʤ VvxK vjN[צRbEMWVyB[.,I_H4MEX2b:7ʼnjIZ4')-]۫6ZO& "9VNo6˰P2UqGc`S|ğkMph=Q+[6k<6|%l"uǭ֯R3 eדēXr^XF0pysabcGrLX?ৌOrkѪTz \[`}mHˆ ZB59UY" c@?〡~J2rQ< yV!/_'ťT(_1k'VdejN[ 蜇L9"IϏPb_K=VN{Rt\\qܽ8ftF*(ҋ≜& `zI4H>;)>o>\qYĞs*D^h;cڢ ݊H,ŌEDWӘ 5kaFvcX 0ѨiP˙Oҭoƨf4J[ |Cڈ<"ɓ9*P4syf][oR; XTvuxV i gl 9e6FD I"c1z2;1{{#j2\b8ᖩA?TغY֘%Q}xJB4q*=RC;*eUTC[`+"7~.q]LK+ܴu42;>:U #֖h[?h-.ΐ%?=Ypi/02Kkjjs;ԋp*ަjZ!Gu~a,HXny\$mfmJfو[~l)4c?F}㹋=/=Že’PRw)hGW71SCN #[y؏% \Ĺ(KGidJDv1,`vK #oz0 WoZ4)5 %~>KmPsG !RE3f#ÛzU ,=oҤSϚ^~ُ?Zb=xw,/H% 2LZ~4s۬;e 0bKPf(Kd1U-'U@*N:چ٤lT9!'򔙔#x+WvfX:x^d!U"=ӊcBWn79'pvpXu?&c?9GOC GUY^o8# * 2Z,n}0Wc<ѿߴC5ᄄhnO4,.$*2Y5Jj+b:O 6ǸsƄo- 4:SV9NdbcncqKTN{A'"5kM)t/V~ |$`_V FU.ގThzcPB!"zo4k-̎){Ut@wuez)Rj`/қy 26-K_L.\FdS^«j8Wh way`SH\k'=-HrA} fD _Bv@$xyߕr#.6r%~{d3;\jʛׄ CAO,4*Uj`J}i\Bԝ$0=c{}e/5ʹ%࢔r]ֈ$2m"TǏؕTNn`KgaL);Iڐ1IҨt =d, a~>M?GD\ 80~p O#`} 8t|d]G, $-l̐u}-6=2Hztg]  ^mvX둞TԤKL+_@Tet~V' {~؈'ih'KMҲG/w?٣ 
Ӣ9n>CjMQ7ŅNgkfL&%SޒgQhAqi52(٩Gftӏrе'ߏ[~O eLXQVJ2i |]뀮><iF]e0aM4 -YWuڀk-Lގm(]5@F#3*+<(H\M˓iK;H>D{vQ[pꢥY)3 QIzě+:^Hj=fg?*L>cå'IOxNb\d dܻ]Tߩ 09`0 eo89Soe7 >'eZUw"ַb^} ž\neN2[eH*vW#"#ݮ'=C'WQ)i$P3Gĉ Kb%XZ:GҌդE +lss@yZ4O>dqAvzfٟbbp7`We ӮE0d ;V/!!1tN-tJ^ &0zpC}1vU&n)QPG"GZ|¨}~}la;N4JFMԤ|r+wÛ tSM3Stۨ^6݄?GW%RRlbҮdGG@pۚϒȽOjX At@5 $!A/i' #O: j)h@?]06M)!K3iG*ap:{_\ujd4g4 Β1sC&5fq?3:a'C$)*V%S$|XӮ&Xktqh3A#t'+ѰqMA5*%r#!kZ"Zx4bÃBOFBKK>.[t/ji1s B`P&XEx)w(N q GϮMX I\% 1vi:W_mOCC G5c'6r~$Ww`OcyTSX߾z/4#:AgDwJ{a?:l ?L@:=Gq 1 e[IAd\ƆYSN?B. \=rdw!RGpcrne}$>E"H FpvD uy `9Pfh?Eizf/Ңb0R?CW Y}`綰=:#ƺd5ź$#ThO},%.>cKḩ?/zHxL"T4|=\%w9E"I+!{ӓ@P2may&^YtF׺)؁7ĞZs.66ArGB<.ֆci0=f tr F1>VK&∓eR{IFP4$̪,ރA$|B'38$̤cc&(NNsOHu}q̚rr3bֵƓACHtCSyQ<8CgRڻ{W=Y#`%_ |9jFT!e_ FBuL/t.-4N&wVγTܖN$1J) &jw ͎i"f( ^YiLBxPnN_B>G?Չ@\.˳0 bq&Hg&߳mA)M|_}K#6-+]kurUr cͯK탹)f)6g5hu*z0Uk||1=,nFs (OQ 3k"Szq^hCgo΃ci8XtUe;ץqH|&լiVx;59Wm!¨+5@?8^/%r#_@)*.J &GK&"E']}?[,O޹kE=g%_/-iY|OV33&7_GgxT/\ r8.NH;ջb!a7MraBd8}ݿ=2f +$O Z-W :^|ke!!RYI\VFvf -T^R|HaDwY[D&;{̱|LE^eڠU!ivE*ipvˣ[Np]yIoRҕ*:G27$t݂)Pe9,˜#2 0E#QGKs2栾i=ώ!`F޹bHܷ¾NzXAuY2Aw _h+>= bw2zô  )o~8hlsJ_1UCֵߺA敏.TF]fNܨ3Pg5g!q nŸU`c*NJzy(9MU&bxHt ))7eh Åy+ ‡k.Pe:SVt[O?b{^SNOVdcT_G?4(:`B# NlBibѥSt6{/&YVo^t2=m K(!CMU?v>\H0ncIZ UȻdfdˣ 14W-77t6`A < \o,aHBQWB !i)$ IVnٚc3EwM\ r8{g{5Ez3;$ۊ9Y QjDY ÌM<5f.,ioJa )05җ&$ ƒauk|fgiPhy.Wc && 1}cSL9\n&Kt܆V-Xƅ=}U,~XX#|y"@['[l-q쀱Q2Cq\r \UESB;,_wg>p>m\Þ,(/{İ{Gڙ)P-xhʨ)ܯ k)D cF՚R: MSN6ڸ$K/c`,d3s5 c^W=4+'^dl@8wxO\w'dk֗ȹ\,g iG5BOY7)x}fouvլ|_U>"nR?>}Q'amyaե_ l稊 k>gX-$W#FV`܇9V7/E vAūWBR\?)0v {M-\ܰ%Fԉ-hYIG#9,<{O"8h-YD7oy2>e[H_SGHfW |.rɌf+ >m՟.UeZ~ԏR6Va Lp|Af3ǑȨ(ClHH?\[  d*Yj `5i %Wp 輿a8?!3dcow*VyK"ޢ" T;|OIȔCZ.[ VC`)]b?PL !mVD@B(A mEMd]WɃW;]},LSϾqeԊ?zxK;2: ( )=e-`2Kʵ }?jG*3XU o\'PsS x[YRojыȡ1bg*+9-5"]\okjz,8s4tI@ "\g`}"(NH_ΎP>tWAWh^C(`F\<⍝Lн`9\6T=[UJ9 sן Xi7tZy 7׼T٤5DeqSB܏.09:opsp./|?PȀ3 ˦ocs8겋xd 1%`Do)!#8 0 Y\x&ϩ !\CI(5Q) sq QP eK9@$OmԬ%;I+B%E-V 9ե97 u"A% \r-5ɏY^"l:dK[~s{ ޼n~? 
K6|ܱ$i4+AșhQXvI>ҠV8G Ar/A*iYɑ']t1J?p"f4G!p㟵j7!wfu+Mj~׎Tx @PѨ,+ZJUWIZvA {|T˼e@aYR>tRV$ Tkee w(] jLJ-a3 Ǔn'9QW'*yY%U& $EoOSX\#EMD+g=C $v=rP !G4n']X^d=X244ePW-[dK DYD;C=61MCCʫ+*Uн҂@*N3m;!{Q=;7eG&N[(9 B׊YoO^98RʤU23]?TuЪg/ ?!:ǂB<\>X܌ i׭[3]tyw{+wLX]2fsx×;߳3,{$I.xN ^е¦EK W8.ܐ3@3Pf)7Z]/0(ט!}X oJ*V4n>)LPYѓ8Ev 8L<]zw3bɟi Z>/{Ᾰaѽ#62vcD$\B=Oy_MP##yIY:kq䂳9KخA?uED"WIKТ]RS\49*M#˦94 Cd⅝!8J=A)PX t=v"弣MͭtS\豖 Fڣ< nlv_%8oFy"Vc#}|$mZ3C[#| ]7ˈF n㑙Xf=Ltw;Lޱóu.E +WjL&̀. 98t#u*[ׇ}a޳cНQ>qz9IYxjs URaNŔ4臮趶5Q+.j89xK]:宼w iԙQeW }ԍwnDA%uk`n?;?2s8Us,ED(dk/_fһumo{:6pb͆TtOj[K~soE .~Pij<36*t6`9!g݌>)9u^7w }ZꊴT2h;J2'fM/oW]tnqe`ta0 5XHSՊ&4yTxPEM=-%1^z%4=X'p}twĉТU5\=.T _e'UP |ˇ^QXUW6e3$O 9LTmAn=!DԈFQ7S95FOjd%KJK%ڠ(I,˳ʯ [dNFe:jH3\j<*m{+C;QE #|d2RBPsi#c25v!>eWxOvt;yMwsmnP;k?"^ _+v GG-.DG~<̴1 E4S%@)IloLGM1N=Mb4 62JgI"t7 yO 8/Lz)I)UW(]+#MسT-tVF ĥ@*':.Q%q"n CIMXjv7'ؐy8`3Xu>AV`vD(J~W$3;>0$g4,gZzڗP{V/Wqk!gPUUX96 v*ٷCQ(dՈi/5"KIf&v_ʡ~\dD_5沽U"?`$[4mT0לںՑ̏`j#AO?>65~_?ц2kz"bX1uf\O0Ud@faFU$k`T歨ZJ䓌1mx-КW쎵Kj4z C v@kwIEsS>*;ΘmrbvO4x>YGo1֑}VҏT[ƿB9hh[Ԡ6Ƕdeź?6ߺg6uc=t^2 gOi[iIrTQ%MKWvA?U^nMZ5=NFg.$~mrdL?7(g\yJ`""* `퉃ޙB1G?3 ݂,Uvdפo, s]?Wj2$ ] qp`0_Am gCA2Lb2*PR~fxfjw# u^vks_I~L[J61G4U;G\q;-؉P5G}߇z2&eXHIO+[c-TyHgC^¢T6b HhAWK┐mU͂"XDOZB2փ֏dILme @Yv,v#jG2n`z?=ǻ*f,TʳE _LPG:034dZוH?gT-" 7" 7>: ]mb(ze =?٫eGrs}CG,vêr \85>L٢:PU/Z4V‡U !f6fvZyҼ+qp{xɷU߸{7L={>r㵓k'aYTԎyMPq~R*B5cdm,=68-O8d0x`1 -G^ztAp~4-6ȌM;K½"X ]0c$ ңCt^^l1?t񺞿T7ಣ3y,SCq^*|^sg(W-Q d<旯 +J`^;.dghJ쒽)83b>YɁEX~I[`Ma;i@ij y8$4}K266N:l_~ݨ*t4ΐx aR~b̖m/|D]M*瘸6X(}r3PQ OnkQq ]|KGUJOm}d-ݮx&pR. 
^g^P0}QzQ)n%pY d|{ᄤXsN E9dC\7}uy"OlT aq7U$2tˣ3J"B>"2̋Q>ΰ7+皨@4qR4j%{ut^Hoi]u,rA1ˌl w N镰9@Gە-&xqOIjT/оs-gDY$ OݲnNfIAalE5Aay;_ʴ;[BQѼ''XV$ӝM/8 : cp}u:\w_u>'s_gjŪ|5Z?s iW-뺳.lauK|g?I7@|pա/*x4vֵ>$ɢ]\ʬ$9_ >* PP΢%~xda/Oe}`uߺ5D:b'rJ;M|ugtcݠ*6Y0xHEQ$TǿnM ]UX HN:Տ$&A󴔤;1qi;NqBd%R= G^CޮUSreLQ' O*a Ȳִr34p_(aR-/' `ƿ^\m42MM׬h辔Q ŮQ{VNRV{W1#ub_onQnSm٬zVt@Rq[H$kuGom{`:^Ek>T788CJMꈺ΁>r+Ao~nB'~؅W% 2^ //UQ`SK1 vԋ(KY^bz{PHG<yqK?)ω_W`)"%orTbȿ"@R+IkzTb g<< :vQ:-Os2"|i,}_t)0#~2][2c= FJMut*Mo7e}8/&!>7ZG"vYbe YuЉ4CQfŏ~ {C2\\A̩#q/oS~ϰ0ia+t6M~ahT x3c;#]׼RSυ׺aWu;a&j%zf {(YR,5(oޮ>9M@ȯ{E:eLsb"Fdا{eQ Xp wTџ;߭+|ҕ6 χCH,)n.㚫^PTޝ1u l+ ,lǡtԉ Π*F8}R҃ҰN>d!yO~`V{M1\]/}(iHS(9;ҟ* װt_3>ݴܒC!k|9J }1 [;$%n(h.@l>¹>ل\@Ϭ|QU$eSϛ2zZS^%"`e=87ݱm8] gp!36Wz̊CүDwE4Byw&sG;4$B_! qӘ탐@w=%.:1c+ Sd$[/&%s_'mܚ׹=*N׊]C Q^|I`|G2EFazI-os͏U $ c4D~(MOi=bjݑ6y'hR]W$;DݯuNWIn|/صT _Z(R?=;e0n+KD;z>QImґ1i!e')aTL,rr@TgވE rR$>wUؑGݖn \'mY#"(>|ZghKT&|ҦKcv04\-VO|Ӵt=wCaFN [`ӓhޘ"ɝp$xKd4<~v>oQ'֒]hYb*cAPMtYG?@aOa_ -Ko'ЛcP )βI74v >zf|[u<Atb!}C +C'KGT7z( cgjےS #^wӲFYDꢤ .΂V tٱ{=-}{$8-[ΜMUg(1H@u|앻aL^#-H xsھ6@K0x0Q7ˮk1}j_!L܂9ԉO`%5WWp7Z! J/4F`tB7 H$蹉d?cAQ&56d>^7!x9U5␻ŧHqI+B?q Pӟ8'Jc@u %CӪ".YȈxsY35 Z^#D}.a7 [ޱ85~DZmV^ +^pѧ獍m V9ʢ[0Kֻ/~W2=1kOjFE+kK"dkk/nܧc$,N.&?3_]Ȕ|n5}^+JP Ľ'hs._38q1לF!R]Pm%69E3"{_9J 祩N@} oAϲ$]:>Uag`]1;BS$uBsDz;x mP-?O.7N[$R0r5gx)d^0A\fsT?Y ڤ {=+ TNj(֧{{U԰~'Vt3A 4G]rRWYi_leI3m@@G0/9̛vbV5o0XJQpR35!srS[F _;gn#,|D\kw(:^}[v]AhkޓBx Dͬ#ͷu@uHjc.Vra(b/d,j$_E~+p{C8vنQ[ lDӃNdewZ5 KOK47l.b{N~ݳsM6XJ}[8Hba<]*%rhF(h^p.z&1,-+t!lWJΨT^uteڨ )xSVBk+`X-bHp`R5I/ ϲwnl(W$¨Zvx89V:F@z*fjˠͥ_ 44cU^EͼIb\pS;%%9F6NgAD=O<qWwƱFr[{¤I00,&,ipUYܽuw.8Ėm%)5hw-Q:Mn LO W[ىh)y=X',Z+ $uMtmvoC3|jt,|AVl.•l[3*{:RCL;4Al4FjhiVj+W + B.adA71j}K+dnQ$#1]a(mG(?&E7_ӳI/gtmju pI?圂r7ϙV4"r2ΟVLWڤżо\+Qklɽ~It Ԏyd뀀`}[gMe3Kc}g8, mLV_=RXfe)ݔ-*^0 !(HNd:Su 4F϶?RKi?`_ZP c{ʸD(oL,cBkiㄮ,^j:δ[VmB۶ IqJhL? 
«-(%5l _]FꉽjW:˝$h b_5HĐ*fvG9]d> \?9.P{ P8/gi!ݞdd$ 7'=uDłhò];|Hn$ĉPI P1'.mQNi P+I|auՌ:F,RCV܆.dWҖos̷/MBKebu)cI%~h<֛kNѫx6FVM)$'}TZ>ڔ8?F>_a6 #)z$  D$kdgcVĐ3rd}q_@{0ڈ%%@ kQ`jqjcg]~;k -Ğo-ꪌ5L*7=iYIh@ ]+h8ihqZ9`['>`uS+gSUPh‡CtsaPps^%NH/_xD59IO=  s}P:6 O$AG7grIڞ p[r vxl7b@*zp6!H ~)f\j|?x4ar/Kq?-1a#f<9F;7)[/8wA៟,x1^Yb\7rLߠfjʈR.uRAOR!fW¹ȴzw$',[~Rْhl&x3*:EByv,}>MnnH쏓qQZZ-aX1Šh޵rJg4֡-U!jHiι˲+.L@XOl5((6JqAe$Yb}=KK ;p$7NQ99|roP>@r f wsd>&U= ^5I4ҙjo@V(1k`(vu)z(fHӐ2#麫?3dEL8Od2bt>Y$7H*o%eϑ8ga ay]XßbJS專QA8!Vt̙x-]VTc{`i9M|8,]xePM{]2MO/d)UCO D@.\2cI=4ʛv `LP?]d<(9IDNqD H)JImyjFecіWvj +,'O\IϔCv>ޢz&-kR%Ix%7e}cĤ CDvW #F##SfHN=St.( +ݜ:K U :y m8B - ?Z~rʪX/xKӑsP-)JkҏV0}U 9)YR_ us_w}dE{l(g=Hq3y`FH{v->[ %|FQd'oa1?O20ɮ Hn!0d4ʆK֗C8)L j~,W54L# {Ei P*U]Ɉ):XH:)cupFf G3XnܰbIz3 yx_ǧ"L_1t"&n݁ls,_lsx:a?`c~ٖZ*WI1XH&6WᑾJ2#%vHEh 7^z >b%/26Nf͌c8^ea*M˪Bv<!XwL-"tGHȡ-> 8|0u{@x5JsMtV|V! 3FB9o{z^@hG4aԿdO,1&8|29S*T"lpenGAP])NMCYL-B8*Izr54c}ȥJ.5Ÿ́_N6:υqMʭӴ\):''eT2! s7L}0;m}N1 65Ŷͪ(څ+.b|:yzߴ6޷lhIX`ZeT õ/4A4'fc9o[6ac=i_ C4hWq-?tqqMb0Tp1?K?7?+[p8].%֞Tg03PZ@ՐiD-?qTc&t+Le/-S!a)dH"OZ}{y7Y^P@TjdS;C7wl/n_b l%`ĿWzX=v>,X8m # vYy2i KyOlv{ E'\~kI0er[@=Jzy= Lϰ4ێ7WciM+LJ 0@!n *|ʹ2r'a\V T٥widWlBtqMΊd` 館dԉUQa|\YGЀnDj hR!=1ǐ̴KaWLb-YY -{MEBxl,{THe࿞0}#^6 irLTMNኪ4ϵ1\`ܥqNaR9N_TEQl1}9X_hKK7zLFUw#&̅=ڽ-=5r%/њXtV7O^B2|en=ԿN}/haڕl&y}H;rn3@I#C3Xȍ`ZroyҢ2mD 6hE>hpA!ٜJ1Lk&~ '}d` wgZS _ N黪a!n]6BwG5a~G(WҹKoMLXcLԤX(ytMdX ZYwRuO¾B~9jIH y{Q}SmLaC?.Z3,.>2ݸaǶ'O ]~&֜UQTֲ_zx:XC]KhDCb`9R>d~n%9( \Tt& X=,%wppKF y--+̘@qDDŽn[??*R 8O|a}=1A6֦kP#|rX'_YJų^12x{RC,zh:3*,[sJjyӀ[JLx w05T@)àp5oM>hK'gXy^J8Q`.4wVMB)"λdMu `ԪݜQ9ٽx$.9|^̄k Zj7A]1n ǴZuyh:É`zl}}gG=FlOp9Υ1jVC£A30lO*i=WF@'̵UE7w;-r{pgݫý]p>{`hF#<]VFAiDȷ*K9MQ6!<wシMWc /3ַ Kֹ D=wzsس JLٳM)/3xlLt§ 05)ߪym_%=^dsY] y5D4`e3D2/ju%5HH9`J 8o}b zC4Um yA-yv/K=su_#w?_P./En[']¬fg۟m 7{45.Q_*"2Jn)t&r>:{A1Z1"g>]=6j5+`{O¹{㌛ԭϓ 9Q̙L-9m8\QHߴfskgܹ#7|KT= I~ 4^7Y OG {5'&k%_:A(%tVԗN՚p>"e׎THWnAq J ȼ0aV)}=h9Yo'C:>S%9_TE~6KZďt=P8MXg2SWDw/ZEA)p(H8N}M=ZE_e3cN6iA}!՞ϚQ1z9*k'bGxJ7^`s{*O9Rz0`$D@l6~7)\e{I@l:u^^GPf.LM,lc[Ձ/~Mɏ`Mݧ0McGgŒG@2e 
M52+凗QC- fVeA"I X*m^rkKT5~2nFy!RR꭮| ~H3AIshQlPvllj$z+TK/,ީ O0zV}";:PmWNZХ8[ .Eĵm*CuhuTeo?bjD%- uOӉwr= .jk]rϝ.n> WgQ J25q?V:K3 +Њޕp w+MHe:< wXo?/#kB[2gBƯqIR4^T ,w6$h"!}hcFN{l*4])r}#"Ş8T@QQ"2DeeԣS qH ,HCwNk^jL<y-{DW4l7o|\;6>Cӣ* %Z g0 1 .Y`[{Tz5ȒYq+U=hヿ5L8G8u_KAt"+0Q]bإP,{+20kb|Ԙ>g?YD V ίz7B!bW\?*],PfJM[P:C(򫹀aO\;DX&uvTq͟;].SJ' [C٬˷K LuxܥDB6dm4oBO!;}TYK׌ղB"5XԼEd[NLT꼶v0JJB(pj¥4bk3đV*uVpUc$1 0poˁʋ5bUˀ{j瑸9rY|3܎8ݑ \~g K'ƾmO+χAC~fݺKW =Z˕6֔9=IGۻFmk b|c wTjV“^Sd ( x֪w]Qڦ!@'& 玕vR:f:}[N5M*MgaXҁ)lj'9(\΄pCRX!8}_=HVhct6׶9Tqs1NՊ4@%ҙG)& !dLt1i88K5K8BBVb}SEM%4eHյE~8{n^,k.sxi؈~"jH>6BI;0)Ǜ5ϵ ;ׅ u t¥ oUzol-}/!wYyɔarHȻLnP=HM.ϗ%Tff6WJ|ܕb Y,NAh8|}t5{?WYuuo0y"}`{[AxL7O QFe03vd jT-H/d 50j"v!520vF >ߕ/i`}(q^tB쭒u5CCA4\䔟[n3u,frߒQG<~>`fvuǿ_ 7j:`EoP,g+\5ӘKdzԀ%wa{Zk#"9l_mZ:o7h$v<}e@6% *E%M#1^W7TK^5$ &g%Lu ccFft܍o=1j9I9CD?YA~KiY\w;źe"D8az}j0SWGyc"t_$YT};O!ۙby/˺5TvQ/٧W.0(1xu.?id0B$W*͌)PX#eN'gM)Éb7"Aa?.#-GVuz+hS캞ND@nWR_4$/~[Mа}JwbFsWvc$cE=w3lEhG%?ٝ܎|.MyIܶYɈ/} jbAYCڷҘUIbzt&ej B.LqȭmKH(4ˇ(*v!$Ӓ,L>#z{{܈֐bgh#B|Ժc4iqē CU=gC` G7>6&H*B>\Ҫpƺ6,e>>9=:m91 'JwyQY.Q)Aw ,ZW8{3J)/Zȣ\OI44kػ2P:W^1\?L[i N e+"_ˡ0k߲Mя !I"])hS *s"6 ֟I fW{>6%d)O0 ',fe6E(Ô#ɆJ֞p:3. s4۳AG Q-2D:GwjLD8η@J󞒮Fy,40ƙ!tѪie)󛚂(~M)z1f] +譍.^yGNJA=!c,8)(ԼHEAͫ=^域ڼ}P)PH?QVt}_{c2f `; 68 [hmjS,G'Lل{x٘D$Y* ѷ)?ĦM"§%rY ,qHR VQX,9A!lv(@yd蹖Ha oi)qr08/B/-ͰrS^, C-NFF G!#?QUm&Ns| (OdWL⍢xۑǑck`f%JFIܮZ>pEa,( *峜L0 9_@/2M=oe;0ч+g]0wƈW'oj' y=4f^UIV_s$ ~3q'ղ%V`@Wfݘ[a x.Q-#"q0ֶ  󴀶=(.VEJԠʕ';D.JԳ]o2pD٦)}˺4Z" x zAS~کMUUZWֲn:LH0wLrWJ"TB _H9; +pO_<,D>`ScO6QfւY5F]`v;/D-mԌPIZn]0~DO%]e!Owr[jVx˖<&c9BؖI<\Z=o+1^*ڇz?<SvvG.9NNG@X^c&n pvXZffE|@rBx.xtC ;ܴ6V{c?7USHQX1!LYMƱ VrT!k$ v3# rjY(//' Os5Gtkv$mRGh&OL<(Ů:gcAh`7 GA. 5瑜,m&t,E؊>; 3Iд< 7Tq|!E_Ɏ\q$.^^Oc+KIXKјmcy8ʴX5(Ay=%t e`gfa@QB-3v,\cvU?AZs=<ݭS>[Ri@Ɍ9膨lAt4 C.#pRɩX Om`(`B +Hy-&mhegO%i吶JGp]ػ)4Z"9T]A]sXV[ - Sm@3txLRϾNKut^[}-qdnxS:Cz~@7E(EK0g8'/"c#Q#zZyoY84 `)DFuI4@=2^U7 4f,ĝ12)hRlg2[? 
xѾ}q g܏4xͷjՊ@F,L~+H}uԎvK'#?psxkTQ t;q  #vhzJd#kR39'"W%vKa2篩J+}G .)voƐCC馒G"ZYo.vgT .י!NEZisJ-F~V26db~2z'/Hqc;w![vdx%q࿝?W%lWOd!"Jã[ DO\@/8(vX{!l(uN/Wz\Mx(Yv+|*Z `tK Z< yчG=[]iW+!(a_*!z B0D}%T@ˤMI~_| ~LtEUO^i N+BumY7?Q6ɯ2@EYo p2E꟰'@VG BwrWq+3'"7S} Xiٜ|B$i{+B錓${_M{sł`Q+eJO)4uy*4ѕ|zュm ~G~BFu݈`Sl@p)@|$.\.S> YAFm SϦWqO}HY|Eg^tH&%D(D0r,Ȧ_-FP0sd WPk"ϴUxFeE :YNx扠lUmBM.v5h]JfGzu`mTu3l@|;a.O6D4PHI?5t@rD:6#-5;e:!8a\JxN1qsN{?(,FQvCEGMї,1s?|R~J5 WB8Lp&|fQOo􉬖ij'hz%=.xU,>hoKiQB%Z(%a89w+]cи~W<T+w!f h/+@DK ΓVoe2C>ergg 5迡zby @U##!:$ӔPo!f,}&axTJ:h[vR O=mr"kduk_|h /ȭx<"b94,5F`֫RΈ.)r0IN-P<9K&&N[v @O͐C9 ̠W !uзM,VZ}<[݆|( ИE9ۥV!Wƴ"yXMaxYX~_&j~>DJo$}er" SQ;aZqsw_h:#ܑ8gN#rBW.o/ōd Gn, !F |try9 wrռ@Hl1ӓZ!>[Wئh& $[%IFw'.2xO8F$t*iGѐL;RR$bY"eOU&?#LfQW;wVH3/"t@x8`]4"t0 O|>jϻq~d$yPYaX?| (|6g\T,2hUm${@@gGppK-A|YM]텴PS]YQ6Qř= qLpߘ+0 f3=3Mo!SIl5ώYC L sW6Z3BǨRQ;o!k34}/ +.X۳Zdu 2"aNDj`O}5[d W>*04pq8 2rwAƩ-&FD._^--^A_̳>f;Hҏo慴p\a԰\eŴdrg:q O'o7MEOΐ<MW~:WFfN죿N d=^16U,| t96hc{R47&Aw-VV)2 ;޺LǪpgfB`!(QAW 2n^@WWGeuwS>ܛjЗ־^# *Ù0XٌSGІ3 cϞ4O!޿qemڪ)?dŖ(c/uF*SNoH|!!`yB=$'v="U<ȠO~M(]=⒛A'9fmJ+-1fءIpSȞQ '59-JNt 8pO )Q{8t GO & |&Ut&Ͳ_w9h/bug"/Qyn~̀pz7A(tBR~ JS>VuNTw05~a@]Iq$݀ rHDm9yp9dDm\~LJNcK&g0V6bv=e6 V/?p gN哿 5I#{.zר[0w_Hl׆ %˄i>igW^I&9 7[PTwZfĘKM;+G#-2/3oF`Thu81%oKQ, T4qMM@o7#IQ $+0Ph"dOU:*EFsV -Q.ɢ5l?eX佈eNk}9Es*>Nˠh槙+>2gtm*ia}%x/{3QݭkRHӫR-#8M i{M¿͠Zվ v|ܿ NDA C$]}qX{3OlIjz,)]|Z߻Vp*9wmhԐxĖ_e% \[87>ގ6ZYp=hiR3'[ބl }#wv*ށ*JNg^&e:}X/po_Uq Ťr#Hgԭ@.y@] e-Vk4}׬9n|Naz}WY#V|}XJt1NmvvzX]|rqoJSf۟鯸ѹ$ ȩ64Th!y+,ڍ2>^VbzLlk!iә.P^bҦs #I_YU"s9.o0|˄Tdz+i2OOa\ YRYHnU#+6zs$};حu -7\DW23l#␱}'ouM֐UV}j0kk9mN p1:iGg@@S1g\+Y۪-:7Wch4I4gXՎc&% %mJ<0vk^&;=F}~-!НTc~h{63o{tHSWݒ9).Q\I Ú:anDv7mpxӣV+N;rp^t"$#t"5.ClOYg.[Oss xԺ+;AsT֍sغEiV x3#ʲzbJ/ :nOxs$ѹڡМbG h*Uua=bXזxu'CW .*s96@OgXS@jżVqc9N).19S8]"2׏R-T4A+2.|w.o _<|맋(0zfdbP hQ .Nշ̇}<+/[|GFD򰞾zr G+Jʢ> 6uC (p gs<hB,ZMuw X~Y3k9--gЊb"?C3&f歙 PIc)4@;Y`9+oؤDvB,T. 
[``ܞSPWmˠ6 kUi\VOpOOu\Op74HBdx.1i-#:> @DɇL-)I"G.{' ^ctBŁ|+N tEm.xbl!ΓcX.tLS MA]N upu3'-l*7G}xm XRK\IKΩnAZlf;xݤ n-a5sP(eA &=v05- x1qw9{l\&cqmHxpfO$(FEާ9T'*5 G.bmO|WDLTBak)5i߯QP/k][[mN`d.ܣIa9UeOUd_4“ k1a-x"I.b B5sA@*DPgNzUiI1<ˆX:'b|;oa‘*NɄ!fĬ ש}&M*5Cu؅`\kَ&U%MO aJM5'wiޱ@/=3졾Q2A"|o?!%KW|됳۱[MO[o\ng7T6㓰ޫ4ОP=7P´Mnz(qϵwd=v[qheOHjJ1D:.dnöI7Px;,'B/H{+&Ls# w_yKgR;Q9 jG)s|ב1\!Bb1$+ Y_%;:U%nS ?W ^5NP(bJ]}j;|~Mu\T|HO(ti~zޤB#C]ZꂥW}.4cw?En% Pl[|QoqI]ٞEYQN]5*)4M +~BMJjptL4 z8 sD"xD{YJvs,GxA+kKHyRz?6_ Xgv(,xk>B V2q{{GfegA(iBr.̻j8[w o{ZkN+7j2N p4p*&i}liU/Ǜ9ϖ|,K?S*ū Lu~ùos6Gi5$((4̡,d$з?= MqH5^5U2欗u\h{qZx_4]eR,C32rKę$Ca[[(IZ\ImVXS(KIub__\~YvoT"~m2ŘOrɽ6E!s0Lb8f~袉N\˗;e`DC#=CUD(^0MzlA#.+::ey14yS1/gB7=@Uy}NAq^  R&Pnt?x ^R^U@B$"oNOFِ-j-6;-rP=bmm2r 2}J舨N,rhQt_! EmpBL܅X; +2~6F"ītR0 q;X;f% IF2^iUاu,3rqNrMĊw[#Tx,GއJۓr'-Ԧ^p ݗ iny=`=sy.(So>AZ`;Sq[vK^`%ɉX+TVJ<.1de6{5fUN5NnL>rRGӡC\JB7e %>@**xS {pk9Dr}]7͎&Ӳz76ձ2ylj؋MzφbN&&O`b/vIԐs|}yf~0x ;f]$”qd! zj)%a~Lscv|ܗw(&#лgnȶp^+^]۫d @3Q6FKQJILxEˣ LTDe=ҀMc5oypx3ZV߼c2T{ ߽+sKOn۩FE)I2}o~bof')td7Oh'6Zs2sn>*$׍PzSx Ev6ݽ# RQ>gIFlHuwvpk^dEaΈer^"\m |$6ijc~t?dlʇ &<0g^,g 6,KqYG?gu\R>i ? ȿȏTFloCH#CFmS=7s@$\f0t.gs'Tػ{."p*7p7;hvjVy97a/D!( ;[/:r39۞2  R(zLb0 -Vzk!UzMӪT-**Ϗ)*иj$X:+JALp_N3.KrnzķIp N}I/ҽЗ dʒfߦ(fD9 Cymщ%Mۯ5Q Yߍ9kY,CM 93% ?}t{@"Xh\x_7ZFL4p h^>R8(Svnv.![;}^sfB@9FJChX--ۿ8dǙe7i3&Y4tCץ"FAΝC p>Z4yw e#3D+hCKిZa2CTj\c-Ҷ~ t8`2<)#nѱGL!׋ADvN\F7O͟<!%tڗK-Ι"R 3f~\]҅FK"նk۽1nfBB%5}zujO0dM?ـbrzψD٢y4+VI?"w=Q?#@`~U 8> 9Aiц|J+hF/L$f7à_"P+W$Co葍zG'YДlVX"l(x7c mNNcUNu O]@Ζg-zh+? 
8De րTFiWwLw,lM(H)}&-֣D]ig7V&TR R8d$&S{wf r)X)Ҷ TU56Z ~o@Z9&OiCt t%.@[ddp;OʄCa̟_]OⱿ t<2OskXY#aRDXw cY7KB̓]zR.ÄX^K,+ikL1˿B'ri)yNaυb4|V5n_4n\GyFGS*V DC.)a,n`5=H%t~&vR8z'7OdP^Vv(5 iX3'>?jA t!w+IMJ|]Wݦl= ӛGV`16xN\,|/ZYGUJNa7]xHSE !n sjvafJr\g%="#jPT.ERKVٷޮ0P%09a5#rwN@^DyG9b/b2.y*C_{%1oo1c\!##H[Hm?Q*GI "%L3&#r RkUU~wjP!A pJ_:5*=7n oߔ$732>\R7 rD3TWS6qm \C po6]b¿/ b,;;-=I[sD+Y޳VҞ-a wDԡ;Tt,frFf{^G:Z1BrJ|1IOHw;fuJo r,5M`m:;R,}ӳyIYz_ZVcZ3I-duԦTG [ઍpp} }P:WL+)Em/Yh^%uB0Y>n['.r⊀$X(ԐKl&>QƋG= լt?01G$?eMDo{6Z̔+7*t\BZ*wؑ'oŧ PWL_@Mf]TO ]3́ٮ`1`oxtOFI5:%o myL"1g !ĵ-z2ȌnQނoHp:UE{RTA 9eJ$) 2ױZƑو ?J͍3! ..s 30C_n.(sIO5 '@yTFo:G^TO&>+f-/OR;MizwS 8H!>סTKE7 $;3wcHia]{tNp=Pbg˥O`_]W" ;QKrF↚O87i,uIb|˓'S_? *H)v] `0w+T~. qӆ뎡F锥Lq Ӹ^X= /%>rJXl]n@:.S%2#7)MU*By S0M=t\*ҌU5]BnE9w|+2=J#~=.75S>=Ag5 Z 1e7$YQIs|> Sw2t5qEF Džx%ȱ)u7I#06N_vt [+hA;X@{1L ު 5\߾Y) ܼtgPr0V# Q*gQـL"P 8mܮgvp/ 5Za{9=A|aoNO9Uԛr*̶7UV̪ẑa׿ N;4 p3p.#RYg _BSNn>~s ؝AéwҨ=!6-lCl.uQ^V1$]Azq.Bc"?w+!7ӟqIp_m.#D\wGwNAmgQg~׏~,ܧ$FEo0@s>#Қ>ءadC T` |ر֛}y:~&qȎ>/L=1ou` snէQ>EcԖ4RG+;biuԊ(5S#GQfsEF:cV;M6$)FP@,(]b&OUiNF]˷u dcUG%tX[*D0I Z;g(39jsݘ(a 6i\([»Vl9vX<l .E mZd2Vn\:DS 2Yuuk5_ZnmRpU{Fpm_BkϛѤ}O k38;/ȭQHXSLRh(F%lQG>$kvś[o-n1?T0=Ω.[${.0Qlp헊yX͹SMN O%,XLuUHu6 vrIPɪ+X?XkB ׿%PyZP.aijߒE**࿀^Gq4hat#_'㯓{)*-glb>e@~h=!CfP o*<<$uwZdM6빗Z &VIz(v#&ۗoS|& V.cQJgI>POff9"J>i6ѐ{%׸Mo:??XuIluÄ&\]+ "%wR".8 RS~K O nk t[6)p`yK eѳ8 Иx:sW CQ6T͔q {k^{S 46t:aX:g$Ю[nsyU?VܡIj;"Jium,i![2БyJj!(0?)H~^?شY,X,74sBG$P7d<2aV kaDF=1T1nEEbA%\-7Wrse~h"tmB˺:g׆' q Oqrs&q 9jc u| F v&ojjb%mdv}{yPSXl&;(ʹ%ݞlU,= <8cp,1}WȮC(2Q V:R/ӒbN a*/5'ak0S7Ȁ|X?IbԬ `Yd&c#DSL}Y3|zLҏ^0Vxwx#{ۤ%2|bhtzM撑Ohm84 Ud*: xd6y$^ @"oߖWuh ^CƧAff"/$EpF^ X@rL%0%&/~K/0Q@P'/$c3C^~ZoAEw;`+fQ PKh>@YUm`+JzxцQQpmKeNb䣸vqhҾTXq $V%1(wݮr|}&rW!N (bQNCvJD>Ae{!I/^ ɲ;q#ŰߓbYW9L5~6&eQ`Txx [K2S:@i9E;܃; y%J,{<7Zmro 1KRZN&@WOEՖhQ}VvegSUD9' % 9{iU},ǷL2xV\: z:`J8t^59@FflȀh[F0>,}q覂z#SF4vV; !ԩPھ 3ۼkv_r94Zfg #y0L (k.Z *{< $T8k==ZRtY $.8_wa*@JPL 2Y )@T]ijiBf 3ۈa({MC̓a_ ֊hyX)+!mq%l,5J~efKax'a[S@9ZrPCPPZGz!mJ I6khP*F;t(u[L 
#/UE5'|PH;MQC]GJ=*kL~&;(Ejp^]j[C^%# X1ÔIٻ;h9>.\Z& ":Qt^\VKuT%8?'}#ϝi"Eÿ[08.H_C%tLTycr萔E6WU= T@uV ,r?t̼vaqF[J&r5Sn'٩^(u>ɿXRc8̬?^Y3t3o4J뚧$xhvkL4amsЀ\۱ipw/xq9)M YvrY5P 'Z1!9'/<_ai˷PwrmrW?y|-L,rD'7\`1 R"#LWvx*TM:`.I#o^H@򄘛(I%^8F$-,LS'KPŶWauJW%d(=j[Ýׇ)N슻^LNu!ܿT5OR td_L, Q|daՕQwt,;iY Tp*zeIh3k\aq12*2 -1f '¸ !Y6-a󣥖 R&!4/WTlOuga>+H @VWOl @6౾QNטi(cvUNT:ftXpZ M3}>Oާr*ɌJ=f.>ir69~FAl3>Yy06{(;3>.)O1fQOR퓮ns$@2vݳX?_>DP5jBօR)xTw@R/0hkЂc[MƳg> > K,L\6:7׭22- 8<22X"g{*Ku8$Z.o.H? F.u.5/X+r_3po2[x mxq ApITIGI Ƴُi_Y/,\ :%%/ׅ H gO,{ p c6B(Q]y6]"սX|B\\{(/b,c,oί\5Gg'Tf{]ۊcӴ"Kg|-w&1K*)Pbu{rc|Pw WjJ's+d.X]ݳ.*XEkfp/|v*)3 Fcw.&^(}|_NVAINU5%!L[Zӗ2`܁x8iqȬņXs!g&F"9R*+08C٪魐EAuP2]K( zsPDG¢p& *{ nsLȏ]`|!6@|2!/CKhH6f6fşYM*bZy\%*u}M(F_Ὃcr+9aKYQBiא`0mvz,nkavQq+ ̻U@Iv :N?R 0'Xnn]i&Ucg#J7Ϗ5>}W&pM=WD>1c.NyY#l}U·DDqx=&̇ʟ13cHUR2GhCl D,@i M#l>NFh m5Lε@ɸb'TSr3cL&wϏB84Iゔֆ{Dv LsFQut}W*m4 b}>u#}d_}r2JZX-8u56|q%4e29cʹŔLB\$Hl.|T``ErIpzP7?vKc!o 6*}?\.Қ*]1Kmg|DwbT rypQ#{'  sRX!6vxX-c]y.1-3:U=k)CMݶ'eʿ0{d"D!!8Ty$%!?XP{ 4wͻI?ڮywR($cMVkW$0Q5KA]Fj0|FDk \ onv+ :ıS#M3ܻšKK+6qov^"uxg]d3Ku};=y!yK+]>16XR]xN<;(DŽz}OYXkY f  555A-Q%fԚ*"JT# !V?Nh4Ņ 4 B4!|٢i(G?∫s_O5Ec]D6e*A.Ia nEZnJ]]"G?k❤K(M,uF!+.+T Dcy|Oȡ,6`]m- j,R0(o6@]9 2fftU8psTit ye)qƆsLwjQhJ_M#D qߖ-LAMQlfCʔ,bb. @U4QDu"/c߽ Cxϭ>QE{>|&?V GPw1|dhcuH?pR.S;0Kcu6sLn=JXpaJr0S1!Rmg:hZw(TgFn WC ?k@!\]7Δ)* F>rM^֏{DO1%*F.~y`_*8$cDGښjZ#Sצ\Aj@cO+Kw⠪ZEN{#; В&]h6!Gwݩ{mE0jPh)sÔ5Ȃsv%qF(.Lw, wMB=[Ot$(Z`Oo! 
pbμpżըrC 2Mǟ1}ЇWӢ 8PEܼ]@=]($[i ć5jl&U=~,NJ>p#.Υ~#ipV4BJ+T^s%+>>e@ѕl7o:J4x&>}>u1x>O9"t}dKQILٲlv#C( 40jgbh`^NM猩3j1Lx3 +Եw:S0OɁ8= g5sZ !!{ ` Q{1 'ה]xkN=x99̆T"囬~{'(H#)\U,4R?;k 4ʖH]H 8 C3T8KA|R4"}(b ngE4?+׸ԕ⤛yXY$I!Aʬ$;4L?bH+N8mPAy1FCI,r&K_=,B\,]9-VLjaBk@u)ovXGF&!XhAރE\}6Z0MRmWwq;8S[NE'yQ7ڗ<<<ͦ:߶̯OVgl )SCojb+Q>hIvU'>I`/9ӫdg F:P.Ds)Hj1C-KM/npvQqUA Z:oB`xO$<\>>7:Pf1ik`QjSGƟqHx6Ђ]eAo{KdWE/Mdbry zEF%^dvڬhDLSLMb)Wge\D &q-mZM*T>T-ɑ鞕#2dH_{Ѿd++uJG1T$XH t_%H֐  sv 9T*bsK0` lh6Š'sÃ#q[&K4 eעxkqM>hpa{N/CN?-흵V)pqr1XP3ૃ+K3[K<l*"n"9yuJbݘ_LaY ;{<,h'X ևiۋ-D@[C3 nWfq#a >}Vb4tr恎Y|f[O-gUUF|awaqOK|`!gܠr>"'M}JJfܳhR8-k27[߼GTgClBiͤ}1;#쪰N(?(?e?yBP(YqMqwM\qxU > v^ZSf1db ")fFb:-=2PG5X! DģvK h.P`a4;.;j^:BG;ˏPw‹02:* èbD;ѫ`t#j[ u]J+rv!aEuԟg"o0>4Ͻ+,ZQ} eKNCl7.V,w*[\iz _~ }koi=+Y (۱z7WͣO;z|fBzb#J_W:MwEW;~#snǞU.IhSm|}U̓fZa=K@6w2ިYMic(e ߠ?ĺS*Q=z|r 4E\_X zyO(#\C*7dRާJ/*q/DWjh=fjPi'~Ŕ$~b-j6FF*Y(RE_(a"+l EP'5+#+%E ᢕ%8Y6H9ݾҹp^|Nv_F"ͪYU%&+j@ڣ@btC ]X8EFH]E2+ŁAF[W=;{ҸFIEЩiHi.T EgwX} k7FA2~brf(6V]t'&<Ek/w$ a"QMYISbű}јXNF2uw0ptE(II!NKACKN&|WTW~eƥؾsQw)59P˼AeI UiG|t:K{P&]eGU7 bE5*WVόOݻ_˻F5o> E{t3xYt dCa09F;$Π :%K42[%>{JA`\нF9}ѐ5W,AƝ3Zsҁʷ}Y؋Q97eDx>}8xtwwXxpVa0n$Z ʝ8i.E)BvN&eTRxBI.xԡ 4"~)}:!E.dS(eJXv,ٛzi;S^KzFRK'˸)p^{ r#: ǘٔ{/!fXNN3Ljgo21X;pMvܬh.ð7kLr:ͪTA !z`jvg,bZeA![7pxs"1mxϯ`Y=0`ojHPHJ2BM@wi TuEBd*y椧x+rQDzc<^]VdT0BuB }:{N E)ՍXo0kEƠAQ~*Ό5WQQ@sV%m!};ߑWhn5B1yi(X eCC>Ks O&!]>:O{ܨ) üÈ…9ۘ.,Xq*#,`IMNz:\^INUcnR}S&l?ۺzԶJϵ׸0\e.H#< ~W7ʦRc^Ӿjh5Sb)͓vdQQX`8|G(5u$z4ގGlqjOx{A^aG bŠxbxGsU}bi%M rЏӿATuOEJXWSY|u^Gו3i3qO\?i%I+}_Xwd Br鈍,Eם(lUp?=Mq%Ⴁ㔏0_6n<`)G:ۻHj U)Ջ }N|&)B=&~In, yϖ_(5(3ZxQ%% w|3h#Ӊr^ù  TE$}ti| F%w'36~6[ -LΞA#$嗸1QJ ~}$f)uÙv sO FtVZȓ!0 RT 3'?fv/n!c<-SVa[69pR?u[z[3ٵD7l. 
5@^v?zEgˢUO(9/vNn݁`c$>4MTmM3iE ^]`j%q7iw ;gݍf@$MMP'>xPLL볙`RӃzf{eέR"x'5)vaE8uJj o|έq~K\̢VijEgs2&'$盏h%6㙘P L>R* Jnfȇ]evL6V& O/iGUDG 6h.r4t#eqF{'UkLKpVLZ#M>V$ꡊ憸nf)fY`SCDZG|:6->590_>He/n#@ '>k!2bR2>(oR#>Q[2}A+܍ּ[2+ZxN= <:H"r8oz7[6{H]9X=ofSR!.0O9u“V{rhN+sǵjKn>biòL9+ e/c yIO~:&IJaT#}龭24=H7[u~:=k>BҩF*V79\e rOOig5 '"ǒFr5󈉄&mHSYhn24.W S}@!7Ϻę~dwQԺnoݒ.:"KM' D};v;Qr,ɸ9,{r) mPOo\Du VPYpsvopv 3Kd'1iwM%΂`s,Be@橡)SrAK-NxP)z$ިf ];^rpR!K1ƞcK5)El-@]z$xs&e|q}Ϟy#xq_'zqg!L :˗ o"?8K}X'I7U༈{׃Cc1ݠ rϦ%_9ӆp)+ؐgn7C-fgNVcy6DYrVHn㿙c%76~tFUZѿibt%ŹPe%D-]&vWx\^7mzUe`?~=܏{'JH~HM4ƅh)*Gļ*&(Ҹ1׌XB?}@%.RU5-Lw[?j6YǫU߬&qq5aTXd-ho,~ÏxT͹Ćhpdbڴ~j wT 3ԑ6@σf#q'\) ZPeZjO5R:rOKc65Y{0Xv:D= ϰZSҮXo;`տa|Y:kۡxfґ>B $R!wިvg'1($3:$K I|k+0H#ma[ZR-i^u2΢H`XIq@QX 'Cq8Zf5,HC8kwÓkw^&lL>a2j_S;/@Nχ&8"V6KKsF@%;깵R1vNw>;l9o]G,y`+;&fE|WymG$=/_PZ596PeX11 -3g/ V 5YVفw p2nm+ H`֨CnP)[o0P!Lpm鈖W tu16y.r{ۃ?d*dj( Je Ғw?SjD 9(>(ӣI6)t=[k DEn$Oz~2etw UCٻ\i Xʦ@1~Hg-< ka%E;[qc*Ȣ iG7YEsA@'OzqS(.OcEQkI'<&L/Υ ؈`GMWyo#*"vz6KqMlX$5/u&Fc(ܑJl'$Vifļ:]$cJCC_c.e6չ#WYՆ'91.9 STV5?4HkmpVϿGb31݉5uIPqnc㗶s)eMc0yŐ9N,T/C-`5ur31ԳqFV ܆Ubpzn0Rw.qM.o+ړGa=qyRP\Tdb=[q@=~Cz7W(ɊTܴ{ANOEg %F9нk2L G=XDqɊhBGQ4S6o} {` #+DӶcY'_Z3\Wg\3P_Gzio<'"VBOCW}: ~4gY[Ƿ}ۆI Rg!W"q(aU eD"ȏ!W]]'.eqtm\ ]3ύsVL1Yrsi rTOdB#YhutHr, byRޟ bh&i/*npNh=Odw}~R/1b#O*P'ߵD/Sp5@8{Y1ij69 d ǚvx+hF ٱ%;#˟{VnYUYWLUToX 8}d#a<jb頃 t5j3Ihw MNqeθ(lr0Ԭrq @H'K0^0IP4@|- gƧz@ݏzPШεRģa_#]SkȆD%{\m_W<\VZ9*F]Ex[nd;PѪ55ɠZ{[eMKƥn 5$+WP.ʎ ö +`RJѼs(|!zxŻWWGP"Jb¾Fs-;/>Ub¯&h0u L‰ޞZ˯6i :(^_&l&j[B4/i.+gGq$b+ed-U{n7(|̶5ҺѯT[ ȕҁLQ"p,ɇO_PyPP@]w/ȮTY߲=- M4ʹ ^JࢣVSVX'ә{hla!tX̆yw>)ֈ,Dبl]#V[mW}ͭ"c{ 9;ގB^#=&SMuPog.y.N|a|h!#{u}%c1,}MMz찻_Km%đp"IJ$b߉G_Ej Ef˃"_<*si0A^Z??JGൣwza$lT.Սy4L_taÞ!ǿec<A|Ɍ!꬀th;҈jXq(f<dw#Y%I^dVQ YIߖZdҖQvü7z)l 1rl;?9RfŹcK`2+!oӀ׋5Uś>Q/]=`faw\%wF = 埁ާcLj <.MEBM'֭m2m_aYc./;WPw^thxK4 #or1'泥Ff Xs@[^%2i^6KL e21qHUX+YdMP Y1) Nػ =/>!ˊd݆=8XTI4ErĈ緷_s-/b/מf7:ښX,8 pj^L0y^ =et(lYbU7 -/<2YKx_zuS1"V6kb@nnQر ]$2@ȓ6mNCJ4#*Mlv)v6Ym+̥zEfD %Kͬ]Y#p&ڝN1J*?*Z8zIoJڽP5a` 0\CB!E #欁؇`,%o˝*.'0aecG1Oks4,K`^^xiV'c{ZRň>Ej`tfފ> :9ыc:@ΰFA6石˷r*'SOuE]ز$)   
&[/y@-c,9;fPIB7 be(BUA7 Ogu1{yWAz)Ng'(ZIV `œ!Gxq_{t'Z1\O>S>aշuhXj >tviSљJhn  M\Gyիzϣ%ka}q5Į2{էf3߄NӲX~i8 Jor(HU 8?4iӑjo$!6/>e+ G+kR'dieaBƉӈ% %I…i.%Vg*\ DD{`ed;_>,j4~Wm-U·x;]%UPo'GT)Wm:ʪe#;я#!!"O&z [=ڲعƠp [ϩv'lhxן\OM7̰.|HjI6jJ_ D5#;0[\i&אI˖{}s:Á1!n)ma#1ٽ>2!M5Qڡ%:cf8w9?Z>q2iKz8p3H]ywƶγTV0@JC'-= [Е@#I!5hif.<|+zkш#ci;:>r?vۇl&NШƺuWo[Zi"n(PxcK+-1_Z*/P/+)a5a0Dp`R! L@I*ˋS{2G벮̧V)Uǻ4aEbͭ*u4u{!bgaup -8Bg iܥDS6ɌkL2 =Ȅ*8~-p̕*UpH~>\f'd"RFzJwFz62UHέ"K>0 .d`:㵤"^P|hj{AGPݩ{e/@ΞKvSN|M9zp)?J{Wu/5-k\Fb*c~;W>BWT>ū~A@=p|rYN {bًtcx:sWuD>?6}} ?L#vӋ?8RwћW=pyZ%亳WeJiKϧ8xjy@+Zy|KƄKaWs0 (< 0Rto.VdFߑ "A5"@+ǏkyGJPe(mYPn ^a봹L<ΚK}5 7zq}+Wn־-X:1 bNWpix؉py)K:d,nnjQ$eyRQPap z b2Uw"?6h& Br% t)c>,/p0`{+vBm{Tb4N*Uͬ :SK;IE~ .L~H ̅rb 1w׮q~Nv3tJHra{$Q.WC]*cH EKyBN^%[2-qH*h*_R"VhHSc}qE/.#,3Em;PbLH#͞TQV-;M4$uW)"u%(+|Lrѥz]<%54WL,j*=_+UYNP*:w9fr|"*meM2|YU}N⽢Z >LpqL^pnT'š`VqnjpP|欝bh9,fViD\T9>hFd X QWq`7K+Rj|5Jkmʖnq LŝE6t:.딙zm3'2w OFN/m\LX |d<}$wzsn:u]9'uO rх{xz`V@)و`[=!(Usv_rA*ͽKBpcy_ /3HX( Ÿk9L f{QAlOa-p!usndi~/JC"sK-æxrېG=5!H'mEHDQ+CT펨;) _Dy& =T?߫Zd aloK@3*#8x^3\GJ. mX %:&a^V*}{P0פ5'򞦻}"_O`0'`1Ѵ5J_.K2N5r*w^]-uyc{ ?ȷE`qĉOpFI促S@qMj?I&# ݊kk2k%~y|0hKvߡ\3 Uex)-W/P]g8'Yss-F~;|!'NRN,N ˆvݘq`&d惲b~Y)e8)MuN\DaFT5O9bҟ$>+56lB7zMvشN1j-O7u9 RC ,c[U;"bWJ]∻e+e*Z=aRYsj0p)b `SWǐM %~癍zjjCXzI6{7=_)"lj;5:)mk%ݓCf $3 PўHqϰ?y6dJP#+ ^2=X*B7&dd_c g1tp܄QFeEg Cg"8N&a@uvືhW 75dNuE2^#=i Cz.b•Y{Lt !QFBDģ7b]|6x 6AdōGL,2K8gu$D}B~im# 5D (eS2p2G>꥿&_sJE ĴR~s$v[ {Y-& ߋ2:57maDKGV}Bwa)bKQPQ6\ 4_>}$=uQHJ8]: \+3:ND}?D+ldW8zL!Gckhf_ xTHzw=]~Spb#(=ǃyr[WUr=,90{PM[Dt-jVᾝ!k ǵR^PW"ui?h4)!.܁`9ZHc󬞹¦@@ۿKϼ05V'16ku %dgZH:]bᚂ)vl:V@g&mz.ȧWV8瞟DDHbL&k?ŒaII9)Y_۠pU 5`tX6yM< c<{x<SѣM(DmEҘrgXS@%`5L`!RL'X7CP$RobWJYXGu紅`Ci*G̓hsxږfWf=&KnTX,DRط|$ᮖʓwQ+$Պi"h8_>h&NъM {NC)n?|~ZMy` m+"}u_݊藳 ~r0D~N uOYQ?PvQס^ƕQ58YL$5W\h~1Ly(9iG*Iw,p^yMdk~8o`B簒gchQ]T ac(nj? 
[[\m1Ow {eo2E;ScDbQjZ$NYY|Ko E2e!y=[R [=Rd| 59CyG8'jχ ;̫AJk:^4>K ]c$Uxz%Fgw6=seK*YRa.S@{%"cBptk))vb [|\Duw zKߛ^񙟆9-JZdýy0oc4Ygl[4*[z30wx X`6^l(*MLG{-p>pfA%R2$2~ôY ]|nY4CPY =t0)N}=#EC1ԟ 'io0b pƍiLHj`ɚY l472$#-07* }"L_1afj_`v(^/*JN (RZ.nciUPm@dل݇ qyThO-[}'y% }<S sW:䷜n fKt]g\n!TGMvn=\V-?zMh_R).&S|ծ}GIv]wr$bC\AT@]fP;k~ #$L]j7 'ܒ.d׀F=^ N80pr=prkdݶ'cec6YS^Td;]1֒Js~LG-%;?TuZĺeehU*#86Q7N.Bf6gi-`[VM~9Ш}V?*}]h)VaYVY$(^@7@FbwX 6l u0($e3!2%Xٝ4g,dl+ID+^ԱVŜ v +Jpkguŵ_t|?܍q1xA>~}tL-yCIz!F^&XM@}X Y(4U5筘])i<<OJ6}Z1ѰZ zJDcqUElHc<6¢4p@%Gkh 7 e=G'oCmfB$H4`@FSzH#w^ QSp/hI;1~Ip pt@o7FDv-C;IgG1={ZZWoWF%\KgT gEb/N @7vcvYڞ3"dQU><:R2蛩(rW}zi?,VfKƓ.``Uأ#}a0Li[?264@: K~ZT@!^6L9'#4<>%{q K# OQ ,TMəᢍ f{" 5F \ƒ]Vg8^*'#gP>CU@;UD~㽑x*R{2Ongd& 0c"@\.^^O9zvjW S"1?K.!-  E!2@%P[O5|R1wz *Li/M?%$G8Ў۾5І7-Des-ėva-7Dr]HX4;j; cP.Ңam<WǞ,<+ (! 8wFpst]#bA,MN/OM{l Kg^s4وJ2{y>!9D$bu^OlU ( #civMM_6 ;n6Y_nSp7д`v'/ #,2>}>\}VZfe#u%'^BX8PS] ϰHkwZBsU]2q6+ x$L 9|lk ZK ^7۔10diyCǁVs%C iw3q ~\6Z!/Aw<@u@Z"9b%b׊\#ƊMr9_>ѽp V¦_=azB~+K"W1jѷhRGJ ª@ h_aTVʏoYkD6rǔ:an d 1.x[ˉQau(mc˅G YaUL"55 U7NʾU'AAsf1 &mĜ9ɠyΚ*/Mm `_)K*:DVͭI?P jI-迢CU\YB#= ,Mⱂ73`H <9A4XT*:H1ίZ^b}`}b5[(-&ă|8gkyb~Ux%1]/ow@  qP\dYIS7V )SJ|},\ 57~W +`95yE4V{٦U kwtM';<E?2O}IljeCh}h>Xvw%73{K)bx|:aT#_!flhݭǙT6mu0;;O3ymE]sH32ԩc{v$$܍H xUYa'1A?I4˝ w@Nn>D!LTfBzӢH-R9`E>`'Z n\WW*!̾#TiX(<dq+[@ o;.j74~m'*R'L}ҟ¿ͳHi` JCH'Mښ{rld5cѴm3^54pȮiDyY?p\)22VscB|go+NV`E>o_8+U`;cl##rgVFsIuzeeŔ}qCL0w@LÝ&P)v猤 Stq">}TA-"֟(dV~߇N:MRR]u+Z+D;F DM_!pح9 \L_ˊ_ټ|eKFB*Ezj>l{3$y"(j]8` 6f|'Iz'TO~mːnc:Rg]pGbon@oZZ<<=r3h BR#|WY `-=70t@3+DuXRPw?;U/zO T(~i5,ږmߊݏh -G$-)122ZRR~u\ƃ{!7`7oG@*\D}2SAnm5iָ6t_-b`P[6&u]T)|D`:4/D)P/Ji^c"ulU01&{ )eY@rS g/L +|( T7T56cogd9[%n ݴ ,6ፎE:O *T%I{~[R⑃BةPщ8Ű(8':ꬨ1Tra媙F {ϯdh5L:A[)mIE"q60ztA2fS>%* hy]0g\޸YۚV]&x͍C&ie̫W{GGW‚|m#.xEb$s܀ sɈv|1<0Nrv{z8l#9LSh&Jնh~E!'_&P'DiMLa(<"w7tGk0N0֤T˜}c+!H'kJ.jq`x˗czu+h5s n=^y"&_pO o~t} 9mLtq] S$ &b0nD/Ԯwfr:u%U鋰x^xaT?qSA0j]=>0~zO QLKRD 89ERԴ5wۥǴXRRGJ4é9vH/k2>L1/X>,'VO>p.bB]I%H9eӃ82+(Qkj&2jWBB5D@"|g:k+Mgi䭐n.>vEVj O /e\9/!# Z -U% 
EI={=E;u|^>y2("wVx6ڹR~~5'f7f傗TR&$xL Aܐ/A wȗe,iDbڝ+3ߏY~.\vVIѽOb{t<SXRxaIsBY];.7HBM-B^ Tz]%魆Հl_hyҏͮBo78ϫX\<3&K¶lEwoc@ fc.$eݗKV@況 HF ̕_z2PHc<"T ~&ƀoKU9Kf4Ɨ") - 'QB8Dvc[eڂ)~FH?5qL2uqaP$ma={=&cI{GƼf̛c*T=xR쒢.P@+4;dE5|D34Ndz( B@߅ׁGt uIQnF%l{ѐ2^]cɥ~Ӆ52ExLi 5A[JpnR9\@Mv]'ӂP­&!يB¼W9~](b$AAbNW]GDa=114|AES@is8Lp>RrHX!|Bz1I XU쒂2n-c7?zm3-`P:ZJ}#R@2BB%0*-}hKn >Y9;uo$($\O48Ԃ05Np7kbpS΋jIZjQ'.M'vO't߸|`ο/N@b }MU!IUp2)8% X؂No3\@}Xg tecIϧt3VTT^H=i&ZzkF6{]{TvK9 )ƳqDN8@=>_%C `Y\(]! !eoG =E=Zd̉0d,I30lRFʔm;ܿ^IH!;Ju1IcǦK^zyVi*ҟ߂IB/;M" vkLPvVnjoobSN;i8,tuAh4!~AYڮ`T6YSjug*y3ul /Ε..zzRsƄhieRZ.ݞyvDoL/ݤe^ 4*w2mLŊF >rdR4_mK{2)jWCKh?cڷQ<.Kz ZNLĞ7뫔A}7'аT/4z*m98O9@JkDR!~XɄ*Z>$@PXye ?V;/44=57#A(")kx78I6bi+lꓑx UӪJ.!Lkm>[|g"܄&^B*Yۑi04ܯfT_ ˡ ,ട4}2F>7qUNdlI]-=/4WyJq#_dԺ-L!;BL%V.Ӝ^K{WXוXk3@jCfY|(B>=OVRJ)FNHxsezny0刽7nSbHC)dmC9KpуD~bݺf"6?~a+A::V3 "8GNaø=,ց58L+%g^ܖNyIj)S#',^v5_M03i0ϒfS9{#x&9yn,L $MT1tIC u ԍJfΩ^N픹qHC6FjfܕobUh :`B t3sXr@];+}H7~cad_P4$HN?ЏhV5f>Y 1)cU;^Ky V s0ZGaNt[F怖Gz.7sZ-j^?ДU5-`UXHʸie#C_G޲wn-7aQ*n3,w>$6r4X>[3waudܼҏoPk *6,D1tv|^)/TEl͢I]V?y' 28OS }ubw0ъ<,~JikAx,P!~*:ur< l.=<!nQo_ wh״g#S= xIӝe4LB[tzN=m$mF<]!wTn;>Y7`/bƊ;mB8ԏG6bJm0 =Ѹ';N5w8:򛣧<wδ_=#a\+E_w=o5Inxu<* c#vڲyG*62kA9k >#yb,@֙KIv0YOa~6΂ɗ2Ӓ;2SRN<(qFfˡo;Qf\'"e0vd`Y;1HPI0A(& W#ED: Ϣv/[ήZrQr:M=W?`4gdZ}a3fdJ4I'}Hq?&QDc0w0K:+R?Of,JTIu@8 [M OzAÎWIZm'5#vV?.-kG>Hqb;{^: *uTS=(#nUh=Yv9o}߃KE0&i)ix6tuC|:!Πn\UwT0&SpDn~K{׈2"\J%2*$/MS1 ]7Wd(ە)O̿SU˟ ]aOA1;H;g{pwk|._Q%Ν$lxH,߱RqELjP(֙Ä_p)%=q3OhS~!'Jl8VeQx8%jꪥ|e {}(n1Z歀r|2T{{KzO\/m=Cњ+m!I68Q˥˧{kuP~"rʌRm`I?!m'^0!,LtTYQ{O1 FGg{{C; [֌:dвp&M]jh?N;QUkSʍp/="e,j;> =lJt_@G礩ƣ G(D? HE-ϳ^9p2ض"U)'MχB6M-Pj=) tųDA,f>ɒYղ?M7uEs@ez \#۝Uvl*nFČPStXcE& 9E]̲u<#om(3q PfA^J$?#r[v-Gir.IFm-}vKp'Xl͕IdrlB"rlE*XOQ2rmG(4+ h?OIrdu6wQ Pl{A=U,kqEƫN+b ˠN[P*4iyн(sGyJMfG&a' (e6L$+0.œ.t*=ƅse4H6<vÔ龩DQ˧i(i<=rs£Ƹ0r"AuQUkL]3e >2 c]yGc"_G*q?gkb0dJ`ZIPZ^D ]WE)QjU#e|6M? 
4h'L ܭMT>IL?6%3߷d#HA㨠WJrr0_7y|0<QYb#j{T]Cs6*:l'& ca`pD4-WPaE^a 3'8tHzpaMToEɜ~9CGP(BZ>0 lُ\h# FsMߋSS{^8c9eeyI;psP:NTv:B4Sۮ8xtB]".ØPa죂GEe ᠢ +ںls.\x*G1=vpq k(NZX1s.(ʹEb+ k9"w1^e+Mk29c=JSrA`uTˮ}ȚnYι^qJXTGlA GUUrHǕ47usInA\L n_KlNt/eb NXf,5 xt<όKY3$f.dUꋓ2O.(%фu,c`t 㓓~)"ݍGi<+58K{deX|0/`#̀x:;6%Z MMJ"9ۂy|aR)D{--jVv88vȁ V e CkVis16Vg"N( &##D-,"\2Vd#r}p?n6XMm,pK=Xk0-T|YȌ ?`ai^ 94c-=`e>U 1;Fc.1 6YľkNH⌭`6az<:rs%آ鰾 cupUt%fjݗMf~nD(b}d_Nq/ ɠ=&f5NxQis~,jLRk2}tn `lB/dJ{\#ҙoR4 s}bҨHҏo1 'q:a7e6y/X!gN3rfjhUQDQ ?aU0#o r#{ sy Rx)B@`Vݖ/$YPP9.iJZAк¬ Rr-rXO]RkyݲaqK؂=4ĥ =+(mW8ZRw@I|,DrR4M1&/oj-=rg׭;O5LK{#[J98\4(yR聖5rpC9Km0QݸdFk $.mt!R@\~ՐeR (FrTu8@I\,*yYA6q+=y?]Q$/ eX[̲ltf7JZ[>Dz QbCg)J$p)$FRxCsj2  GNzB!0m^ Pi>oI4z]:fPZg'tȼ<XԂ{cKS*ced$j`pVE@AYP#ҭ"4dzOL(nCBru*&'~(Ag)@[WZt9*bkxh"\bUHX|[-|#{QUqV)Jdh؇!X9*?83֩: ,F1gQȏ5e*?pa+K|PRq@*f-Vp;A ;.ۣ'([0aӕe'u>Y`ٳD?61 T'Me^-b/6ydɾuCY,'ۦ/\,>dD?uwCkY>Dg-RpbǼ҅+\xR@)!|z1)˩U̒E:Fv#r$f:(΂NeđԦ YG*戞i|f=!p 6i7Ny7\87hNlB;WD#$o}]j]0cTx:Su1CGa!iiksdNi=s$~y]?n<03BTfp"S2qjjZÙaB)4@__ُ}hu \շzPo(Ac&1bkJ.qKkMA6.Z%/RsmIޔc`{3@l C ]pmQu@F#2_5iU\Gq3ZUaX{Bԡyܹ!{ Yք6@g6\X,οVnm6Ux}}n"yd̶@E'-aUb@tpSkՈ(aSH3z~lPy dwXQUEMӓm&}JheiՕ`6R960}RS fN[rMCvg/"v.`LW0{(=uWTQ5Rµ u8?KLӖx.ZX_+'1/2ج#1 A}mo]C.r iܲoDP_.ѷldi ʸ$Z˕ҪBZkˮE1wKEuFpY=vbAL>|%2ɷ;K&myL2;E=Ƞ+-O+к\$fOASPa[I(ĀFr#F$xn& eO7H]Q.S N}N3Z YMin]eO2N'f*,MTQk<"ҳ$z dpI ktm=MA -fXsᖅ!Yuۘ,QچJocx^Uu<^?Z|8v JOx{V7홮r&vp=W'/fh̙fk0/Qc"_`LypkVD䦮<@iٹc=P ;D @3mi4w0}Lx'JZ0\6OJo+z{(,gT]k*dVC-ȗ>,D"dBL;G|=,3i{e N/0ʞsk~ WJ@7[&#+^H-Ÿ<\l;(j(}YRkZ: ✕h'-τWo8Lh?? 
L\*-$,-^(_ &]~4XMpP;` n03TF̟Ec,Nch(#Ow;qq8Q> ,:Qyg>A5ghlBMbN +$VEhH:AS'=i:sUNz#cg}8&۴֕ɸLp> Jxlz=_V U&8|V}G՞xr5ۄǾLhpn[VYl Uw"G=էVV_Gg]'s;h,="؋(d0 R] u?wIU&IWpmOu㌤QA)1R5a rdg (](~d|Itjq;VFw\n# QQWn8=+()hUt٣WVte7W:2M Y)B]c[Ӿ}J3p]T.=Ќ0<" N?z@0p;"Ffs8Srrfܜ܅$W!Ia R'[-,3zuK?fx`PVY=AM@-DW{2@hjP|K0'8&\[ o ~`D*r?:%=[ޠuF|[)H2*G@q~KN$踴G Wɝ\Kp;AЊ"{v"dp>($<Yd]SH*5>w1+κ0v~!pP-ԋCvhScQ/n.!CGav_vix;%od$=w HigOrwˈ*(ѷC8)23;;$ Yȟy6Ϝ+@8OViI8@~I~P)/% @_S0ǜ&h+H.nn(ss;s .SM]bm)>bWY(37Ux$vx۩ W=QFB );A=<#Нռd7,~I v:jd Z(vx}NvYo9Usʝ;=ج0i )0v> 3oiUfh@ˊZ>3AΰklNa- %;}dMu`ݜSGS"pWk |Xg(+[t rrXCC'gjelI4L{RŽ)" !9MHo2}#$% ݽ-i,'p.YVT WۈKY Xk/bڥCkԔuW7 u4ΩLXTuN]M-\|Jv#5o$S8ת肣H2k L)PrVX]vgR$&o1:^?EfZLlUHCv<,y }S:֌|mfdGGtf_jlj8o;nj%]l2Rp!)yDy4 M\t"8#6rJDv~MkWTҫRU1ɾ@?XD0f)}法#C4nk8NBΌb_ =NpW8 t@/?#Fk750f\W5`4X-ݭɓf#3Dyw҆MD1-j Dk⹌$bNơ,p|r=l1<'>ܨ{CPp+ _*wTw<ʂMu&dsh45 67ΖbvWt믋=+ɏY?s]G;5,Gb̲f《*M%oل y;>)g'gO  zlEO/#s(Ln!!k42&\g|E?!v`$, sJH8ՋMA N6^]]dbJRx9ҨКOa0쿒>ڝTq Ȍ+k#ekcӖ$=$2O  2 ZZ ݦޅD gyJ^tZ|!.ZcoJr{p,W) 魯6n8^)FhFEC7on8MQĴ._ƾ'(G. R p*@/ 3U7=D4I } ,=yW̧F!WQo%NƳvj\x*(R_҈/{W7 KVCV|2/搕yiOv Cs~qQ"KFggOzs;bN76ⲁv( HIG *Vh|'s0A4YAp T3/_Ԑ-]VFC-|=F]{ɘ9ٗud^u}1ʠfvDr}?f {AgusZєw]E"kysb&$#t՛FS18Dq7ڱ)!rWBˍHp<4_+vQ=3ڮP˱Cf0h\6HsM` =T,C}{usa zQ1J>wAx u+;) MO QGE!0X5\jbB f#“]A<@#b[`dѓW6G bL,/ole?kljY= pWI#B‡J$&Y)+l¦}c|:`"kJ! 
ƏU$ŷ.7YByӵƱGf<*u fH⻙*1d09#s!kwrA|-mdҏ R".&7K%HDiJ;iU!L|}&֨ߨBռN+W^"<5x: kBO#82tKa&= VRrٙ4EG{vw#c@)>r LTE {PAj7,~dA%t1cYYKJUn7jJVJ'72voEy%K`lX kbkZa5\^tco[ ̿m6P|=`.r:I05v "8%笫JȦ{'qЍ(` I~p->ڈȢ'2 ik䗎|O:X(#X?_cZMDv01/>B+r?-^ri>PjF<Շ 'p7z&zHc+CA!=s‡Ch^ԨE16gȶѫvײ6|/k_Xw*^ڣQeKj@Tl oƈ0$x&ˎyx#S9fHs}t|yӢwӘײ3lƈAz Ҿ cMy?5GJgX.:.yteK,ۅe٭IjD42;e&4Yٳ!`A8B_羼i3}-4@7uRDaB((؝gr g|ɫ A$au dWw89P;+3U qZe#ȩ>{g@>?Քp﷪;I'ψPz"5|coWfm*z\)=駉CdIiDD$t?_1 vSǭ@9#M|[JٓTȣHKn9뽰=d|8Gd`{*0YrD[aekNl╱-ljf Js#ͺIX߸l9aLP&8ǏmAEۣ*] ¾[^_@ B6)8&!ecG6K X /U>|vWao+Mۈg*Tc1ԥƚceEFCz ,6MmK_=?[9 Fe- ea%zJڭX'e [L[E X8u9]G/_n?r O'U))]漣OVljM`նxc;+= IٿGCo5ECqzsЖ!5%55& YHCH(N濊LO6§LDN$"O5QԩSR; Po:"fz3^$ˊФļԅlJ)Qҥq B-#.]+y%i %r\Qr Lc/4C`@"+l1@94pH-2,St:b`F\v|@vX7 N_ï9 0fbs_O%StJlH)*R:0mA p[ u[lG ݛ+-*"_wpq v*L/9zb8ގ~Tۮ{#/+>)X&;O[A GI_ AڛZ#D%%ƕ@g y fpG݅\@exLbÁ*(jX RtS}J ],vrLFR{[hy߇P(_l;6HE93<TF3HY돖@e3Bۊcpj#Пy}RchF|xrbmr7C1X}w~p!Mw%LV׃(&s*eI*Tp$ p3NR*zc"He#>|kT3YUJ_̴_ d{κ>=_]no.,0ٟDe|d2 q-<ǒ~Ω_qIѿz9讪H2*d|-^dYgz@m]I|J0ƫ53YKn;5= ơqfWܣKbyڿD6^w2 hn <[,.-LD Ԫxo3.ؙ`)eMDtث^d*m:؊,d 2gSACBԛYHc~n`ɴ։bMnIqcr9VE<% T#?6p N~.xBc.S>UmENa(-f;nK7 t\BBYNI (#$x=7’S ,r# V5~zmb(DO\F.RdZ. 
A=!Δ>Iic C B}:TLFPg&c<yFf%O7<6 \E>(xd:P{=rh$Xjp cf(qGRj"Po]+J,;+:C2P8Dꎠ,U8sܺi*%z IZ$BkR->jdZ4@:9N>𣜠Sޥnb;ΕU՛LVCa_LE?f omҥہh.`*JGcŇfO-̩C2j‘G$$Ң/ʘP,_?rP EDl )#9& ?QEyL#iP%IpļqPu] ~XD$[a{kHh 3rA* n-m3~P|_l7,%Fx#f'Xxz׎c3ĩ,KO^[_>Mg}hّI{[hY ~"#3l|1C Ou?遲su}"u }Gú1g#kwiI#87e B2swӾ8d{d+l7@Փ4NKU~gvz0Xf_ ^l{Hc lJA;Vr6A  чhS3ccr('R=A韌3B#Q-`{f'l<^BL|?~6[z ߶tȣ9¿?ܛ^ 5E "DZ H[BgoY8YNQ6!2[CLUdaHٱ Ae{NMT=BwQ{10j٪$ ?T+9e{Ղe5w}ϧǥ" ʘqK | eX`'TS>~L%&1lD)HI V D(9b5]-i;zԨfE .:)ȟc"5(AICtab }Avguq JA˃[heJKT,߁ ld::ht[՜ΐr:͐޼'bÊ̿܇.q irq.6!Q8;ƫ>ӲfoK`+moEãujU9(^_]T^&'=&j>ըT[|˚i5 ^+E1?t#r,{OˀYHy([*VHH>w?mjN ^S[Sܐ)͜V||Cح?"WөrAѵ`Zm=;2?x&z:^?;!NAzC/<ҝ?C!_PqT}#oa`'DO[G0ZIq%-lbJQ{1N{ujrׯ]Zuyn (Ukؤ l|)nvƝ]vi.Api] (ϴșf wtIKl+L!g#k mN6`;+ju2abƊ1k$GcU\S;,}jUF7*F"L&-^!5 u1ػEKZ&>s~;gSOPyǸrz^I )IđXn'V"(k y_?Ò9toe& H ޖ8΢gNkj1'C2m<].Jl~^4H>Vd-G׌ct .g&^ڙ/3w>C_h69b(뒐BJoE" Aug[ Ra74mʪǥ.uRC۵br*BdbJ֖fNcKoq|znfgz}}tU$*im4߻wOկ t:\8 ѱ'%~PD=H4 9.=d~kih^y$gNG\^=/Y rNRFIb{]pq؃qPqk :l&"AW+fjtᡏ+aB қH"$ΦM 16.1Г/ugHu5:1}b,?~b6S<<,J)(/?t2FiχsmnHλ gB+Ƽ_LVP|VPy`üQi9tkysÇߺDyufoÜ{|Lձe+wWs˛V 2؍@wڌl1+S;\pk4/Ģ)ܶ|<5* #g/p9Cif͌zRI-21C-z K ݋w䓤ZXiw6@ 4"͗7pWW2_ý$hɇf +ª2WEDĖc#V!2mbkpm_PwFpڌIn2enAB=lp^:s|KC`MSbtߛT592^AI.qvMJzLZ:5F J%iNru|;`9wx}1ဵAMY侉h)b_ ;K 겧4CSEfGċ(@iaE7S(.MIX6 <1ZHOV%zGgy `S"#i4XOԤ&ҘZDFLs\go<5>2̄L9_~+Zܾځz\,O5M=y&kT}d( YTDzD1/a &{6S)5#'x7I^|" }/dP{u$1X(;  eŝS;|\V'+Ok6YUK X!r#|AѮs:?"lhA'bp❼.,uCXЃ88h0=kܩ8Sywq@EsMɑgPc`Fs2T!bUOng?Ld*&*&C#{sl-Ta^*CJ︼UP?js%`DtGØ4W(1t}34lutt*C;ex}'~\QIQ&;^Thu}ێS=POG\҆8W)XUI7;^cmNWi^׾8S`edWT%ޛ'ACJBy*p&Pj+~NI7fHKO)g\߰:]x}Ũ~ۿKyєdZٚH\կ`>d`È{5+! 
z\`J$LMިD /D}/jVxqzںbړ=6AOpOa-N{A{%a7=cBƽ;P$& طЊS ?-UvշlShEZ=k/_Qjm x= \{7w0]Z|NQ9:<i .2^eF+J\Tj4Kmwaq(/=FNԮ )U)S.Z 7GN x!"k= TY9fyۣ4v!Adzt* ʇ_rȻ>83a97$בq-阉?>*꟭f Mn W\!Ww{T>&ڌu¾u/F[TfiT6r֯͂Vq!͑ $(66/9e$nrrfya]oKWQݟCf:EuhVjYpoFvGXƣܶje`d!UB.3:r1N:aayŰ¹?AcAF|0%6*DN헛"zvOC{PR3D\O7uQie@H-Ωxz[UJKHVҷ6CEf1]Od*X[9*wAk>RR).l[oMq&Z?̢/HgA[C.?tA겘\&زQ1y /ۛk _ilk8^4rv)jQRЕ 9!ˏYvry;kiЦ._a4`zf78 k9Z3z^q/Db6]Smie1ƺ܎fA[hdo2#K%J&b>ʡhH~#D76pLsgW=ROY֝Yit`Ӊ^\Nc$B{1>[.Dg|{݇xJ0:۬V]bz{rԧMY,0zĵyj-_TI<JA]Oxj!7ATw>I˰q݇]G#~="}}m_z㩨}[ﱂ~Z $H%F~FPHDψ]?(,T'Aے&3kV+ceK { !vMMʹ!ewhnb#e=U&(B\LnlY E,_6n"urUe+'j]dac|xSH|\wn=J>0kHۯp \Yն`3 W:q3Fe@J1@vo+DDC O?ByaWXfSqg--x9x8PrOhst}VPmcw%8ǂϛ5ϪS6ǟˈ9x1LE&1! ,`.h3!mW`(%yۜTI@+ɜ 5fBH'G*glڼ$D* N/Tu؝MzA؇M}Gܕ;2agjQye:rE{p`glA9$-M͆2c!芨7_ fnmJ1hR4VlT@D"9_(,uil}O#gudM!*qp=1h+AYI{i)6/ cmY:J<' 蘋"_x͹wҘh8Eٛ(h/m^iwh _9=z m" ;\ҍ7t./C T,`#]]VBRͪ :&{4Yp~KX# Oi0PP\#uJ@*}C+\? .p*Je≄=@?qV5z{Nf,'VJ!/T{f !=bn볤z@spn=l;@]PCToSKW?J=B%Y2K| hÚ8-FGMjEkĻn砇3·z7: ōpx;A1d_W ܈Hl{Mg&/GdF˴GR{9 UB{_.o&f%!k  $nw !clOdn&_[EZ&P)Y\|N>T) N d$M46lS^ate(KeԽ5|yCGn'[%ֵKuAG+[Wz\ۡvb:du(_8x>hd,#s-EBE%Ӷ;hIq_1ڢ="VWױ@Β؄;`%XI8殝ĪXƿ; 4J.a'FxUmoԑնqY +{y1`S 0<ۓx.,-8 Ue/OZTsUS2/V<14H}mt`Q3_@ǵMU'bC _֌<hLlUD ۤFSܣw.(?V s"X EC}_'㪎P2}@s7XQ*5QcOh< $]i"4=m͗~"6Ae^Ӆ,b\y jyAi'acu xF(N}{6 1gVC*'tBi$y9]A12[1, |ֶ\M-2MAzy=j6h{;GX5uyS_ HS/- )+ (M͗6Kh ! fh.I^ڮ͖N dz#a\Jׄ> tN7^N'iF.wM m]&{]e=VA'eKlH_* X~"G"G`lޝ:3Lg뒎UF̿nᆱ,a 7 ;1~D4"e29lQNɽ r ?2+ƽÔC,'e\o!RÃ/Z]%V}(5!JE%L-88EB!1}I+ϖBb w`3\Zv| KlF}hoN;u^BL٭UFo75CcŲvI{9y `@=Z_aE /6|ve7cbR'h[`D=t\> j2"0A{9oYַġnݤZ-'M*E8CCpxB (xN8bN iC_ks= ~q J 2&Zsp!u#jz볷n  l"B8IFLbI7PpH>7Iؠ¯rzAy(lOrʖE]3 ])0|n ;Ì5^.ot#N7uAy|oBC㜆GMW{a%"]\}5x~b^! xuK~!HúɓL)T/ |V)*<ZY4yn5a1^5:KȤiCwIeM6  |ݨɡZww͌Glsw45/f0 6.~7ul8*w2Uk? 
똁[ TBv`!+GI_t\rfQ2Z PڎF\NkO:( zCcQR FXtˤ@6|9C(47:D.w mOj/PW;^+jQ,^ж[8OLCՎ{8qBTry+2졶tu.0D]6) B{N^df61w^p}Db$*DfCo7C㓥wL{9FP9tgMjAoxY"F:oAm$%] h{i+(wU3lxwRw?K}@${ pV\KX5oG|$W7TD}j8|t <Bӡ~]~jѕxW YXo~N܃ .GՁvn{56kX?[%7<6`c [}=eka #!#muaz:9ÈHyyu;Yb8F[v3t"qN6x#-;_M$( {%6h3D- &2r*+ `Fw t1h[z>VY/?T^L=F;Nؿ(5~~.V+G^3sZtwf[mI!7:b[OCębS2\HAy:]_LX/#Wh{,S:E107m\>@]7fv򢷉W\fsql*7K\7' X4]xm#p!aLpޮX.e%&03*85x;ϕ?c5q9Olx ;MNBdTrajvq1xpOc1]G5s%SlJ:)p\Q DH OW]&ʪ*!2_L@VHۖuR6x>-KJo:h63*wiaDlr~?Ȝc/D{[7.Uc6Q$ h{s.[-=>s-w;o)7߽KlܜDŽkkc(,ÛҀ+_v=#,Ց^#o1" ´uƎT%~_*e,ȟ$dnrbTEcK<c(<ڲp#7=9k5A6rxPm0J4-¨/~3δND9qqeyym=70?#w4 ]kI2 5md('/ ~;ɆGmYT%K/v)~H GI9?yꉵ x!n[96 ILUi%ΗHuV'.h;Pl'Ԁla욶×Ϲz*vBVX3cYIqx5QZgBJݔwks)B3["^ht]*MZo++bX~詈 MJ[?G;L|6#A#{*xcE˚׮r! ɝW<1iu/(`\O2HяKr)Oe+i@|"X4[ ^v3&cFFEX:u 枎'|r?V؆>Ö0מǁ|,gYޙa+.K%ovN/w^!s`Zpsayy>"i9ܪݎ,O&- bGS [Üra&r*Pr0$50U]0 wK!Qz A1vv7 ]D1}^{uԏ7xs&HQL kuG2m Dj.`c/G(:#7j-GSZ%r PCO(e^%lT»TV܁ =4z+E-)f]rڑ"i^RN&)Fs,;La;:_'Rޓ3EV +T.n!֋,5C#h_ 8_K+Ԇe,KYh}˙S|p؃ErZI53 AŪ{ QeΘGHxJf/w_T$Z^ /p{~݅*F:!BpZ#RiUS@@z8Ū [Uo2Ԋ8e$I}'opMr{X"JjiJӅHuwq:54X!a$sIhp/y7.،j,\($5|j-XC-%N}*ʋ"zX'@[G(x 2t8_Ϡ4B*Q tFĤ f% W[kyFO516qU`V̻W^S\s] RS$\}Xw<=1 ӄoSȼ족)ut RI cU\x Jy&ܬE5z1qN]e[. J>2XGq*/?EK|<IJb9BGk VZ`@=3񇺩axK\ ezᄎw^ENNpfR>ԫ&s+,T# s4E{%t~!"wIMXH_e_9}آ5r }[\ը':=;"@{Ym{*rt $a#pE&SYfOo3z1I˕E 4H\W0bz( 6،c]*Vx5_@CS6~5*.]<$ԕh8C8Dd^텥QBO5!WUi(0N\׮8_~JѢ@_1ޣ֣_ԛˉ⨛M*0?VSÒC6[1 dR?L@< `} D&8\}8#PJmāMҕ>j,~eߔz0QOGT_8e&g57 pض[Nϰ(~H8o_R=~|9MҌY-W*Q!S~qsU\!b^%Go.X.YuCPӊ,F˜`ݾf,NɎ-f =Z_}6CS=mW CqՌ ϩ76x"1ȱu6B^;4 Vf Ƈ;a;?z k?84W&s,hT0xwAkmT+u:n Sue ]%Jk쮑OK}Tl?ʁ5[G,;uI#/\_A"roDP ~ªyI]) 0ñ(v&9yrPgɰ`LISd/2b: 8?5&LO hCJH-6*"W_v4Zk z來т7f%Z)ovE:2Fh_J+N֏~T s7"q v{R5K¾)M~UQn"hut} Gig̡,1!Mٌ8TɡUJֻ]P}4ÑYrOh&L׳wm2`x"&vo`cp=(*6FoaB:0v.k&olDHB[<6VPW)VÝ)>ӀÛzA>X$gd[hV OQSPP6㿎|X`"p?" 
jVY"':~kYA#^ޙ:Sx>TFK/ '׏ FUCىp3n- vax3DsF[Q$]q0]YSp$k<"MkZdSr<+j31 N){>[y$Ыd6C(Mᆠ}H5aFP4X8i+]?WXGqaqC*c~SZ cxfbe?29ѡK  \ZYHe£0y) [r<"x54 lp%>eKKfZt=s.P#2רJ4"Rڧ\04W%ЍJ7~*@?~?Ui>-L 3oJإ$-[~_l`uA zh V|¾DGN]bVmwe jkB<0v{8уYY/ RCGeT~\!HS{X%9\NT'_ YL%ʰG ,QWwC!~OՌ;TwsaR/:g#Xg!5fW&NJ(pB8$Lr=ruR (~3m ũsW2FOT ` A}v7F$1AZFt_Uo#Tf?Ϗx]%|5~a/,]\GiabhkE,^vXiY wzvJ9uEE(.:I΂?k╮ bߎl 4#ޘnq+Wn!,}h+'L8^\,%9jHv'[jesF5^b -7 D?V_fv^ %o+GY?iaWz)B{f_$-0Cgiy,j5w3G.N ȸNhc^z%nu(FfGYـqMkJ;bئMI|[nHڰx Oʱ-5T&MHٿ@nuR-IU\GBǥI &ҫ8mPi4T 4RN\{:}t⭮@CXK65ynOHK^8[ jqvh|rWԏIA0uIpQ}K ~(pwh8)w|xq p,O@ ¢ "/2=qngv"r@§j6\H@,XZ=9{o%W8Np\ܒE)iϲgˋh蓎̫YRmѼ劘šc?2VEeb>j5bhMrolw CN(oq_?>/-Q۴Y>bNC+O0O*d Q\> &nfijK@*6LR-]'L4JJ*}f#3_l#S x.{6&+Z{o;j1z:eRk4+zv?Hw/f|XBَ灷Hp*o=_V`4 |,*I\_ڻ  5*-(O:g$1]q5dKkvwǞnZhyULe 3bˍr+ji5} hnʂ2rL*bf6T'YoE7q8qCRhr@ ᝤEFa[dN^#br6F Ԇ f`)@S+{7KWؘxҗIP˪Pî WSdM->!RK+F)/WO{aVJW脀G!څSnCUAɺ0+'_̗ٝ75QaOf #(gJ,FQi&I"q|2 5=n|`^_V׾|"?")Kb=j0+/VN S<6wTIi]1^@lPZ-$bɼ1#78'KTά=0XFmHHFRK$pO6;${{Ф@PsU %+p,F71R̲ȰI> | c)4RhVzp/+ ~4e'[T+x+|$!|U=\30l_ktp;X/M|U4)O͋O+U 68~gX9aW4j'ͻcXTc^~PZߒ"|)IxF'Ŋǟ=N8vhxf71]i]a%fH hL,bCnz({!:Jk^T?ȅޕVWS*7tp͍(9簶|dHGOF*@# f3}O@,)ۑhS JA٥b-AĪ}Ok;d4ŀF'TKڐ(P\_(D~")mltAUe1d~qMB(owł a6{D@BJ`!ksGYjXuy:F7dX5(#bP(Hon#c a@!Kü΋ub+ߩk7:kaQjBZT 9-)`RV$gcy?$ J@uQ B!Pg|84sLư6EZu 9|ьb ѳx53呀gZc(/xlYM yeUU%ͪ*`xB(B OhʴȖ yצ`ĜlQ_M4,[Ԅ5s_qڋCXmEN?4]y:"s6[)Dbqn 1Ⱦ"_FFn}b-v{uڽ$ǢyL&iHl ,3|ƃNf_ńZIf3a nFE;*0Ipfsy|نeҞsdC]cKIpsZmjobQ!6$$nJtΜ6:WG75!$i_6-~|vteFJNpumE#,Qs언+2!7biOݰv~gr}g|Ti3aw m‡5U FP)Nn8dpp|mWɜ׎ jk5Uf[z bCYW S "B%VK#Un}#HRI73|$A,sLZ7ʘfaAG\5ջYI,Aj/7,~sY2.2ge`?QW0)$\ccVWzDh-9z&hĞϙsV/?jk-6bn|I#"En7(% rۥݷ,iﶳPT2İzKҲU,tAc'_GR{$tT^Vi!%k*4ۊyڥiOG.ƾ-\7ncf Lyߐ֥.G3*Ji_ &uEm .K4iMI-eCU5ۅ,8MG}-z-wQjuNᡪp;`vсJ2loW7ŵU~N3HI'={9sV Q&dkO q[N30zI%*^uyUby>+%09 iT04/~p9'J^_05_!> W: so!y ~8SopAgmh\pSUٙkc7KRF+ŨCs1VqKrKD:o/R5ifJo#d.%Kw'g͉#V ;{=B%l@3B;/$ю ^IN`{ZX$!k>vPt3'AEۼNBn풛~ > u,xWDLzg;0|C<:QgW3:l 7/;Z*ȒS}zlDNN,gW%lsDɈ_aKenI&Ddئ-/D ͠^i[͊TٜthUNkG@cOsEKw04ƵbPOIwxt΁SۤES|P(F5Aqt ϽbBsa:D]lzyvk<Z9gDo[cv:Vڻ]_Gc齈L| 0\e)o~.M 
@J;7N0+)?X7ΘRZ O?[:x/^fE#lfQ(rQ^#-6Xhgibi\r~԰ew g)RQ$9D~X~tj} 71[ubH^N" Wy"iZ#&Ɣ0ҔA@@:(i6O9sAo448 FzDTI8r%|݇Z$Ba![zȉ,?;Ϸj+7aq|zfˁO\SV1(y TǏP ؿ<c=ZꬴĶ <. E3x7h[4vB^ }0QnAr #F4>[Ѱv{k#³O. ǵ2M]YNxNXJ~sZ9VѸ@mǔR`}o`F7 mC͛Mbg3ZqUIݍm!kXFhA!qX%!GjДzna3)+2GlFGz3 fb{떋34͘g`-|52}S9k)'F.=R-ITs<5Gύ]T3r8ʫ(xz*x#2kLđ7XDȽV/A+t<淵[:mjCtԌ&~,GbnJn |]y.MTAWB{Di \ѓB.‡#8f\VFmy W6RYbT'6ojdT/]a6Fmٔ_;6+lVVfoL*9R\G?TOm0! 8nS{ ASUr~R )߿ ނ[6ޅsC#*v=]fK-re4 IF$cAShe5UEa1yCi"g+~cPl}p3"u1N|Ui8+ƞ]!|[}͈0~Eňz䡜#dڐ@> Rq':K[?^$Dpۯmfcl&{b LrlȎne<]h̀ف5-,8E3 @ q_/١la :C-0V̰w##a%6b˜Zj;]H杓jcy!8/bAqRM$d!~U{ #`2$Es>|]ު4̣8/L3C !`;FN>S.⁌K4Rp >_T;©Yot_!jH3;u} M4F>&- tSְ?ږ?X۵(c6uM\D +jVꫝ5U~Ы&ƣq0ʍ[[5W EL sty (oU>8-N{SVTavآPd2v Ty{f\j!Vq wԘOiΗ#Ee$j'Ied-k _dB 6c*hۖ;a ΂(.p>%.8Ǽȧ!<8OL)2 R!lZ6 gdsrg}2FZC[.S+9 3i-KCC0 Q[ ^1tnJ5 *Acg9Op2Wppuߑkt;dlV\zl.,>'̩ߊAq}6n4'4P]i_ ԮzJU#h{5@t3c%$]eGeNGC$6aJg jSU|dZ@q[-U 懏%?hu^`6*je2dś.{%exq>V70 g4񄒒cU490D+g~$O\1_ 4Z#.)#<۟z >=\nHh T{joX>%0TD-&G2WG}Ł<1sOo z8YS^ =mP<z],8B$<0"@uw nU;XEJp6Cu;KU ̇Qc^]Y!B;;9wߋHVZwX 2;j!b~8Q8̩=stu]daUu uQElc`ݦ>R:&LH 7c{ӅAV ~{MdAm ;}\rav%Wd1rz pdw+G".7 f, xX.E/|z M0^p\Nϔi)j@n!`."-HMuںa hr7n,3J='jC):Ukʙ^*#e“H nKr%U xҎJ0T免"%_b:TK##Ռ5~ZLSvbGӷUq 4eB7\wN2ejM#PyUۚsfA $.BHRnl4&n5܅3Ϟ.\8h:|pw8 F;|`[\ңUjˉ>7D{N8P:%a%Ҿ3~ݝM8HBSϢs4 =DQ`襇F?!"ZchwJU.%I(ƜGT1[طRߞQ a^i KHG0Xnp`[v7[{LDi_. A2|иlLуJZp؅ ׯ4cLy> Sp볶υy6)NYug/z‘U`aq%LJzW%͓ Sr5uAZV.q70r>ׄMx (iᇨJkEe[o1w=wv{Gr t b)eT>5A(({c2ͻ /\ds"iT_ ӆn!,3 6SS$n=c<>"h[n才ZKɠܧ<,_O#؅ֳ'.KA&T#B$RڐtВ󱧃&hLyhY.A5>I3$N  e ZrmA4j p\F )z24*T<HX;b"tT^8uf^M4pk֨6m͊V'XLtJp X}t1#);EKU&M}ŕKFHg*W+ 9vEZ6^nؖNV6jg2}1ָ\""2U$cďjzp`o-Kt/n J>d͋(2HԜZKR{b$_ ;op^}!ǥy(̈Wxh8d-q \`H 54ZjLm7aErH/;ǥYZsvW,OMr`}x^ HՅ~lQhg^!})Z15IH_rH/nt:N^۔l"-ȸaB'>ޡcߎAܵP3%8iVg )AnDtiuDMg\@Q`wA8jA)DUeݽ 9,04RC6s {JIDЙ bRT?SN/ԣXppIdbeMdkR6 ŌK:z ppsMn5(֌騫%kxEAI\$rPm~p%US2Uy"k3Jt< iObSC00*)]20Xms<BY I4^u}pHԍRN] ~nsj_ye;%~eY$N9k)ihp1`A_Nos>ԦWݕ`ɏ8=)rkvx$# tP nDF|afL`|3#u?_GDfJ,0A,p˝e0CQG Sw:4 BJfZsLGLkT(*+fOTm;jOIWcN4!  
s(lO\g &Pax3#c|HB[PݩceC$ ƭo]X+,/=Cڄ{)@#6p6Y*ƾ~dF<|AIBf<CeTf38"U=6 2BxhyeCqKģ<wrT^@A<ݺj_);"4{8]>9XʧU*Ԡ>H1p Ys8!t7 /15i Q`2cl0ă8OeW ᭽eKp&xv#N(_b2:mwsŞ_+?gTowCBɍJvKO?ZԶ? ׷9cWod$( /|Źϙd7#o H>* 3PU)*H:]"6 Z;:#pu,'`鳎fc蜝C<[K+ĭ+mr`{5+h~I $堩u*,/ =~/kՅ@U<َ"{5Ј+oTr,/fD#ލ'MnÓdpy7ݡ2vD1G;5X*%D_puԍH9c,TE:ra jD%a>cRO`;'jԶED;2wAuQ# PAy%c>3D*QVv2S1֕Q t:( zolZGm2 9 WD$,Gէ˂&B\R7~79<ⲟg&.pД4洖Dzqה: )FFq.R}ܪ$ð8Q&ED`NOpYKA 8XHNB򣘯HӜʬ5Cs)jx/͜Bo*rj~H2Y*^/r nANHtg2c({gU4˲&`$G^ǷNn~&v;.qoڽioL:u/a\7Ê/Rz 6CI%/{yWWHAz1X߲*귁}\ /w9X_v l9 1:e@K@2t*3%g< */ɭt&Ǜ<Uby*"ɛ 2K/0_Zj;qnZR%Ua z~2Pf0 evJg(0 Z++@gܼ#GŰ:o]f~70סf P'.¿‚bِ8q-t$?'q5jٮMHD2vִt(,ßۦgľo!59ihNB 5ow8r)ީ>-.f]c d.)鼛.;gݣ8~`8T`hiP0O3 [&`l78i&fD[:0G5+ɫE;'+Ky O#PΌ#9]f&7UVn/R( =Mk~Q7|`w\X|owiח _7̽)0|3;EW*V1bco~:]8n2hq }|oy|glOȷJ3`;S5 o5~:$g^`>+"ˏS~laEpHoh& v&W^n^Ţ*s:UoUq-pFR'Zo]3&$f#Y ,6&C JX6jd0@x?7.Q5b\N2CTP]ĭF>otJUm/;WD M `r^90>.p]"b8맼|=[]m=.L䥬v(#|rGa%t%7Rݯ1mrcg~Dn_s)l )a9BtXJ@uɽ|*%kܱNRnQQ?jjk3tM@ Ӌ]'x!nhGh8Mrܺ}aatZsKRܵcAI2B(e3kK-K6QtKLI٘ڱ`K5k*& chcTnOP3zzLbǠ~;T,RE .I&lJv!)͗Zw %#x!hXʊ^1 FpjE_:y']QfcF 2"G|㞘g']C\~Z. i[ B.fd*w4#ɦrhLh{|0?X@LqV5`Q1¾y:T_hll,jCH$1P񦳱R><Ǝݡ< sr/tl;Tk`i ll:?jSIT"#S?OY|nMSG= S@_(-YfbL{[:ws5)0ޝe&;&RĩcNSHpP/S%/h')\z}2.11YhgXhP4< L1Wua%OY򒚦eM]ȰTzJib-;X9܆RA+8B 3UtoM 6.ap!c!.A홐+! 
ĜW@wPABlhtW9 ݇nߺ&V&9: N"UG덊TUdh2$9mD+9-1ܥ1@' a?(3hR-{R{`zU5r& /㘄0u{o`ut3Da&}D^JK4س#+ZL_m.`ҷ\ #\WR4h}7TM֢籰s"9ܚ# 1k&),(]aܬ<,~'\}mpN÷rwDj?̥2:*` D:TЁ@BV7j'Lu]X_gW%HK]AZ26.EhxGLX2du\TT#:>)7y {~?MTDmO›T_8Qe( 7xA 1,47d+aFȖށ\VD!JBmSN9g!P+u8C -EvK?1xA4`a 2ɬ4ӹ䶝uιH0XŚa5w]݊a >fn>~.+4 M}[ER:D0ʥ$kwVK>`FCaJMd%w9mX~p , O[Tэ>Ÿ-| 2aOf#\h̃/:|tYV7P**i"6f3E2ܕ]hL1֑/8_ʠH# 1qM⚦ciO{+C?lcae8\请4[^y+[-WF]- Uτ2"ٺ0C)3MY.cƬ""2ZLVd%P ~~00F%>cgBbm%ȵ9H@ 795=0jMp5BfP9@+SvGv~/m3{)wjļR3A3zz.NJy !.HdF76!T7qXrE|L>Ӳ>+YR4LΧ( uK!v/G0M >k?[VMӞ .K#3X#W X5hgk^K¡n<]rMj`{Y`l ÏR&sQZ ?;sCw-?&R r`_J/Ag4G}a?3F gA86moc.Q!-Zril,B7K]pJ:Gp+Ns#Xڞ1ϟuFB ƕ +aƐ,-Xm"%`mWPBf*A7kuSpU%e:z2+ruCiӚtC>hȀM>`=QlۑkQiM>Zܘ;n<9vꃯ0vj)3Z \p TK$5 MZY%ݗ3 xd#(68j~L`qh=taӿZ*.5lȨBBe;9`rzC:b3B̸ лԪ|>@yT3,sSum"\`of2ȴ'?d%X 䃬lqLSmYq(>V6fy-x v_! Q8"똚>rfYQpFgCH[,(i>#+*ipSh4(Η3 ʘ!DE=Y0"P]p[צUt_zݫLWɞM[ψˊ!hK'abTS!m@쉙tCwj-U/(J~FtʦʏV ǀ1C0V ^?u:jcw@'AL-27iҶofm'>qzf7 ~ƿ>#v`6bx÷3lOҟ%l5 v˞P*q<;Sk,ћ6|꡿Spj!0L ,9N ačxm-:@󇬮Ho6] 1_w>evB,)NSkE\${PRMskF&E !'HTZV90XހQ$֨q7`tA2/q:M|@nP~p"`nEQlw=M)E_(C yR'#\d4E0)]D&D2}^]_7~|/2OB1[4t@^o/N1TyM/[L{aȏ#_dv{wM@lu?T6KGyp7K LNx5k,Wvy(y.ZZcjRgyhimbKϼ7Fwf 1:TS_l%سn)AǦYlMePI3{#f'Cezӕ]zZj/ߗ&\f۴0`e(%_b:=cAMjT -8~ x+[^ÌGHՄq!<8w]HK]OBqa!z4;=OMW6m.6z4m 6xz{QƆ]wqQgy&G?W(v ^G]v Fi iNĽ-fv.?B. a~L [CϜ@`K̭y ĬeSu#] XK讀a^"[лQ\z&o)|m2B&ذTc4Xiv3jVHH F^)t[T[Ff%R=Ǒ16u 1ոksMV e][m!aI$<xJq~^HWK;p[.}{xBJ͓LepK閛U w쵫d{O@]my:Hݍ创5T>?I%cz:gĹ~;Fݱ(vAL0~%iH;=voح'DtƵ6]ϥ ΚA=Dڬٷ]pA Nyjc:%H#Fc1OwPx_ylOccZǁڼzf3nd,%hF$ vWn ˭mq}0]?,=B5/[_)*/V'&"]Z9PT뤕ؠhՇ';NЬ#:Yʽb13a븣Ec"wc; njC_\1 B$ݨIEge`ae@kRDg+4']/u+HzɫPX $類=FQ%Zj6]2h2:Lqc ??[6) At٧ao<+*7#v#\m}zO&")5-SwAjU۝IZ!:qD\r9]T?c.ŊaJ|XcBJ|2C]mpf#A{ T -fv-ڂy!O)%dE-$)-wk٪zC%bh7\DE6_br;>}{. 
̦DL ĵWfęʛc<3% Łѯȳ3YuQ fƃ](*E|J>G4{b_ttg?Iu q%Wo=\M7){akn),rɰxr>Ok3!>&|] )V\L tӃ G[Dpgswܾʭv-B  d4z5X@رd}n똲{b40ϵ-YSXS>G0W֦t,^!'+^X\z%?)V`ِ㨯m-iu 7+z !+#2,-h>>G8]bL糑2c bcٖuPR6%ʢ A 2`C!6>} hR^ca&setD}P{aQgя-PCʱ2*覿2B*2DHq$}DR3-_| \/_f$.VVepퟙW!CcOZ;*;NX٭ ifҸH2T8iw \u;zX4BH7wMa̒t9_~gpn'H~{ļۇ7DcXk{J5\r̩X|^9MřPkpt[Y3 Q7 ~XDu(t$H V;=yrc_:= Qo:ҲBWeBSۅ#Z&@%6' [sF)qKOaQRTp6p>sQ}3+8ne||3{Zmᳳ\8܆$"9+ߧ1qU/k4Y2΋bm.L>wqP;2)S&QcFkb6Ŧlu.j:鈁W&(4%nT!nN% ~UXͻ&RjٝE! @A=y%Rnt^,4zR*/;kpҒlVDr-3!x\yw&+UZZa )f7o~S t4}.6B@l#XKϙ2swxqVZ4IJ8G޻m̪-*ŌMȿAڼ$(A.gy蹘^jO#* ulQӵ~ޒ^H>AJi 8*ax #~cCDwI0c~*eF %F8b9s ^CmED\ *ݪ<|ʙa첵dP3:C5b?_N'6H3?6߼2BdžйNHӍX`,yymc/OyjckNg'Js6)#}_^'y1fLax$<O ϢK)ofCDV&u uPJˮ'=R*ϵ~{wӧ[ ~I2G8RNSm A[ѕ~gEhkp'k\% VdٞH:im>4X”7S ԉ9p08yXxtla{~jn9Nrs`I| Ծ9 q|ͪUUc5TH;t.gx^Lj$Y׌9BjE4=9Mw^BeqE7,ylI)81')apj_vI"@\{+N)5y~c]&=x@`|X~DXrq^x'?~cQխo $3N]\fc<눂. ؏r"4#[傐̩dwr.YIe? XUYͦy-2 ,\čngT٫4Ӆn *@SU2fPah?MmF܁u9ۢ&0/`T_tf+.ԂQæ4w0(w9_Dc:z$11@̯#JEHK')wǩB[[%3ޕE 1|a4P&k#SѦFZ)cyȳn/%{2#y6ir%~aSvC.?;qPx^*YSd\'y sؒS ֞݅W ;>19Kf՚ {ؙP'6wZ|i-_9w&V-To`:~}A֚jbw 'w]~3"VWHeo+cη$*wY߶ l'T C}d$q]$Lu]Þ+4Prߴ?;b{4<>+l6ݩaɒKXOє3R`icnOqW5̵qfJ|=j܉)XE,pzPԀG(G4[yp[vCg_6[^OTVO ~1w#F?>HULu _)AP I&ZvDhdhRqW$)Dgf!q,MCV c|NT>xNW}!F6#k 4q~j4@{i}F6N0~~)ۡ캗6|h >^$C$ gm6jA7WFj,iSiV-ץ#ĞxPW_pܢ 4j^|ku"k֛ZOp:*їy)>DҰ\l%&08U^́\Ֆ!OE,P7@gn˘EޅsJ=g=LA._{ʐ'riJ17ZWᕩ 8UXy.+|(m;V 01u!y[џ9a&BT<H5Z0#s[to?v[؈YG4ie\<>b)F{X$#;y9UZmxei BS6(*Ú zKg^ogPʄa ?;f z!c[2g.Bɦ*=Αx:{bf؃92mb0RY'el@Sg#!_RG ŴԸB:f>}ѢsdKe[(o0zaV_/K#06eJ<>OR%&T9&dԅ88C15 w|C"oy{?B\GnB?8~$JF{#l"lF2yCRiTD䆋{-41>Dy;fѼ\uۮw#|^^!ﴩE:לؽ\üfgkK)'*6D{>8.cI.qGR!soŗ:fHI<'Wϝ7=@NT/IqW{@?N.ꜺC[^ 兄={uQOu #!)IJlo?>$NQЃ/B*Y4W _sDed*BARaI5CY 0 N&Nn!caZZZӤKcBZ8[ԳlaGS;P具cNׂ*=ٜiEGzg_&ms2L{ft(zƪ%~ҧ{w 5Y-ޟ"zZEp^&\z߇R%WFDi҉dd<n5:t0!Tl{+fg,s hZmTw>} !fj;:Tn|,(Ͳw?ΨnnXQ&_?̳QHUɴC5I0"TwvSIC6_za.E9,WvX+`H }4B՘eyJbe~:W9P.D%/ JĎY&_p+9Q ȃB#Y9g-63熴~fWopZt1Q/S@FH"EjVJp ;8PĒܮ pk# ZWG,rPU%~\`%E "#6e]@#uKL':úvw~0LIjoN]ՊPn7?w&&O)|8uwBj:-_E!o7T x8"bUa~jI1eh8tۤ[lec8h<5qDF=2WD φTc)wZfnyj־rNvi9a(>M[E="jDݫ !HJR 
g4򈶧T,@6}eI)vwה$Io#C+XfKJR$V*?0z#KbYĭw:kUC]wXh~(kՠ].֋DDjd K4{IJF`&vo܂{ug0`C]BӇըZc9m]̴!Kؼ@ Jrl}@NgeƘ4To<"7 N"yr-BqB3J`r?+%5Dn+A~%z5*愓dY0#_#o 6pFtd Oҥ j qwc\5:cry 'y<g%zJrwËlj.թq/-wQy5cĥ2r  B*VWe^⮻59?P#۽yDK^3m02DQ2EC%Pa)$OS+PךTKmda/:|!Raf|qR4,a[׬p2sV ؅*=uLQ.ZԮ^Y8%^Gô7ݽ@̞@Q?CWaX2Sl" SΣIl{m ˟0ҖJ/5> J,dF!E7EAFE[.\<|Z^;}luOm4^53E432IaGjƽu_㱧Q>e}bgS4bO >zޗ1Hm}2[k`1zӉ_(FDjBy(EPb(I6MұhCƹ@@N&2QQQ L6  }KhbT)G7vwx:&˾Ԓ{#;; Y{ÙoA^o0_X7EH^'Ͼ葁-jwd=W?Uk h9Q;<2O6h]vY$ %E#@MPM{̛8(Qr$†|r5Bnf`MpU B~B]xܥ2}'y~CZFiʓ3xYrD||DGTxdfLoƄ$zٶ?1_Rcb[ҕboeF"ܩa/F^eJ#[x6K?є[[ro\]@MW{_d6[Pd_y<\k/8]>ߘndk7/3gzwnKz5~h8G]5a~PŽЀJ)ibr% ms&Ixe.x1 $<,dKQ%*p?D$qEyx&Ğж`ڝd+9,8 ٗ@0>cjSkњ]ְPZ[#)z8RDk hSW&Y~´𻶍}êbuI4ߣl+] wG)%y8p\6Uxz^Rj-{DL{JcbGT>3p&\*"RͶe ?sGj Xr$- qś.O7VIQe]깔+ކ&$;TҬh*8e㦘n" Ԃ.*&YިOsמ$L"Z~4C .&: >%qjeA2oP^.k?]1]._0$DK3Yהm+3zth3ȜzD!hmi;Z˿.3{YJ|Y]M.Xjr֐>1V!Ba+7 OtVgz$y۳zFג)kQ4[^RSl7I(}J9DgM]d8 % w@Bgy#=[/4Y(q o/'fՋp%}ĉ $`<ǿ몺f c=(sx?; "EcI0VɾiϞ#fsϕG#WD~Epbhh BYL/4HoBk 궘57# /h`K~R81ܜٰiPnk=Wo6aNݯ i1ɡ[zM&mzZRb;tg_5M_|OoƯ]x4nWy6O i'褚‹h} Z-ӐD*ȯW3uθMR/yҡÛ$-qy 4 ڲE*3$ȁn0Ty37萯STb.g ^ RA9 .ܖDH~Õ½%k1RP״;#CeϠW~(k8ë0Br22Glaze}bC&z!xPe # I(!ٟhq.}QbHFILfi9T1|.(CXś⅓0p\_TkȲ7M2>S"oYA!n gwrSkP~@JR2"5` uS nہg<7r7K/Oo׺pF,cqïLף=/dE+DI9Vx%yװ_dc>dB53^Ig<<~K,F;)DB3<un'`ףJ4ev!&Qyą2ҍ˪%[+1Tq!nv; n[ *(aP]Gpq hr:&A%kU5D쀀CUYR`N^9&cBRzV*(P-AS7>! })i 0B=:3u +Qq wawkhvz3<1T 3KVTmh$ZTOxFsdk$@rǑl:lBTsp1x nȥpLk͵8NAoG.政Z=ݦj<3Ugr7=9Y1܈7үIeTr$l, m+ME|l$2eD_.{4sv]h. 3Ur._/p5Jq'UI/v޼=騅7HJ56K]"ń9'IB]‹?tMs~s9Grıֽ岆U˲s{3ű;Y,T5&f߼2;˿SS (#XY &=zejV!F(FaLU:S5~]^\s")u#4ԃ1"N,p@hk"ޢMD1Xrm Tv(U dldEm0/~? VB1kQlfz"3ƙ@g #Q$ /vVeGG'E q!$wxwWP<]LNj{~ޓGtqm~Ӳ4>q+Y@ M@19ƼMLJHfu͹SPRrU1*!Uf rdXGVɊ ”H"WTAΞv~0CƮJGE`aI LuwX GT3ʐ!h.&.;gȹW(}BA"*e)*b#Ȳhޖr߭Z]tޗu4ת)X *6oz^a-y'Aƪ,ֈa3fxoW<t3[> rc {x;ۈH#'aFsbYv ȝc!63ZOՄѻ. 
$Z)1҃M:0շN(^ce *EVƼwooxܭ~02Dhm[k#K!M!#y_+ȟ; T(f灍#0]r.9蚵&:N4k뺄=$iY+Yy 3?1uHn#l6q̠Hqy|7({|9jP{?c.2UJ-ܷ+]E-7^P'=?ζ򾻃d+Vv V`$ zQP3Pw t gc7Eut \ RK /di4$bF!g\ Sڼ 8E[=ZǨhx,2R z'$d EitL3Ky` qр`vsNۏGϮLνtȇtn+ Uj^D*Wbix9R V0^xnǘugbaQ11 bW*@7O66)O߹Ar~m}>SsN"9r{^ٟN + inOZk(ݜO=B+pOe{YnS$Qb_[Lxu:{ҥ4YXiݾ> M2!} -Ao凣Aʯƭ-{80M&2 '^!_3^Lx h6ڊTUͤ,\1`zI7ùꎃ(B K"U#6kk</aFasMhzo?S8U}Zӝ0A13 "o/`vJ-V4</5 VRoe+@Pf5)BYbnF굘֖: ńqB΀>KMcK4{9L<7;[ľȓUrz<_cPh10Rb\H-0df^l_y:؆7k)le=T\6a$< HT}x|mgQ~PuˆzMAxa4/-ϟ$v1 r9IZ![NF $E0B4G4w%hLd)}hjO^C@b(bЏ#z}. ʊwpU.6,5,I]Ż /`!j4ٱ's-c1xdU).|b̒2ފ^}JH!]P  EyGx8Ͼu,mR Оu|[<$M[gOd.z\#bM/em\~d* Zg mCQ,^UbYlOhEy5KhEE7P{-@m󢩆'=xn`U V/$0fu bΚ! 9hԊQ[qTC+ =|DK;J~$edӆrlT]\XzFvM*Ib6umUڜ="yVYYt[KnKJiƠsTҋ=*0/f{ QI\=I ؾ1Q>P9wl|^=kֲ$<]Bc˜Wbs:;Mv`l \sOOI͎Oy7KG^./z? ͔E){Ku)߷nzXX ,:_6{41>DAJqةzS5tl6|b: )[p !}cKx:Xfn^fJdG劰f9+*m15*AʡO5ՄNv@ײulVv{{DA`Ղ.'νh`ş_d V [s_Wt1-֐߶|2&X=!zpx]Ħs=fAeaSZCX!A.! K٤.\QY^k0jt(6{cx[τAX"{U KaƳȷ~O(UtPA:MB ]RON$]~I%P7Цqce}?R Ɉ94[CNsa&2Ŕ_r5;ʠ;'N"5{- sgFעM0p| mX8F}iph?r$WwfS+dK?8:;jMU(0#dz7F& @wZ~ K.. ?ILE?ӓmIu&{]9 {{ơ!gWilWG &سi'VnQ5d7ی awhO7SO5(^~$_zDL? .Ø Y[ l\0L}ä; Ri1(m.̏_n;)pNVWƉc4 t#-KҸ*2DP]zvQ4ȅQ%(9Я'nè&Be)va6`pZBr9faú.n,\V ˭]1bAYd+mZ܏2z%f^\iڼp6CAfT {4|y-^@ U'=ݻ :&NǴyyٴ%~O6|ZˈHkdZ&k.kVYa!+L̨}[m QBj1 eTe@9Q'Gs ]◔e|; wrߵ>(78ν\n$6D$V+nt|F"RYTrJۄO~̛XKZku{Q3gIE2@֗v1B.X ( [ـ?Ƹ=iGŐw(߀vx|<V^NB$ -" Cr|IvT[hJɯ󈽓Y @h°E׎daJf`AơHf D*g+)x؎ 5Dw aj~]ZUcJc(H2gy+&턚,V_~Cu<YBO[T-9(xjA+n'c:P~eԭyi(ܜ&0qcY0 [ؗLA7w]+(QdfOh8+hN%hvgNod> <mމ.:dL~د)&+eleqb ~#-xL_Hs@nj:#Uăqژe6("CܥƳ]a$ͷdDEgۏdl M8Vs֯`.6XCyJ0kxAZO XN򕽕d`vXIB#JG8$VfOs4MVfm]+d_+o̔sԒ}Zvض!jt[/b67ZJʞNn{T bu{.oTglkBǤ_Qcw=MeQєb[*rQ*hrXIFJzi4K^M?ʻ4aY$@#NlBo`^je窰KdݚU@ +Ӡ5g٤oI@vԒf.oOC8A/'uk͑>>{euyq˹%bl`]c)b"M2pTf=P ԕcߥm{ D&φTg۲ޥE"&2c4veӤ OFugZ:\ifyG嶷g*AEYvD[Eu%F;LR+1FL||ٟC#/#/VyqnHU"rrX9^.k@6ȁ8ґDM`XWQraaLs_ N. @|Бl+(z>9b P"=? 
,C%H3Qdž0)nb@a(M( T~4\d9y4HGhqۢ)GJ؇>8Wn'?.SXE=v~!3%-Wơ=흿 = \-`cf˘} uGUOY6G;Zf~7 !]J$~J i՛d,1a S)l=eXdX$-HW(g|LOUJ{<&5 J`QCsT$Xݕ4yD?ƃ`, SXdbAS f]kO{;,Ͼf;x$cemm3Yb͂k6lӁ5+K ?M0,Hkmqb,'pN2\@6hTkt>{*Gg8?DY]wi %{mZ| P`6WgB8>V)|=y oFp.}ܼP Sx f1KXl1Wm_f+n+rw3 dsђd%Ո/$&]ZAssh.Cs,6 m>B+&}efDk}3q;_>ֱVxߎ2 ?q^U_S(C ŠIBSCw1?BQ 7 #D k:9a}>ØB K 01C}G+\/< I#]!UdAY-aDo|`a+9#Qny:>$EIlj<7Mn}R3MxVSۑvuN{q0uL0&{KXgE"{d¬}Nlߤ`n (@uiu(␟#[?倦>#==ǥMHaW(B e |/v긶lk^p1<m3wv [CvaO0Y8sQW >P\0@ `9|r*?N]I؃>Kɐ)I|RF8B6j&3, 4BP 8"PoK(!Ǖ[Pcp9hw<_ " 14ͱޔ ~9uUq{R~chH:W03 c&-D bL͌F3eCmab_ש- 6G_: EK0SrØr\فed]:Hj[L(0Iso`)w>͍]_f߲R/on) c=4]G %O*\bƔFھ @pفm0{-b+~:U&BL~ O88U=v[{_6X F'G zz=7~TBfɵ"'ˡgKb13ā' W~M)^@^7[9?kي{M8o vӼbGeZ4xj*~ Dډomn':*J4;uV_)dGOy ,U.zŽ @+W` q KC5~UJk+CD7"Q -$%][̝tдQP22AnQșG`ʁsqtkS^2x35Q )l9jUKZZO;zl88q}.SP4)Ϥ^#N‘!QLWKb/l&{XԻmTb@0 GdhoklHuKJF+k[V?? ǕrzVX逜#嫀Vǘ>!V lcLpڀ-cDȳz7n%tt.mE'76k3@2H` Ӽ!NG2`o'je88c74d?K+yRO#<(-t8Ԩߣ? S z^HrӲ/Qxnr5yԁ2vƇ4[X[BGkSr 짵pl͢ GN5X$r༮OJ5AЁ6n&'?H?jRnX vrU9lDƗtZŤ#¶#XQ8Wk/%[ܣsߞAI 5c~ ow>Ե6-))?VB?k&44O%J>iDEA r%+9JէiD#mBk TEd>l" AsZٝU%cN }K[͘ce3~\HQ1FGZ3aW P&VװnۂNкe=]oYUfvIY@/UU"xw6k&bgؘ=pIn~f 55yu/LI{Xk:0#Ӧ݋ob{tű#c&h>T~ n,KHHBqU1JgK:zhY/ҐNT*OQ~FÎÄ`ƹdN&|VU _'= J]6}XM[^Qr:2v1uj~G ,ՖBJu&k^uin_]|p+4]I^_Iu5j&@Z̟Ft@ҝDc\EV7uSVU!q/uQlִ=B|Ēg t6 Gy2~OuP!yZ nxEU4)C&1Oཌྷ |KC憼P ^nKJ#-3LP;tv o(J|{SF-S 5Iņ/zior[ΫVUun=yf""r>gQq}|_D-O"E1FJhQMB|~f4D)62 zbӣU(v߯nmT(D@=܆eO+-_q.Kq>a[-߄<vhAry.PxӗrM>s>J>$[4X%,}uYCŷg pU-Y+"OmseP29]mQ*??kuRzf\&({%蒄_HZSR# jdc`2sr3z=o/]UO-W& %(.9&W _OUay]@r$t͌DCT!1&'ml0QڤץlHx'7]I㧚gZ#/h UW߆jұ~j)٬ٟ}|LѾ*Gz\rP `#@B7-ojI;Ъy>ivʖgdˑo9ۊb\:kGc!"6?ECUI{D%։OEZ&| $.8aA)qgKrrĞ+5HgƜ16I|/_zmo4wUXCP -_M Tٰ limD)9P{tK^Vv% ac&:a'rOTIQ RPUEZ!BNG !\5+=7ǿV%M{;JOZ-R)bSa ^ .R=ixV wl>Gr.<I93-{iLeShW)sf氒/^κK+%P.Z@S7w-5TzMs3MI3jThd1F ÛGY$|$vV˗|B) K%PM-BɚivPGw%7q6euֶ ^`++5B*ri. 
w`4EXDrHl-6~x*bȤDݖF{ +u6"zLϱaJ\WEt+,Ȱnǁ$&wVT\a@KvdR-<(hԏq9mc}>8˘1pѤs:hK<v*sBiv31pPlvcDEC:S##_=~mh'YuYݛ \ѠeF$z,^u0+5azn=~%psmUe5<S0+*j<;/ou~X$ ihJ0:M DS%˦oijS$7Y!]Unou/ ~BCfĒKB̙aAb/ 8cu ɀߖ{-k5dR@qI\KH崐9F`3f#jL/:af>콖CP̆hW[hqqtr LZ;`Gw11&cA% o@Zo鎗 <#B`EΞ\A,Lrv^ôԀJ|@A\K3Opzrԓan;S`0;\e1l$6.Zhtx'HrqJڼy5uAB̺d.75QL)H6gxyw)/6[ZL$Q9DڥR|)pUj?;xUyĭN`&Î})nߕ ƭzN]F_cAT qlq>Z)yLZ`GLx)GOQzUxUGDs?}>m],#lڸ2s+!/|)!*}/Nz irݾPw//+G>l;L"!+,BojzۿctwIy.vgň$AOѤm'âu:,8jb՘{`p+>Ɏ0; \E;s.ARb=ӽȗQh1!V3ߩ҉f&aFY7 #WI z{Pcr03ӎU*{W̫F .ybWޗ hݻHzg"Dy.s NS6fϭX- uoImB{Qݶ{^B$tGUW:9-;78"}Ƽ Q϶'tU7q9.z~C$pQ-8By)]z7'\2ot Ee#Gz|#+ul `'D3u߲2 'ler8@ _^Ǎ"vy7"/t p/!}xh7<v#ܴO>h 9K Md854[ԨZʲc)g8̕7DzXE kabC=BHQ0RoQ 1K2Cv%]O /ƁSa>|8]'FI_&ZloybO,f`yJp#[ V}(DwFQ,k}uH`nC(/5"U>Ԑzf|fB~t7Oz d`'Dַo*N\0gH%iZ*J"k#WC=]I־F߻t2䥶r?:Dlp7ÜAU|Bf|D"}Hz|+}ꨫ&WAmc%lG)XA_R;w` I5~mbY ۺ.ik&)])秶n$m(xbb0#h'nS训G* ږ0f@ vdF`o?H.\fCTRtgՖ.gjTɚӽ`wpŮߥ;v(h/z 998pF"xBI{:t yq6ZDk8:wy;ju3,42NIJ|s\)x8t2LDE)֬Jl̈́֠f 9 NPP#GY)F}@Vo)J,Vp۲ew3D5ޅxO!uA ' 4} SEDupC|jQ|Dp_':'?52Zc[LXUU$Ÿ TU"zA$tۑj9UMRĿ\vX{~я|B|#k*Ώ<9@=(tgDѠj7l׼2 0r'*\RChfE}5кB237cـ0 g^XɀfL]xͣ^񶨚}Ӿc8_ԩ)/_VVf_ܩW#{=DoB*m/ Txf7>Ku-eAc/Ei_iK'$g{O75ymQ` p8?&?ٿ2fL#|?fF&^L#@&=li@b:Ǹ5p:~%%` \Dkmf|}+N5SdK:n-h ]WLj' p])Uú|ۚ im =|fyi;~$r }<̉W##rEB#>do40t24WAM;2Y阐fNօjefA$9҉jwl`S}ZX3*=$^=Lngmsv_\Sʀ݁ōC9pXf4{uj<E] f)}o 1L;Tm( sq5n72]huD󲮾?{`1F|t8W AyαI.$TX\gL;n2{^^)]-0MobSNfm8)G Ö l Wz@ձ,V5.5qgjޜ3v}ץR([ h}*3̅@vrV*% U6y_@ڠg НHM$"$C@c`fM /hHfwx4x"E0B\aM=Ѫ?>N(kyq!zrjeن2#Jzq7dwm ٶ |/jEi7}r:2o[^ hĝ EQ,NxatXYO<CR`eoȝTNM]|'J ;$Y֤P޻wxjcUOykDeC)2op:*/N1x,G9 uw2%ĥ!$bJ ڦܶۦUWgwz1{q+uL.UN^ޥw`q7ջ^V8oސeG%1S-g).YP%)q꽹# pT#X[A7X?j_(E wջ ޓ]0͗Ϝo_U*,FkfUf ⛦Իx/x?*]F|Z*mkvE鲢QAł{qx\Eĵ ɵ?{R5~݃-\Āsk*Fvٺj5kI'a{t^ah|R(߹5Go5ܿJoApV |G9X8qiE1ÙOCkߘz y1Rn9*W. 
.@s}a&phR9A%g k~К>M@,Q[@кPq [JN(Zj0SɲRS~F }5LHwUS@_?@̼A$ٍ5ikY vTq R Y, ߀ +.m^|{;j󶽿eaE"F iթ]nYA}w+KDž~J@Z?(~%C|%@0S[%NCʨ7CX]WCNYWcQ'%"ʎ=5$\O261>/nYZgNmmvIz4p[u3k6P˱Bұ`$R, ;Dv:_|}ѽ 79u"HUӈ2lJBcC A+tpp/y"ˢşX)V:)f6 k 眅8r흙u]MJFV1y>\&m9Sg=7[ d b-=r]zWg2LZhcF!:Z'\]b䓻5j{[-΋5u㕽O$G3E:S\&K"38`i<]YӐF|Y}yѮ.HHHF"Wy* 56dBR>!G`̄,N~>/ݘB|72[z'AG ߞSx*pKTxJ .Qr!@eV߅-hE|ės,dCw0D[axgzi!א>iXv0#爝ѡ 8^2aHڎw1q_3&5*wj:S+cQnx*4mvid%ϏY-Dk{]Є=2δ]Z`,+4$hXSmnpuwŦX8ʟ{cx_YF3:ߗ@¶%@JxNֿv 4^g쮴 %3 uBP1쭙.w)}_ස]K Bڈ2}p E|I6ӊuc?_xFcjHkJuMb +Eo;It:& sMoі:~[C?Q^킱ÕlKKhkTiA6r {s r_ҏ}v5 m~ ES>65~EkV -c vM}'Q@w_@OeyᆵZ ׍[P#sgYN8XGźW$Wx(uҶV0eF\!>^T蒥58:'Lloy@F!xk®ב[|Max-rt?Zh.ͭ"ɸ] pG9J;[VP]zp9s1(NC({oAg{P,xF'Ƨ1 Ek#ö [F+JCK*,пW5k RCqjGr$*gJ!@v VؐCs BqQ83G =:l3{aIYNo Q v!">Q"5l98Jj8͎B.x(s$-P@\|M~gӨh)ЧQQ!PekMec8CUE-Yn=ľo$HI*R4(Aq {cZ- fa˧d96UeO:`0Xِ 5NMiBZ/@! 3es^Oց;CFK‚UUF˔h.;yg,1A 6 #n dB`z\Nikeb ೐Aqmb6v1ݐOFkAwMt5'qlhҸwb i2g!%`꓁*Mx2BwXs27 |+f?.x~OKrHg*m\@u=zɭ)@; (7J!qeGC$HGq=0{Pδg’srhƔ|q 8->#uߡ%XhSC*002fɿf[<(9}6;c $4=/hJQCc=gB?_pLTu2|Oݥr\8U?!d]b+w,%j)SyJ;) ;a.\e:îGbNU "$;b8a\4!3j A a%(:wũ̐~ag7x*䛆ă'33VqGGwXz6.[.@%jhHcd?n5@Q(yHcy<^ƒo0m:Z7D; *G9T7?el ALC/n%ۢKĉLsfBl"}mNP-] x@l#͚6ۖ3ZxIΚe ۋc>8P!R -֕M29خmxnA >o$NrValr$ <`*SMڸe-E2m.8F}' :E~ c2{݆hzζ131])lwuy3 aZȾ]Kgp %jΟlm0e'Ulcj2?_Ns%ɒɴ#4R#m~g^NI:>wAݽqr !ZDXC>|Y$``&Ҭ5ʛFL2n.#,5Krϔcx@K/cح6]MU@O g b+Mhǵ q9~v()cG ^f=TqT_g6K ہ`^W!izw6fbcC Jb,yZ$@)$9ѥRpMcM8 rFeCpH:<Û}⳶{t|5)ٞ-tnЂ\͒# w桿D ޻~ 8КŪ`+x`hp+v,踫g8 S LbvXsxLpu(P^* TYkh+{t(,ro0"(+SpfX"&X _I:)g)0Yc05+%HԸV0p]@L&<έ05iP ]]nU -hv*eW\Bm36%m20J34b? {E3dX&=ҫ9,g޸e .SFn.O`"Nryj1YYeAޛULZIS~ic F·(%dZnhY JUQ14rnϟ'lD#n$ _سP@k7%l-xS'n^z! 
?܁r+Ŀ#Cvv"TҀ0IKĺr&YF:6sB&AW`>0]I6p]9#b8Kcn5V}7)-~(Id~ +"D*&}'+ptN!9Y%j:$6>7zxk}5,$f_f w=rDn%ֳmVe+=SƙbHr:zE0L9&W6zBdͽ#8{6)vŝ~p{;ji'c]%{+ rF}]E} I:tNPa |QՐb =j^K|6O{⩣LȨv kDÈ'\QhGYs"39zI1lR+fqIIQRm1rA߉6ZK`~ԅ5)!q"'P\?xջkE[a'({Xk2S:6Xчޜ2}`n X?GhuS ,$1ejgWY[aU ߮eI}0|tu ]PQğ#d:]atgƟֽ@Jd=@Dg)zAYwHJQHD 9sc.;.$0˒K2|@Or6;gV>up E#+g5/_^DHRoOG1L52:|]< SEoRMtw`ոj zrb+ܚVF]kZrP6kJ(̽1u aJtS'ǗL>oѪ;kP.{L2i CF]'USl-|KX-T*ҒPK9f5].'-nKMtFSe6#q>{g`H(Dۺ0I'^>݀L)yb EyuTbbMKRy z1@El#i8kY)y=ʴÒ0qYr m]2 xƍ|-sa!yXyȍAo`0QJ`yD* ʼn=b.*WEJX9(W2Z]ߎ2 h 'Cz$͝*o"$ܽH3+uQWyH1X=OO3+BM"Vl) |~r*!aW^wSY!+b ;`)uuEW1O% iӴx166Elm̸¡m_Gx}H-nrGndSDB@P rzS_h"ͯvKV@vts"eVӬ,Qhn yl5 XO8Ub `^tSk*8 otzMOpRe{U‚6&lR&XSi?I`>2N#;]@EU^".W6'm Xt:>X#;hȠ_Es'Äw=;xNc2TW]G\/iV\C rxZZ&e{%,a8T"S=pr1uGm{?6ڸՀ3Sxt*7 ??͖aɹn1ٺ/@lϐ`d &f X,NLZ0`e:Vq) Ug̢}y:\b_ˆ5%5`³%drKƥ_C:ذ>qd襥$ SHX!Jhzyf/i>pgt w : ]TZm^KV&ؓ0$^}ƭkq)rՙ}(FٹUi3^%13$ab$6K4#v2uTㄹ/b(W\Ck?r //v^ЂOjFWN :~!),I*D+FG@KPF838~l?!q:.ӍX5ѠlADE"H)zsJ tIqr< 9xmy]¨!Exe(;jЪMW8f"Ln2x4 ~FgޗuN zG~ BRo0@./wKaSSr/W=^$'o L*XXz?eg$(|Ld8][*S<7hV%2دҴNO0]m/Vi=+,xuIN _N&])LJ>OΎ b17jiw:u$kȽJB#Eii:BNvOWT\"?g(sہ&vlLHxc365G8\hC\WAb&;|RUP,vSTs߆F9)EO+D5:)*QynEmL1tp9~eɽax8$c7撯㋪7BnL~X<*4~bHN`ZdE TЂ&yf ^z^5Adg1gZǥ\?;oƘwT6 )aAJqT` +";}t[b'5 J3a 1o 0R _FzLd{&9$ t*l7q 0eQp#Z`y$5h`xbZnSwbHRJmnP{4 |5<>4fE_y\;SMn)tYuؙjb)Y n47nQ"-Q~hSm>4K,P\Ȋ$J4,:ۋ0U&8~GE9O~Τd-C2zFvy_͖*o|yVx#r'BA}ln:YõZlE's8e⋁`>eBPm혚rҎ;=HR{0>2cd >~/,Q9j;[ ;KewC*qLt6LII};39zT]9?2O^Ym~wf quY0rgfqK"*! 
mxƷʁBi\JS*#eˠzKh$"c/17V8@(j"R- $BoR/nBMiyJ=%;oz@{P"/K>ѡ+`:60)a{ǍNrf2())&Ūgsɡ(Ӧ{?>"JGTSOHŔhā\@VPK^NN")ðdŦԟ Uy^9JC٪k1_kt9D^ _\Kd\2Oe÷9W',q*&C tU ATJ_;t AVNY[>Aɹ$ߪy&d;^^YixL,|/u.k:0 DMhtIgŇ(q CGF!uF, U\g[)f+gi1O0~)kM97sRaQDliL e8ߏQCh^OhqRɭ÷s^SJ)JgrnQ``֔/6NɺR"(JF@2SRʌ.d-d&ޣFaby;Ӡ w~mݙߎGT`.KH¶0nK&fyŹ;1S}4kVI$-X~MЌ5>bK8at j\.jxBL u0krC<변+n=3()d>NY˕D_Lc1ċL<wܴ5)wcYM)!(Kmj'S.^l!H6,t!EfztĖhwp~!IӅi_aA\y4Z+OdKmG$UhVzg0^}zuh6{ 'Gd.V~;%^$X#(T [a]v*rCQl>l43< vz nlP򧯧N"@U5`gFc<)il26]Ddϰ x %NsD|Q> Fo5OlJhGE1콮Z ¤gK§2AgԊO0~iC'~FQLrxDN*TH]N!JYz8>^أWD})a&o W4Q}4ܐ+W~,y]:꬟%PϷM"_: {>!(4n4ZdV\1G&{ m&|Q Y]/پCBVAeQuI>E=_D";.~<ٟ3UVЧ}=,KPxn ]Į-oE h0lsi9mqw/OU4έԋg sEBd?p͙01Ձ['(P{v tnJ\%8D}6|] L "dpE4A6s߶AP-_&Q;Ѧ%4 ? _%! UC5GxW$_Smr G.N";{| .aFwHвgNù( ѱT@_AwZyL[ؚ ]e&!;lEVCb.?N ݳ"6l/K (_B`zўtLة|AI\`^yA٭BZVK[-r&6fyzߥ7`S+ێ)NEDꓗe:#ƭf: 7GԭG%.y] n%lRSKFhC\kc9npulҭGw<]r ;j#=58$!ސ'RNjT|#uJ*_ =$?.R&ŔX}؝aפ) NUUY&H5ے[l„ } G F¶UZ@V+لfjT}Nڶ/R?X({L ykAKMOwI] /<#I{5bo6H+k^$^M+(8=L1#vGun/pnBeڷf09d}`}ޢd\ZXl ~ p̪bbq/ɚ!g!ZrQ'Ar;iItpy%~o:]+SKЋΙYpS-&H,+o !:6 XjϮrʦYČu4E,]G9*X !I$n-ʩ#( +-[7ŞoN2 @Ͷ+rza1`iDfDvac#eu~ {.5 [ EעAt'py jZ20 %cwr\Rt2 LKv Q>cod3.dw<-s`i- n ⎬\3 #WÍ3l8x${c3P4w2'~ "Wf;'fyJ<L"D\w,)>SwXn NG~ (nsrVk㋡Y\9|A]3M41/˞CM)3^,,af~Dc |{/S= ^ i xͶ'P'!2O+#tmbh- dQ|C~08\[lիe@ (g+鳊mKȵio=jݱJ>c`jn@O>91dl"]B}8n^Irc <{Q(NR^ăZuQ7GO{[$gd>d0Vĭ<5}tFCWDo4%[ $fg]d\̤!)u2!dY[~_*t&X )HCӪr+4,01UD_z@*~$BɻMI7X&P_Pn,z_d}/TLRVʤTJ.J=AU\eJ9uȡXeN̉盃k='hΞKOc)V׭ب|1h& , *ϊWt?縊Pk};K4^S wqb6] 9 FdsYK LhS%|R5BJ#Nb;e3^KPtGl Kw9UԽ.c62Z,6o4 cxwOvKY],kjx-󴄣aZzS^g ;'Q]ӥpWҩ+{Ώ{4a B:ߑ*UQ_#6-B>&#q/ 9A#/ nnjlWPp̏lUJ]l+[ȼPE&.oYM7#^uIYm< ' ʏS# [F>?΀1RPUI 'pb:Z?XxbeM SZ`tn\_ɿC|EQ5-5X*ancW]2.(,w=鴓#(QFC\C>EUm+>/30Aɳa (Ekt @:jk٢@Io}JД֤̳܎ 0X.$Gzc(?^ᨼqqvdM橘k94Et? 
؅?H}Fuq+\Ȋ2d*$d҄0yxV˄dFw)c ={w=ѽpe?\ %p4w q)XTQX["㪞՚CB4 .>M7IB^&ma {F<%DձE9M'ð g_`, !+UMI,}$Өsk(^NoIǀa9MW H^n KL1)Xtj/HbV22CNr;S)_3)2HVqpk[*c1X^߈bmFNaQQMEݼ ܡ1m0 ఏ<<`w 'ax,xE~Ε)_?U,AqV/a Ǣk@qJ#@,D<0hEbZ38ߨ`:?$,̐b"|P3}Ovdʙ[ynLj,qvȅŘ_Iuњ]m >.aSfHIڠBlCgۥ qS|ᷴR:"njcYuP:'2ڂpNɽvQNHȊ<AQx |z*h3̽/WaFWx,2:l: ,I| L8avY++%Q5/סgG;0t= ׶M6, `UFÒSlQ aRbWo$]g2J 4P% ӼeWu ҷ "g?Yf<._LZ ]+oZ { SNA&0s3)DpދCz# pۣ$RƆ/z` .{pe2bd{FPE;޼=%Gy"[2دyFGr6R~h"-Q]w@z>x1(F# dp=X) {%p0&S+OrZ+\Xj[U G#Jp;<)oў`oQ#9o>v17ިu0VHch4[q7=(lrD;zjk8%xl +ȯ!Ϙ-~p ,2OL2+٥'eh]@yHqcK-t|/ D0hS@/B,adetAjYyKi{[x~nz Zqn:K8ת_'_,JZfD KSK.Ϯ^ѨɘcRxC)lE` cԣ* _>2W%. 4L~ 6ɉx۫]o?ZFh]#dפ8,!*{ _{&1; m&|pOUJ/qK yWR̫f/Xg/QϕrcV99W2K9c45c2(}'XFWf<޹\i4x/V?YW˳GKjAש`3o AӫQ`,)mQ)ώ1R0q?2< bbj{AOd$Ng1ے^8wZ5'A^͆ fzTRcXL&qtwDw#s럓 tV"K |iɉ3.rU?Bp_02Ȉ%"GQ) 8Do}ᲛcvxzE8\cx[J]|P_?lZx^ U]*=Gx&ύğpQ",a. *ZfOe "r׎Ծt̴J|yԤ36y,^FKyʻi斗<>d^"Zwx '̱ [)>Nf?̟h|6w %gT-~!0aFxc8O;.߅D65vKVu5 bv,̣1cx gtsv\gZE|SxHToFfDRpiDK3dRpϢ yЄ,}l!&S"-5Z0$N6)WO.HH`G1_& gb쀡RaS =8ЫsQqf?܋uL4zfizGT +,a+8|נç@+%L~gӽpuBq!?ŞsL.LStgUlNT@= σp Ac ͉71М%8oc-_MIbJ2 ] #Y|`bÍTDebժGGu$^2j65:Zc S}Pp"ݍ x"ߛ;PS ۥV؎O_ z>UO#MςzІi+PL`,\fZ4,z$ҽ_œ0K3\f'ΣȯxMrH}'>{a"{~;ą T3i#izSpN ae E2EoIйkd@mavLc\_dX C BH+k:}s~3p[KlN _eGz;\yKP)o׍{5*>269Ec؁''ꏕۂkT1Íь'd['88,7x X0#IHL]bI؂!\6[?.W}M$t, E6yQ ֋*d!ˉ z.'ǜNwtu(bRg_N47?"nC r]cJSt\S;GbTk `vyk4;?62lѵN#1q Fh_s'\{ 1q厀2_\lS{Ljoi!łd h&i\}g2$O)=Z,V I6hKM(uE5 ˇ0vحKſ!,“ #DИͮoMdGs2ve@a f׀(y gt.%0,Bil|z`PnݫWQwvD0T~zH:r >͂gݮ}|*:c9,ˤv/-X wd TA/F"8,Ug$yݶ7x"DOF{#yܚըboy^Њ%k1Tn~ޟ#+@$dOET,D͞# l|sVq{) I{k1FP lLӴMZtꀮjLtSa!AJ mn.!-𝃻޾D>f=uZ.+0p2}@ :R;S/+U͈_3mbDOX+7SdDGJ!e} G-QŽ!?19+.Qgdf5OGrX"q-.$Ԡ묩T]* ?1p{dJanr-HE`~uxȺEAfړSl\6Xrp9|\gep$cR_jFrUV<܀̼ }h>#kwPRPC*\l[gcح{ˇ?P/AGsQ;`;&k?~qJB))q .RZ>> hcv\vf){m1wRsQpoS:J?L!د`& 2K~@L'nɩyes.qL!+5 UbƩH3t1w%ț5 Apt[G3^r//-@n$=|2P0FGmIܑN⼆)\XmjkY?} 7`ѵ.;w[ 4Q@a˂-boV2(j2~o;;h䞍&- : R:eS 6]j l`vqں!?c^T'wΟ:\u|/D0N;Aԃ_*X>>l$x#=G~“lo߂񬛖:G'Ec1+P;?˲$DiYg+'jqbUW} ³.h-^ju)`up7nئEca<(pR -7z$-٪?V1=^zqitn-0_eyߌwF H 98KEh?2/E=Mp8Kхi6x 䏘D$Uy3 -\[?ȄpM2s}l 
]$xɕCPsHۭ^זe{/ԇf5/uv2Z'\wc$G[76i$4mEw '<,!H˵txjJrSv (Ն1UƔ3-I$m#˔ۏdaL#7'ict.x^%>g^ ,rlϸ TԔUWB8ԟne EK:5 l<^nPp>{ 25d?8Kc,!h>h.'$ = *+qڟ&REB>g9p^F1F}Q 1ۡ(.L !6^{kbi/zQ]xDuɱLєZ(yv&'_WL# 5ZQWu"m%v2[d2N]QT_cӇv(Wv|3 X'wۨKfaBom )F|^OAዥhB'B33]ё+q2 bЃᯑȠɦI*=WxX NGDީ.Ҙ3{JC͈7PyLDxNt3|,p/i6ƦK@Lq dy_a4D\8J벀gSD'~l;16isp”Y(Xr,O|rf>2鎽7nזۛ'"G2mu䠒n'xh5lxJNy%\]Cs-`k­Sf2(oOU6+Ƒe;$D \YPun5qc-m+pXR+c=V+VVmX4vKNqXuwſKJ 2ɧ8rŊr%ɪ 1aVAra,: sXCnqvUؓg"QBth?&S~Cd^B;;#ZZۜw>Pi W&,h^1t|lP\v݇{|wGZ2&Yh laZscVVlsZbcŽ`'[K鍞GJEO&Zga G0`?Ӊ[OA{GIw!]u~xYIgb9ơ]Tƣ]^hAu#O15&X77g`, X'}@\2Dl=VcƼ4#9&ZĞem75n WQ)T4$F5ǁJSv*@oH\r&mAl<4!n:0,z/ ݁XXZl`%a.fAsj03Z337pka1rF pWOLt2Mu^dI!6J[wܐpqY5dF:S 9C%`r0Xȭ27M/ʤG7UU@GOSqNٰZ9]ko|:t; S3-N,Rl q@& 3BO0 jPÜĸjJHޕŵp;̛c:H'S>#S./ k$2=oRBWq\JNJo+;ˢ}`rϨeyq[.!ĥmMy~$J .wLx?CFQHLG:Hy0ƶ5`c ӾuK@7f.zܺO#!&A?ʒPt@^)̥iU/䏧f`VT֗7 9[aH&@DvD}O+U%б>C2n{bF4{Ƃg˞|}S8'SR4TqB3]"?)d$ `__5 چ+ёFx)y~]RdNy>l&:OhC&dg@^8D3BW99ɱF끱!}zSK&eeL/C*8O4>䂡>aQGM8 ^7(kb@\v '-& +JY ;'y &= I03Qe۫K?͇HA4[5>W֚JW"䦳 w`on}ZE7/k (E͟ޔ!GW@(f\,,fs8cB5[zW,wdZK4_G px |ڤܰY s'6[;؆F:=VP8/ye6&kc&Epouc&aͿеXøgdb揮&ȈH)PcŃZ&ݣ4 D}++bS/^2w~jvce3ט0G $iSus4u].~ RuIAM Z[*A%"n69f|TNy\L`Nd<3mJ[@SSa7w^!8@U#  QB=_$y@A>iFSB.ɺ݀Ү2^ ]ۀs.n[W.zrb%dY]T5Q?ڰ7g!!Մ!{Ԍ7`>a 7. MʰROxf (J]o}5#0B(ҟw_r5>kM4#kRZz%@+,yP5mbRDr|RmzfG^EMWLuqk&!d㘢2CPp _h 'IĬc /Mf뀒k#XKuI P HKhA`Ȇl rT0TԁR?J*23M`\pWŒ{A@L׾O-i;4$05+ OlRSۻ!c+r9,fKqaN .VhӯĐ /3keg>l*qhoQ/]ȜaM3 ;J=r~1tq@Cg2!2EG]>e̖3 DFcZ)FcHn^ӿS"IocL [PAٓve|pO&qDHҔٓD!<4T1>Ѩ*0Z! 
C`V~V Сy9T[~QՍ崢id6]b(TDij,EchmB>_co=4u$1:+`x[u۔aDf+tOiWuhB!I>B Y S,2e~V4bzMpN#baO0iэA1~84QNo5(V9*+󾟘#Q?Bz4}PXLJ=¾N]^šqP)yCuMlbekP>XXECǚ[{sJȞ*(O֛eSF뫿E([_X0Z${hv `H "O(#q6TWe~S G@z>}Ș6JcvdHFra"dl9%ÅVĕbsZ>)#s`9 +A07҄bSձhGf*MV^ܤ.V-"gm =P"@ERb}$$htYZn$clFrӒuD,X$OuvE=`sSAI4b yav}:s#Z ɿK0q1 1kB%^w&[s{b"6Jk]-\/}P_O׵D71]pq*Puʿ" MO{эiTҋ ##"ٽ Kur~bM[+= CHKu!HY[r ªmsøQOήt` `abS 廕)y| N/~{$<"[k,D 6x3%rkUcŦY aќjZ Oֹ_z:٢ꄣI-"U#N6&hUHc⡦6̙ŕ #duLe}m4x .!6GP sn(T#[9O~zY}-|HHQ*OmDg8N"S -k/!:Ed k|} mгj[ )a?bЬsBTOU :h U*ʈ2 kCc/!L.btd#?R0?ŷn\Kq>m|5IN Pߖ.x#n;TB-~GH$*Yu{rx%0+4A쓫*`x^@& B KlWY;$8TCާU 4Hi-A/H\sLѪ c)wDgir$f^l~*$! 2)<lSOaVU'u0&Akۮ* o5aks猁ŦBlYUq N_.DDhœt6>(g{5Zna"Giݷʻv$o 5-DA. &+epfzY'7m\%R~sGq~m£Vu(iVLQ%˾8٤x$: 6=@(\ptHϊ\ E_HjgmCL= bhNOU&$U'@\Uvg0]CFȾrQ!{n`sⶴ(w,;$= }HV='ijoJd!^Xl٫ûebMj)kbeVm<э[AԽ⧥O!*P~uZT}y+9٢@.SṗeNpg:;W햎yVK%3bHbT+BsѬpX +c_V8/yTf1%JI=~L-[cIۊV8ÀFuWìE+x È; MTRG(4vb×!qa DXn蛼mrz)X.MEHΐtWkߌBN\Esy8 CUi 2\DfulO~Η4d Gd^*, Q#z-[܎ ܙlYpmHg6\V VSc\=t8hNȌ_<7&G!XiNCr{(߫ܿ-kb-{o [*gDZ0mq]Թf o%4%oq(QNA"Pq2O~ 8o8q$GP5Y !hwT V [@BТ-€=X 2"HUgЯ_Y]ԘLaZ[ysf ~p=e\JGykyv&k8*K0?8$uew_[X#m~pˡ{M܈gk=U,m 3ں U5^o[;8 __z|W>|̽jF휗|maP 6es(sr`%3˷eA ӡtx9Ӏ2إ;̖/k2mlP=neˊRA֧905s7)5%ɂg;Mp)a*-A+VqrΧ3t_L1!ܗ\YV_KIp2m(Gϯe4XXLJ0yR`HS0דr 1oWmO(f=$?h 7:JF3֧qͥLpJ-b8ǾVT3sp24fMٷˊ{sNAdzJi$xHOT<:]Dي)/ʮXp@ ?\\}MĶ?Gu.+mEYГy`FvěyFF{pa0ecJ/T:p5G7j<-JEq?R -sܞ{8 VA i31եc"}|=f[ZZ3JC^2a0qzU2A#dzM6(UaԛdIAR1IB#4 E*S:c ]\Ϲ*9,`%+:|Φ\"&>dU% iqQd²[d֣gD-!;d5nv%hp_0f:qR`(Ƞ]+(l< Jt]_izvG?(M{k"$Ϸ^ {LG/N7G 5hWVaT+@8CbtrYųSq4SyDY'RӯtɱGjmq$1wpZE,G*– Sr61%w"J"=e`س͗~KAoTSmDy6]_mj0\̿Y:1LS) F1fu{z5sIojbQ's2i Z۴HV-ІNKnH;"@K'\PS\FƔxfsKĴmo%{"8T7$w:*9L}<\}> t9iO#t'PQ2"^7 zžɈ\>3-G&e$ k/ ׄbֹx*@0)"WV+8ڕ?@ta|h꧞Yfs͸J4؟ҧ)#vFCH~O=iӓZQQڳ{i17WhD5$#hF&„c'%]rn+zߊo8//`;/̓lW&0 TM&/sꆒ~P!{RsBB1֏Nmrn2TL 3:gs_oeVQ_9Rt[l.JdAQ1:b^tRB5={vmuvs_m;˽G[]gT Ak>ںtO2-_ ,(yn R`~s0 y(JeC|:TC֌Hk ,L2q"pA̙S"F{;=ay:Wm^axK3]AQ|eH";EĒ:$/:@tpXL2$b?E4=|n*XVYA2jG ʭ=I4x=6yl(Ad%wVS"0g9f?svr>T~Sȿb:n^Ɯ&@z{4F L@(#^{J_R;i<熨IexH}wJMb>8`y 1[ƏC3h.XСg nnA4P 9s*D0'ϙ R_豄tXJ/l4Ims3"XH 
P*pR$ix3^DZ 2mekq@CnUNN zq{: "gA 1PoQH|rCoaa6{dz[ݠ\*k ?8ۄQ\-nDuhwrBz.3|SY`yY G$קleMڑzm. zi -)З)p0k96w7\JӅ=MB`b|+IWazQ{ưZs]X[׻=+6nlQ>ճpVp-'P6gnUU enTOCA= ȣ/סNZ5Ѵs%cj0_au99>RMDUh ьݰm;ho WY [BJԉ̴  iD4C_%]֜줚ō8P[aX? Q|$X1o'\0PJwfSu5sևw8 Іdvzٝ~(b\U&}b[N*͚* _dwU0{49 Zr/Ҥ佘edPxQH:[ #2 ʨ9FL;O /n/#H vY̓gTx92_[ UR%%ĸ86RcTx, m+O-6OJ"_+SCJJ\?Z ~KzN .ݳs /N3XD`-tfYY#3Q՜=ȫ\&/7{0᚛lƎ)`aB}$2۝4FgzJJr Ir7o+yʍ1{m'}Sz>N[&VZQuVJgɝj* Ip]{ӘDyLBfp_d"$ b $Nm8z)jdo/,E)ꙊI_ !^ka8KخŸHfJ ׸%DbJ7+ γ`aL8?LC-iݪf3EgCJd[9 )kr *;ZwHr |NBG㮒Psm_HZhC}1y[ȖrMJbD daRBoZXp@Am P7[Z;K]*mC <ٍz,~%}00  #tx = ü9Z"@,KpEp&c7=4&$3)#`Q<`"ArQfHB)D D@*̑Mk7%EKA1,WQl48+Ĵ3Hs[p7VMd;P4ĕ4~rIe">i}D3%p8E勛eb$"WzWWp{>g梠n>cXcd&lK+VB5b!?c;eȢB0Z<Uu6o4ިWWcS 5 V=3Z^Zq]BL~ ~~'m?U<8x`賈cMD)R5M4ڈ(3깊W@DA]0܋=3f8DO?|דeT$j~coqX_:y`tւ0)r!V=E{K&P:.LPЗ1{# 9$HwKwbՔ;$ܺS'.S"ǹRV@? 'JXd9Dudo䛍?1W?ϭbzSXqO.% 8mWD |4"l*rw&!q[SZQM4Ɩ|'%-*BjFXЍf=hW"D eHoX\ʓ<}C{{k~u @g5Yô$qWUgh4$CQҗtЋ,I/c{ƘDq,t൙8röz9!j_G>(&~"K==$.7՟BZT@gU/D@YRBm(1Ѽ8F:sX%r<(]!YwG[E>b,wDO= Uz2c-E%qt 9:lHA)MUf>VtlJ{הb6XOuF#,KKqw5ă'RpVQm^4Tt!TϏ<2?Ay6 ڱW9WuYC``9OVzDp0%K>EtCHZy}7>TEf㶭UpXZ#;?ӽdJvR]ℭ:@a]:`e_ QKTړ|*r~bp䐣[#R@+αgu"{oJhu)7e4.<%.]T+y0 &TjFrp$Z6l >sіˈ0.⴦;CJ._9 ) o#օyVrVxa1$ihܩx3g= kclT0#dc.=F[紩P0uaOnjY>ど&F]/VYb"bOm՟{bg< e;K@9Q[J7KK@K'i遛v:~6+|ͼ̏u@2Ē @EgyEVwx?$ƒ 7,[jud8DZ=]w uկ>6Z儤l3mE(ϴ~e JH,d[Ž(+v.^6YNkh6"9d.}uV(!;|8Մ}޳wC^4/^V}LJy/6xB{חc-5@ËM9QUO26_4XVؾu*qUT5|V9ndP]r1Ma^RQ0 05/FwK׮?F8>4.^AjH'Q\|nZ3̑4{c_g%5a+Axc cmr1S,j̶?Ud$|%LK&zپfq<ݮo97V< C8Tgԉ ? 
`anx0j pIղ](ҁs)/ZV$"(^w>g"'XNC[А`76˽3lP9J,!]aj)\#"]Y9/:IӫW.s`+F3?NQx2}8A!ѓ뭶 rmHD#,B9a%KPW9XDKOX\a/'V?ϴxlcrOe Ugp6o?D\ ѠNgwglkš]x qQo"eZXyƉ˾i|= ]Ju/kuk/"R527Ů%r2 r')e dO%>rb-};D\6߆sx*NvPi1<=?!& գmKjz5n,%z!J|]Diy8p|61R ncB񄹝Oµ}˸9l r1_րr yGS]yR m{ǣfˀtX{|lpϟ,%/6ʦNm2TSU/}ŊԥZopzrcXU+ kr+>PD>sgTÀyY݋Z}=,n|0ߙ4l#rC`Ἳ:{EuB2^!'OCm?dEI=P=3-rWfEaw2B_.Ø~Z%Or"ϯR?oi^B,h$u¸rOKR е0PbDjFNeU92Hϔ{~=gjakg!V&5D?ZiZVHA []^1$-Ux煣1C71iSNO"ϮvVf̀C6; {xoTTMq4tz4vL+eZ O{Il<ٔ8ޚ:½Bd+s%9םWPňR7Vis%c5\=pZdG IjV!Da6Ic+}ӑy@{d~D+@-f9.#Կ }vh.aܕ*Su.cgMWga iݙd1/QL 1MO1\s$pjp%Ȩn@-I,ߐ?Y H6թE^gOƇ+g °?ב"Y}=$4A@[Tu &7Bٌd.y:o6#>%5k!量s|ʞ(QQ `o0e-lPo (oZSna=I )A#_Qّpxlryضms ;TAJgX?2)MsGv֨m ;Q1>ɩUvq~8V=Y_>G- [ٚb4 SQkːvGl8ڸ>[F8b>VX̗V#CH%0:lpSȇZSd6tWBOg*ߩ87kե.v䟷1MBsnJrܭa8_"ۨH1~^H3pOa'+ӟrke<.-?4+/yhQw9>g$Of@/q9*mB9c P+8!ƓёY^D @Wqavo!j?dhC)pky*sL\ LҌFN/eBy+lfFD GsqA/hNN99S$m.3ig5}e/:CKg= 7^ʹYC5cΜ#7vj0鏜J 0/eQ*ɼA쓈,91)fzE6G]4'.ѻ2b8')t?2 fB Մ|K|aC`/2zԐqV"p1-t$] Hgr;U۩k2S Й4Pt1J47>:Q!aL`y1'7ɎCdΏj:ZD"XIn20I([&_/׼͔Ȥƀ) ?4j)7(e_`1"2EY91) Dƴls#.Y:!WQ}P [J}7wu!> *P{sIL-L'IҎhBL׵jlB6f9UW{,Md&(*ra y4&cem%aSQ(Qse-&izȔhM`VVJn3M؉q?' 
+aTp~Xtw2C%Rvq^RN4෯-&x?~8`"A+sHXFC K>k-P_,ӎkؔ.zwRŗ;OWțQeuNLVRD_}{?&cYCfzc Ojtᙹ%Yҍw@0M蜁&̀]9 MpsEZa%AJW't&iѤhC=?*Uv z,;% /*&N=O3V(%p`JS&78//0{0E mEaL %}Z+ik7eb{醄h+1d~i+ )dy-+.+Ϭ`NwO[Dg4ʬ1F^0۩iP^?Su A礁/v7B˝2KԵB-DbU] PUd[ 2}-[X4^hu=C!Sm[-L[>4KT62b?|9 F3_jlShǬ躨NIqȐ4dQW֕y}I`X)do˃)EGfL{/rz,g{dzr\g9=ཱི{ɿT_uM؛t,s73̪2q[Og@_dk m߂{ƤzBe"U[|,7{?--T+TAYpxwةet:FmN#Yf#]2>)_f*Nm‘- R1A^ *X} q=A$ jleԤVXqЗ CM*MҢkZ:>ܡK饵6MW ohD N4_ug7 5/DD: =nv0՚ADi)a?"lVKtj0ܟגs"%EX@&יk2SqfHEyx%80 7~~6]t/ot_7tOI4 {v.dnYQtSqʓnrj7h@]$Zp7=$=XQlB)*N#wC sjfҸKjcp J"3K]C(1WVz,@vBiӫʞ@ϋnrKAE6kb:$X{z7qu Gu3_O&T\w܃CmD5,H!x o3W?MWB g!mŦ%v;`AZ6#%+) sfD;*9$EcXt/x#6EG+7ύ58z~<]7`چ%*~|rځmJ4>5Y|O Tv "ٴ[p2?:0- x :A{r'-`_bN/05uX e='LH4jQ kǹgpP2+AnTЦΩleUCEu#M"rOi U~?׵VOhWgZi3e* 7!zH,;W9k:f~)u f _CyE-u i[f 2^}{3@3]ߪ)!G"З=ƙ}&4x7dzao|%_0OHw(=)MC0$uEin逑ץBgAJ&>ThL'xqK)=,9]Dge˨y .։vY@>X~zދ 5LBebճBF3%5&Nzx#[E Yl 7V@9]Io%\x1K=QUa.ҜIk'R ǵpȐMr(8~ôG 3O HunJTv!ƶ-Z8Cݩ >"NMSpUۄPyC5WVmEaiǧG -P~'!čj 1x8 !}0r~+y T+PG_u}4,P`UF&Cr$UJ?J'E@`!S+ĖX2lּ )g& +D9Lt {zū!*XN=5TN axSF3rN0MT|"͙'ֽ@-imkh9U4KY\[`,%_j'z_ E2Mb~KD gPI_ܐUpbiB:]KJir /z __`um1!Ĭ'QM|44[%a5EssfTq;aKctN><]@i[2B0tYH%9=ܙ9qGۗWCAT)H!dsBu?O,ۋ͕vi+bҟUsVf9紹"5ǜ]3Ó9NA>Z(֝f-=+ҍo舯#Lq eWAz30I)ߧM}= _RQܛϼ[H `HMjgcuƎ8,:;nVK>^m$31u;c?)H *h2#Ոhܩ4> <"lVUb ;o#ɋ֐ᨥo <bT5G;e膃V+uw)zBu)f*/=d=1ϴK z;>4ӔߚXlYpDc6B<`3&Cׯma_ 4[ )Z'E&ʠvw(omcz#GpՄغ6!eΈ5DJ X혗8\+l fI?ԁY]?_S iy‚fB;o)PS߯ݩ~E-m8 ?SX#0ܧr7Ϡ(*FR:s˛mQX<}?b d:Cا$ģg`[;TlK 7&@XdʞvJl+ u"<< 7 >AR79E9(#r{h,Dp {^+BO.|͐t jTK.#Y>.LHyfl5?B;/4ES?tOD@?1T.NZ7G:y\9 CYLS&T͇(/MP".\0 ri84Q-`s0j!'o h^B`AAvޓsQN*G6th$-`Z n[A޵v.hF6ԧ[UX*"Nq:$Plؒ+6%30΄ ބ6eA+\)c`J 8&6IF9w%Li1A2Bh҃k9uKfў` `̮P57%L1/p!TLJ(%sW[w\)'&c0Y*'Ps:bĠNT,NEqĆ(W O=! 
BkE+ltlmUt8P)=Ϸq)/m -EA IB'n_Zë;w0yKHtU2t!vOWEJ8lO2 c00a>9jo$?;e ERNО/e;Rі~6/I%=Xژm {o~Aknme` |N⿇Fׁ>6mnG%,jς4.pAJt5u05}#ztAYjD4DbFaN~3\IuVWPaɽ'ԔYRLE|5>Ss5uȦ?uEaf-C;dZnɟTl>UC>u[ ^uoೱ(T &U;XS}f, SX6 QuBrPҔ/B_Ojn@yo=!H#9`zsD1sFiKUiSgީY# /kMCv@87`q..\4wb"5oǘW*`7zU\ }B]c2@SkkzhS9J03y0Bp â^Sjiz*HiP eC93Bat#fsg|]V]~88v(j_Yy\ʊ*Fj_wGBh!V1׬&jNgpt`5G+"jݳ;{ZP5^W !7Gr(]=zY4™vV Dkm@<=5H!%yY1s.ێic^.èV2 5VK_UDվKXMa]a }n4C?޸ۤNz@YDo r<8t%.+-FG\u8t6á(dG-'呝poYp݋LA/D*}SN<׻-) Tcua>F"hr6R3N7|hdƺ̰ҝ-P ;h ol5 *^P Mj7KqM/I^]s|#GBzOY we$-u;nYQ8~wހ bxfgت.sfeKyYֺr91^DюSYst 99M^= E+}\hӔPC`Wң/*'.xYV[s-~jp>)-BNUp.Cdaп&@繟ѥʱħ:BUD,%1i\38z* dCwZ,..2pH?g X@u)x I淌7U(< Tkn|QLH6(oAs=*Іo+n,{ф&j`ĖhO݉0 t7zF׊SMB/}nQ+؁%/+ݴ:RUBVt3+tw 7|&&*g7yFv_)nJjEJ"ABJ$*whs:R.TsC {uCVR:g~5}'JgHcx=7y7ju?5V cmr2bwo*gV6sB򞌬'#Lt"hj9\``(1Mjw7158 2&ibĿSB\܉ב[Litw=m>LPӃgԌkX)H^gzeKIjAQlG =|Lh:’|t;o$1WCMD8s~6 K;t~nf(-?)uhcMe9Q!+_ЈV/2R?Ϝ|T+X3ʙ)yt#Ŷ&reDs0#`b]륤za⨍lW80%v!JnhfdםLBܓV/tNiBXex$AhފC]C6LvQrD"UʌA,ɱMgTi1O@q8S]NTѢίgIuVF!Q%C| 9qqeMqpWPNRU̽ IY/1~09Gݕw`@?Хp7iqo5I=cqR[ɧKס8>!9\m)sGq g^sظtHNR5-4%U’Q-ܻ"hJ۾ F_vws* ^1KٯLӤ Nٸ>\ϥ͒OlXJO. VS!i' pbtyAa˙(Iu*L};rP&*[=/,z {N-۶Dx(6 bZ,ZRJA;Q*1"Fh "Qklm2K߹h|]0; Ȇ#֓9"*z+yঈM auF ,%1"n)f-*Opp$>wDE&}(Ò^iZY3Gw' E8?s%hAc$l]G3Y3䪶䆧1nN)h!sQhC I95U]C%v)} Q(wgB _L|waqkJ=Kh(/i$/AU}G}m?MB` ǚ7&&|eo`ePxTұMD9.Ҏk22C}҆BkV+?=I=ST}z!TvhL0afYخ3K")|1BYdF]/(bqc׿E3RՔm>LeqSED ?Ó$Ǻ)|KINz/%kh#f~>& |H*)SӮ<"WdҠX|KטhjPW%*V`i> {'fl'f>1hwhq> k=ԟ8UOoR[7seو\zѪ%I\JJ:!Ŏ4L`"­(r$r&z<(&#mوD נ%xT-Ʉ'{!W ,^!QRu*ֳmU`dZH36K ޙ;kuVf{/ FVE$*))UZ 8[#*eVuWĐPٮ1aR@K44^NӘw8Fe+X~ VIo _oȝ ^f6$6+|;02rxl_t<۷$V5>&DL'峒YXiJq޸xF$*jݽ$r-ȕN_K?5~l^gRE";<@ST_axur˞m^5ʈWw oe48[Gۍ* C.WuXŵ8ÉO'/1AykBDf-LN˽rjZ &,[G^OIr0:5ox6}WW֖/6g|Pu"9Q[dqqԠW u&oߣw\Z>gSdmHxo,ۛjw$ǍImh r=/ (%" )'H,lLe-|7|7LǺ! : N+wD_ vik5E"w? 
+wׯWw*hKev2+9Dne `R!A)Vc;2}92dz}_oT4u7Gd 5[ i/^|ɵeS|%OR4F5G}Bt<@Z7ZBr0 j/&-{T G&yzY>˨Z"d6$GbmO 9r;n WO뇷 y:P]|Nov}הق+s1%ؤفr+ 'F?;]39Q.ܹu\N=G 0=<w+JK!wx(&1ڌޱ^gm:U!zd!M trQna&gŠ%$ԸG]UnpwIHeӝY!RiUᑼk-r\ -~О}N0;u/4{e1ԵƈcF=Yh$[Yhb~\Fv㳊_bFXB]04)eI(!9gD;=}QhgiB] B$$my_ 15zAf}μ5]Mu^fp{{/?x,w`5T5TRfj\ 5@z_.զ\gZG0%0ŭoVb<[iFi`NZwC<9)8졄d .}>4 ,~8h4eJ {= jHQvBieٸjlVhPV ]$ItL6M jTvN1ۊ:<\.ӮnC~z"/739ΰ2^w8.%cn6 I(P)l% {_ T6 <*m+ٲ,?Q{݋RqEb>tw ~ mk4.T; _bcޟEIpVvJFqT'󽙓p~ RS83wS>Lbj. Ê0.5L È@]Kjc5AP8+%{[Om>#a`\0BK ,:;^lf֣zxa?kdW%@][ї.x{UP"jmP;ck!-,_g(O6 㕈 =[":1bReki@Y ~`-O4{?F+]DՈ,?F͙B6؈ 䪕Y6S(-gǴoWgh͋ >tnf@k4ur̻49<̡a?BKFBrW;ӲxV e9b(]B6UZJsuCgfX$ DmV?>vI]h'O;/X#0`!NSDm?)`FJJVy2SQlu?rĴ珸!(Z=b\z=43924pG ϐ=7MmOL^>ӃANA-aQ傼4oxX^:]BϬ,z[l1_)31ȯ24ypc E:RA)jbx)/ϴni~"C5R5?;(qf!#,H-vKv?eoA}cn$.ml[cpRoIW ԔJ/$k1Y"Ew:zA#@>?btqZkh7Ij]/Se^ƥDgoGog!iqloYHUcW Z 4-+}菫~ sZSYVerd# Z vUM!Ka_sUfh_.7z=iŦm9/eB[dZ bd!CXR#Rhq*FhNeS_Dָ:ّ$WJ–0J]cY{ { 'MJn^T B^c*M@ jF@i\e2T z =>h/Hjj}GY\&wҵh1b1gWn֥ŲnJOP`K9<{ tnUw / tecڳ;?w[MϢA(^ْ0? ,8xO׏ɾ_NJK-/8|Y'4v>ރuxDW|#܋48 ;\ox,?aarar_xv‹ݍ7mEWAv${oԀeL|#0|&9̠]&?ޤ .D[-6=^L}{(y3jDPl}_*ENBH\1%Ȗ}q͠8ɘƇ`:׉dǸ?)P{T{gL;bz#JMs x`=js] ~"F FTIox87kGu ACHW%4_P\'A=ZB+Lb'q>pK:[ 4)"6<)T>@k]=~gdSǡ,q _n9Ժ. 
p^06's hU!:V"fz D ^(?H={I9ɏ/oڍkKzYf<3M ̕DW;Xb6ln2%E>3|og@PU0qٟKp=TKn3BRYpkZwݕW$>[';Wߠ.})ل+_^X_6:^r*h.`HpLmXmG9N݆I0\}y@5\g]x7\TX-_deV{[[bsV #%Yi1w}5 V:_]O\k뜤E(紇\3|P|Yכ)F1 YOrMڱh#P0a::=q~F*"~I`29ynuGCK4VciJ_&cTN覌F_LWt%eA^EDWQª;uR3u`ZT'::t dG^LHS]7KoFabWG`[Ji:2i*V(CkqORKp "[x{X i3ª= I<xE&nL 9$N: `OE$̷\G*4嵇$BrIro1*82,ZZ!97 xz ظoӉ;xw7웭z "fҖ OpDR qq}7nǥ) }А' [<"/vɍɺ| 2A  g/8}(O6\Cy5qUhL{ d]W=AI,T|5iΉKH}_l&B ̌ ў\5!u5>;rcraF ]3YEl5,We'5dSTp\dY$&Rv}\cF;Ȅ`Lk_}=#}$L%XthdtXJiC\Q숏&"0[mȕZlR1ڤRA@1}ɭ oE`Ud+v@op iw>:l*-4F ]MSx#T\rt?YX qcL9qWfȎc[0 =;`="l ~_ҭi_8cO ce_M@yu y.6V}@!~n 8Bz32p*1 G@əV}nfBd}ʌ)$]{)[Q7}Ll:]|*[f ?Y'T6xHQy92u&}Q]"fxtqҚ)aYĤ3A bl_UfmFYkO17q.R\sqo76>V-2BIȉuݾ..8_7Ӱ{Ik<Ѽmy>'ẽܓ[s^MފwIEեH*U<)XX6?lZ>vo_$ib,7ق40|d >%ܿEU͚k O63yzo9Cc?d:=}X}ՂgN0,[5m\θ{$4?lїoNB C)1GO` \KBݪ*0w(] #ϸ|,fx12IA rE^&xD8Q#\bZ-+ZʶfIj![ !G;~~jڂOawPӬ=&Fm鰶Ұ}Wyg6?QS !Q]AY*3Ru^Ue;Nl:52D7#Xǻȥ)DFz)ȵeog)YqAXupPMOZq{;n ~uօEqujowS($'*vxG#(V!BJA PyV]m+=Dv  R(.լwNRMvxSEdJmPixFAׄᗨ5rEyEjq?r+*d-"-Sqji @7r xDͼݜ@2C8>j:V}=H)b["הopz+'"oeUS%e1]ͥm̖OXq4`J;zO\nްolk7k!V[\8uT[\?f"8vb=hsO/!g;A ZS5NGv|e}926JIr 8PKξnM-0>w0 ѐ6R P&eiΚ[,lUfqGՍNOIPңUe.eh,uz܀zZ]H[Գ Љ 'g[߆yeOyQeC)-*f< صyK֕}2 Qd bF dj3c[<A>Ma wԂVN{T;o'd?uHfr#J1&jP ǖ$c>8l1\;3 ubˎ2 pW 2O V?q)U c1RlEA?]ui#jr=ӕI@IYafyne 4*wBDهơi=I lY; p>+8ov![ԫm GsNOt$ ImuWFh$wP<nPQY4M{pcvd#K+El`^XwNn^h6X=ӕvE,ezE d*i}Yx'xIA2mApd@tɈP+?XB*);MK C]Ds<:B&@!+n;kIl +PfW עKޘ0Ҥ99rStW'&31FT5N{Y|C,c;Vd([kS9=c>e@&wh=lZ(s5,[J4J>9]E6uaoaqS2zO$ ny8EJ/OPy[JjU]a$YnCyX$K`c(wu_r y7s/dO5K5A=%1F.Lň&;6D}}JTzK'VѓOXpתqw0eNVZWVk$b!s'fH,.>! 
ȷTラ~$P&6wK3'L}͔I9AFti7;dKX~7c{+ehF"lO; }K+'4K8*fŞ@c1lm\4WjIP)V dqyhOJoiQ2 k?KQ Vx}4_k|Abǒ|8:K-zn7 Tl1uFVZ XUftr ?n/M'EVeiY &/|yHtgTWZ.0[Z&+a-4p x?MFo(+ԝH3b-KV {~-R HȠA^LnC9$9>QLHoϻH]֒ɬR8 <9oL"32-Z)!sKv.wP0^b t4)pOzf- ,wS \dnִHmnI\=f)28G2t`W |&#Aqڨl'*⎎"#a>B;V#wP(j]6Q$Fֵ]04ri#ocփ8+Gix4Gmz.BTIl.~h$]\+nUZ8OC'UϹ)@F9d2%諁&&8[*Q{} Rã6@-@eiU:g0,ADsyfjFsxp +c&JSOխKhl5PE9z]-&DAõ]716!|(#쯪)$r 9O*1^k d_Vr١d0a+"ڱ_AJf8wQtQfduZRt'mX9v3LӢJJz^[FS0.Kx% 0R5+W)| qq43Ye@>{:ɍLLə$ (5q;g7cV:0E4V h[b#_ _g jpӋ%åE`ջ" pAl +GjTTwg4veou\u%ml(9AP 2BjWF?*8og@7%\ þmcyAwub1W0O^@֔ S^ 2ϭ^[xac)ə /.6{1nF-j5{'xrsT܊` k>e#zi-dp\vsܒs:kj6>ba(ϧA5v+BI]736$uSSaT#uL QQ ȪfnTQė^:$_Z u¦3a/cx9'C! m},A̭Das;?Uu\zk ""S{VIIBنr ~ 9+?+PP z#gA$t'edϻMG.nVb_̯jυ6Lzғy_|GDㅲuT)|L?e u10|̶z c{K;tVv>Tg}ꒄ/k_$7&Bvm#56)}˴KBG)h}#@=8{=vn?&;_-唁̹d%p;GWqm} +G [ >CzːQ6'ޣSplя.T\l2HMLs"!u'F-[=ZoNp (}jT5S7ssz< 8t,m̨8¯8٨T#ȹsk ;EbN?BaՑsj cYӁ H@96y$H-+3q%qz`f!Qk5nX\OxAC s˓ aO|7co` ϡM =. a? hs{ڐҨiC7 T M &Mܽ4D260b6h LhՉN5Vf?X-H%.GOs#Qyߑa24ȾsܷڴQqyNpid0A@ʍ)6P8|zAA~Q@‘M Yc~SGBGHFIYmMSfl* .} 5 ߵ*-JhvЦ3Uh}~QӦ#d lblѼEia׾odlaz1)2F,A"?e"؆V+P|_“L5[cҭ\*NOJV:.!?5{x9'q_0\T5M /[r* $Yʼ)̀\ecbUo:~.vy(2z훯a1ֶ c?&QWh5%mY> )tAɵ VЩtWۉ݆o de|4'je$W9% O|}~^ݡH磲tq~P1~ϲcXs{W΃1}>!'Sm5kL+g$1bwa&ZOBIwV_l}J8T|M SYnWqQ~ws#4yхFQ!+Fҝ*0w}4EJT9TC/j{Bah704  Jz@qm7D>!iY"ԝQɮik%u@zWS 5 z+Ry4j\#_߇o&.59p~=SxS.~"-ǐӅ0drx (<- tvpY,d[fDAF$Oja>Mn-86~k}_ -uKV= m('$4v3C[BQN :Ye&',L/gsnވ0<5+E. 
'RLаdq)H[C#ݻ'`K5k^u*@u\fH{'6xIdr@T-H.K<}8s,S*I3"qQyb6ZH$D}aC]qK%>tR_pI.I 86Z\*=llBr>o,gIwL5;hvi\ v)M`Vd :SFx(tr@ߋ50p*):JԌ ]!JE#c%kmxIcYQQ e`Zm4GɷfPABelXe'(π3rVLCX?r37Zŗ!<̷A!szjjr94CQow/{o19u w <ھc/qRk)tu/r*tk0U!K1[řxg(&MaGj9Yͅ (ٽN'4D&y+JS SqU, jWify^㇖7^y ex!л@ sXD5|GF=D܄@eρH^ʳiN6,9/r3˞+ B>^ R4>mSdȠD:qcM*"x L3g?qdPayhNbcwl5ٞp5a{] 3"t|^54v%e[.T^}r vAayF|*zbZQ>ɚ]@_&;r$ 1]:ۯg3PfA L&*zQܸ݀4$B약V5>'F)ӽk;-@Sp[:rVhQUmѽѿ]xА.|%1~}{l2M\w E7YZ>uK5Ne6[!],d IޢW@E#h@=C6Q`dLJBqHZ'Ls 3ښq-Ҏ[o0h(^EQ_zVw?]b9;Rr(;]L7[ynOoO/#,l6ӿA`>Q"͌T] {ǢՆw`7蠠qaG#8W YOJ $52jd$<q6q,ja&Mg\Ic >xc]5y?WGRL Bt{/ )f{1*I3 Z3+}k;#ȍ!* T~B[m]jf9?U[ !Ҷj oy'XD-V(ת&Ea;CO  bz!c'FHTfS㉀ *l*9S QUu֟FNh+MEBKGQ<]&rHcFw8 bK*hByuמ'|6$/ed/Rv/OKYm>thm)=vj9{Cpr;%{bG *G1H*X]zu֛HdW>'G :]׬/}y_L 0N9 1k4[Pf.A qQ~ ^䘶X1gjM2XQyvtv )~`+ D>g:C]2$tkCت/RF2HL1/hڎ\R<2abɟh%!ͦF_lvn[·9D/CÎÊ+`WN=R\W2+|$8 ^`XAxl_ ~d"!YWj{FJ47ԴΔ$4t_כ*g#Xߖ' L~k&O T6RjYK,j/o8Z﮺DAJ!*Xoo&vx4xxڕ=r?Gc)CYr?Wbמb42/9ew߷TjXR6z A6z1U\+@ϖZX x-nCrM@W\鮜yL ,GKA>Q4<ڲUN"&gϡ fmz2cXBsxc2>7/y,1lmqh̀Z=U08lkCדa[n )_6ôMo)r \ 8ѫCmj!{F_<-R6R׈l vZW1GN#Zό"Κkmh#pkɺYPD+lz+\H<1lk4V}3=aWs4h/ԏeE|H ZR eh2 30V 7(*Qo%D!u?p6»n'wT_I~OXL k @j̯Ј[?7w[k *NeڇqWO@hڌ[Di#sE hޭinh H?A>j=k)w]r$Bb BBO$7s֖a̚J9 HH!˓qgB&:S@#PWn\/|v"ܧ Q&aK` ѳteQ@NgDp@Ƞq,tcW@Fʁ7a{&Mnp eP]N>@9[q\f&'4ҩ|yZlcQxz'}1aCZE/eB!ha&ݷAY"/X5 sg"Kcʫye(F" 2CP6VkX#Dz ¦`!2Q #83_vʵglj~ONK35LwH rf%.vQXq<0]~t3!\v͊-#gܜl!L9)thf"Pŏ)!|eK޶8~iQeu'`qVZmcR\&6B zjDy۱&#@c*_N0#B曯ɷ>~/7#[ Aۤݼ*Fad0nDG&{5Ҳ5UL٨&r\/ZژlhŷhZ|U٥[OKN؛.#rGGΎ\M|)XT'Xjvvګl9N7gamĺub*@;cg͊=ZdAnhOXt>rdf5+>^='Q;Wҫhd_{6O3L4]ۄyb|Rt=/P3E`oe2.J'ePwڲx w0yDa"kR]떣"Tw{;#omҋdUL|N9NvKQ_3O^VVy aCIG$Z^缆8!Ӹg@հq5\N(ŅjXjިnYZ=!WtE@I*YIϜ@{5OrJ~Lcllp,4mtlnZ:NX&U&pb/5wZʮ-C$SC50P+A/z Z@^A7h ;@lz9 kбm!BOY6lklXט.-hV(0#5d.'t?:!oQ)NB8Nl{E5?Yg^Xk[T'%w[Gd؃=:7vm_7Y,ؔ! 6hbpι 9LAXF+QZ$Y+a`ADLN7Hx=삫8^=}ATS4,"/i1 m'Z;4>TP G=҈3Sf=&<.--ɳH3<6'!y}򎁤}Tvy; } $LQDW- /[ > 1\:0RY #G4*lwgӎ lcw,T8yGeFh8 ?YaP "JP[ًbi}-9%):(  2/7ciUƒ7p.SjgB1NIR'xT>s@MScMa.dqBsRvLt8FXmdЂ`3elyA`!|fUc2:zϞ˽K0Pq<! 
1Q^~1 5OJ$t#zXa~^l 98OM),{Qn Dg;VQ뼹cA2ljAj>Ҥ$28?ޞRm4F- 'dYt|z 0' l[kH i!?^ϳd2ΟM 0>gm%AAC' jm8,0 [? 9- bi/V@裇p#oRX"~@wΣgiN1 RŞ88Pkxq$7/VKxA2(]Ͷh>< pWN:׏*Y^`Ydp[;D>AV3n^\8p(9r? bF+l,VM1Wa^%vd95WpKI+ ? lO8xR>i(fhQp+(][.hE,%921kERՂ17 S2Uoq)gSww{XˮެdS{M /m u7‚!s>[?E C娭K'!g3s1;RW(~B[QnX5k r="' ika3Q4\a2\3SZ;X-dAUS{KsǍKNb:㩒")(z@TyG.]4ȫXZo]4)bp% a3Z 8g`Q1v3MvV3* oc}]ZNy@`OTI 1)B>P]k8>#i|(g.0}$LpM^,S!}XK͂Xt]E?zXp~U Y8nTbaۑT<U|Mdx>G3Ʉ|6%@s\πSVbUT+Y١^n!g$?ƤB3`Ow5jC&ߢB,(jĺYL2Z"YU\%iT8D7CZ<#'Q ?x*^w`+ ^A V`$RDrp y2TK割[νAew"@E!/pm :fL( 5 Yc)t"կtK),|S;x4'#xm4Ao`5PAuRL*ۺ m(L niI.<>NUxO l*D{Dp"/L89k;ţ/Z (.,T[Kr:-CO:#[A|…W}L"]~ `Qs\;a0^e$b/o D$${̨rN82kk,}gHXF(-6ƞ&0펱IcJ:"  u}75̶%;@#b'f/ | P8yďV"Ǯ (Z┈SѕC!umewie4n}2JM kjVhd oMeEΏht zΛHΐJ = 3Ĭhbw7ɗkύ)n h*4om) nWSQlFʋSk.^< q~{^-C c2+-( V**֤iS3WI db;!ʔs5\QcP b_?Gys=CdccA9fK+cI5I_N HOUK\g:һU[`IQ-9Z MOitIeІc|i*-9Ll)M_LlvysX#zw¸Q ޽%yF&f{#$*xڿ2/lꓐ0ߕQ'K1.z?Jz3(}u\%K nzy= :MZ|K;jM==.3=m?&H=GT`!"-e|61w)KRWTH<P C-R:Zn{#ޛY?I) dP"(~ѡ"6߷{5Rm9;fՙ5y'YJVK&*% d#b ]R"9kUr bدOE1wkJ=ym`(dՈ-gO&P~h`IQ1ψ]6?#"WZ 3 0*'  ##r'q]!'C^I8El)߫'LEHXW twV5s {X ǰٛ(!Sff8(f*JKMےAb/6(s2DfWg&@6XR9VfJ-O~Όc)p^gMZI(,g?/[ IUn`ZJ&Wq*E ɲ9xۃ3صfe6=|osmԏCGgx*m-V鞲>V8C% 03PnhPKްKaQ{6T O{XGsTo}G /BR0ڢBw?Tӝ񥰿["/vu㒹zk "-Gũc+晊iXlu_*luϜ}Ll:}IR둖,2屵x |iKEry~h d`>1 FwXyv4%jKMk7B QhjŇpR\vmߐAIƚV2 ]FtQeb@Z,~n'/*M)8WVy1DM SzWc~mre!sۜw.Ԭr]!Ä]Y4sc dHe^ th'hO;mCcbK48(D'!Խ#0q..D:'^Zԯ_Yք7s Y+*i"2XCj'KĽ=!x7\"kF4֢ͪܨU$B&PfѶy^*:Y{oqϦAXqC1nS Ք35>c/U’3Rr]yDz0Es7eWwsn-Ë?z3/}TcR:/9^2&Q$Z?y.joz1u,P/"S $vUُ^xcꦙEľQs53e:-XWBV[ 'mc'=b}!"ҕ{|HM|ڏ#<S7Py߮*~ZdQ3cmo1Y3=!v4V97! H1ٺzNpѩ2Ĵu5qʹ%A #j_XLk;gk҆+OKJ@vop_c^R>6" gTrc,:r g]"}znMQ>mTdlV^,yl3~p5Hv + u|=i1׉lr[;O2l09p6!^^HS/5{B^{y!aVc7(i"JDyeu eas<7X)$ pdja) D LBXq]e,]RFf6)D2Y+ntD'2U9Q|!IquvsoUQV ȹVW# 2rzny0/U#39ƒcl#p‹.Ta7}v 8y੶>v0z>7ʣUV໥%)z /ݠsg wi|Y6eO:TܥHV35K,jOOi1x3w5Bh12pMr^皚D,i_2~x6>HAiQ㚤#$.f.)k. 
7kքli>{&b$VXg6e]ߧ6!vdt%_VA5*Xx2H+cZJZsC)ޥY Ȏ:@n̽XM้:t!hIEMkvs&[ovT_rgo h# I.} yENtSȣAe;~iMRB[طfV~D 7O"WTL-S -z-D`)#vg~2vsf!Q]$4 PI=ޛxMͣUvO%+M<%.'KI!#Mn"vUeGQR_mrD)KB-cD j9&'V}M [u |QRa )ҩXC: &[˱R ΓۥvS IgDDe^ I@$k֔>bLHsTq/~ﶭ==̂k*xoLMYMk0ŃqRX)X6Fk{-E\R|}M\ud>ֻa;b& 6#eSOG G֋-Ą]wmw9G` Q"7|,*@mc E<6|K5/̥}:.!SJʅ j3bcf5IQ5F _m$5x>7vJM zʓ)*آfKFTģ;bCw~9pq+6ą >KF89^MLkV64̊kE`1*ȎxR2NkqÔc{aC_D?qK8|ZQ\ ڌ)nXM3yJ{H̪EI.ֲ%K Jg=71Dݚ"@=Iv]$]_!_&k&϶p~3$s;Rxk^X9>رQ0T4~A0OdĘ 2,ulC5hw&w3GTr,c;=bkKv('tEK ]֤—:5Ƒ1HPIL602E.Lj^YP q j`A/>]ʖyDvCI}5^fe&Oup4QU+yV1G*c,)DpI[7WZ,2(ݸxqo7$iSX0gbvH nϿ VXV"'G32 qϛb&kR EF}rګ/fOHi(9V)`"Aǒ+*o@l61)J",_k*q\1Y|  h;~׳}d {,jr)ܘx$x1OFN.̢q^vҦcG$N`)uψ'8;s;;gyH U"lF: &!5c#O&^>03 7evQHV3D4CWb2}5-_m,f˽j l!Ek(LH|=B쉏jQ [EV3nF/\7lB0^Wfw =0$&FzpG3gZ=_B7BTpwDC C=O$2đCҺg`T` b*t>d]bsT@l߿<#K *ЬX ' t.EftUo,qUB=644~pRr|&t:yh; 39qGs=X ȾSAYcA+1bmIIf!Zs \pN<PM&λE= 1C3&ddcԪ҂0C ԘR,}AW?;2 W!D|%OOXvӗtkGlhXKi5s86:8:m8lqWݥ#,:Bh~W*QG\574󜺵%Ӷ3i J8QnIݯs7I qLMtHD7]k6y*Uv< u5a WXq/K('U3.O!_"#C;BX++x˸^ 0V[ܺR6*+*`ut]0tGG]8)DTF r$2" ̿焜#F K`RۚOF.K(Lz6~{g4ĭ{0= J hO;6C=,p 2/IY!>E 8n ._H6ۅprjҔ*rZ[7SY3$# _3o/X\EE i4r[:W w+R'a },[;dI!m, d"?zd)6$h@AEL&,d`^·ߔxX9#ZtB.鯝3)-v>t_M쪮˺߫L| m+,NB͋?2cO"ܒ}JƐdEM /LFE\wme"{H/A%Ɛ:Yu Sazs Br]@M^{(KL h+33D%`=Ըv*iSIu$dMW4~0(孤 ,-dp!"ewzˏ8+<3CmGpyZ$ltUgr|Ept"mMgǣS?79ZgH#Rwb5;ˮ A$m f!lLOcjٛZP_k7TJQ^HYa#]Ag{.T !N?(I0bs={mE@8XM%<J Hzx2oXdW'La½]- ԾhRpt w=n0R~ݪQ6 fu R#RMND'e6 '>kILaIW#(%v L5PS Mm _6A`qÑ5ejg>T_=NtI,qw&1cm5/ tyʧ4!z~Q&Ά܌ںݾL(;1ynMhCDZ9F=c8cVfxSXopds9Qy˲VC_Nd'h!3/m_ `G1BZDZ>3<&߯'/"EsBwG>4P@%5 vrW>DgYυ)*6hH(&Nc,kG2p8X`bLi9sd^H){쪂k}ϑ^+\(7c䮪,1R ކv6K`pf2|2 1޵2H!AP@Cѿ7sqg9;-рn$'"#Tćo|08s}ܴw:8f) Wbg=4 1saԓq^^2mB{:`mbwe+P@4=^L<#pM!iZf aa(\9Њ3 8LZ22.FOrfit]njõە'+P2ë́S̟RDz?lc.M0]^-RlU8ʕRjg u#+Lyλ 5Ө}o\{ UQIJRd:)㍺Taٷ +x!2GzzlvD԰ĕKLe{Zp"i537~O'J<TMXDN4>p@Ҳx% ԵnI"C%u_S8D$U6 7ALIYܼY+@PRE[ kN1R ݉8) c<3v 74"!+ -FJ`1jo ϺC5I}L]] %u!vV>NĹ^Nb=r9a/U-?`id$Gh;3"W's3p}UzG}Wpl̥\K-;_rRXMx"?TCMw==ѸP b %_$7cXGNWq!elvЧ+xMAp B6+J!{nlM_cr zK{:bė)a}ʊaWr*^zw7MX+L5z$\1ל 
igۢ)JY+Ox-@\EWk'~ُxYI,lcraA=aH4ƄLWQ;~rw@iձ2:{PQ!:e4 lxb \U-]%uPꨑsYe0  es,ß􋟓R3_+93Nhai5](k .Hj'%۫)BI*h-誖qBa=PF/2 kq't9tRG .cpwěOYX5\0:X̖l[yӾsP:DHhꋔulPodWH`0aAKCX*" Kp|̦t6-=;uA>RO5b?B󾫴.`J;UfU4~Y /"ǣ{cW|쥊eeb_jÙ#B)}U(='2qSNBu.XO"*Loa6SStފ98Ӊˬ;Q58٨X߸="K;WxԖJg #8QE krW{rٙo{}•gD˱ ='_`Y7^F~2|E1d9HPtKoN/F{֜1po~TJ8H̆NPkγ1؜)LdO‚}j:0fXb|0h;&6Rl뻂̢go~R"jX\2/@jw3!T8j?nZTf;O-P49ʇq1GYXv(2\/W m֩tF[aw5;Dcsp:Tp6]O##d B#X`h(Cr@Ί%˿3cušCD~N;N. AnТTO&p4%6*,+hr-wh`,'pX 9A.nҢ#rhb"ʪ֐H9£3qitix7;zy£z\^ 8)IZX;o3!]ΌB:*Hi+Xp/ jr;۰wuk 7qyĈ:5N$xB" B.2P%3vY(X!4ᅡ~4DJO+Ctӟ.gMjz60: Ř񅛔/;oD4g_ZauXЍr$lkș(8(MrY|}XYS-.vy# ՆBv#$F$E'^L, (c*[nz˙Dqv?!ťxTT`}х-(߷(GArFYs;خW}Mk5n}MlUqj~DۍeآM#`PD1~ɋ޸.nJMˍԤ'OWAc2G,@;z?V qs#cRu<>sl4brT}ڨ]4;Uq:Jל:U'r{»B͔)"IRӑh QN3걟T@LS?|]R_Lrr/c@Mbo ԋVd ` [X \ 04p*'0dDѓȿmaG|Zٸ/9&UϙI{ᔹw}U {w% ZT-RH ;8TJ ִF%Ep8hx< թlK)9ɢ('|٢# 'Z¨6swS@GF-ۀRs[Gø^緄  o2hdw-&BPmR?eӓ,cT0F\HyWKp6Wj;K1mJé -۳zQ |)J!o'%?RM6P#ז 6֝9!'5ț#H%~`D0<\0슢í׌X6$CRRkALϪTQm+ί #^F|[[G:j'5,<3ECRjO 9eVB0 [P&z;ԓ\y2-3~Ze%cѸ7ܗkklX_bɼpb2. [`+G#Td(ZYDMf^)Tv{Ճ h;(X DCSC,d~3#{՞E#F^]/wcII%Sy7E0 JdOgaVX;ɵӂy/U^d~ ˯c{NEfZn|&I@r -{@r#F[qx[clFhX :9 d[(u0u"C;k۪(5-;Hq1uئYӹ/kTu諑J=Q6z/]n+<;T)yr bZ5%ܝg<<,|u] h"P;+xg=j{!?6֜`5şhCÈNsg ˘flhNLlŸk %ƩhN9-%gk*)6NL֮3J$b H~~ fUh҃5Gb = tCj_h3ܤqa,<#"[z3#,xf!G5"z1, =O1xhx- ɠTdRG@w]AaJ+@3D]88ZPi"t>Lripc o"{͗P3f;崅3;T b>z>!KZM<rො [eg%ſd8r Me2ij)U՛IR^sb?Hf,\0PwgUyH$%Pef3ƵOU0{{mHC^d?oDW9zެVԟj>N tL,Zk ԘϺCՁ*~j WJӠi2ç汵e=LyX6 z;`6q{ pQg߃24߳v,V}SkmSi4&y4 o0h`4Y9IK(HXoynΞ`/4Rc'\b@91֡-i_j 0$amʹ'+ۏ,d|bN귟x۫:?_=|466W5mWY/+3;VJ wLe /86*t;' Z,,>zo/o,%5ԹHܕ}QIYz$c-1h lI)m\ۑ#+#r"yFj ̃ljkk7VJH/y|8ZC2H)щ+Pdy $Y(ȸb0*NԳ?p" l#Owu\=}肴k"_ПE1Ύu?B^e[rK9pngTl$-2v/ؼTlec4<\[9i8c~p> s+D%w~İpAM'2ơ-(8C< ՘73μȘHtKwMP); ]}:PL-E '@JDql'zk#եV]jLV j949MiK%c]mUڿ؊n {_B|į v8 H3mEP=wꁯ-k^Tv=bx#fwoܨ^fW;{"ϒ6dlr5nO{32F27~}򠔉bwŬ6H-Dz@V['owhg!!;:ʦ;5٥{gj-h@uYTH9Sh5a}Aa Yڑuv #C}OVcM6{B̖ŵez<*)G[ȈIX'[gF?}.ʝC;(C~*iTm>\?S1րC/m=O`I"6a:d)klLhw?dg0u#4YۡVe겂PEܭ,R<ȸS{r 1 ͞9TWLcuJw[W uUo:2nSz 9!M=獳 WQΝGh5 P R3@-4@ 1`;@_e7Jm/nb* l$>=fCqt90_a| `D06sS0Ʃ5 km0'}~fu#~|dΦp!d;0 
en_אԸǽ)p=z̰VXxj1[rorq̉+4ZTNԦ{pQ"krz=%s (Ydn6D2,bRq_Ԥ1YJ])uxn{6 V=!D1GnV[-r"տNŽ瞀.͋?aEO "30ap!5,=fCN=G2}) s&y,du܄f/Smq+U2;MG gJb6Tc4wa-ɠ H]{"e["B,(296H),me=[e_\ 8l II˫\m6@ X$SA'ŒȬƥ/{RqԨ虪z6Xluo_͚B GY'>Lc7qhz:"AL\H밈xPlenQguÊJ{9}0Xgy!$Lm\(SB}l쪎4ɴo/WjMjf _m%>ٜ' rmܝi&oy KYMZ1V٧WP=f!^%B_?AH֣wȐBg3d=QuX+#%;%箿D'r'@p"I Z2%>gl]W {VAˎm! ߳c6_"Aמ 2s ÏgY!{L>)7`˺l$_fs1@ѕژ8C/;p3ds2Eog;E Sv/k痦/;bvX=Si#.`23:ў4@E努2IGkWo#%dʈp_4 Po=,'OPtvJ2 M +.ҍ}wxjʦjXw)㊱KL{cy4'4Kk(ffc g,5/_(S4K 6OBl)  wNBU,U7M_Ab,ѿ~ !M\8 q W\߬vCRDMﻓUa~ը]kD $ 7 {;5D_kψ.C5ɪd.W#OI@\,Wuw6AIp ~fٵ|'t q!-GD:gp0H&^Cݖw`ճ6[ ׾K*O>j,)e_zuouwB꡴B?7/WLh!7>o+I1@+1 aZU|Cy}Gi~ IykJwH(Fh?rיI:_"x ]W753v!۵@Hmu3IR$$o[H>d2,;5+;YIIwզy믴(=ni.^I,1oQ_K9ܶ܁ epq'AIcaKWD5'v[C^DˑEN>N9 ;cPB YT3I_VTlev *+*R*t@LR( ȶrv~H9!}J6c'Y.ck( K6E? hJaH83إhvJXi0Lvw}]-$ 7Ru`'*^a[ѕP+WA~x\翞="$p'^b{-i6=)+]jRcֻ}҆:eى.OW؁ `!Kåh]`)~}ׁVP`fzAQ*Uύ5?lMBDA (6`SԵ>xo..fB@oJQø5$P ˱=[žOh$eW|`(P~aOf/ތ0K,M |:a,%ˤ'lxeuEw6* V#NoHeI d|#Z^•x*5# G>,4J pQ2 .R=>]MZa%#Äi4Vdu|oM@xr3>8<0 tGT~uC\{ig r'mL$Xd) Y&S gIr։yۓ { `V0v`x.zM~Ri<680;(h< . œ$*E!#ńBzЫ#Z{RA lnHWzp;nKD,}ʜY'.xP? ʦ)fݠp_x53?!'[h@f'X'~|;h_ZeeVf<`OBAnq9$#JȽl :rʶݘ?\*hI6;VChq*ZeBgy`ZΚM,HJ.KA<$m>b̵h٦2ͿĚ0 陚J߄)h-iMȗ. :{})?" ̭N/esOd0Bu95<%f7Ȇ׶%$itP%$sPiD ({ב/ 5Ѕt(F3>^r ,]t_{w^ l&2QmdOԸ2R~w߁UT'Pu87zX|0_ЖVlQ bJxJ0AFVEħ"X qbӅ 'cxW~l* -2dt)v"# pU.p3q#^~XG9]Zh ?ذ!M(T\Zs$Rl*iRBv_mQ;*glcw` ,m Ul8 3YRD~(, uz!T;B&Jhvt]DxRxA6icbpC3@ڽ/eua5Lܓ{( Q ߎ[2YI{BX#\nzfg)s&%\Q^a|?1/ĮJbytW A<(@y޶\ aso3 1_pb"J g;QT^ O)q}tz7 #jNFI/<ʰ# 3qh45Yd窾c0;^eLȌʼnAх78 U!6^$ %p$/I, ,WuRPKZě+M6* 52iò[? ^M[W>p+͠ʥ?ŬTf[AQ P6ndjF D Q WS, ٬6{p}IF/ 3xRG +T:tjmeMd\U+yIҼ}P:If`0P氲қIkXf5ǻB "L*5AU(q/Vg!LTZ8=t~kmsEυ*dqH'$ukqcfC,aK,(6j8-(k(D]qmHi)tRVhe =\L8FLDmxijmPspn8)172F>/8|):Km\UFi&Fx0m(c:=vr)OU3(}OkbQi3PW`h>?'I&WU#m #sКl)Æ7L/d2h<AUS\ oE//Z(?.Vw2%2Hb)e!BAKjS>9r 9R lUuK)k&^o .iHOhR6ctp+vLثߎ@`BD`Wbs :7f̂kfFW@ Z {%316[")ix>* d%1'0S-ƅ !h.klM n2n,j,SxC뽺vOmVn·F73b`A?B,Th=# <=ygdeU4ӐyUW 7 `Lղ!.Xq-t?4Jd y"RI5\!}d~-pQ:3,5!PaUEYX_aPzD! 
~Vu!~;S}L)AYQlQ'ۦí~B ñT(=SaTZL }e HtE.ȣ.x wIUV}Tt*>C{XV3NNLxz`wo٨SGL7&5kТՋ5/H*/=vNk1AR|hzG|Zg~:ɞ}b vޠ:}HK0-{Ÿ8x.4mAGTEgRU?FPfxzGa/_ax1պӱy\ s7?JyE "Jt GPlcP]ߍӱ70Y$*5!jH_?rrĶ1oЃ'a2,]r؞La8F~+.a=;vgk\F]$yrۗ4KP ,_uE[)߳4nzd|`.OgsbS9degve7N  ֎f2i5uP{ 0wsq 9~ԥ3ph0Io< @"ג,8&i'5(3ùdHrUJ %ڕ 2S&rum$Qy/#0b~\R#UCUs${|qiل% ]r~űNU^B|>tm?89Gشb_]Qrjʄ{} /֧}\րx КdRF&lcȡ1hupfC<XZеy/m"8 @;iX "Z;5& 1].5./sBOk֪÷-Ub(Sbe@=E%LNɩzi<} E EӋ5KA\ZKH,o&2>s쳨t"ӎ J%9rG'c$1()I|X,V(HxrMo3!/mVOԱřGUR?aJRuu@cٟ#<#3<;qm$|$N8*=T&¿xcB>S{̮p%I!_T9NQ"-yA[7Hu}3CN=kUcv1tY^%Akp u1h,u*9NIƤ$;%(Y_eez6Qmq좸8RW;-ay[ :r%H8;hF֪QԐ%T)R6X(Yr7!['y ѵV*UqW݉KrWws]\ob@Ix3AFTZ}d=Zf)v3ŷJts;`Юql1[;7?Þ t4aPm&e"oh ^cwĖ+a̩F1rJI+?.tga-K,g/p. |ҖA{ j{r+ g @}8QY DG l&.*g6?_)^Ӯ- 'ctIſ]CZɋWb!Ub8hwXv>i0& ? ;k[iuJ{`B<>܀:ՑnIaG95vp~gq;hK%>7q<-ڸ7%(j}T".j}^Rnޠ4(sW6\ Q\S^"._8NZQK+{1ߒoی9+a%M-h?'/qG1=l/y@]XbQ( ~1ꉻMiJ`Gɺx -Q^oEg5Ji@[Z<_tMYC=s+RmV,׸UQ^6ImaVs[7wXluP_D:8,rtLy":z3‰uS@1bm3;LnFIh {j$Cj%ML%GuE![W2E%TYNE{:4s/Dg9P ^$~~5 Cl?OC+X\cĤQ-wS2ڵuS;H'cVvr 64?/^;2eB,D?Wo`OC(zUdCej\R#?}C[ޖι_~XWQ0<=MV9^J) `**λ-1C'g <~kYڒ-@x!ʟ+E¦|o8y8eey,5X~L*S͟ow-Ww#XHDUVV{̼ұSb<5lSVIgh(jv O MG4 l`'/eLPD a:Iލmȅ.ZGxSV%ʜ_8 by@кռtNTKM 'S2@lu]6&sODwmթ] ;v\2z{SDl >xKv:F|!]΄>0XJ#w#v^R#>Z6bcߧqg92k\+%[ oRhnoqMr9R-9k/oEd0&INd̔!Ѐ O~J<4(W@SW(Ȉ1nWїpnAno?1] _y@kU͈8ÓUpRz蜱ͨOMGGne4ԕͦ)U%t\mk=ɤ~JRNq#ٜ2GY̋ՂPE ͝A-!;K\,m(N F<漠'RNgzb7cWtvטvg<7H+gXJk`pO~k ( ~;U`~z]_|=Rn-bn짥,*~sKuo^ "FR,eQ:.,&>*CD*tWse#ӄDP^^Le8(.hEMP䤘eB!q9mTGIUfm*$h,ӡ8y]; $6!>%2g1_YH' 0(29e\b~(p6h6 8 M#~Ka%uISjEGSyEv@Y.? 
Qd8BT2PױXưž#{ui\݂c{b׃R5Bg}wג`;a]'gc~a WvD`oLdׇeց&Ȋt3}UKU{`[؄G;H!!ʽ8>N)j߶nn|uk[H^|*  qHB݋x]\C]BšdBa ׶5(P3{?cfho>c=N4QN\W59ST m T7K64&K&.u5]pmn礮0~v>p5 9Dvںj3go7D7xc,) $Dލf_]{2[H2<X8ՀW VCY,r;`KN4*ZZ# &33 /feHF|\ FP MFۉnU1>o9VFvxsVĠqNmFb<$98XX\I4Hz 7M]Juqr2Cq4@* MēpvOqD^x]筢~ !GYíkR גtgQe0|~=%>A4=MfTQ 4kܹW>yŜ`ڳ"4MyYSJ6G﵎AA6DĢ"I&酓MyKD!}V@J@II#JS!7RQ@:JXcJxhK^"?&[^uBIפ1,"=z+\Zfe^HyVSSN7` ?1>jʄs?`__IR0@؎%5ܣ I0#{([8z%q aT 3~Q l`yzAV+~Y^"9Lޔ|t*rȗ.}z:¦5GNn0eAvi>?Z)_Ǩ]NEI,O\yo#Ws/NLmane `!/Kxn@;eB*u I&>78EmVOe{0>UJ|MҧqRZ ULGtS;YIl׳;FuCkۉUI7K/&.Ma2ޯ YmƏW\Uv|zYnH2LBV5bfrBQH ,ڀgq]?Wk<[A5;LFFb;k?7:xR(QW%T֥0EkFJY8tЗ%qz9u\~` juĘxk# W%KRLy/K׍NَLc8Ÿ93thv1{hyGH-t.Uʍ@Pl7)^aް EcivV#CnF-ƾ#M.JI'N~LPj f?=DLa䤕Ơ  f CVJ$VR%{viO~[S^',Ř=ab.zAg#' UuySk}p:[l NZa̘O&~ ֫aWÀ8u CTŵ`1dhN+p}kF v&k??r{VUӪ$;6gC-2QS^|w/ͶQ%BN).w1cL~Џ+vftu8=r s 5Sָ 8<#,? k-6aVd~|~%x$o )g~`ywߥ.ؓ&maN/2Yk Ύ|x6Wg# _nLh6䧳9C:"NN0Mj;6\'͒Iͥ<.c_AOʘ-xIeTFn g_$,<6"B ]Y%iͭac)Y U3ǫ;?Akno;`d5YEuY\x챥ȧ2N>' ywuyDCnw/q检? nʔt~BKapy1>Ղ"^.J}od9xDI3 >cnF[Q{&[AXQ*;A 7̫V^?̻3,ܦ٘]7>dyuYg-.#T#xuɯ^Cd@ xNaPR?C6﷒G,c o=;_Yq'φH_g<vl INUxNb߭dޟ-_!{jB0hk}kR&FyܟwKE_5P~ 渟ZNؔ1;iqmQL^Dc9:vE 9fGZ8LRn2,ؔg/ґv.pU({Xg8GG,=Wl(\a%  "Wysgm"Yp+nМי&sc헮k("C)P`fcV/(^OnvCs .:Y~N$E.3 v@B sJ ϒNI04TpGhz.v"_L^Θ)G'Az]4U‚t<)U(r *FIdu5k{82<`/9<R+Q:[*Zfݝ,Q |Yi<w,-F]qY86IͶ2)MJ/ܧcM65&n!ܯzG:\ɣ[$#3wO⺕I\; +St e鹕R!ܫZ4^Yia-:1䠷a?Na33uYaBgNJk@*ʹgF/'S\m()w!۷4g $:7q0LPjR]%'k t[RA*1]%W7(p 5@UV+5~%zɠQmp 0m66F甋2 t$B a @Wҿ ݴUxb򯲓LZobv%hBCS'̦~ CRߪhZsX͟UdiEIX`) 3J:Vr&q姈~~ҙQp@3g@w sQl( AL^}~`0j^-kL5⴫0PcDseX-VoZ}ҽo->wM|HaDy^E"iyli?ziu{FjEh6\LPW L Uy wq@gjCKj\,h#J[ǢF>J. 
B>`@e.eSRǕuPU1cN?Q.?nAtdq51t+0[m0մhuWe+W C T4C)r\/Jn9 -LE9~NحDŽbWc_NL4ptLa$_/Mi6u= Vψ0MU=|՛mq,?٠Ay\_c TʓH<-(K_K?9N4nÛբxD~"Kl՝nU^2{/W7@%1lV+a#+şWiylv3]tO|)mPo牕]GF7hۯ4la!eL{hG@?hCktO,yU"1?UqT/*tyPudyT3+B`\VOy Heؕt (XܷL(8ylܘ"C5җ/hDЯw@ےϬ~MWOX_r|"#)X( bӔN"gՆk4~YWz&H_w#C\ W}Ï6~pjUK` NХLyI3tFiG *~0]/@dI25JH)4l~jOdxD)/:oqCZ͂n.g>3W:PbE:E{YTBDRq~ƦF G-iY;.p7#0ا,F,"寪1轛-GRE?V`񄙢}'z]|fm2HU ;:mN8%a,R5pPfߏ*dNޛ\zs7Z@RIj{h%^&*'7aZ`#"#C9طdKBbX䌝G25樾cj?&q v*ly5YQD?R=aC@w9s|㬊6/<Zi H&p̅>wQ 1DR_'viR~̓deJt.Wx9sK65v?6oz$gXz|Ca O6ZC4U`>5Ne~}SoȪ2%_X3hFji&T=0Kb/y@ P7\] `[lFZs3֍MiL=(ҁWSEv@+*J4ɠNtw뙛t|*XEg]U'=fcBrj@~玆],0+0a(p# V}JmEhc?ڹE.*[wMR?X|0u1c@)wռĒQ]ң4{6[QrtlwDok!!?]t 1 J\ :){u}lVjN@$0p?OW~u[HI'ט_؆'']*-P-ڻ+_H"%CE$4QT7#^b;v}5x$ne"Rǫ8C{dAH̰ccǎ0,P}GpƖ>2O؁|EHqk9c cȕ?3G7;o O7;L SY-\cљIL ܚL? r h/ TY qxZW)Ȥev)|2h?8%m"@_4Vw\])&6XL h1k d+0IӮP]`$ O=Zaਊ>?&ܚԖc#MT)!vYr9#7i_/w$&bH~ W>q U A +5q$^TU^uAMP&"Ps_ '`qLg^dGG,rSMH*V6 {n0 ef:GIf>Us6g 8lA@EYU`&+Rǭ찙+%PEs1uODpR8.KchUf2=D\z~F=~+u[fGNAg~ŵ5oܴ)?Uc0N!wy.:w~swT4#8U-*3̜.Xy3f.B?H.d"ttR`_Qu۽Y3L-wNEfqx)L5 xJ4)i5̅WN`FXC/-Q6SMg(x7/W lqgY,@Pӟ-KXaCt<>֟k[̾9B2.P3[*rOdt?(:[UkBe4"XG:Y__y,%zY@~)Id2Cb)1ٙ224`)m&zRXM/ F0j;v晫almR[Ӽ]4&հtz8jLb'{+%#83Z K1X$IDM q w{\ ?8swŐ[fP~p5 rR!!jn~}d܃a!zɻ-h*nU4 KE_({Sum K 8i=[=A>&u91̥ھreU y6'X~ Hco>*xL0wL Ucg6.YC${ gqfQ iԍdBr.z(Q#YW)% }64yycTdbSFqܷa3~{t1Q%{d#wB 6b&FVg~?FZ=߬W PoX[{/# o&'B.] 
}?C|֛ކ8e wC4*4j xMB d9+ -NǼ;mY:xP:ʼʴ̍eEMC"ר5ZA,d/ZZفe} H_P`(r:y?栍pC[s\qIbqq#XR<C7a s9i HCQpOE&F+pрtSgtL3.dCiˆh|݂|FB3wzQS)R|SxQ珫H'[/BgRڪ%;=8P7\SlN1ȿXO?W:ڍ'%N`*;x%Z\ l@Uc'](3p=s_E_ Y]ü.6vNTu\n_ϵKIi_kP]_1FmUr>?p|V(sq6J7f)ܽW,hǬ XS+"2[`IY%UO_v| v/=#e_gUU6R?T]߬  gAxm9"1u]jb܊ON; y~ =]=HoUmT*4LJ)h3)*>fJ↰ &.Qz}<']؜&Ҷ郉sc$R=9 ؠԯY]"Ma8i~:H ֣.m"h5}h_ w_6%a> (͠&❻Wޠ d?7yTwyu3KH=|E)Q+Z˵6Ks?8Mög\( udEbǷ=)mh_wW)LٖՖd76HW5wJ% z,o> -h0[;Ĝ[Flt4|nɪN OIKKM:66C*983 Jjd%IyE e);:.jJ<8ź6#)_Y# fI>qki^e4OY$nOiA0Kt@1c+7Y(PN7b繎zcJ֙K@91neYঢ়]n|N~,*cNN룣n;/Ag2Q0(;ݯ 8@r!km<3aCOBӘ cc&)OoVz#͖*վOKI)1k$+4Vd`^?lIM='}^x*O>nJI{O?fK4 @ 5( QyY:bnȋ4?\Q9D_0NK 3;Cp1`a8 "-}k[FLyӚ %{i#<K8:LR8ɟz.ɷsU{jFT+{}zd*Vzs_+ -y(ޑYmgjEvQZq-yz;AŹKHs}:,Y瘿jdD"ӞT FRySFֆ|C{"dmg~*@vTf)nenij|j- cy |Z+$gvZ"2Ƴ.ߒ;lHܛpkpFܾe J /h^ॳC$ԤdL%F{N s.t+@ E-h Хq; {hQ 4Fl} V4s+,; qUR_M4O-rv҅`1&9#\F1`ӈWk8fjy螮.umF I\Id 7HӨnz泉Jun1tSJTNDDtjSRs5(EuEeTWȦa5ˆs-VJD[#Tu^Iv͘n\=Oy`. ptb71PښK=)xLL`rm!C_^%yO܇Cux!ZrS4ڇ(/t-at t]C$1 K%Z馉{ X9m$S-k1ܵrT]<] 'NV L&Œi 9釲գA@b _zT>[:%FS!%}^CĽ=Bk(]gA&N"bYrGYlq1 z,V-֬dDΤ8sjw _\!NO<AcdAR>GdDYi\R\q(c-tSST͊t &JjoPU[ 8zJB4r?6Q{"țK}dn/*sYv 8U=MI=3fbh$!CKs]Y5٣ea7}[8@z]/]}<=#@$GNfEm`zG&Toq!Kr"rx(ظYm?Pg )2>jLj,N*Yu%!i/b=,V,!\j@7s,}Tq(ZxskUQ3Ħ f們@G]g7 6pK*ECӜa,Uj@ߩ|OA"钘y;}E֢ ]7(mW3f~=i֢ck]#N)WbPh$Ud1i*كszH*d}q]EduVO$U$Ey?FSZMUvGm,S&cTrf(.{J,p(01 CE+ޑ^pHE^ "B+1̵!enYmZ锱wz$1҈w }?@5R=͡f׸4vƌ;eϊ00$ ʐ8Ϲ.R $VUO߸lUT|s~fEWjyI%*nupڱs٬j\/aj:QwM>!6/n{fқ.B[:uo40\ %#i=c~5_.LZd #s. ǝ$!lMH&M*T&PjQ&ʂȫ"`CN g8M>)q 5=7 G| ]~UŢZ2uA|<)ZcS|=~~L[Q8(}bO hd 3`WХX;gր^~̔k%K('u<&ݺPYiOJI a =o/dʗ=lJ(zY&y!b.UCM9+7hdkUn;20~odE]i)_+M91P.u]^C]G0>ʧS[,nƯsx&d@h*fFWm](^n#ra9pڕ3#<bOv2wIJ&ONm-SRR2Կ}JwG(/)6]O%+Gf8yUeX@spA-I; .&#}OFI(;mHqU9isLDT ]; R1+j:݌b% M# ZBo?uuLVj: ¥ 'Q+waqi 0*C, hT_#$-(y;e^cvl;Lq'웊j]/Mc[;8r(VfmYsl,0-Xx>*;>j G%?p4龷*N~@4N,b,6$.i$+͙,uZEX2^Ų;[b>R9g$7ڒ{Hj&Z&{V'umJs3qM~ `܅vKT“<}o <)fRu̓٘Ô3bӉp@#. 
P(JrԕdNqm)qSjޙԨ ھ]մ2/>E10„7Y⯞YJeSv+=L,wb`3,GMuȹJ xPOj0_l zؙrHI\VrQ} [k4,8 .6d $umi$QTnɥ3`WћMvd ]8rU69'3v_EJ0P 64IoS$0cypc q4!ʹ6Ili}2HLwXy 6hg(uJ$gӊn,Eb02݃5 )LkKbV@G^]ΡD=R3̷,V[$ZUHo3EwZ@?Fa?Y}*0pSۣ!Bn.*hJsUWz9 Us$Ն6k|5 ^h PLaY:wʳ84wzrDF!qMӜw'aߘT|`4:Xߋ;K'ē}ot*)Xyш=4|c\q$E r~jG)+8BEpf]=|8m#a5mHRZ>&y6RSFx3;á1V,Y"PZdkb6U@弰I;;-yu`(=1#yh$5v1SJOw eI"k72+_] ߃ApHRWf?cb śiC6m@/tyIǯ"C{=)WWqՈl!nL6/)rC94%52c4Iw`Zmd{Nb8­"U= ^vئJ UeA-tp` M޹,ҽ$%|ᨑ)J)nsM7PC&2"L@ϣ[񮳀%b 1 -*! ӄn*~ī`}un%N1@ЖwLPL DǼtiR /V ' @vP[7vuyO1tߡ9'Hǝ DiįÕK@1峝[ IS9Ù2E, =-#/ؼXETp'Mؿ :( )l j-*xeȻdC32ĎlhHaH@m,Z3)}78HO~t+& g]}v.}4Bȳcdny#RrXXo1#sY-觷W|o[OS}6{j{ :OyRaG2_N l.#Ю]>i/Ǖպ&-[g1X֫4ljJc ڷfJ3tq[~t]GAڨJ_`*ܥ(ٍIg`PR<Ask@ q'`tߥطQnyBu\O]1~/"gC9^ kGm%8j?NRSwe){bU.kTZ*!a @0 UQ?8U˼kAq :L< '6ܵ7 *νy}@CZ( :MGQMT/>5ZFM0u|+u[ґXZjEɬ@jԎ_I.mV@~K[۾afHK\^:5iW #:B)" %pV/%AoՅ #ݘH>#N>w>OGgnW;YdMN5]|3%ڤ[mڂC=&25d2>fŻĄޕ:W2 \))^La #_K-;.U,0l{&#uf/'cⰹlA}|Vl* 1Y9ROdd05߬oӐ>D|V hYekE.m4 M{Fnu*-!V}}壿w6'[ȺƓkƼ]<ÍL#ΐCGn{ ؝^lj.K_*35dao$h)tidAR@2dĀ$C> M ʰ/ۤ:_i]`yv!u{ͻ@X ڟ(:@hPAt*ݰFv@b,E!&-[w4TlɵEl_4vYyg0͗7n!@<Ț ߎA); J2!RB9t4`b4!H )~k4WTd8z_ Q?8YpbPO3rdauMFw C>9,o=❝D8mCқΕ4۷L 2C%R =N&b""E9 Mv- To!ĉL-mQcF0Fid sH T=ZxzC'IlZzaڜMK8]zWy|aHUc<[)qNmNҗK}9T +w1%Le Rd4[F멬}҈[O;*~.}aD4~Q\CYN8"(7>k(/ei`R]38DM_s"{7V~-ٵ.x1 ) VIj`0||$4mف(<=H%Nux`eW0mw4)8Z8*zW'(.!_4\|SEw3пEƵ P ]x1HД;;6.ѭԷ*I#Csmv|> 8EGRk'"xT `kjWh.JuJI(:ՅA.' ܑ]xm4I#9$7b}%mJT<('%N\uէmpf{4ךY86svΨ鹨)`MjKmPsLefYPA99.c cyOI5Q6S 3ï, X,;Ԡ3C ;-f;KiҸĹ;Ul$훼2ë`- յBilrʱP onWu0cJuP`}IdTutlܳ6xC iPw̅VZAL#oc \3Т7h 8j8#8\b(kZ, .1K#'(6<-pLuhwt2ʎbμ|Pa"wgG.X[Y3T qD!p N ȸXѠOx{ȷ$@;W R1{U$N&ءYDrUW]HrtU]I2LJ+`We,Z{Q!Rm<a@q""UJ@cUD3,em&m4ny34HHpOK^s$-f!\+F(&\Wp[zgi]6YkrY8 9 *d~dH'9i*jxmf.=j t2HpedwAxɶS|_d^5; ј;A=?Q{PZm[w}ҴQ((1*fЦkg$Al#oW)}JayFzrA[h~Б9뵩gyEDN]ːyR}_;tݦŝ2(֖zvJpMc1}Ua+$ ᄑe&ږWz\@<]sț…k XJ׎2;W(Ð@΁~DdYo7dweiQY{rD)!fRx"`9ʻFn_/Vʪ=$ sSڣ ZYe&.8 ЦyփCHi58ˎO0ˈ ۨbRI! 
j3)έݓlzb-M8yKew5@ 9uk=m/njԪ}?wlH: |R}UG)y>]3~ N3bvlcQDjǺoE{˓ J<(^k'nB̜͟l8~ڨsgjF(n,;m`U6GƸ 2wǚ-S Y@%bx_LK\!|ZkW}e/őG:=_R]d">~i'FQ_БE ZWrf<2ᬵm91.D^6jK.ir\n^)r4 qO&5hTie&Dʄ%3r1\oNeQVyvުٲPcIj *Gksp#HkOeQi,1)%_5X!zHr4SikZ0q>!t2i!zwlY\ғ t1o= {pH;x G{?+'@ -\4i>.0ϏwH)\PͫWYռw/ ZG i$:I^s=*ի# Eퟫnyi8ҳ=dVa풼> 7@޹AP6V4G>! h/dSs.CDHV@8Gk%4X} juXY^V$?cALT?K"U /&]n Ov8گdF*S{L:W<`+AI'[W䈾ۓ%և#e*!oYwN m?FꚻR3_?-X?β}/1"TAJamf5Ҋ/ 0:0=d(@ߝːdl߽ WNLؿ[JEѸ[y/)9zpq83F$16 !xQUո 4k5 zζҳbq*}S, >Fs :%T׌@G6(cG(-s\{ڠ9 #܎k$z끶qE='U!mˁ%6e.C.\Q|Y318vj`um ՌQT'f gq̦ʫ OcP8N:}P͆jĥ@8ISd,<-(]&Z\| & ~D *;̫o? p+5RV>&w=!?cKjqn:&q*m 4ߢzj@gA#A Y+t#]ʶ-2['].@iwd~9ǘR6~㹾֫0Kn>jkP hc@E5UGs5O-UhΚڏp~wQ d &J`qc5Eiɏͥ`!zߢO6rlAGQ%Ehwan`kFڮ, a=yhbR+7:YpkR"R^ѭ>:9=Euኘx=E!r!(@ZI\IBr_G]DZrLmDz]}_5 Uiol.C sUDʎawn v#%azڕy-P{=ajr_,imT|({q`}o6Ŕ,7ȍr!z]8{܃Frc0-i sݥl4p웣H FqjJ`clMvV[ `IyȏNu]zV%P|&IHƦ@spw>dzND1"wRpQڦWe*- XuNhoB&ڋ2(I?)h"9df6 ekh3H1EM;WS\u?-.Qdѓ6N>3epOԳѠحMn/dYЪ3[[U.3X ='D l ˒7b㔲Qϰa!v cJjpj(/[CQ T!DwKo6-#>I q*8h mK+*Gtڮ;˨*w7T-TCHJqF15]7d(e[HH3qf/fH)I…FXQKty5(jJ9&(3Uy%7qn ,:+DTʗ!U+_}{3]~ (~d+5 2-]8a${4xٹ< mE=#{(xݵ0ݎu 8Ums3"[;8.z4:2*9v:3FS1keDƧO΍7K_IBPǛC=vףz VL2\!O^%aEuZg$bEbx%F{ԸL,aT< ɖ`le Jg=.7wYNϟy5:M|q"JjvH FhA#۰#[=¾#YѣBlR3/ b֩Tvf& CX3V'$VFd k']Lgi5hە1x9 ϴUq -ȗ[d~9p F1O,uen_Z;|8v#@mdW; ܾ*p}1?V^F-g OzY y.rv^|9ԞtvB=EdY&CueROΘ $(MY -MtFtmdϛ"4՚¤, @[#>-9c5q$%k,Ok^RdGHےJS]E}ݳ> E}(<;(O&SC7-ioԯDUt!C$l(HL'tGzK]Lw'II;k҃_]-<z NU7DxC5h ԊHƅr_O@5`#B!!U~~qآy)W;q ,lYQ@ W UBUF4ta!_ì!WzGq BLwYȬ na_=b43y܂hmɼJa7)TR-C;ouc OScQ-% WNl $^JKW:L17ĺ {-M&/G?o4>_AeN wygqN؄F!rߵ-QlS7Lsvi(y) EAywo1Wo3[33f"v)0]|9-xx|8T8q /o^~tHJm kSdkz;^N3Rzҽ},7"nzXzTT{oH ZJ2jݞdfws{̟u +ց'?zșwکŋCatmp#E a0d.nt33U':=P/ВQ!^A;Co'U~k0h!k|p/f]{BB2@uM@O"R~Wڹgb UG+Cqax.b! 
BJ VJn>A4m'YvZ:b+mV-HPh SDr:)?d8n]X`+S , Xmm\UxSsɯm +0%{T ~3V !`s@lO6 Ox>_C;;*GΦʧ "$[&4&Ya ^[vڧ߲-9T<;GyMzmԸX]6Nжʊ fH(/H!ç^( >"~^آOݡR-MX6vm8[j!7PzF+@P'am *k.)#8* y%cD Y!^]9IlR_ZG` UEeҷV)g+~RoC"γ_[qnMؒhsO!$Wu$K7S`^Kм1 =<'(%"v  3}8fqM 2%5$KdO@Vj+tY^͑VVXH"^Ry W#3G`zc/_AF.]ssAR2`uVu]@27AH*1U..'Bb<6 Pf$A$5rX_Q%I09PC]jstUV{ D|WhG[Y6|_F^CP"xqA]w96щ ^PL&aqK%W$"ǠULoWz -M CsH QlUrjrů믳0gM&D%n*< ?SFu3 $dR``  HO-몥yHah'c,ytB3r}\V9>[2j+(uIkòJs+(,aS1 ty[VxoyE;1!X[hwbA:37\n$2d%z8RK<>k>0"+qTo[|Uu N@ZQ ׋\E׸Wjv'":HZ"7_cmGʫr4G1qleGNe, .|jFWOZn} ibFT=/E;枤t} V'SH3 ͒'bZ#DS| N+8'ruz ±xoI(`ƈlYVIsguc&jߴ6i޳Uz:2lE=J=y[ƂG4P4Tc2z\Nt'8X×ijl1?qW- YIYm((ӬK.8bMB0+ZE~):(.h+ z?vvb3\lj@!#rYc"r!I 72?FQ$6~2[5=`ɱy̪ADn\aB'/YRGi7w nVs4{"m\IjgO$FW[! _ 5,16N$ovHpV58+dP1my<EQ L֭Zgu}wA{NU4O.+?:nj69kMo2yQ*ۤ^j%(駬ubv`A+v-e!?_a$NUhrpYtw݄@&7fuÆVdObƽyBݘ8mI~3@륇yr+#3uHtU*Cb_pN8LuT ?Rx D\<·%;gٽ["W:/쪐S6q}Ҋ z֒4y_e1zNQvo Sb&o0{f{*Wg$x *1:ސDGsq8Fq#Pg>5 o=踏$!r]~=.iJpW\(qZȋ0p+4K^+dO ?y?Қ304SSORَ6#CuNجnM`iXh<Y/O'M7Z#\FZ>*J8pX:aĄfK/M.PzA-M ;f\!#YJ"fk׸A WT ܠҍZ=3䓇n^hMY$ _Ml-k7A%u,B; w׀p|*Y՟ nH <3 d鸯ԅ)CJl 0^-x6. .)lxX"UF8*>6Sm? 
Sx(C\X_,q8ˆOJׅȔJ:5 NHhBSJPg4ixӃSj\QfLQKϖjc\cH\/1to]`E9F,l*;8A/iEfe <BQQS\ h#mc'浰@GhB4k%9"z)wmL ŀ۞*ﱑNvJ:4@%wuIl0*Fp@58[jQ*&y6j1k} M&-׿'xlŁEa)O^ЛW".КG%&h"N{:_[bpsߧFgUjD)&:s4Qb/vf~[?ᙙP MT"=EoXJ¯ۂLr"f۸i/SZ<>;FFJ&;9VA "8uѧ p#Z&Z OOZEW %USد&ObN9wBГ7ZWbA] c 4)?}/Xxcjf*x7$i?7 m?o)H99r߮w}/#&t;4[r}| 0$>mRYr22QhC۩xR#x!0yp%I2JmU<,{փk&.Ohyi4)N!s{}vo6͒u6}Z pۏDi&`ɟn ]ڠ܊7Wᥨ nC@E2TwLâ롸1C`d9/dk*)c y"2Br)С^{0 7 1tG\UPn2GcXi׶H5m"cX1Z$C\3$|/q)A<t"Dzpow&1|G9ߊ>Xq/0F7`[i_ZØ !Rq+WF/U ~a771&?B(L^3y*weC\3mM4F.NL_B@t ގ=]'׽B ʺ'B Ǘ\a*{آM0|y%tߗd&/~h ͺߏʓYf|0 =فO >S׸ѼF'!2*%S2)c StTY~*ԨpiGʉ @\d!`V#m?~9%;C&Xت%"O·xlS:'E ejV_ޣc̳J`-XG80(@Uv*Kcvbb3?)kAV&GV(6$@ P]atzy.9ݜrVYt {ݺo"OIR _E.}鏦86LCr*A6 .G][䵿"NҎC >ʇLlG%˧yjBgoQv™1*Oky!+߈$dnt<P>OΙ9nDْ>ɳpץr" _wl~k5Prm᥻ɛ8CL]D FV~[n8YFOn2 5xPuw:RK<>`e bз/<↻U1rfH%1>{%1pm :ΉqLob"nV\}jCќuvQ;$hĿ,ؤpZpơ̹CH[] B}Bh8'!Wio %߻[[kWG3P)5eijEƬJSW7OG>( =P}U=&b{~smIc=$vak>%}ڦ:̟bY4a0*i,RsG-=&HwE` dCH ] 49={AP?:cdb !_?:z^/>s/6 ^7FD n 0FnjLnlج {;!͊Ge{y`.w@yw+x.gHqu;+Ln9dm0 P5gZQq+>NTR(MȌl@I ]?Q-v(FL+r`PYf~w14UY`P` 6i|ɼTx%eJ42mY>IY^EnDV,܌lPE|s?~xQ:/$QvL(cv'P %e9J=7BCixJKcVC4x S2w|{CO۾(YKHƻ ]fl\׽Knf= t2ay8E5z'Z32IC\C( .)r/̓qq+1̞I&?.lD׼|X7C9&ϑ(zw?KR49:hS) ̄d&Aym:=?_Sr`0v@J=. 
*d У>!>HG]}20b<0ɑ b4cN5ҠJ)duQ皘Dvj<;RBw;X }d)h@;-*y0E3HX%8BEn2T.TQ_1&UΎ`<0w"(Z'Yi2D/pVL^ xKS dn0&W>Bs9f_]VBC {eNaYYpTs8I2ʊ}ƺĤ H {a bP=aNm` c#_cdma c̟.eJrs҂'M";U<bɎl}rAo `}M`IJ6u T xA2a@ݍ[§> 66*/X qd\M$8:e)_~D7?h 4 ?#O_[vٶZUShcϾQ(EO,>]H`8$=WYיa|_,z:ֽOSxMQM}=KÈheކԪS|XűLY Xހ`εa*nmYi1[~nX\$wLA("ؽ9ڀxl\ɯ4.@>t&Dlpz,q?b{E4{|v^?97x)N}rԀ5 kIt˿^WzTeK|6u롨Qj{\с3$`Τ%2'9p!t7G.>nlܝz&~[S7xZE:6w: E H?P5cdo-䒖l~tҚHkL:I cTsԞrߵmEtYs_7Ia[-FB9GcQ!w_ 6J|]pӪ􉡵^B>c.Ͻ8M]EnZsO}f2꣘?8T38`qv2",}/ N]_.u2OPx/ p,VM|p^S2_?T!KJ ~=C)t(@)<$V̐ez''NYI}nis}LUvNrxSb4!`B,ۺ?oKjD͊{:@FGB v.#g Xm=&YM1g0Ѥ8bJ!u,cgfǮ/cAQ#D I7hiC\-_KHfҧ:.94f}?|j]6o/Ρр 8bԁ@91{;ϣ]ޣhk"<5rKqf}3Gd0)kTlt~@>l VIDyTmu^;{+~ ۃܝ@엋Bwa{eڍ/`[ӓ^ b jt4rR R)u6[VU'##ETwsC<Ț5ϲ<á;C=mFh lm1 5ˮWMe~G@G_s`,ܨաh.|X815nU9*Wc RPM[(Bs!xp XA^cU{>#ӳi8 ?%r`Gݥ~|Y% `!tm>T*<#&ŏava4G@^Lɯq4%kZ="}݄iEIfH=r8)B5}CPg1T-5S;S̔$gPɻVFDpm=61dc+T~!<{cCIȚS| kͼBғsv쌚? >EJ) _9x!³}՞LKq/s6"?pUy6ltkxH(sѝGcX`,|'Rcj~V)ϡ kQ33GACdmޭS!b/&͇BLMlt(7Uqn*gc@G)GyEzNN^_iHjښu焅{ 'Ywdao^[3cJQY1.qUƹC5~3v`9oNfR;Xyc-' /YH0:H\_d3b>'QTC. 
|4إ}4f݄ bjd5f>dkܛۈWP(n"9qPԷ7,WLjH-+g€qlK eNmMph(l5r"hOxgAõmfSCѿ;{Sii&ۄUɬ{om3Y׈\pLdޙZ'1qr~ ocw!v>Nn&ҏ@9SP 0Upr"迬kW>=!(ɝ}ƄOe﬒9A#G"H6Y},\*8E1PagSqj:wQ)0G*|A\񐢤,>tc^]tMCh nK|;,=1IMKŔn]De'}Qޓ1O:?`Mr8XI0_9Ӯj?8"*NK}vp9-l=a8Gřc `Il]ޡl8{yx 2Eݍj0û"IW#ڶKN趠PNW&Q!DBr/lLO8A Bq!L肝^ۘtW˜!";E LdVln8Uz.ѓԟ)*̜ҬV'̣O P <%(X1gT/Y@͕~\|jvG "[zG5&"pՠ(-əq$ `Fp#>>7xk!!%5!gabS[Ny`򐔌ACE.+:~<<ڂF-͓n^fQ:A‰gB+4 ](,U5.EHBg7N~һ‶327Pܯrg>9[znue<46[;W 0ã.+w-Ǖ{Mz^]Gɸ ew* D< י;tF A|?E$rn {T퓨-diq+MìJh1i_շQ?Suc+X5#V_V| ].y./!/&ۮ+N2M(ڠ6jRsX͈ ѧA|Az{, 6 kO5Jvĭ A GC#tr7"bR}p!\ԟ}n,0&E*zie1́ȼJCXŭoz gKo6S?^Mr{`:OCAiCuyT3Qohwljj,GH^Ut\mYIҦf"k\>Fy㍖W=IžE /Z#VD"a6|VҼiLf-<ؘ Hv/f9#cA"hGv[xvu-O!صTץ)BۭC[&Uue_ԭ5Ck'k,X2 b1$u Z e˱H.ъgvw%?嵧qdffm?6~_#j(|wRcb_D_n茇|.:(JCi`%O>&C79tr|d!n\%'/^_]ѩ2;YM%6  gqRK'H>=>pa\x1~Ds|w't :&ɦ5pt”q!dEF,;!<~`nLocξ<ߘ|75OvWoy&y;yRTWZU(\l Y.Qq %Ex<*׫4d ѳPJ;&`LYAȀ?lFWAI vEƶ7iOTVӋ-2;Ih&co K#,/h`[k=ȒқF }{[< GTU3EJ3&b0>t|^1tbP$^U=H9 1ERBe:0Ŵ_uh#Gޥvc.v4Ѭvo aX:6j 3akD3xuƅcϔ4(]8Z~\<(pEh>\,]-ieT6IX89oކ0-Ro`[qZnh>нa(Fn@7uO}EǔF>iYLqwӯ#[Z9dws8; 7qET.o]eS%ܷnQ`$N #>kg*}U5=WHZ2e)OR*4lo26n*),F^W>{asCstjI1F!|=xs%A=.bft @:ڡ^'PX)1UZsЖ]2{0wg=bF`)nn3+aB,,KG[S݅d} k[9dQz.|Yz EiwLii*PV6tMKQQ}KFh4bn#9'~RI|p41yfOۏ|dHXoLkC#JW[ [:x _>eZt6OaX0m mU ƭUƊU\haF<3~(֯2(EW>i6Œ%IASܭpТM|.Ko_ _sLAjvTux.'6TZejP0pIP]/<דH8yd!8f~40K֖Z}APGW![ I&W[h8*'HL>Be\(68.0ɟ]^h`;w3K9" +zHChGwĝEHuNR֋6v$PhWbkmK"wMBL4@Ήjg(fUwK0E5ƨCFtN"J9۳jc&c*WS.0 ѓ\`S1|A_h:Y(ep^y79ӄ]}2<&X&_̸ E#eҿ0lW]*"`uӞ7Qc CWC5t2xsԤJOz^yt+LFݤo|Z$.j5',Xvp܈A%']0*!,X^+3oJFPӱGݝQ?Y9 D!|Aۋ2nDy()ILt^OpE%oҎ̝Tq/7A3,*z VO!YshdJt>A/,]*;t4]L9 Q# ,:Wn+JxA jY[AQa-Γ47qNd(? 
/V2pj\hӀ OqjP=M ,RFAP:U5Y<; Tq}q,+y;X+Ic|*7-w@gJ4,Eu r& j%g]-lLw#RpQ\Fz[= 2F ;AxԸJ_T,`m,= GX rtWu=5:6Uǭs< 횿`9}5 ޡv ~#HN "}-GK7/Zv?Y8NQ?!G$Aoqm'oȨxi ۷nt>F1Ud(dnJ rdRW5ӡPeȞ{mjI<ȏ<Ζiܥ0KJn"&C>'/q$|E/G/zlKdZv; Wqn1':Ɓx PpU@2vtݪL@UFlLt!Ҿ#i'TG3xl+xreρd3k2I_SNO(ts8tԲˠ6IE!Кf||FO!OiGPܒ:ܔ_a*iW3W؄:OD']ѹƝ9+ht2P{5n;9-% ^s\P礒(:|X|$' On|#@O)7&<K3饪l/SCvK1roʦ%.^#sa-̟V=9]Zb!@5*DMSM oE%c=B&졄*-ywl(Vy t {*RG 0LQVؠd( VQY@%ƜzSz{mz [:~)7fAo[\Tׂ[<+{~t*Ԏ@0:˅Qt]x%y?ԧǔgD[ex5\A9ıQx mBLdQo$ z}O\"%LFTs')<НFvW,>m{{ >+&q\"(ҡ`r[b% K➄9@k͗>Gغ֮ qd2߮p0] Y5߳YW[P_\ׇ{w,yR`?>%˶۪^TKqطtQa5HohrN>Q%p [zӯef <SsM0 4o;KC4|~bӃC4э8N,od—1G Y'TЁʩq? 3a[4/Jx[ù " 19Yq}vC;(ɜ׵1^܍/Ձ@(RJ[ȻNWbPbP#dg9՝h;?P.Q.]?D8\ 6ZnM wLyc˖S3oafI rVUJ=TPMN^ tRZy>ꁧ N'KW;Sd/^HH 7J~;KmKo֐3%-*$H?,z3ӱlBu ;]0cPa//}jaDq1t iSŞ]&p {h5IA/:]C!)3k|ʰdcfBj^ 0$u}UzE~^'GF ï(@'T47W"pՙFgKr?<$[À$[&Qpxt3OId,g៮' ͼ)ha`FJ 8r:or@̬V\i)р<2Y&v.ਣM (+L4Iw͛y\'|Wp@.~S*Yft)ËE6ژϩyp\SJxTci] GӉU aD`txܰ[Oě}1ow#Y@n4CY4U@՜ 㿱р=z.9D4& @=T5n:R;,lmF{ n aӝH gjf H%O}DBRvΫÅnB>?ȷ1<{;<{y8;αE `⥮{m\R¿D(H@a&`!Mܯ~mHG@jYެ]߬r=#42]s #1C!k_rj-WҴx"L qhuhyNGf+3ڔfTsHo&#rai|ȣbU[\0-xMmPF~|iQ1hQ/WmV@# %d(Β)g__1`s<-1GAs%)̹'B^=| a8IpK.Liѩ~ 5r8Xas1Gd,Oн 2Cǚ3I);Kbz3+o1ijżQBh(p5Oʱ앥r-z{bV uV2of\.- ;S ح. km%:5.@mS+zUh9ΐ3ɝd1< q:9xXE59)몿S?fCwzu>1v$z^u ѡSVaѴnnaǕ[9#ۃ؎|ɄiGN0ǙHĮ=OMMe6" }N .!AR?guf(.48R pշGڎI(?ʩ+y>%o  Q9w7j0ȏKY^$;Z&ӨR4a/ZG;m~X0bËG(oR╁6ʶ49 LU||,"%g׋$sOrj4KTj$ c sexR[xJF^{H&9jOYٴ4cM4nh0#r!i ֍vޏ( Y8qYKcg]siAozs'gǻ* R}ؘ"sQS8av\'#ar`:2:EsK|;@ f!Aw⤛/8fnO 6W1Zcjϭȉ@;8T 4G.8sW4}M&ᦙDcp9K+P]"ͺI^SϠ>~X\ ą_QBx>/lJXkfV7?NJcs]\FTt/aJ=wJ^wDYB0#Hz Xiy3[\ZN wݛBmA-A]H_+1=rL{6Qb3 A7nO{W53;G Q#9cAVyꪘ> V1Т#Wn_ڍ.´S)lU+0:7?.$MvX"3_N:|S2#*? n!.i\HjmXކ 7< Nc`%bIEK,պD׉ײS&°6Q5"8 躔)VnIYp_y6,aZ41(A $sހ*OȬ8PwP̬.E8O l O[.' 
CN<&n$PZWS6x0 g4T%d]k;.\.pnr&\tӶ.Q:N *}pX$hI~w{ 'Q!\T*@oS ˵ q;WMi:8Hr,r5'PUaotCM%b}zWU (~cs@h9Fo1NEŨwYL11EkxahI i.ք ܷ2@b7F+**,חQr3syj[eA2ivNU_Flw$׌_:NIl8 I,o >se}@zZvI%NB/Du@]߱Xfa#^W, 0jY 7a@X_2@lIxե{$Q]0Βx0orcB'Ⱥ%z)A㋠qdAÖˉ5 , $Kp 6*P|8K*R!+[0W ̄3`$ <itxsrb)58 +`mه(?JP2xrI48 EtzSd@g ~G3$2`3^FIXuq2gM.Jn9wXz-(iD5N 7*D\FZ17BWXeo`MLprХJԃ)WDUjeϚ@E1$k}߆K- ke Pl06hFd"Q޵bx(z=Dܮ$@:7qh%M:Dd״h UoƉ$2@{[<إ~(i5mIT=:Zϯ0 ]CwєuHDq#pUX֦~hE=*ݚ{'1rߨaIuJ8%A֏jPDzQ̻Z$̍kũ8"TBdSChX$@5zpf7 G ^10 u%ҽbsߗ!ِD\*"~}ZeUq\S3 ߨCrpL" HQ#UutXEDGiHkcfr6O6P+ O/aX<Ϙy(?#:]pdPkw%6+LO ?G@5 q?C6(pV^ (UTsR wm/–-TR Z A_fW`!\8Ds.'}MM,0*w6ŵ-H,P I3|qF`߃R/a|{SUlmXb7]T1nc5n/D=]BB{ iż.6=.=`sEƫz |rr4=G/xF_s6)# [B %~J9'6@6 ![p0ȓfQWBF(Oj8IBD!FxWtY7$R&䲨to&M%X;4'硍lQ lu|UωdOބ%饷3ueMΓ2[,^z5@0VҀ;zFB 0WBV~z@=l`[}ѱaƼq UJ^sz v\h.; gL9ӊ eߞ".@J? ym"St_1ϟLiqEB |"[>7׏egOfʧ9$i/v275ߐ(|M;Rca´ȧ5嬐[G<#O$5M.?f|{p1<gat%gpHْkV;l΂]F[0p<*X`d[ 6JSMt4 \3hӋ0X1˻sW,[{,sW3AdDQRu 9Gԃ1=m̲*_"AކlkƑ)ֿaY4ZճIۊ B7#]"Sb`6utڮ܂pXaӸ.sMp4= z W;L'0=% QY ,\@L`k[BJeξu`w7^Wd)lWan. tt z6uDl+!s: PW~[ط55GԺY$BOJUn#YQ#z} XDitTQn#;kuUMY4 xv҉ovjZQA(U7Cb6d>_ʩ KlͿ7ém~|T{$~>H4UwKdt5w4 =`R6gzøG ~_s ncC>yx5mG{8>S;)q4ا[U," ]2e p< *K $HK_u,J}f2`Ms/) 4!bR#.Dn2-*Dc1h$X? v5\~<9S=6cYCΊ~0K;͉=\틢zןIU=Y\J0hpYk}ߕ9fuX vALL ;ŗaRxa:NJo_xW؜ ow<ӵ{N{C/lBеVV ŋO§9ot4?LHU|Odm`rF >+v"*J s"/\( #ӜqD){7W kl e0nK6 x%%S>i(<и|r!R*0M +;OmH_X7%ˇKE/V4ی 6}Qw+=.H^$۞~ضM0R}D&)-EsS+7@~tq j[Wtѷ´@,VZ=URހnkmJǪsk;r "juq&oɕyO$RW 2L#`y-\)#gtE}G9]6>Ǽ^})^ q(o $R-6I[_ަ~t]~Z<&_ 0=oT|\#:IԊBoi1IZ˨,Ġ52 vUj2=8#6evˠ}"f>Zf6zdTH6, fYv^& ?G & 1OM~Ac]0+d][-U[lZ(=o~=Y">unCh#s5pf@j R;~M֟Ce NRp>spJg$̂_ۄ+9aCH7|Kc)"V/YSW%(E@Ɵ+yjMo[T[{>R0UIAq@P|B_lb=uٯT21>P x3 4 :b\F$Dd+hx-F`,>s3oz*c 60m%ŎiH&!9{ȩ]n,+Ht9~٨xMB ٹWHzuFէi_m(uȇL]@f *T8ņ$C!h̰xȉp=" NC^ó+zAͪ?KC r--s31\%^kKڴĩ^_A>q3ta?\ R$r[QU%ޔa גT_Bdŵi9RGoRTC^) -Hy]En ϨK~Tkȼ=%0z-8[q["3 i/A 3q"g.-/[ I 1Tg5R/^RB桹!!H91:Ne,A-Fp~#@R8aCgt*lvޑ+G˒O]F%'V"YL((+-KW,e(o}smÑڛH SQ4ޕŏxYqUoؐ@5ؙ? 
_q74%xnn5y`y QLir)oȼLx9toKLRJݱeسe ;lV[@ix뛱+-ggyʽl*eVvv+EL-B;tct ݳ>g'H;gߛW^ֆ_!zѓ *VxZ45 x%(JfԆK$y8@& j*FHFQ2R0B+  caIRZE߯ ]chT*g*5\7Q97E 2`gZUGZ5#jpV8}mR[z2$7 oRic.43A`< fq-ww\Th=7_+K`1N@,1<U-L(70KoX%D-_LjEd W6xQ;^>G0x- )ΉGqWܸV3=x Fyk!]Jf0l냾 HA>ùqd ty#W!eĆbYR( ތ+68ŨV"MrO O2T,x&vly-&Mj3-M"j+'uB,I#C3^o S_+=Nlr|n m! ҧqMƚBz۷U2WPXml* avnC+l휱:kiᅥн{ڞnsmm2VC!_ZdA–t@Xd }z]'plò )r2h@UJBW4lo( @hբiXzt+CܽI.p4S'btZ UoL~)W\NTח_8-|j# )#ȞSW0Q|bZ *;g1֯:VB`%VQ)sIA5O ]}$},rrU*kcr4jVթbo ˜,ՊH%}hJ#Gfy8 0OWKuʢaMhH~}C0^ ՞ %̭Y Doq>E"}ÐcF nܬģ5t7 7c/nWG8Ue syvsUu鹳jY?&e.UR/QϜv/iqXky[g0.̉3[7~54ŕAt`q):1l\)0aa0(I#*2n&Ϛa}c.hhe޺F ~H3T&@ (2\ #olvc!CN) ;(bCI=EzK.:n۱4Ԁ)( U>ɨ W,niИV}nT#t .oK49ҍ umŖ 47M{.x˾MB 0q;!?Ҥ+q;3{ M]H-B43Rj 0#Fkk?!\dl+\:ԯm@\ 2/?b*گJ.AElڒUim WTK:(6rC =g@!Y9@ 7mVb/pw ,N#p%;@ʥ ˒gA\gvʹtaX^%P}uسXtZH,mh& Y"=~D7GYH,hǢ ]S'iWnW%$7e\JJZ];:@ RmLlK\UFfZsPZLJ2+x5mإo]qCNO5pG!^I xIxabKTi8% FAU,! _!q'e[%0>As_5 נ†~ԯ.7 o+ ?>;d*.Rc[ӝUaXXQWmj!+"xd_|_,]p?W~o_ 0WޱGËvnJwJ7!gm` U .1wK{j xtpWAbDy;ZbBWR?Ɋca^ ۘMT g{F+U2sϼʝ ښ.Elۅy#U("mgȶpQ~SPaKf7?Q_©_ݛ~d~%K'|T>j[nCzt^O=1wH2A?L^y_1}^ _pkQ[D~0lʓ?P{||gj̾z+?PwG'c+})~  AB}!N@UU|[$::m!LYw}Tl9"kJa7Յ2C m:Z)%C/Թ`[LTApuܓHg"6q*HpRݷ`bC Ihk{럋 Ff"Bt}Ց/E!o|j< *?o5ΛgUWo׽i<V;].^F` dm-856ݣNG>6=&ҤMUC-nDrJNP2(FH~ ;:Aj3A\U]8.wl]V_? .|{ʍʙ]jTDfg[Gh}pN2 Ɉ (\du8H| ~V<#lԪnӬ`NZĴLۤb0ڂ} KC[IR!$ ۫c )rWdx q$,l8tmwly'Д=nކڪD7A*%;m<,ۉ׼-Ap ~n1tҵB!#ɉDѡBsRm2XGp9Dz`IFmc ;KAmi 8Vbx|*DV`/'T} Q:z+տ39;F(mA,n OӦ.~rԉo`Bc_1)Aif 5^  w=fJ x̶[/qNM&xMRr&Cۘ.U/1>h_1^/U]XO˾h9ngC"LOqxf4>o~68xs0D`tAԮt&]hjmRIM (SԠ8@ͺ0#kӝidE68,lp{5f7^ fMmA٭@M ?_]? 
Bhm}BYuk~43Z]MkkAg/f\2aN=9"ҥ+%O@usN96Ӝծ 2+BM۲=`m֧[Z +uM{/Qq XW6J H bVxN^;s~O!]6y.B߾`u$w|\VȘ` 3@QsYxцF]XeӃPɴq"DR?m"&zFSY}͝E1aV ۯH['Tmϧ.0@SF*9Ƶװ .V.\S6)@ M:(, 6-4Zd bT}v-D4PZ1Nm^x{=waIP,kO]qԙ2Ϲ}3"S-yg iErټԒN5?UT=5v~y y8,#=]}|^F$Q{_k73^ >#%{cv@:{280Gݙf# Qsf+K~w=G(Q#%@Gll/<\u!wDc8뺍)2&Sfw[h;g[>2;Xz+UDۻ[1#<6NW&jث%bk}ad%Oh6Zib^s%3PZZ#6RvX}:Núr&N ANt+/ 0DQݸk Y\`6oxB~vE5 3u: /5,Y08T×fҦ/OQ ɑ2bfZvJ vct`(Lx ODO(QQ h^&ʧ[Wohe>wm3{cT:p8AŌMn'(L ߲p .HXjL3M ۊֲ 3 lm#{½GVeqmlMOkw]\:҅vwo5m F{&t =.W5U:Ot :J:#{{.u$oZ[]LLéu tWl 荒7bx,(@F-Հ0^9i59)C k\T>-LS aʰoLnHP KéڈAC]$ WA"(s~vSDG6rs4ڵUDab/L=sWGBqe!檪pcdsS6FY ˍ`Vxc?grBYr{Pm7IhHnXcԮV*B$t %QUAG-47c{WξtΓ-.&c#ɐ0z xJ:!%My|_3u5 'A)DmbK?p9}]plׇ\@ pbw/(^pxT"J SW[B*8m$Ih ƂȘJ~ɣJ)1yĄ k S G2 ž $zaMJr\h|R t2"̰G APS;;Uo_=%{ tfjwJMjXO)o-Gy.,:L?HfSȅ9J{f{;8YVX eSNnӓ,X₾E><7jC\&*ɓ˿ avZ B90;A?cAgLSF''\7 "`9 5s;RpF^/=!֕¿6FsNccV1H'VK@`^8M?0ϟqMC+_.aDM-s|smr'RWjCfoÍb!J|~.x:YCvcx"XzI50L *_US6s\0L`%u䬞X‹^[b?,en(+9U 2O(g'$yDu "|K/o7y^m1MO.lj:&Tv OXk2zn (,l7YGن+PRXC֯yНdqPC3f3u65g7|@qT+[8Z=Ҫ =T*9ay@@$Y )Ntt{Ѷ~R-_`V:^Q) EaDkLj{0sdZKCHPpOBUU=Lj'K|YO4xTܱK]9K-C81 9߻iJ1_arEi,:C!+bLzvGӸdJORf(a2:QDm扏i>l# ̌*Oucp;YMbȽA݄hnN/x, E~bd(,3˸O3K5Kbḡ(ӏ1&y ?HY=kݔIEOhO ,tl$l,;#R>kѝ`Yu{|]L:PAȂhBkǍC}dUm^ȁ 6IɊ02]iwil.;)77\;(,1JYC!ua$s^}iywWQn>b/1hV/S,Jm&^Mkʘl WO+G/<)6ղ6uSa]bXdKIrSB8<> 3|?XL|.0G}PTvG@%֌5?/%C\I.[[{'bcS4y*q^[DqԞEC_Ya%R}v}Jp-;gfRz.[XpOoyDQF%NlE! -LHvX߇CNĿawBH0݀URi!L]kt=`B*ԭ)hLy\&{:֋ò A0ٚ:xc)k'"օž `lߚf*_GsS2l3S,pBr}xbvAQHur3U56w\i僜gk4D#qbukT;W(kv70Ps` 8h)Y:GTAS0}!6s>-SWtf,lzbFʭ -AxZTw]a?:{Z$UyAk!|ŞpT VMp3> +f5F,]BoZVGxU3abF`)+u^IB`1@_+Pf5#^OյrvEf$F ’ {O2K[ӱͯx*l74].[|Т}ITv[:S`X>(+2>IEcs C%9+Vbqcf_%QG ?Vc-i5x;J7)XAziz>'ְl&K>ỈhFw 6B ~:]b^s5)xĈcCW=m eCgl%t/ܫ؆|T͂J'otKŬM]Nb@l%fn%g˻.Pf[Z%DUz`2 O q4 %4ɫkgq" hA'vїl3nky1Hn(zG";s$I(ZJБT2 pzC >&pd*h+ύHLfJ;(xgb[ >m% 㺦Cex_6>nm0X zn  b+ffjeʅLK>xЌ 7HeU[; [M{L *O r9z; ў'j"I)%e?sEGM+abe0_ux6KU!?<1M^?qM3l #75si` 6v7., Q:.չdFɹeaY(qAYHpA4&fh*x? 
wlX:6҉bG\>=5 J\\+%&*iꦮ*dz~>ZIDs|B}qZVѧjPw&1ZH&D8,aQK̓o19Clrpi v3پaj<镍.45Ԅb'^ MUc3q6ՉU>+GyAjOd `[[I=)͕*cy=!$^ |ER8 ^bDOϽ2"pn/W:q@ĞC.F( 9@C8\m7sݥj ؞YQ#_EF>Ҡ(Jh9\^yNxȗ9qMWvKqÖ5-^SOZsh^HK nr\CJ!E`#H F>ܨʰ=8j@ǰa_apQ\)X` =Oi r?5:E_t$5F_iKY*P P2p -{pB nC.D6;҅[Gh泎dm/NЮeCKq,-a`_^U[J JÙ)Mo)R) Jҹ˖#_zZ-sʵ3 O"]JtJz&2!?^2=D,f`&A_eX2@e^  rj "|5w{21ijo<; .Boym-,ễ /V˼ލ`wi| y;f-pw E) !z BFq5@Uwkw | D4+ .K,Y ư۹ Hce#8]0Ux0`cG1e-3FzUae`J0P $V;8 d/cpXs0 *0ZCYL +JÚv%ߔ& C&ʰ;>7xiAgmwN6v07a4}q6+#xlJ4B (l]3Ŧԧȭ% D^owTl mh+LNj BK j"5OLjϓ6(F3 7Z39jϘӣ7_ńYe<᧧OX$9UODt%ܛc~rQéh%T̉7RX,-XbD~l.p!zňd;39֍~;Dy&R`>6vHpft/r86wH_OٴkGd: 7^ڶyXȅ簛n[.[`~;d f"GMRէޒ.6bDNRo fNTOi?9~2R6\T6d4vף0 +9!` O0*y =t)tHp`͎YGz1/8f$IWR) rW3Ƶlb-5TLp˗Nװqۗ":{ }8/U[pNPfˀO#.-i'I޾1;w&YԆ;>-u^Pfn}d0R\%V\|*)o$v˴dUz9I6ͨ Ik{p{Q(QaX:^ίB Ϟ ֟t౓fgsk'9 6jXzS1VKfP(<,[F^"9Eb9iA{ _l 0 u%W'anA)Y?5KtGث'H`4EZ͋m[wu AY7Ҽ%)GMH+ ? TH7++~f.](W^ T S=kVdlMP*vW4rkO=jMd2b,ɸuc!YW|rXzj; "^Ķ\#|0%dxsd*#f:WOZs.{K n\`9d#lQA dr&ETbEq[9`k<.fynv܏y %@ WMM`X༖gZ}&dİnlm '- *ZD d@( ?x!\ o4uhmO_6W/]z%1~'<nOCe?-k8p;_Mз; $FTߒqt% ۤli؀RȇX=% &qqD GO[CpduK;)A?#}]N&bTc >]U2]"H&#3D>Y 5KEfMx9 I0)׸WE+2Mmj5\auVdD)ͣ]O Í0'ƈh)M3U\*ɪ=Y;MDܛqccO0*dd;\KyXC.n|5%[4ɡf}C%+É'-:p }v},&`](IIpafa!Re5 [>A+4>Gi0Fu:镅D,N@Alo]UpeTh,3M#sS@@_S&}$#wzzYq+y{tTm8t U~4oˁ 7@k "!saR]5_5َ @цoUЁ~D62z,_Ar `Ø #z$QN eBd~m~^]SuvӘ*n̲tCUaB3 L9i휗3: H4\- n\p9\ bBS|yy)퍵>Wo~GW)ppfOH#fq[v4Y@3u>!R6ty˩fH@o3l$b2(LU"Cm|,ҰXn/JkhF]}4[2'w"mJNvЛFb~9{ lrA#˾~qBXսu¯ ? 
$[GUW *!S zͣ]}N%CUxJ\{yB(ЩG"< xze>RlLBD@tW]ܷ ɬnYޅBZ\22t2=!û=u!ЯJa8@'k6f=F$ڿcƈT85{6+PxY~7;7kf_lP5<k粘ضX}=1E0=4UZDv)wO$ּlYSu!XѢ6]^;$ʨO 'OJy@oALco*gsio,FM"U3s⡆ ,h{͙,I\&fΗ+HzD"nls-{ e`%~{Hx yjs y`ljb*hcñᇗ*ӂyoeTgicPW|.^9&\ehRQB[[i0e gQwJ&nI}./wͥl~2]"vpw`]|2=|gW ~lB6%d-vJIr{j Zόl*=SxgL$m8vŞe:Pg 5k>ż8z5`>GĻ8Dik.g"@K+[ZdfWxpRlA!`Y(GaVkZFPXZS_$N&e'| i`\z@X+cSO5nr "bS @ #H1# EP˒=-rŬ(<Uxޖl<: ]PDC vHm_^ F-Hؿ-:\R=Byѥå÷|*M,dqr|+ iVM["G2npH)^-בe0mտ,NLi& dQj}fe3L:*|bZ2aZNLdSqVPC;%yYzPS,phEl2&;&xGN$q3nx>/*6\~%+^2O'˝  J:ػs2;﮼fxtD%GZ Qtq`Nw/&iEQ9ͫ-| CSL bKFըrinu\E_/n_%̆HLyxRZ7ZjTH.a?Q5oz V*z!+b/j;B-q ^CB 1ĕlhAwjuZoT =QNL:ht9a;[\ !, xIð%i1AW*/qp^ӽ J8_)4F[ m)f~-k:ól#BO>-@҅ev ,#]|Ljazvl^k}Ioz[W~,{U5" 56P^mGV:c2<"b 7'){eYmiJ.ԫ%MddX] gDʰSt P;Bw=gP z|ǚ]#y.LM[yQl^e? 3L͑a׮/!ͻ\n]ol$/#LᤤEm}hΝE\i%WbRM_!旼$ ҫgaj\7xsstbDFXxYل襣^;0sF-L,0 |%&GHu02AA)&IB "̂"oH7yԃ2| A{g8R㴴׏93C'M8gJuhH i̴4^%8aۿ֧ht)}|oю{X'X1sŵ+q0 :⢍#r!A$Op5"t8$6= ei; A,N bQ>Y4!NW $:H¹&b?Q!Q[}uٔOX@ʸncQ( #{qΒ\"'AxAa'mFKlУ9~b|k͋P)"mlVCg]țx싒N߾Ϡe2Y[ G@P!Tcԩ~rü%s+(ۑ^` @orhTYuS 4CNp0GLzc'Xtj"ie4#!QؘKܮ3Qxhn@Y.h"@>Y$D-:KVf`ΉCIS Y2gYCՊֵ]ngv )$탺f h2HlAB(.Kj{r{;I: NPm,CYT`wzjA!`BH7x&/gIx G;ce+vzlO 1kMοߐIKXJ}c KYb /AZz )Ô.<қMQfsTj Q޼U?uQf]ti yt\]V'Huuknb/rAX敋oZx&R@ql~q_dVd츣d#H` h)R6Es#*ec|񫵂oD~=7xKO]DhpYÁ\rOh;uQsb֊Pb2 "^ +Z]FS6kja7 9V$;b[a j:3 Ɯpe,/ Ly?M nb}6,1$GX=F l^K) 43d |uJ{LxBf&,qteC1G m>Gl,C7DԔ`ENM7ۄ >V?CNk0ؼK%JmNx- p(!>7A˝WOq~{dx׺$vWH(#*@ a[pO[ø`y劺6xǨ{x 0ěE/^l m1;Ki[пR&,"F´ôx\[8,[jDX%D J0t!bAe8)ս9*&ۖۓ Jh,܃1#ڼm xZtq" !XP߁FoD "YܑU#JscY{1vSahKWQ2y3>h2W" tkN:yœ@ 4$#DsM>³0I܊rׂtY˞VfjA{#χ<^DmK[G(ǚ3#VOj|bpdqo {&A9*0OFAGu'Z['t[9x_?!薿T ysƞߧ!-MI2?GlY)G`^~9׏le+`@4}J >;^ Y'~B'>&gGŷ(OJ<_s'9!S6ܒ3|'3H߿"ܜ@vΩF?\ F 4r6y赇 +-va>W1^, S`GJ;QB`>Bvl478TY bOƞ5u^u;PzfV`F#zsmŵ̞`=qӖy UD"\V$nqɝ/ta Mp K${dS2ru-'S=S >HBڃo\e^[n* 5ʌ{wiHl#6LI/NlG"\7#T+?m3z OKC>sK=@wVjJ~.hM;PI(GRiCi1oLS˪dT+M>>x6Q8% QL~y -f<B>G+L\Da{ehY× V?L\2pݳf x9>4zFLWX뮝/W{~<7JUD"*WgYu^BǪ6\Pe1c8S)v^ wdۡ.27L_,w$QףGQ?2+d,)ByD̦tG!+03W_zzyvU+C_;,HWx;ʖ5c"T̍ʢr]:VؐV5/x o8~$"9@@<1 jխXX4OW\ _2G(ՠl] K˻u׬f@:ڂ9Q$A Z}1ѷ\ v 
&{'Ċ<˂F/&<گ$V C"Jr֛M*ʲ=VEz0@< 8P -dl:(Utnj0f Ul/t Jq1%jgHqcVZn-xfo5IgTUMzQ|nlyܰugmKt7!e4˟DjSj)=7i=(=gq4ISp‹i,+tWQ? ߐj/!̵,93p"͞ojycv$onB!?꺣-v< o.Se/ta+R=dYb]0R yhȇG]l%03%/m)a߰oj'C|pn[g> k( ,q̀*DUNC+rSdme\*v2Wˑ,Gװ&DSTfbۤ &ySF*ϝCI0-y+a4$=s6tb$t0WDZP@[Ru¹Iy '5" D)bx2ZTg6e"MIBAtr`wy;S#7& `e3Jδ^\+ `TZ[O]u#ZxYU3J@Sx 't =x,s 8*](&10y`fo+=AOH)(nI熀<H@e6B@aBvSԯ~;eJJ/ax v~J} QuD2./<oZ !ge :< 9+lI$֙Re7d}N$ #bޕk7^t;^Qi;`u ֓G\,rFt@-S6,$Eʐ|{> -(l6gٍâʔVBL5_WȺ<1 _2ZԇǙu6b vbLLWg'chr_}~ZdF E$4QG5h\:>K )M99,1GiG|hn4PhE(=8c<~|8oBhn{`)tC ӮAfHq.v٣BnF~%Yt }^%X3.0\0fM / ItƟĮ:?Jr5T-"2iٺmHԽź'W 0Ch)o~17j6x;z璔%Ph_m0' ?ܟȑSC]C0Vl7 eT,t3U3Mk4@0=aI"r񟜨3q#` RzzB}O+Ӻtcܾoh0rDt/,_/B߷ f |!;)+lޕIbŋ҇38( Hf/WPJ66ŧ Z>Jy.qBj{]}c[]>v-c.ڗE/M ;*\r͏R2}R,G!0 YZspatstat/data/simdat.rda0000644000176000001440000000252712252324050015030 0ustar ripleyusersu=l[U_l'qPVXX{2S.VVcXm`҅:0u! U@HUJUK( )߱Z>)w.,+f_c+n-cr[Y,/{i3LK)^9'Zb6тM :1O,.{~g$I Q,:^w4R3Ʊ`îD> i6^d.J|Zo`X+Ѻem|C~qv c,]zy鍶ڝ>vpGgoҎ)?*~؝tuQNO|KW( Yd^%!~bHعk䣎L<.Nލb<8+&Ki6݂*&/5.up?3>_xg+.{~-P,PqW L=ux%򯉧/߂ 4|n:K<:4x{r'"JECqO7>5֗0xч'C sԯ .y8)ϯu 5a/7Ї̥ ~}"<Ϣ#r=f}v^-S:y{ϩ+2.SgVr$|Ǎ^g}"`UGT9OND/y98@e'6G1^Gs4~B\inUd uΠ_H뼜/sq\'y>)/wQ@}7d oj>c/ sOԣ%| /AR'xBZ[OOH!yűw_.CkkCyR;E7(ÏM 9>O6GW=7ExMٲѻ/}9G9x s9 ?O.]c03hu:p8K?Y spatstat/data/pyramidal.rda0000644000176000001440000001400112252324050015517 0ustar ripleyusersBZh91AY&SYn(ﻺU;,BQJQ$Tτh@Օ @(24hiL)2dɑhh 4z@ʧM=4&P̪~hL3Q&I==D B4'OO)O򇩵4i7m=Gh hh @h"HdI =hj6m#Gz4d@ 41 #@b 12ARD4?S 2P6zjxjj=&zM4̣& P4zCCCOI聓4ѓL%#$@d )POک槕?Tz Q=G@hҍ4h QhzGI¥Jm-_/d@rۏm$$q f;hy^(uݨʲf / $$BI-Iii-A esg޷uY%Ap")tikQ ]ˑz66}^oa./kǭݪa?<O]7RXԍ. TA[JT ʕ9?'uҪU!!!!!!!*UJU=n{.2I1-(/!wzh7  .,cgr$2wWcI$ HI"F5Ij$7pAƴ TKr2B_L{IR"D$D>.J$$cYs7=ւACnjwX&ӆ!AR%'5mIp˰^PH-V @DlgeÒ?*v "*/nQC6ʀJ'k<&:''IȊ"zW`dAC?O3 +=UE;YHTɲZ dBEKu:%LeUC uTvsMʐ4(D ؇ܞAl1%Fw 2MD4-TOTyװDmqzGPW&_55dh u,$bdžY ߽mԕ -P $CQ?6ɁUM!)'uIG=8aё8RIO@7 ª%ȱt HCZץј6)MRVí&3˔NG>|w*']*D+JuTL+'R;2;~ sb.*NY$V1BJ#ݮcJ/61pW Q(44{IQ>wfXR%ri|ʄش5wT ҄ᑡV}4+^*3|E>ʳ%G2ǓtcX흲. 
Gnb;QFzb9aaQ`JANy 5N/v?eN"`8[r~ڏ'ϴ@32*#6}_Q$B^𴲊wi^(M֙/`Fn%/IDJ Og/aJ!kN PogZE b#iVXkGZ4tTDu ]M)\84F_6qE1ۗcr\ŀ2+v*WB53.H[K4_T~dSFfwh9r(Ϧݾj6 o@JF-"- #N Ah$N wJM}}#HL`*lL66>EsV]bk!ĕ>39/RA! tڜth~wZHfAX'HO(p̪hFCߦŻHdSQVfCM xH\O S/nɷ,H_˧6RǑw";Mg t4J +֦2R]Jg{N9<IL*CEh"":"-bZlO<sV :rq˅ޞ59<c^"W2D&c-M(K52gC}1d5˦.:iսqv),C5S8Q-hTQblSbQIԐP.f84%Gל{XqG08ɕ&5K_1p*wۺ.*]UzT\=F\t}bUA hp7Z(!9ml-g"tcɋ~VA 'VE՗[G4q3hB>!yԣO'FƯ]܈1 naK\dG̺8.ONWz F\x8m|libruTVhjo9Hxv9l9vNȂR) ]+:N] aܧxK;|K7`c[d2AțZ]ٹgA#uHDFR35`v%,8Rr 4!\#'Z,XXPÂpRx\/Ѽ4 xpgV0ّ!!@>qH/T"$ڍ 㐏wL&<CTh R8UDijT79۰~T'aeVm AۙM&uLJ8W\ݖ5= i\F':rMo+܎\8Ҿٵc>6w"nҊ.Pn;q6V e(XGOŤC?4\](w%Jg6tC2|Let@Z= xO-|v<d:CSEqZ+s7%}6+af) tV6U.n)޴%+.wHv`bn84iL^qLɊN& {FC\yc%d,Q+> (б̒Φ=ˎNrٍB|+yG6vP)ߐ@MPYX#`ŽVz9`i՚S`ʀ}ԐLMSДmy^0ȓ1ԝMVK2ud? p6P:}1Ĭ.Wά'AQVqUcrvl-STWJ peQpEDHZ lDdrj"'ז|N} $MWzlL/x!4yfm{'[aԗ#W7h8zHXmʓ{Uh&`gN5Ò&edMܴ֎(#fzmeZhigXGj;. my!c94 zfK@2k_57oʬM/VNbji# QdYc 2DQ@-ĜCEfCz<==_yVۼs"ʳ) V؇4( rl ĉ=6ۿٹoyuTlgH䠝A˧Cx)u)ʾ^2ptsqbx+/0?L۔ob1#R2ͦ"r Th#Uc0i-Ld`b؂m^sU22483y7PA]SIO>JϪ~#?%Hu)"m,A'AtP 8G~3d,)=`žrUP{@$қ^V\3őY_SڙXx3fWT6L06/taS GЂmm:刉mn$bu7VLP{آ%UUhwWND)J"I IQke|W}BZ5D <-$\j7;zYԃL|EeISnw\0DK ӷg5<&S29e2IP9d$򄑁Is(8]D\DDY,EYVc.N|L;2rAp0m4,*ZXC%ueM]hYN*y=[ca rF@j_SPgU5ȑ!zҫŭɋtݲ9k^!bwb^^ԒD~yE 2 סvR|LҋB+d VwD00e"D#r b&-Ԩ,&F $-JX[1¡Pz4Be{;jrBiݰ7p'S: PY CMh4\{z|QEl,(Tu]e}ƒ=S\Q!TߜXCF#>o 6tMe$H?lpKF h!jFv`6"(dI0@G=7$c6G"0@5/jFD"P@NGn}9_:/+]-3㢣e#q bmuP&|2$29H"{ ÷40+ V6DD2 "x"&00YEDEHs]A~"dh |~iYKCs=z0;>:IEIK@4Hi4 0fkDe9JՊᘨa"#"w¤$ ڙY5ȅ ՈU93j .X@y1jtal.S8Z9q3Q=q'+!e*\mJn+!vʼnYj&T'C&f͊b BB=yeUX߷YBCc}`Hw*"(H`lspatstat/data/redwood.rda0000644000176000001440000000114012252324050015200 0ustar ripleyusersuT?LPw4јHuŸ|DW\^[{qdf&\$hD.W^{oם GQVTծf?a &^E !KXsPMn8xpA?멐q4U"ۡt5kR MZ~88h'niԏKdj (n챧$+'%&R$qɋu0w"kc}L;x~ݑGW^Kw~=d+qFZIof"Wy3_<[7[y@"~`p[;|aǔ>r<q3.e\Ek dM]: y;dr=6KzGx> @pgQjx4+nFFeԓ^ŽQ>>C} O1y$A=& Ȩ'nL}Q -xy42cOGwB> Ӷnœ=]z}2XN+-l]4J/35MMEIM+cl]"spatstat/data/demopat.rda0000644000176000001440000000252712252324040015177 0ustar ripleyusers{L[UOt ucDa1,95j54FyK{eJ ƫ 
1hKC|DM\40o~ւ&;yw !¤=Fj6j ,6kTbt6M9ړJmTj˜ [rY}?(Dl1M3 g?VP; 7~`9o^S^W c[$c`dd`j&UȩK&w OUYayb>a ю v_zS`ܪ7eؑB1ɫ1O YWPtwDz[9[-2l4ȗZm.M;ƥwʴ1y>,e֠OCNh 86XR=SfoٿW/̻E} (I``Sץ8e͹lvˀ^0I}w̫\-ui JP +s SlnwF:o92)Ojj\6GيTZ2-qfM4]hvVW8PzL]^\sp·4VSf2s /tQGhkBWe؋[!'!';> yq?b\~!?<?x~0->-8S0)!]?/ϾB#J9γa Dwz)p' b{g;~AoH<76A( x&a^3gn_{@?9x-0~ m9-w*39Nv?zm8~#/Qp>|M0^O2`u1>lu0O0<Ϧl_d=>Dg~71 ƸV֣n 0lxǡ0Ŏq9yly{ljfQ%x/*+x x#:v\ rYY}]7+iN<ϸ]id M`3-UcA7Wg Yk[˞1˳?d^ۈ_6Mϐq1(*"ofFy,/ָIYJmXYv*qVKC5y6˥NW,p閵p&UUV,y1)spatstat/data/mucosa.rda0000644000176000001440000003215612252324046015044 0ustar ripleyusers|y8U_ۿ̚$@!,D&DI,DA*TDeQ%d3;Ҁ}yzMZֽ׺}kkS|l27ѸyĿI\\B'9{Ck;}..Q'/fUBFL,cݍķ; IgC¿chߎ|V?)hgfeboo;P6G[^043 -'2F\{ /\1іѥ= ՟A~+mhz/C3!P?_K2"  [Q*{؆J18e<4 R0 }+ g--PHL3tuN`7,\ K;OA |S~qN>y/uuklw=, k.+AN/`y{+зyzVZ"RdΟ%uo׃6<7-tgI&d_i&7zNju EkqBt坿W=S"~^Z!ǃULލnWK46=t5Gŵ tH-[?ߑ_whyu>:n.:;܍.SySE%η]˘}gй3Zj:X班Nk/'.: z?n=7齁Wl >&[u;M]lcGzܭ>W, EJiu3/?DWԗGװU :\aƣbs@% nkJ|Y:G N.Q Ău@S~cPJ(qղ{_ BYrOC{lS8z+-Ufpd$WrdKz=UDI_.GSy-r້'އN{twm{cb][F Eo}$ol;[h$y54Ov2 7REgN"()>N_qXN zPyj/*B;fpR[W@v"1fe.0({W;0gz5M no.m'!]@c\yy s9u_]5,@mr8x(|nP.Nʱey@&$G5B: >3(Tc=_\Ak o_~\x8 -ϬoZ5sI6v#<ЎkT].od{=q=7K.DrZ 08xdW (&_I\ Nd۩9ptsZEC4[;,Ԑ(GU<z3{,A>^޺ǏGEAɡv̍`rU"Oy oi.}o~B 2].΀x֧vz V,o#bL3?C1Y~ nSj[#'0~^u^D],Q *>\c2wtn pJE8C&[o$[Ȟq>+0?YM\w%O 7;q0n:GYCC7-&\d0/=" gڊ^Е='gz/:Swy4#?{b,2:Ń3/ȹ A 820!듥lڼ@+MWΑgjM'k3b$jP9`$]vXyqbI Mzp. rO]=G{*Ol hKOߙ`$y Ԑ8g`glLCe73h*pB}w{rvh;D\Nuڷ{zF u+)",8:)ĭTyFVzX~uЪBwtap5h fOPswGSY>i絹xa M$nwqʝ-NhT7dk9ft+H mbI!i*9Rs~kPTs!j =݉j?4T?8L8SP8"_1sՋ i2֛%PyᏟJoN'CE@[\uTc}1Ahq:bU mW$T}C'΢\GFd2ޯȮCyhBuhQ5t]WOڌFE.{+6@-[66Km54?Ixz?F݆c7BtP<~B[ LXg\6x7 y. 
;Psȋh=VW8 ^܌vv4bq z=y$Β9q &Z.Ȁ`^ک$-8q !^nYe-OICͧ{ymtܶ[}hV&& 7̰䆝j3W퟇jͺ{fQH`ӱ U>u)E#̠MɿKy$м=TƜm{W\>[m^N6>ɉԹGhE^ m1?c3s'֯Jm엷kB+Z(h}$=\3#7L{Erp|(<%jS:v 9{ǚu/FY w)u3wדD]j¨*_+TbTU&B?tPb;4<Ԉ&&pEas$^I1ئ6zfoݝ9 fs0_>a-u-N|%zhM{.ZfBZ74eVU  xצ(ݚȉ5>գŻ{vxʤ47F*lTN7s>*sUB嘁]T #z> }3T^ǭ6 O@:JŢ_ s1<c&7]퓗1@ƄodFqla|yNhuVBæmF/x& r:U'CN豖TjRZIF١5ڷh-.t$-Q"hq~Ÿ7k$S+88!l!Vz7 m3}gG`1fϭCIs$ 5S *@+Tf7%j"rvPC~(_zc!>\"zVux7D֜. 7u 3,V9 _NS IrBP\VxR$\vvC]nkَ8>v$ . Aʲ+&k#Rֽ_Qtn^3ޖC [(rDȹY 䰗u -LCԌ6X+i J. h5=1ARRzX}t`$ %|SLq㦲(aͅŭ Q#h~-:o3*Dts.,F%+WeP`Vگ%kQ)@#S.C?~_"W,㗡{39^`7~ d UmVN(իMccI(m+Z _tS1X*~y;r8HJh7_ƻ:H$'wozHs v}z ר[5h_%J+{QT!}EdRy-'^_lz*bsWm\o- t/y ?LYٴ8ÏŋV ٍ!VJ!62,.qF>o(ߨ$"sUT'Nr0NG.*^>+>MwN؇B (g EE>X [^L6|'n6t9kTD| B܊ ӭ۳AH_Y 󙙏@^Â)c&EdZ(qi^S/^1S@by"L۽H1s cˆ}7y䝺]$5(3?z`b6|,g[NCv(5y!_ps]rk8iN Ӹ'~ ͮg!A(;(_{h$ulqrp3v1@Z(ÿ0B5Ib`,_#yeӐk3#f60h\oYpGrg&:!}JG $if$|,\("ejkt03f)Ha9|H[懾 hY:y>#p*^UChd־Yw|C`/ ѭs7c6V!wYSG/4( qYrǐ5g*ȹ3OLg3RmGlr6>k9m(SI H|]JMeQ0(>.W4dA4N.5x4Z7]O'"쯷L*dJsh$6hھ1fmXp bT#P(b|}ؚ7I1}O&|Hܷr0R,1CC`5*>l_82~vF%~:Knr],#M!pD֣ !s057:HòTcd{ D}==oGhMDLKHe #S!A,s*r|n9T~zU[ݠK@)䭒;r:.E&tY!ٺ[$qmrAyoIC6/ EOkxs( eFŴ;mP.ucGfm]Jń||lhJzݾ;0j8߿^)-:_~ oc;D#'!G>Xx\r{i"zO8K; }Ne+I{zkQ&Wh84QX69 yk7SW!wBT%roF}2gȝWC/٢߻#aO֚=Zl6B+I=_csŎU kI,Dr~sC}k+n d G7n1]x+(B*YRT;ft5'wfLil S]m3YKYsUCǣ${_Bk8Z[BaK$UT-%By(( &_&E-׭FEJ^Y-/=_;ʇ6u?KT2jKՌgݝE oRA7NWWWϚ$5@iL;(4hn?%pFss#:8)f7tPmXnK߅HEbSk(Ĩg{I/3Pbم rnz^_DyBs+uԯ,E5Tr3h{Ţ>ph|_ N˿IkV42甯BRd5h^pZ߉/d:1-"U"pib{oFo\ ̑h^api'Ȋ2aWKg5B.yk˯Ch[b{RlX3ZE7)!r&!* ~يn~LKWs)7+D˻оM[_ѺQ!@'(P R_Gǖfxf`*ܺ7Z>ĬO|g-8  }Ub+ 8t=<C4?q}o ~Zyw RF*HqGӦ- iTtK^&_U0mzPKcL&ڑCVB50c0aI2[u 7A5zctb?' c6_}XB Ͻ9Ǵ%;?VcX]8Y%`$ˆ8s^k` [`Mm:Y&.vSuk^AX3P0~=¢: EʗRPPAh? 
Ֆ>jw =b_BlkE6eM"ɝZ qaK4{R7iJܗ^;Maj.ZdhX$xHU9QIY](g01w8 Q_+kFjT?B4Ɂg#Pw !t@Q8g}h)ge{^9'@IEf*Κ^vu\Jx(Bb$|tr0)CG F{Nt Kpڌ s!E͇-PvFgN@Ն'ehNڿy}?9 9bG.u}Xc08:qsWoE|3rh'0L;ez͛Ѡg |!!m":E3h?@wwˇA#UJNda=~+4u0u !PsI+,k6G@cKAz1r5cf疷Erc oe"š%\y lh./y=:; OvȢ bㆲe6qFnǮQv%ؐ3㴤r$:"AԬ'Qsiun>,za)>/wQEBǒYZ{lL^vONj}cܶ#Sqݛm # {NsYᗃVp/-sr9~~ #h8PMCYR:3e^MFH~5Y^~1YgpZ6xJqx\o"jxWGTěxA">[iK<<<9{R+/ty0' 'Qp~&|nVB粃G!mJ:-$[qs>[L1 i ŕ$!%re ~xQrEFy4B]H6jӦd)Vjb XfmQ$<$-3gsPeIJ`POt7ADx fE:]AϫQ&goUFyQv|3݊ߢ`f^^ F5XA͸V7?VM#ce =q7Լ`\}l$vw3ɝ!Tٟ0w)}N=o!,;5unhoS҄h@;2c:(=.:F-rbUf,&tTN7 e~WA9b ˑ8kZx7sB2 {f0xyye 2je>pmrJha{=L% 4H4Y_D̙h>Dw$='Ks 2je=Adkң'@uN:c3]Tr_[-$z"G}`T;{60݌x9OuK1iNjk`35k-hl'ɠ@઺} %ch|%D toO8Vm`fCc10kr 𠎵BZgΔ=@ Kـ+ct}kR@}`q}L u BwxeΪ⾈twg,$YJdYCtbO,W`ɟ{ lXo>)ktSc` w+ c0|+ c6O{d/U o;?@5r.lUͽCX)iBʰO6!)سq8џtm6Ku| R7LFx6qNnxш'VMȫ5idՇ'?T#|{Un~+]^s*nd470ERNѧU1furњ#[bd3d{VFĥRM%uIm*W_ėW3ʡS7zFt';Y"t$v}wqB:6E}~D,M6T7@v<$} 4g+#^̵|'y#[\Wq2 mТ8+~z^ +Ե}KAqwԱb}SZ?99Et9)yl% O3H߆ʲ;@muHUBPV&s6ff9Z(qUQ&L*+W4G`Q5HČI.+9jEDℽMmFMh#Ǿ  Qu~ʔa > Ǫ]:J mjѾҢ*aO^YwtVNGٮIkhaO;a?+P1^Qp3MrVZ/jtD}(5zAʓi}~M )WBͮF-r w+6Nmܿ%{1mZc!lK8<3.F%~efF! ,VnlPeqid 49є)!mAz[2+MuS$xJԀlb硁]F.NEA3sI@4wh޾^4CZ_T^e[vPIn!( @1/!E y?ݤm<~ T6(򡁽|Zծx]e]_*`٢WY؞#Q6u++ukO[!@v=0U?;'7 \-!{# h@K,vriVHu A&ĸEy#`WSTnl<3%t<=8ϡ%`jd#v`4׬",\ea :D=O޸i?Oi?coK4r0xcYMk_Z⊋fVg/M8fֶ/L>?mﰜm7g`M?7,/?fF P'V`Ospatstat/data/copper.rda0000644000176000001440000001120312252324040015025 0ustar ripleyusers] xTUl,BK6 Yout'$4!Iw :1eۨ ~OQFd(OY9"8' s_MG2|p=Kԭ:9gM&Go[e5>U 5P^}EԡVƚf{A]1P`úC7NDUWv[ OGס ᥜ4~n碚&n75պ9|ZuQK]smC]:ΝU9t4k՛Pۻ+r% 5rō#_K:Fouc7~!=3D>k?8kMWkMa M<(?LT?4M4fuy!۔JC?FF~YW林$8J-Z  aYuTgJVL9#1nŀDנ?GOE;(x&1=J\[1ωx> S͠3꽇ax Bxd'W?Pf ŅkO17# >||~ho7n0~21,h6<f^!Mk_<́>\@k<汜1Jα|+ux}Q%`WRQ&V4t_A/|(\ ٚq?\ YmG7"pnABWI1]zƸc |ӎz2~A푲w)h{vmaYFBW TDY^̟*@43_PM)wxM $/^nd~]㯲q:f$蜬NVjNѷih,T5C_n%V ziZO{LKۣVJߩ3JLϔ]diC+4ŦS?ÞC)]ۭ{`'-lVMnd3}ɲAOcuY|ښtw%'b7zv2qpU 笁]-9o}-=y:`]*IZ!繷^&ؓ9GGAWiu>x,q J`h Ng1sVCлⅰ`hpP})jIB.حfq y;>G.w^!Uw =tW9S]!'W.ӀM[AoqkY p,C\w~d{J<]+u4rb~SϾ qDc9WN!=zlyx똼snx~H:O(3OOJ. 
#o?]u$mp'$n7% t{"U|rW/?)3[ ӏק진\ >M^\ ~`!MGdVeV&wI߱-w/$uO-\U)8p43L)۰Gż^S ؗP~Ʊr O !w덅e'eYam)'b`/%t̵ &~M>E~wQ=kS'nކ14{жaX~ۏ^3x'~R\6g෧ǡǑ;G1" ܥ\Kig?} a&Ya=~Rn^zazr=5tњإ2?I٧6Q9PuD')?'-ڡ^ |M`/Ͱc'oD~řO 3b>h(GOd;m?wlSX/ &Л,Y۫xk};\I֓OY~'?i]E'e,$&kma7~R.1V nc=/ , \ }: 7AOG`GK=Ϳ&t,y}2\ {h_~3?Pz Y%'g9h?SXK~XIe#퇞WAٿs# 8Cs/G x^|q >9:: TU+TYT\wNss2Nρ''U=]NpLO)п|.[ W|+f9-i[݄GoK<GzE+F {/wW;aEa/#'9QnޞhfS쯮Dl^.a_jF`kO,0:C[+,gO3?tF2xWXN×EqgdNZ^˜2R.V榧}2-2˜ 'ϠȞpʈٞ뮞 >s&3^2c%3U23%ጐf|z}fGfdVEѐn3 O#ߊR^FuwOOf82ɛkWS^̩s8x2GPO})0?ڋ?T>؟=}EfLdÓF䅭21i@fE{+rxwei7z)r㈱>[^En˾v{ eKOF;9.:H'¬,K.[_}DUFPHJ[ǞFAH'9G>qjwJ5/|zC&w~B :r0%wW勷 l堃!d?hG<9~:+jhөP3vCoq'Yv۶YWm1@4B,bq8J_9 q{ @Љ^ Wc㫱jl|56_Wc㫱jl|56_wh*]=CPQoؚI;O7r(jca2Da e(#EQ4@Ѣ\+Qe(D E4A2I`Q&"Q O%T0QEE4Cuc,QD%F&lEM&(J(j>D1l2000Q]Hkwvz%䊌h\We|zٿ?3ʾtqFUتal0j[5V cUتal0j[5V cUتí]m0$S ƑdW#ɾ5G]Q?<#ɮD^/q$q$q$q$q$q$q$q$q$q$q$q$b$kyOXv_1}:<ń?xo-#gSs ?8 OA- S=NwUM`mS`uM@:̿v>EM5]ӧR@}E狴ƅ.u BJzoinitw5oz? MJLRif<WLo+>Zw:ŏa:ƭNM:;WwIg$eqU7 {~`W]@t {P;qHFh;spatstat/data/chicago.rda0000644000176000001440000200453412252324036015152 0ustar ripleyusers7zXZi"6!XV])TW"nRʟ)'dz$&}Tbܦjo OqFn%55ߋO_v$>dAL? ;TAbo:!usqHMHl%ύp@DRxP7pa@H|dgJƣX*";gC;> 8F$אbdkf3Cw,)lxnz}fB FmɅI"RV 5\Gg' h=|:p^L&njԷkN~8X"`ūP wFJW h)=?8Cg)]|f`GP!f7"@R6dΙX@ H5U'a;=}ﻷ ibrb=|#rXKa'͂ \LXk^et3[f)-H, zUm@Hk{] E3,ĥi2vЫ nNJlɂ݈vx!Yo86CE"n$ܔ}Қ0u3+l܅ |P\ W[Oķ7)U|p'1>Vb4V hĹjc% U32sv%%Ɯ`KT B w9Zo)<-Ji˝&mM~?v &+q]|h*2s:>tmkk6:tgGR3lĶihQI6] w@ 9] vn᧳m>j`C0T[U@I5P+w m1zG_q)!ڜm12 y9ռհ&ӎ3]+a ;cU**:Li٫u)11'1&þ̂ŵy]g- a_4MLN/mE0d3hL²ZLί l> sv?HSSWM!HJu%Co sa偸@*iD ⨔? ,u~0PTHMrp)]w)j=N<ϟ<'9hD=i$0LjzWYVVQ1EtXhC&Cwm J}a+B {|:6?0'\|$@LzXTǩ/G޶; 1: _4΋89_m eSqʴ,h,[7E>5;/22BtmrU1:AF/]{/<$m@:p~A\r&c5j5?/|(v JSp7iot3a 4lXF3'[%].{ZZfahL^x4OJ4^#юIPq˵-4ɜͺZÓeY-ebkR wE:ׅ}pV͍w=,}L.rZf'u zmX%d6_^ǿA|b#mpJPʛqp_xHRD2%W?VG.Wt\=G}[bQ[3f_#4OY):eVmO%:\yL0> _:!S~5Hq]kXDw9U zߎ\v 'i-eD= Ϸy)jH'fl{sdAwgj[`|[MFj30by $- |w Z5, 6UْS2M=4O! 
wMD ‘I&8cyR!+|@l#d'*HWj9qS^İ^I&W$m!2ٓNPq( diW&4ګ-K5'oMA<+Di,+LWݞ,ݽnCD8;gE1&SgfٛG|A@Xb})ף ^W:\$n;B|y"5)9 -{+xU)+zoCc+č12lޢuCljA~w)"`uΡofw*6?=ҊhG(\6O:kwÆYP"(CfUI<8ic ekvXkaڀJne8! ,CnBgYH?\6߆,,lUW31v(sLcoq_;l{Ru]P76xkq /dͫvDx**y>cH&&,5gbfW*?0y:5pTGo\$nek2B$fV%ؐ+Uu @T Ӆq0\OUx12Bqwx&;owoT&v-+e-C*F00ӿH jZp* E>iU}?PBYqHkC΋5XИ8^(~{|HQ-QFRckɲ^ *~g_-eEݛ-ry2;.rUiL[=]!NR#/%YwGAM㒃Ԭo4L>3j"̯bfu,_0 g<|S^‚>2|qAGhZ߱o ~ Œxرzu.0@_P+eݫFnPc]Q 3s%̮O@Ay|B6)i_1%7f_Z|޾dom{v‹X8zn{%# 5z 0ÝX5 ߒ\cc֧yA lm% lzFmZUj)3g?.zu/s6oCēd$/$\uv1u5䰥DpDq XчHYZ[Q*/5I<\+6Dnhl!'TsJUwvi|gx ,>R{GyH@,k5-4"2GJErJO>\&SA4˷?HBA>{G倹>)0dO6|UU M2 3zK:}^owy(71Q" sH݁8&Qgm'n1Fb)ě5o&fn9z!]QH_xCtC` %) 1쳜ɇn'HE%w8G:Uf6yéj_;{es[^Kdq7[W} &]YiM5ISj$ 7D[Gnua2A/VԏS:c {΢ە{DV 8SN*ƼrMU28)P'믬^)eybm|'M<7G8R"88\-izNFdT/% {fv%~ybrVw8$è&`WT6bҽn4P#{L%BQ}?3V%%w%ea_"Q Ycu5%ФvvًróY9V~8xTnBB~M54Yyb^\Xْl)~mbPo|L# syfP0U ibzr<{2~VDa;Է7'3a#;W+?̭N#$*lNDAŚNTĄxg \#=`BhNi.CfZ,;ALu 22m `W\oh=K1y}K` )Η| c4eQ12/~}i YFˬ;Vbp}E@Kmw!lH`kv#С xeG@{dZcIG<}?QmQoѲpH t ?6='ghO!^ϑ΂gzRrع%Y2VCѵ/>C\lg2K2č 0S вMloi?B:-]Վ{%wZL'"s4+o* }鳦 ~x&e:c)6Hi4X=ٵ|“"'JҷC FA٥ i3EooU>4H-sPjv% s7#T7{ s8@#X~oaw}Q2veܣvtA ״@?/!{yU]OJb@Oow$Q7QS?G6 \"?7xxSlٚAzXkGcD C-)b,ͯA:drJZ 1K^TY8Z;xٴclۈ'm.I0rϮVs/ PجQ^N蕤ذ3_T 5ΞyEu &+hgSVj(\+]x H "y-5y)>!!8J@l~{ uBlL",: ]@ًufȲGtKA-"ˉ_Pa<^D7AvLh,wreA_!Om d"ptl: o^b"!@!5"`/kFH$mf54:0w1ܽ aW+okE}=4gG@2uYT4%u_|$d7oQ CCn7.E}>bNN3L\نLeOW?ȗ,VFzQޝG4Z|;5& >Z=_mX˜ܜos͌luϱtI+ t"y'u$;Qh&s?uDtկ×S.,gTCW-S ,|P䣐KbqH8Mfx!EBhvܼ//,]5a*|ՐѶ $&%#rF@  jf9k(gQ6xdoEY5Bvp^XO~DwjDK.QStQwm/%|΅nzG)وS-? 
K\{ktSisJn S\9sjd͉)ۋ9lz' ~EBR$1XϓPcsIZzhr8Y^<׶ )A7FDr&.vuA>P# r/y|Au6}xQgj1QڛVY&(T{@`M\):vJ1lW"pYGc&k {{Gq%|}F>ag:Věv ,csgt 忙rDĆe}]at!\} ~@=ȄJSx=Z4̓1-ߥ#ڨ?3位)lR9 _spHz%`8:9&xD!4퓽Y1yV<8F ތL]]ҪgER0~X0:umk5e8!C6+K<";RF;rL oOk1GEԟ?uZ9bY#D<P/%\_xHGS/>[4~uYSp"geD* <# p(Z5l?ؐQqV4_ ڀ7vֈA*Vj+ & ,gKZLC)'lNwU$.)Rt9")f]j ECW|aJAjyV2?Fe˾x%_)3'dK7k#JV>#~1z7/8@Zva_My 4`gg'Ozl2QW<#Ӽ#*c`rxJmfZ(L"Wl=}Gsǖ gR<%g"^#KސF"rL<ΰ~Z=LթML]a5c ƝiѾMD3K Aƕ+h.\ oE $Cĉ גt5Vy!PvCX; ў܊%X!@ohAu`萷Wz5XPsx+6,PDРҫme t+Ǥ'`;241 z X-ŞgPOP1 *^<וFGꔼ`3|$ζ ]YsRXLvLm4j % sEdc,@_gƯEl`xH‾bWbՕq7|>UbYl([ќ<cAEB M77(zn8_۲3UdWz=sh,76/PAj6Jp1@VsνfƬF+Y)e{(PH} GDP'Y]V,٢'Ş|҂i4/&.lv, eS!Aeũ32L0҃vh:݉kۿOebf ˉ ;8A4T 8QU^6w~G:%7 s'W>'kw9hgײv?1LLB2߹a*$2S*!fӌލ0qn_$jתtV 䭇"cK孴 K#C\Q)fgLYJo>}4-HvkeɚYnCѾ1##&ByVCrY d c%C%d`ROT({¸iipڹ)zHAU!򡓌&R}IjOHylv9r3F[.GDC)!'I3qZw"p<+k\!gj׬O$vr^ex__SK^XFcYQ2kz5h_;F1v {MUĆ9X!HKEK!Fc_qeV8 ^Z.fY ZqF2QZu,D2~dDX# `l Ss<;Y?ӿa?ր6ˉ_OBpDW+h!a`7[nq$j+F#n 6ބ\~njUz!K)6~($E e*CHo)  MngR/k7_|b%j ZZS{쌥l@$p# #+q&EI9y7oJ{E[+ B\aEE9Qr%7U_b>82Rܬubnw$<wkdbZ 酘2E$A7ᡲEK\r3g7A@2ppÞ}CbQrz r׽SA!$?/VAWPI ⼅].<eFtDVyx!U>mkEicZYU`ƿEbcnE171F"274!l19AvJL&.KJ[:{q&L*~%ih}3 |IӤk\eF`;lvݸ^%Pi1S Ěa ૼ ^+d/MOQ5 56/1{g#zi]b sd &O >ao Z-)_z"_}Ւ4آ.6(TN)B1Pfq2)XdGYlSPHaC+>]FP{(X)SDm\@F#.)DiwOI3,5r$vCΫe,bΠRl^xs_JԬYVfs. 
JP3 P`|FZ0۾EژnPdTh)W\T1Vt/CE X"rc.;^ߔzLzB *`Hqkʽe.B3mlIu'0觥0EvawXz8ﯪ*-1ph[Ӵ 9Hby"F\X׶~SmR ʄl4%`<_+(>XD|$Oˬqeޭ]Sat{=Lxhxmd9G(zW&si삮`&[DNnѺ{!|x{Gl"BJ)q^[K'~n8VQIcr6M R|paz~kN/rF'_bWH)9<$(/\ }&6SO.B'Z޲*"@3}-|Le=C/wEZ:ymCETFkQ7Է0 +mVgWFneM5~ L\sm]Iǖa >=0pBUh*-79>wdDXdlJ<r~9d?-p+壿*HKx:dgJ-w`w۔lj,ޚ!e!HqQdp̓΢h7;KaSڝ[ϻZe ̰ZL6nvsq3tL'֯LA Uh6L,>i0ArO=QѶC[m6w2Cx_+U*l+G16}@8cЍ-o|H>rۮS8rﲪoiHo:X'Rk&־=|B"46GpC0olxG;!ɾQI%J㦄!k*[fu^}@Ku#X1'Efr(ԁ3'Xb{\!-D!̰8 ~"Ԟ¯Xi8A;"n޼^%S5,T4:D~-ߖ><_D򗵫9$ѽ*)õ_lW1XTJh;g9/Э @+BCwcypg pďv;hg-H!onЇ̅Vy[_Ep&s{]w=woJ&)6;Sp|¼]zCBtKM쩩G'Y;P9&ja1rE-OXME[БZ*1zр o)9F϶&jTxx7$I>W,^{\eUW@C>NV6KQ4)J&&E/o8 KI}oD@Hk:#@լ?mJCP`^JI $q~N1(ChC,63HS[NT]2].BU$<᷶P)L5v 7Db Tb+OV~r<D T}?)vĐXF^}U^,.MZd'vݲC\HXb%EWv 0ZGo~ȯEϋ1XG͊/2X|W+ Ps2z[4&}쀖 ~ʥT˜REpx'HvG` %xFNMBY[>z*xJ%F0j~|?LXF4pc[ BOaUAJ}:xDbׁC"~}.oOȴNaODi+۲8vBRE.\ ;2fsqd=wwfjy|)^ع]#wڟY$LAk)@B.=m)y^j^pCS$렶;uGycऍx$bpK3Kh)3e%bl>z>!Iǫ\$t 8bٲT[a~.k2 yk5=rJ(ALmrY|Pͼ:+P2s8r(lY#/yCrM79GLjDg{3HQ8~ z*ͻ nBak?e.(47oCΟX(Ӆ?F>xTl& >HcC %$UwY~E̹.zʡ2zpXP$}ZJY "2ύ*\y) "jB&ɪ'9/vfxۄ:ϼ 0kB1/Sx:}gY1x.ذ h=[fYy~,_Z|Ğ%6>1ݷM dvVZ&&9]jS^İyނdAz>(~"1jlO~Cu^%fìʝ7[h:5.~YhS_ v9;/{Lga?|'K -&,$ۈy);?h꜐OD{nk?yө7?RfB)y&L:վmYTԤ#zu-TpvS"J0FQf:3/V(2M`M.νh /Y0>/ 2QM$iqeg<("dͼkMn&j; /)=i_e4M`e/xz\Q^H 6ψ 4?tw˗7HE-.&jwU.{,7iWQCgR4W5cԍV߾\I^pWG˘Qz# 4Gx"cQ˚!nQu3д'VL{ޑ[jsF"OevmRB( Q0Im`NAY=)&بR{lS[*O#w`+ZGaIzdqF>ݢ=Pߟ9W> w .*χLy(mlTd8٤Qf[ 9ۿ&1›JG#HVY)lt[!N^牜#ubʉq /WI2J1m9ö#`Ua;saQD`z'nGiońK m7WFec ? 
O'P$ul*S J7O%(G-@eRc=AڻvifPj)׿JPQeCo뉁S럘Q.s'8Q&<ԣrK&L `\{(|S͌iwR V6$+3_1 @o&G$dSeA^lyEguS~{g q *9 27h_Q{eak/zؘX_tgpʷȊdLsG?Id"JGZ(0v֮l~ sݛpâx4rO_6}F{w~ՓKQdUX5T3U:Zԕ7a_'9'֘.]ͪaSY4KO˿BqCפFJ>%`ZsK"'% 9dYCZ=Aq\wEDyD1]TDKGSzFzC{D}t/o < n EY-Hcr&}+FRiIwZlcu+=ytkReP-iq9L<3[r(I|8A;Wa4sWC(\aA`k4-{%yaaz<&Bd`4U9toe +߭w!,t$^]ذ(Ɋ( ) QAv`|R mn9pr yo ؆l"fEV2޵imT3 S䣗[f4\F]q3ޙCuԕ.0N(N[[PՔmsɎp KWK&C ɜD):clHU5!>N\ B=spthR=P18\pџ?69|SQ{acga`9jAJ@PΪJ6h'i{6d*yZ4>k=ګf$&i oH ]g~C3 3 9|*3JkVgn3ҫ s7lb#u+mآp.(J`N\S@GP"wN)CnZ"Y9N6N^1{h[=䊏5i^_DXۑ e7l@Dsu}d|IU٪z)O9JrXΏ"[58']̀Hʲ1_2B'on>b_+k>6} ;#5kP\KrL}Z$m6v8;%#:OlFeV[fuуZ{~M4JOn@5ik69rʠʊ3tXKvZ5 s(C$\9#WI˚`/ƬiYfȋa X`P)L\ kƕ`Ue,(Tdsgd%dI蘽%o.dBˏ8 uة,II"RW% -)=Tl9G@ޡfKu SCn$F>^7`~Hh^~ϻēh4Å"Qtf հ1a}:pJ$^ Y%ӏ-nc ^ߚqѧ#Y@=g!`f77s¾j;'~JL~ J_Y!H :;A~ը`V$aĻYknrv!p2uYȥ6gbF:Le͍BKpRdc@a( ,NrFzЖyY?G4Nv%^^.u >= ockRQ 3^Ľ#K}" <3sį sԹ3hZ3Y3X;@Me-c&s d ExwL'K/Xn)u~9k5&LrH x)#OE5h6AOGd 8ǚ\UtjB`P05kHֈ>25 q,&!CH]%'X˶žzqv{um5@x.A Ild_tnAxʶZm  i6\uUi(1kCn ŠƐ>yM T\"E*շ|#HшW؁ۮ)P0?T*=yKweΙAv'ɱ܌yI.=QDQ#a(/-D"R\@g(F6 {`o$UI!S6xG~8Tj nRh)\V{üB dbE@` (A0m @rӄ->J6mD_r3%+kb:C>|q%@_uQm{oG6j"<AGӃ[/FUi@P v  4C zKx>a#- W0 '8 pyQ)o~.q\40mBZQ^ee((Uf/"0C$U|]dV@;UJE;a9E3Ty=s &5bqgY^A7eS;FlR—ʆlJ׽8yn@9+ D/%&]ZM6 I&FC&mBq ?9r2W~ЛGϕjw[+`+cGC+:PF>WۆA/fFm\夾RH|3fnCzj@Arp!TWʍ4i?BbL]`DZM1lVTM\CW*Nޕz^/3cKʧJQ0*Jƫ24[8}c]yD:`Hm+llc)Ё+)tt' u(Imst"RV?8ٵcR^=ЫAL k6 T撍3՝)}"WLCF^|훞NAiƇGa{?E4IGC=kBٖ T$W0p1ʹ;AP؝E`^_wUYنhش4j̓Ve3,Dw!,HD0"ld+K+BQ\]=;bYE,ɴZە;ʅ؝k,KI6g%-s:cG5p19 f|c>9%\i5^Ec)4n-niDЧ}Xeos4f pUH`_$&Z͘(GZ [X' Фn\(?eyuކl!䏳xC ٥/|Mmga!0jvi4F?M9&*"'rљ`46MMfCus|LERSy?^UX)+r ?R\4mEF|ͺ!丅TжkrGώ] "[ \X226=RZQR0ZCwgU; ZK\1[Kϣc)d3.&'UfJݸ<U~?2 WU-܀O, 4tީ,.ZübȥPB ^Ms(kRIZ,oEi߫츮&epHwTNkn 5).b>Y֊H@7*Q%GƏQ06v"C[c%NC:NLX KhI|'Hf4'/RɎҹDޱ4*q5ϥlt>~īTu{& M"\xc)N"[뜄u!̛}_Y0aUt& $t}xvav8-v|F-Ffowma(`{B41̨.3YN@>>?Մ^i|1 0:xI;ohR;$OIJ8sõۉ@.EBL|7FSW=wtpVZn9녖5҅(DU94H F-ΉQ*滟414gso-=<]T' !:CwV(E ݒ L3H=Iȹi(MWq̶߽ @[{Km x859 #o-skIQYBX=ubuBxb$sĎ?5UIkSoz:lW;*Ƚd|(oo8B t5!.zvf؏T,9&̏u.m҈/o'x,>!I6u;m3k3K[e cU,")^ V}g:%.VޗK 
8k/x'`*+*UՃ@[8!ƋҀ?YEtޝ\Ъ0!IEg%,:է~'ML+d;*>l)q w$nW#J'^6ގ qVg+Gz23KRSH +4ɗJ1(܁Wf! 5{ܜQ~/hŷn|taf 4;-Ÿ9.TY<8`"H#_Kw04Jp.*ScP|Z{g АEx4WVb](g }N7cR y0i;{X:n"{sRܠagL RL#–V?Cb{8YXkj\$;V"a1Q~ސQ 9q "bB0"ߔyQժOd)>BE5džvU tS4.ՠnڮ(nˡU F0OHz>Q :㪪Z8/ʖr~A׉zmY%+¡ sH|dtJTɺ?q){n*,΍7`w0`KdE-OSh\dny7y5D)GŨ_k m.'¹ -Az(3Q˧wb1ުʼ{ܺ}Eixc{|[E^xɹ҇d^q5l'DgQHS+CndRB ,S [`DTU?ÕM@ܕ9yp;L8Ѯ}$>Dsa0d>Mou#lɻ|b6m~ ։DKᙤ%zg;3CNk̓jƍ_w',t< yn00E y2-.h/LOjR_3L$K|fGL4ҭfSaJt*)(y79F=y Gߋr}PD~GM 48PoڥAdlj-D5|nbGo"{9R#ī(^iI!d2vgs(S(4z;͟MaFPx&_i rfTFo9<:[XW_h$M~`[ّ8W/!P6GJÿs @avon+*_+@TitT<lY0FZK,hp^7}hu]*@$ʂ if03R_otyMR(vdQub1)۽:Q7Ն^o cLy\$@NxC$Aq"G@]ȌvsR⤡v߄n[ \O)Q>oش ^!$8V%G:u r0ZaS +%A.GE϶ʔD9Y[e*oyS<3U;˲u %{k/x0a!OgA@Hcm|ޥ:"gKQ1@XJޠ3" P 7ʪ0<!C^׽8085;aP+ɓ"d=(~)eV^0`>! !< ]k)ģS2^6V Rן9"flSdUPch"*ԮA" I=`<jkQxQD<Sm kYeJm~533 W@{\#6K>\0 d(iw5Czy::D!dٱ1q>lY( =?Qǹ ڕ*ڬ.+WV! XCtbQ9kDU0hR8Zp1))& QA4C4xe\!/hy>@ DtL>MJr.q46B1 DS"BTDşx;o+S5w"f-qI`\A d!׊5gxɒ.|x<@G~MHw/LY Ë>uWɁޜ 8!$cP8B,>*kMYxGN;֣D,j1ׯ"'QN'Ɓ''k-\'6` F7%ZS6VqsdO-.- c' pܹFkLJihCgw"78#aVMZW شؙ $Hu]ؚL`O[j _tς%mXr:NG()MYXG@,iF\jbi7pN}sj )O)EβEQ3^ƒLKٌ5:h9+pBZYAF Qo$p}dI%cWШ U"V#E.$Rַp,YGHV)XUz9dip㢮Wrĕ"'6 R#;ƒiIM#(ӂXHd8(>;҈(>q7z]2nt d^ 苚=}|Z2[0 LKRme<.[:#fC,&[|N _q:.x^Ǧ>ېa']`| ûP},]Yo%Z%{^bw5 $Oak u',kS$s$3ykj;\u]쌒FD'@䶸 4a͏+M{En!THSP8nhO#Sd -r&Qz?yZo#AR\C#?eq! 
Crُ|T+bs.L57 f`_z"`|г[  e4g.0Ep7S0FʎY b?z8ޙu--e0TJpHQw/&A3I{31PQZ:|SZYW=L2/)(ɅOvH7Q+BٺgQqNAu&WӸœ%L6C1֡@*ƀy*sGl+_]ㆲs'gAS[\s"ЩE=(IT6ŧ12""&z=~~Q06l'ҁeњ4~'IήƪV$)$| ԛƁ7fqxÓ%@!Sk_/>53\< Nve̕cCd=壷kŀp<`O,ʦ[NEd!LыB%vrHY|xF+I(_$H " hb}ݴ !>hu#\u.ɞt+Am`ϪsgaQ>)tBئƦ<3ղͲ*34S)}j!ː7[uH?PO C d&{YZ` Z ՓE%6_$Uy8i$*>[BS3e8N>>>  !oM^CB v>g.D޸%K7:K"aR-;)SY.0p9ʴt񥘢Fȧs>]m||^-Ӡ_WXPQg.4"EcK>3qVA\]P/6ؙl˪F C Aie̤ҋH;Y#eFE5q7d4$/CQX$,fY ܼU;r70u~x1YЈrD*Ђ)zUҁ+ުП7NNGQ /Uq5sqyuc(F/@ɭtH̀{^jԔJm|wt;*^.Gڪ4\`>gG$`"2r<]d>U$H`{e>)j,z۳ggBŅ>&n!KyϕDQ,:c~AY=!j{L4b e5*)?ѿ+n̕i VG@XW3Z* Ku BW0>.7pKp^9L~\h)g`/{^HgXp,C">my fkFNj!*-nv)eV~ՆdN4%hD\*c?lf{%H<D)h Q^mXKRx>\w+:m}#'.Ý@mD7o J@;kw1:cf[q?OrO(РRG*ka,3_D%b좚D ߻H %V&+S#o癕W2`qvy2u!B=#iOXQPQB s6uϷ%J)wnӗ㶫H-cM=/zhVrAL":Htf0)f^f 첎D]՟0i-T>ӘhM􇶾ևR;rڒB Zۆ5@rio-Z` ;#B=y =DL"s⓫.ᣦ1!Eq?{cMr i:4/EX[fc{DAt1\@^0wQ2[6@Cl㎒l8鱮kϔ( t0} 13ID~Ϫl6%L($ vP"}ݚDg:VTCNg&s3wp$CCRf0VHv/&#faOn\g- V2Yk,;ἥ.XכjcֳcyȈAqRg¿+dTnA0A,`aaj^~áSv"10\mUK` M7e)|QmѢO+ۍr#-Bcs ?,|?ic1 :U)rL mghʷ=!VӰ79LD9DYO˜Kj \MSDrɀl},L0gp:)>$ 2? awt}~E{ .@]FBB4ͺMSzk pKqcP3k>d}BWaAa >ԙ'Jt@FjI;peKtqHb)ޠ8pyZUC_RGqV+(AoߔCvA!a'ۂ,e({v VL\gYt @@A)u@*9iRe \s j4$H ?)oAYѥe~,iMmB0wGf9wY^mZkُC!%qmB(kw@yn`5jk.@mݥE}I4vU-TٵȪq-aI 4.\(GO{ `Sj48EGt_5!n.29Z@@R~zИoF^y|rUY)v-+NA_~!<Ô'yl?h?:.0s߈f|"Eτ_$wB]Tqc7x2pKn>nHWK:Gr@z-=K#e:q YO|hv-I(=HFkx9Q#>h%]U,fu1܉y;:E:AʻGMGk%oG?4v )aq aMƸaIB4тGbf#_UsIݧC5WAt*3Bî͂> #^L8`uaP!xN)j:WZqriP LVpfGl"3BF‰ZuY:,*l,ЋTP["GWទ, u(, cx6պGHtsүI\hU0z)|CQnK_>~mQ0#^xw_\A}7ES"Pώ2O>2梟 88{JQUG\es!X/ĿWW@_}J/',>DϢ گۄrP3Ejŕ&VRx+vušf3b-LGW)R('"8>k?SKϟҜw uI0'椧3XjJd[A !U3>=͏EyUꏊN4HYt<d#"VBN+C]6ðczO]yo"]ifFFqf"4'헷y8>}Uc#$͐ ճ<0ҋ]:i}Wn(ߠջ7kVz7˼ĠP`53vذGObKnkU֒A i EAQmq{~_'##[kRD K["ԝ3X>06{%Iϴnqe_1LT+&3R(#q=&{7/ͳʌ\,ޮBQr?&oʨD(iR)}92'i]dȪYX'joz,{G]Z[a%՜L8TPeH!7 IĢd" Tnmaީ/9ć=X=,w ɏ&X_ <97\} b(ؤD L6'F.p^(,99?K^aVh"&\OY ⹦x6*maQQk XI9WFڄd z3=&1$&@kE!:Ӡ` A,$?u&/%f4nIukdUa L!8x9Ix24CߐA { +h.AC(0H 'VF rk>ڃ"#:1pݙ6]9?h#0;U}SfPQu0p=_W.Α~\qanv'{z9>Dc su#& >N|.Է]8>s;SA+d: #B?a7ooUl)ޫu{,#Qj׻q{Nm|#dz#цD46Wu|KEW # ~TT|UT'] ھ8kG8ۂu0qMc+ymrD z>@I 
%#gjҗM)};8M^ZSԈnp*h&hR>B%q."{a.Ƀӡ:(?`W}-sNzRH/۠Q;ђt81ҙ'1 8>Hx1` X-m;.NC&EB+^ q14Nnok1Sq2+7X(: i:d N%ͥ^ h%[]oqR1›QG`n5#6Rި3d \lBg\,޹{ch] 1q]qgͳH>OMڃlB%vj32NSgI/M(=7sk[r;/^tJ3oxt "xmB^5Nd8nUrXHx+0wvo1(- #{?Pi;C3Vs rf%G7Cㅚ8հ35M"oNjhffVI̘6$[D p'WInY1PwE%@o}!d"C0/}DK)hTl ϖXO(GɟWJ,z?t7ؗOey)7CPY!XӶL+nBR(i¢aRjtVٕ-5NVgRX(c ~KYTema X]hHvΆ$L:sX'C.ƂW5r ,q)V+B5%x73yk.(Ti6оU{+&@0b؅#g5 9fG1L}gQCKrmܞȭ_]]Teb"//eUT֤fjhymHMeEzߔS4.+P{&7Bc:= +H+0׏9 LOA8ɂ]$EBEʇ{373ʲe8&bROQ@/߄ fBP G.+@-4.K![oVBawʾGP+X7! { 4' ryz;vV.:M{.Rt4?WꌍjHU;s@iy~FeV/jT6^4u%^% )Z#66[7%OFh"Mᡎ??#$JֺcА tfWDoU>+8PRGZ$ͥS"Vl qV2.L)80rHKp7\g_)z<{' ~-a-2TwfCYoM{3 q[Z tacyq=П&vCv)XKISS%x]oykZ`sںZ|YS!#w\_4Kz'i}h- n 82QT@1 9w;z79L'?Vɑr>[dgZU,qPk'fw U&MJg W\b]+@1+]X} $0pbjxe;`'ȄE߶BtN&[^hH<̅b#{Z-t)ZrW!=quBB"[@L !yHVc=*ލX"7\Bfʊ9XfkBЇ S%0_34q}#epw6y/6lyHnhK} tЀސ:4>IgXV_N2T-Pd]>bŌtgˑD/1cޖ['(@{K+:f63pr;bIWl.2] rA}kg@8@xۗL9Ȯl׷=ed9с\n"{VN}]ϽŨބ o?Կ KR]֚GozzE\:pQ@R7NӼnt2#jmd(讆!iwLW:P3֋t+,aQwۮH ˾ lK+XC R9K8izڱCҤ9J&et>^nHfVmMei%( OkWPkvkKݵ =MqEeINבmgJ>/X#KQ \f9|Q[.{w*YhrQ}V`\T#`w w/p]cf% g@CH#/C+abT[=8^+7NsOFbU`qtA[!Z&2UNkpep?a^,ehM"SkMr/ Hxey3{f1uR4 77|.wq$dm"n1}pQQ2̪s^J1ԄޡY6tDQyqɮ?Z@5C5› FG9^$@[Hbdk'rHq{\[9 Gᅩ%bLj9 Ӯx>W 1,6N!ʻV\rIa)nn/VON&HeCz?Bk4],ެBX7F:d+][,O33L~]Pl3̗5%yhAr93j1oܹpKޚ)Re{)p[i*h|-ZYV1Ȼ).Zÿd40 kHi"!,?Y"KV LeO|KP8dCX ض:uIXe`Rj:N/M/iqN#pIX<?ۺ^lBPR!}S)R' U@CmOpO+c]+IjZ. 
ξ i3EȰuA_1糼[.Sd zTm|#.`i(?n_+3y P˒b~\Z͸:cKu&`{ Ͳa<8ާ ݜ>)#{"$ U W?^UfyN#:JO0{ɤÐTp^?-rVXOlqrw:eSVZOWޜ&0|NA@up|?kqo%Tg;?fp࠰z>b!V~~%S֔M_^9EFA 钜#SQ @@Dp0Ue\%8g9Lh;CzqVfۺ₅ ɏL J;ax$O zϡ ~HPɚZ]Cc0 =N!$ڶQPłʌ@"tuiM톫[`##$x^&j>=PY|OcC<KwQ4V Nc,xٟjկ~3~eA-EY zwjo"$DP*7073tXЏgy4hGvV}'yg(XnXQ^ޣ<3x8ӂo7{}pH$/xf{"n4+}7R 2׏n4`Z/SNP-J/YТąc1}`Ll=C * &ʇ^g{ F}w W0a\q@I`=v_nI[(JMn)=N7M0#)|;qgvӧuJnFJކʰ2ўBU{LӐJ'?{.URa6؀AZr}|b9u#ЖNw#F,#c|5u>xpz\mu0֮8|PAiD~*h:9Q$̘>H۪?^{w>w&moV#2tj ;yB5Vc=ʼnPt:] d&Ä(A5Ώya+8oŒ;+xBfzPFG KB^2v0]NﰵpzTU>9!ѽ;(bmuaC!PΌeš9a a*i TꋥV ["3c ;mXk(E0E׈=*1k(G)=)Wz.n{" zfCv咄=A8?`2yps=W#"x APNz6W I7\$A 7PNY;wȶ{ʜg@Qp>ˏ~5WjD^ޛ?$# F#B{ASKL.z4@1>'IҰړ?^d1u 9$98uXڑ`wh[Kqua;*O@h7pŶj\/欄@Km; "p.%r:||ff|zԲ~htuS 8nPgNڬj4B[g ϹkY׏w^<9~C(tjU1TBq:Ȋ^D\\Ai @*Px\ Ԑ?{3؂FBkJwQn[GҭJ(')&|GCVQ >3 |Ϛzsp"'x-D#Q+t.xWnl[hZ6~F%EnvPo`%Q L\2J\+kEG QW%`ⷞoU2c~qQk:@K؄Q r!r#/Z}XIP柏Pƹ:@wGl.bzw!Zi?)9ʎ&0Ό'g i}q&?z)(tJZF8.Bke+oz$ͬ܊Rdu y;+ }q٘wOZk!x۽z{(BG}n5$>|*+N^\ng3]=O8t7v^a*Duu 3e)+Mh]mHۘiw0eS2U@S~a㤮L)@Mc"-`xCs3-c:j%3u=DeO6ׁ%%eUR{'תb7/K,cF|՘-|B.; 97Og±aQ.f]z;@6n :ⒷFzClvqY&7nȘ"6ZG'ay)Wإf󔎏oN{$fƲ&ת[r/U] (O'cЉmE đrk3 ؈Θk.ƀ+ l*HVͧ3TKf_vt! 
nky^E1 ýThdjxvEμ($"s+Ma4H"RY^x?r䴫?(.">9q %K JY[Uӎ~].+(bPd C#nPp0i b_7Pژ2$C8ټ>.:C~Jb$TJc␾`Wc0(VI.HY矧A /O~ v|4(5& Gi S51 NlYW&d}`SFh|?64ұ91<[N\:sr&ց"7b7*hDub'i/GC.ذh Yh >}Mx`㊕ t-<.M^Æ%*(Fx (a=YE˰ZQf+5ËhUЙbcLk;gʋ&Ţ.ǚ2~j?IMޝ l>j;/f1^TjG/+3(Nyѳlv<ЫP]_IK[/eZ钒v'R}{&*DH/+ңZ)D4CJLEW9Y/o8TtYJODS1sM ߀X2TDP-rkR>h'ɟjކ gjr2MeۀӞ!U@[Qv!yzĝIbY y߹²*!'MMIo!n;.95DZO|:bJ<}q#sszZwWC K/gC:!D7#žSVk8/,/[J%B#Oˠ@sWwsR1ϧj*gmr^=tVZ- ^c\RYG If&>x:G}XЂS ;9&#r򄾠 ܰ|(_C#:BFEۓc|D ̿2?zV<|ry%#s[*QIp.40UcAB=Z(ub6H~6 ޷^ ppbtT;|-W9%̺ { s$3e2`\p@w|Nw.vQf~ī8Ntf5)O#Ò+,mMS ua j9P^8 +c3& R])~hyRp#J#oatVPjfV_go.ύ5|{^G:22vߥcҚwu^mT<|`ѱM bfQpGl)Up0y0=wŬgal6%_ zlc4 1Te\'U Nq}ǜcn| cMdaf^ǝ/Yqj0]/@/a]g6KeNbᥘ"0݌ry­ ~>U=Qz993cRDL,Mi"M;ņ*-i;k,O״C76LZyԙ9{K#o##=6iumkpY0MjzCC-x3($plY ZIBUb ?Y ]RU#n;S7ߪ#;$r_;AZFQ7y`=Xd >zhP;p5%(/ۡjY.])nZ*~=AP;Ca z6Oi*{cKe"2&vyMq|*^ e[:j\M6s >SGnYEY_K&p[|Ԡn=byѲ}OINϼH]/2KMCМ^hWrosp/IHxlQT%R)"e:`uhr7}ߥ]jYrsTpp R “C*~j+*-pi|@#)t1z'-XeqmXm'厳p VZR v&Q]/",xK6J/}N&M1=K[K+}͊-q>RΧִrN{yAnrAZ.V7v[`FXT6I?~<lkmSv}0f*d Me J&̑Na43M~ ML/^!~G%E2.!t,)v&d#QQ]~p7I+N2aߕ`#|IW*9ʳ8'Ǎi`? PMg<)s \uADf.tQ@g.s5jwv dh'3qʴ N{4{٘gRlƦ9 wP? =Hk= hDνN@ŀ\K3tʹmڴ[ ˁ. pܦUu؂ UJzHhg3߂5#xDucF'ABJsKzF[]m;jAr~^;Ļm> mJU/[ɮV'URU\vǍ yT_DVrNxg0E}oA?EߵycX0w>$Δ/̋d, 1֏=׹vvd$MnCZC85!o_Xf~/LhHoG"[1 z>CI$_&T jr"g1'IqEkCz7_+spnFF!1H)pQ1t.eGhYۻ>,}3r~R cDXjиedVVÞx4ee8-+"G);1EZPnH;R('E bB8 ` /+\S 6h zDcefunE9ǑƛB"녉1*M;2gh]L:.@w+_r34$$mA\"/a'xHtaжoѧe ԗ^Ѯſҩ9h"Zn"gI"twvB;=NFm\B0 =uAP<'>TeM#Gsgl=v/D_縷Mݍ@lh ^nA͡ ]/θ6|RR["diRUY yU5_NuX>% F+2.>'F#UܥNq. CTxfdԲGQG]DLeM '8+g 0H + !0 3 M&8m5]WN@' 3ӭY-S`)I@U섎7{:̹ (T&/sBj*H{ =`\8#9 $v!cM'-?(9dLȩ qlnT[hM{ %}U8mw'Җ+=`pÅՆ Z7 B IE͎5Iwւ2/]6w8-d'._94 )F~u+Afqv,u++p%I"+cx[]iDEULJҾtn\꣰uIY:͞. 
vՒ F^^WQ>lO߸wןAGIKeuX2÷ʺWv/('`p6"o;¸6m16<-ʥ#1Rc.luRP ^5+Yt V ͥ9;J]fk?n*͠/\GW`1]=@O29IF6 Z$fV:Gsڮ|Oc*j_zpiOd@P} 4*{p72 ؘ#֒ t'ʸFK>b)zq&ڿW_LIWՊPF#4]>+d 2Qc7.U.u& M.q: pLրRNin/qϟ8PseO/0kbs SUnڳQ!(;Wq6XaksJ wfN%Is އӃiwp ,H0^F^1-ٜH:,+,8ڽN-o19N~a.Ɲ~6T2{4F;Ww2ï񰬃1l yBvC^Ӎ4X$we:WD*>̅P+I8>&K4d2?[z"߫"4<#p yzH/io.%N~պ=J&#tY!9Xf~ w` mGSKUu=v#qLŇާդa»Eo~K2#ӂgU%zNݓ~4i!E(Mҍsj3؀"x _:"x;i's0r:o*a胮,_2hJ& `ei "H)OHx7.: eo0m7p* 1t`&1}T!0}Ns늚;Y>MC4M-J^fӂz_hVd@~{4"jf҂xzVX/N~Csn4 ;<2/ݍGn5+rҷp=؂5.e1Jż$%MJ/hNila+!Zz2co osaJ L*|\P~Xgsa\&T(fB4 |Ho@jCD8sԱ*A:46zV9"QEsdiE/O6 S+?+QߐÁ"@Ut pV0F;Rh/ƩUف$ :+]l֫7`7 m%י[R*0#kuK/BԼ*Tʥ58⦏l#.~jbjCZO]AKmk`-#gf~&⃣\4.׌d2/O #y$6A]84[;;zcCm.̿U{vorV|ڞ9ݼJ@ HEbk2.dydSmg3-794Hϙ~wV: k(:ʳI.IRĈЩbCհ`$pF2ؕeK$ ]rh==z9?.v}F,W[Pf3Dl%ǂ7>xN#y?['‚N0pg931 t(&#a@K?I1WlgeIk?z6)({0(oo$l qָE1[|_GXr(|>@ F1K6,WYy\ZpmI3A0~Θ5^c r٩HzlY~:E\?3|LCu"Nl\=3 R8$59r9k 3 ǟg G40%q!}-dՎ5G^\W9)7chlRkIZUs,LI9gӢ{\3f=cL4 8o=D"k!T<ݓؑL\1ܳ,*e ҶJb̡.` 퐞 |j0u6!iX)~Qc,5->w+[:Ͷfi8=(~ .b&vUG/\ tapz#>%r/'7HޱָB阠=+QZ{ `̂\XNt>fUCeDsO-]oa%Lu]ʪc + bJo\6b@xsmf}u Urvi 54%PFtr{gUdK "z*/+ѓYmqLSF+.V M"4ੱ%8x g\"*&hbߠ9GJY-fu m/{lY3m@13iCqRmZL5۩H_Bp$KTxSg´A-oNP/p <xb G ŶKQ̣MD/t 8GnM_ШCC7RMzb'ms;|gmEzk뀝߀{T‡Yk!ݏRv QU5KlVV02!vgVӦjG[k+qn ?P wyGR ͩ(n1]xQ rFs&bq+3A뷾Q=wXLb_J_G%Dji5C@ݟt%HfϺM̛<#MdM *(\aG^nyITt+6%,n)FIE .ΐ6jid7UCk{ӷ#Ny{OHxit"wu\&S+OKjcۯ$S tV?i^r!V}?0~<]W.X׫m:4.@Q^hrn iQ)#mv _JX S|X5@hVc4a-ڛv6}}o˃pqgƢҹ + ɤw8iPdƆO kXY`ߛ6ᆁtƂ ;m~KzV2dAi*;7CpSѤW%n4 ^'灓"m>9^p\56v6V1?pJ3UK] .8 9m̀-F[x fvr9dsſrcŁo'a p۔gГ[b V. 
ml:^ҁq ,d>sJC )Dޏ>X6D=}V7̂H匹D"+bo eMrZ/-Dq+ڒL2 _X^IXl2_z}r#fF-BF{xGi%Ic-Fջť4#;77\W.g= {𧡰7f"B٭V\%Ҫ_=Zd},i .+ n w"-S<59M K cΝcΰq(.QdNvH刾sv.5V.PMY1C [0uƮ %A)cXjmc2$8AS: J+:rzmS 11_p] U6D0ո$)w"qcAY7~/HA)HgX11[6עTd]M"=U:͊.ݬHݷ~+3s%sd+So;p2ñ{PГNNrTθ聝9f:=GSznx1W(h;7WdBJqrcFvkz"Ϊȫv7 X>10 q@ղRd=|"{ e|=󧣘P`E'0' R5s"ϗrZvDaMpƨ%aVҪ{tF39Fc\$͵q{_K؞s =>Q?t ={I/lʼ2`Kfl&dw#8'p UfM~t˞ >yl?D_;-R# VhTo>ρa[dNLz~6roHU[r=.)ӳsÓ(ofCcAMo$ T"rVkW I.e|\ьmL٥M2)i2lSɭ_ax\fW<0tpm};0V43mJ-ׯtXiӚ@vA5r.5%M % dͤ_tsގwaqԶ:-*3FN˩wc^ t 2z?v2$ jNY2HS5Wv6AጲZC aD.x0qojϴiH:uMiۘh` Nf`ssCdN K3g2<}좚 8<ðˌt:BQ"s!E^@3)䃫4]yGx ]/ʄveR*55cLxnZ&FKs 2'go&Uwb0bƒK܌[mq!U*"_xplS\Kn3|d D . |_KUHr=.@*5<=k{D2m^}ʼ2s+F5. #b[,`;'0.3ݙ5v5vNP-#t=u /؆k/}+~/;N쨎Eku`Z7W*_W /"~$=jYCʡߠVҫPԤCP+EhꀑLf{FG;!Wu^Py/1Vm_Xc8`΢ ~#y$Bno㋢#ŭX.uϫAG5Vp땢- H25,Dpv}^6/-unURyIgQhm:P)W(@% I u:F]=NAgbV]}Mw oqW#՘d@΃N mT =j#c+u+1L&8.tпD ɞ~Ͳ\, . D6}"߮(\&ws+S ) ߉S/L:YAɕޖq@,<*_|=#8oI=b7qtVYoh;H&ǐ$YlZ>->D rJؒ n#@ 9Gr4:,z 5Im}TCv9g ^ո >/\vB$<&t:XʛIWqӷQ[VK͒N+ R;  5@L/) ׵s}Bnes4ޏ,r?g8P>[?p*`FѺ>{P4ՐO\Zg,-;6M $WÛin%<9۰*㼏e8}K`W2]H3a-[:j8a>(KͤXHSWR{N;<'PI>Rp87ex"yFF[!B]m'pm]8?=`&#e fYr' F&%OǛ%j;0#ܯ}JˆtY11Zzw̒͜@vwi2AaE4̩J[ǩ8^,{&_QL.%_L$ܺ@fGe/Հ<Ď{8Ӝd8G/pe]5Jh$U5_(;'"[RV1xv_v*o#]AcUPrZgkk*&k1ϴ7 T_ɆsY[vbd.k0OzDIpz? Q{K;=醸Vcv2> KxTŕ =7sfMw,р6܁g%!칈21@cYbOTR1Ȓt-=A~ڦ2UMx穁3lBWfĜÚcV 09DQTC֝J˯ϗEj*8Epy&yV]W|#6il &@w0\ޅzy,Q o+22j4[gj>'f4vpa(5I|{(jUg,h\(l]W p=[6xMX ;Ib#sRqLh1 k[턯WL!I4Q:wvD5l8R=1wODH[I=VE6\S\DǞZKPva rcf { s׽ Î6i>0L6ϐR#LJhdJSٺL2p`RZ?~mЉsK2AnZVBxdd,b X8}T@.HCc 9͝7&hԓ2{4H<&V;"df锚:WKbmْzdvEWDޫNgXO z)EҘin gJC\}p .7~w ʻ/v*"wR fn*>?&'Z9Yc`GƁ8h1I_ťMNmJ91ZE [_JŷS.4eVL׎{L>qgUapU1%WTO)Lrq^16Pn#y][r +rpKSBD.;9Q/]Bd?mRtS9ɴ *<,z&z inR< $AD,F"O6)_wtׁŹ'ln=0:H&L@fU0ˏeU+*gO)y3 39`8ؚS\}L|^5QLćyP -/#qT*TbZ|N$[kG] dXa,`['T]53E*0RVmВp Z-q*hD ^I+]Ҁ랊-{)|fRdvjY0TBVZ8' ֘{{krR]jE#`R^Ԃ!_3Ïp_a:DM: 9d"Qʒy U+Wݩ@*O{6|GUl5yTTaG6ݯrg=sb,pw@__S"W,'&T ynqRhRsOCײ$`KR^#;̢k \Zcizڇ5l k!SzmdoըOζg9PF~һ܍dFu+b>zyY 9rqdR$c[C& .0CMDw+sF|#wE-Jcws<3Ł:艤sWg=T" (X FJv1KY_Q6VI&h2pjQ\:Xx!s~bkq̾,ٓQ! 
=273QEËO-sj~uOv&fF !ur[z%-qF1ˠ7:z;I{Ft >)2j wlxjsGP(yͳDjPzWQ))bQ}\d< _4Xlx9SXw@cǦr!ҵ5F"@omuђEٔh Y Y6QNn°2M6 s& 3,F{гV:8 !89 r?, }+Net¾JOˏh24\L`]opSy6Gح5b;. m F[2ȡ{y7);Q5 ݺ%!G[xļy)C$]f-&]5Z+'|m^sY|R[ T1 Ru5t qsⅲOJMYa$brVT#!V^uBr%r pD ibMpc!l:%‰#/.̳׮EČ.uGNua3! MD]/,#%~F݌zZ$] ѷy1D3rezؚbbP#~ls?i|&f_r喭AN\2A󭾊aP|cLvPz:( hB}͓@ٳؒ4wg e]eo} f1+~:"w#F!bR[jlsug[?ܘK* +9 oMO;#݌Mè^qԌDiB3GqkMփ1}16C;wmuO[ɰ}*)VZ234EjEVmL9}ٲN$f=l!6D5"? n"8`  i?&ig4e6*>IV˥ٿ{8W+V.ʕPpCe sBhB{sƊ6+<3b3\ 44Z!:ܨN V'0|GluZOJ WiaƟW[BEzMxL2`jӶ fۑbͿG28ke/<Qmڍ[) 5W9Ug\ #k[2P4Otɣ3aI"Yif ÙD7:9g[/sWlT [rsHd?)Ww@me?=-4 hͮQv ia%D75-Hc浫S;kMo50$ڥ_ďq}͸n2k0ifv̢ ]1F8+i*cz׉GF TMC9[m 0X}LdC~2նm1Q . VƦqZFQ F4W5E.uE>.) E>&FjV_m4/*/F'il$1Da[Koc $52xpnZ.fB@^(Q=v / v.k|9sq;p(sHVEY] s`vàJeaJt| Yn*Yht,>Xe8jHn-ksSiQՍfMFnI sݼl;_0!? .3C{ /Xbx9c:૧&Lzм`q!ɟyt%/[ru|L5#5^37 8 / MVAz|Wݻeb)Vhɀ5Uܹ+H7bqN k]\o6Oɠ,NF7;ctWy|:.ru9, siS4< oNs4EnLK_a=O6vR%Ab K6\,LΙL,+ Iφn{g:Wzeoa#ʾ?oh+Wy\j{.UNk}mʅ_k K^DdP?`oWǽ{F_S p̀ZZ;U+ k pcڄ눀2~$W sT# o9EȨLE;#ޠU>$Y|M@W4&ojf r'cN&W D#Ws>҉ns;a?xfMn6EMf2BȊP=WnHapqm?:{\4O͘s ЋeHa5. ʵR<6jyfD$!%E` UP`&~a4j i*#(o}k͘a ]d64O_ ;]L)Vsksz!f&WIT:wR$ l'PƭXꢖXbfK\EË CW `{DE^ޗ6k'c` n^_ J%W -) K%zؑ9,UU ;qBcO:y+b6:7~afDX&q\fhq\,F` ۄW^eTVb 0s,rW|z.CenT ឫuop|=9I:i XoJ&_ir"KVї_;@6 'D(G 6|r~,b'3DDb(IC Mk I ήPg3Ӈ:.H-9Szj6uC-/ m*!U b6f·t&Z^ bx}ueN&h_4ߛI0vdPLX7aMB V7Y ^H8Мz'B|c>*=grV\fWP%kxU"񥵃a /",ɍ ,%UˢkYbÉ02e|cUX]ÏBKk_?0֬B iB˴w NO/eYxn?>HO寃a^6 &sRU"3= #Ȩy>Xe"-=9 έX*QjU̗Xy\+KS0:בg1<فi~_<E)Jc9)ȼT y"/..HG| Zoz~Y['mZı|yVzD9+ap K3AGb2Ƒ_𣖌Дa;'Ses79؁| Fb1WP{} w,O_~yzϫz$xx08ƪ9I0V*&9tcą:QdI3yO 07Wƫmŝ㴲nM=Lc:mxG.U<1fxRg,)B}0'kK=[f,0^Lv;H}Ý (B'DT Q`*㩃D!uJn=jXHYslN)wpDHB_@7ů/|ڝD(5EJJ XMg4hސ@w=I sj?pdçDZ c Z(ߋ/ٽ4ߋ[{Zxi-b-FԦ^|?ԃrLt$Uٖ fcK3TYcֶ}Y\clAF1vܫ,i YkZM P>gg  :[,sQY,IEOX> :CK oӌ2"Wz[KU4Ffgcvf v[_ *Z+E%ChkRbp&v -mM.Nq76uyԘ8a9 wi컽?Ǩ[V4z Seb6lb6^DsSt/t)f#&+z*EԲ^91Qv~&$:En j9O /\G|T|P܀/e Iu>ѷ83ThULgrXeGF^il4pӝQ0*ӿS'+]iNu>tNRs J AY΁AY$'s E3Kp5Ա:3ZSDzg9@|c؜նgu3@AC}^ν@|%u{ϋWE]/㔡Qrn24-y!%Gv$;-_Jj"Wc??4"nY3MܼȆ1} _}IƙH! 
@x%c #>B$nGw"s2l@BQ*H1O 20CiE2Db 9멫fm{k ѫD^`5#Y34EJ(9ٰY8,yOe "$ m{2e*/Ei(6(~S Uyoo֎@c 5ݙ>~#ُz /8{8UChƆMs0b۝%y*-`oqc{ۅ?a|9Rbm"R/S̾NXO@JۋiME'8BFej}wpZnmíh ؙl_EXb9'Oێ;JJ~%ULDϞ*Oy쟶 i:PNi͉J/Kd%~JM{uX/F_$nTֱ~Ny.I4^y@Xt -ؐc 4 G){sbjn5cqg{"'^Q{!rM|D}wk3 ]ȄFH7hdXo >=Ym-'n[s#zQ?WC1!Vײ՘(%E1U?W%%9fJ;on TdDx7fJ7O4ŭv0ؔL:8qUL[Ogv+J\٤ZO3_AIqr$2쇑aZDGz{$=GxѽgU"ȱX $μШATa"=>kP^4mJ*>JlPk,^Y0o0QCt-3PDCKE? 5| V jSS.уqZMCJg㝓| {$ !hUN-QĊ`oL,r{!װ`6L;%JEDMU!~Gt)DgFyWXRxOv+'%pԳ$tV/ Y"Y :{VL3H^Q8Ta `Ji?)~=ٲ!rY gK%41)#?JvPt`vkZk:ȸl,kPxuUչZ4kٞd*Oyeϑ"_Zes:j`AD!iW|iRj03!(B 3y}6E|fAAKx~μ!׮p/TqĘ ~QGkg*l[Sg@4_0BLfΖWSO2Ld=_,Y^G)a4`MCrb{I)^Y!"z75BdϠq=[gՎBU?>xea*CCRҼRK3,iBe<FBaf~=xL+βelb'fu-Ig7ZCesOz MYq7 ʴv2́2 H8iwC D hϏpj!ےmދ2RfȽ=5zO͒m 8ΘYtkO=|—PA\`KsY3<:>*ggz[2EHau0:_rD)'5]N:q1C!(:ӥ.Rوd52=|BH)2jޛR&Xc ä?ݳ5("(9 ֹG!<1ulkV$Zu1"hU(k#yP1*}LF\DSR/1ڪmcA9% D,i!G6?@&94=` lC#Cr`YKҋ=>|ߐ߳<'sUszaYd̛ /g2jDp@` r~ZDp="yTPN >^BKGfDS5vbv eVoRCQ3b(xU$ѺV3"Q.!hJ_nuIWwn?w ʛ),@WmnΜer1_@὇Uǯ?WD"6EK {zc6TG<42s1_l>*n&ou\C:XsP!{s y/4U˞bgNfLg#˲?ftG ۭA·}MvB0C?8p9n(c.`/dyl|*D>T z]?s"aC墧+U3XPMh"QL\^wy#ݍɧ XtU/@ W4Eс!Ď,,gSO`lhaI.doiDIB+eFysw1piJmf@?[P2nZGmt%h%#`G&z][ʈa)/llwhhq} !9_^ե8 ~6NiF#Zk b-"Qyz c|tI4;3iZ\cӤ2֤L[^FŒWBZqw 0#s*%Jhdo7#/"fוAڿ}]lj!ء$"CU }!cGX|W4YS l(]^۠2|DiKË? 56ey PܙSvĩP|`Zfox]hGܸÕZvmii7j7^`ݔKQR5hMc ;rkl"HRq6*gH.>k0JK"#P[Md+GqlHQ$Eyd( ~ly`/5 B)!<}{05M[sGiCu"ژ|(tv_Ɉ LMz..:xp\̇L޳y: \}/[pjʽ\4:J9DW\Dz@d$Q5LWEX^7S"EVMgv$WI :KkXKCmG0dU>u#SCixIr7IU=I<ܖB\)UYTi?wbhyr.jrѵ}̲}}όEK;ݕtȝQ٬6#y'wr]T}+",M[XT7iۓ^^:Agqr`$tudfH>չ] ?sjV|҆c_n119//X4Du"MAs]7'/kՐuS0Taߍ(ᢉgx~\y S>]îW}ǀO֜\p!gF` PS4 IcX0-N` zdntcW,Yѡ4T=uNM?_>e=! Lp{Ǽq䯐v e.‚Ju!>Q_9g:Iqƙ oC0t ߚb%PlRk %Ē!U;C[Us}՜4t4 B+6VdE=~&-}:~i'tӔei!k0#d{f` J|ܭ +Qlx.v=G`99HG.o䅭®BCv7 "Oͭ/ye#w 6>.S5 Ǝ<('uחei X>F +{x)Z&'!S8ƀrdlfcBSP^ } yg@I>2!jn5 Έט#,]Zh ^mracLD8Q@g<#K `uemcXK^)N\$.NR4);\noH/#`}(ox;p9n'7Ϋc6D F?Jbt_Yyf*V;b]9Eۖ gl܎HF@ Gw0˻KNH Xo$]G:g*˜Uݱ|±#nJtQ/Fr% T."#Ѻ4۷m{g1b5V;xgp| 7[Nϴs~'?flFm3Fx3R[Ȱu|lf:n\Y~w[iƆBbXf,%mŮdǞ`DƬRImG*zðް L8rHrr.V|MdPu89A \S;|g/<ߴ? 
%Y{C n(/?k-2s٤t="wP_,И9R_s ,~0ڛEC/D }ȾU ُ`cۙ K[{NU-iZ MO9$JPU@a[2]ϲ :qFļԫDGjDѲwl]_l)w[[P:f<#@Lt_C}f9i( <^H[:ZԴk#)tWnj#_1S- 㰐%V6->{@(cd0( ذL>ꩡ.TRl9 Qſ_WpөRs,UFƇ9+$3A+ۣt F! y܄jIև@qX=UZ 1 iЦl>}^n+TndjD Qf1LLͿv"ZGL]]VBoy[9oPIv -ZovМN<*jȫ/#<YTIĀⳫIr+yndb9IP5shњ 䜢.s * ==ts4H '~r4Jb#URTҡo༌rÏ<s"E$s55Nq`U'E'j̕+jX?3 yTwz43Jl3Tb-'PڋǙȸK'k?.$ (こ^ئGZftrbrzʧV haT,ϭHcy>,=(MsPmBU6>F}oyDF*IuorPKVh䧋GUMB -W\*_~KؐU\]D 3p$( PvrFeW 9b71\c>? qК}Ϗzިz1RLm] YiVH!b_0-=H~8J(~a#l/UCc"ŸLN%l*G;J&o;7U^\~jʛMnErla.`Umث%R8OW˩D3ZсRPIlYA]=do.ԪD t 7ӺmEd',y*~kfj0Ͼϧ]`WinJ'w<_g>~tG:TF)\SzIT_%ޭq6i#)O|cPdK*+%}yqJ< ϐ;(ZvVDx=M}Bo HPC'pV6ے_`ܐf:`dS񶦛dڰ&Cx)X"U,MO Y\L{>BH m$u@+aN7kWJ`٠R +.#i|="wo)5T$Dv%*pی-k*iAQBMN%Ivgu)-I?JGr6ċNw#cI~Q !<}i5 5jOU3CZUvBgVm׊ H\h.rEE9EhpܧX}oΚА)Tx rP^h'%OXrn~%WGH6z2Pe06@Ze /YjdM>򮺹rA sMݩrMQt0iҐAUm&86UQ}L56P}iݏ^:b-wWufgY3B{,n[dcgҌ l|7yUPt9,3ܪ`_fуx('註+XHV|*KdT32{Kos^-Cڋ/ƦS7`b 0OɎY>@G@/<@ K8.,N%- 4lsfM~`&8_~per,_ `KroP<۰X8r*HM7|Hq,Ftr,;]%w!!k R\eħ:|/xr(T9b\ IJ,ǧKq]KHZvxY J#>DMu3ݽD [u#|X9)U {0c*jцj!q6Qvȉ'$T=q'3eBhm ofd'Rvs]Ad:A%,mJ㔻Ue~SjӇ `FsIGlW=9'f2iWOU;rQ952 kh+wfcrU.dQGSA%s۽$uÐvێo\h$]>fqf^8㮜# UvP l牠 h|$ 6;QP UkV,O~\ǀ;^j?ùĬi*hWOsPnWp8G^)V0!ocb?dSP 6ȐE Q@'ܓ1/`/6YK,nP悉7^Ȟpk^M Rj+ ≠Gdqy_q$"&&-%v" JIeݺ`lfZ<^ϪXp3o>*ydFG5u㑣CS{o"KQЗ?=ayA'h=~(Mi{L?UyQtj'#mɿKKsՁޔx=.N6ctq}/Ҷ%l%a|qQ=ڄmBݿܝl"%g -(\$lӨrYcSA@ϼMm>\(̹/SxɞSU{.<Λr.R5^IE3.q"-Q5ܬ.vWEQ\oso+ d1e]+HV8ERΘZ^Ńc< 90i{,ul6;bI=QTϓ "xEP|̖H'@n{㣈UpwǹUq".5$PbV4#bTMVӀ(K\-x Z#[becJܭ|i{hF}/N>-Us6ɜʹyݤcGz}sе8?y;52bq5'S龿1!]/f/NJ;T]D.|v#j^<Դ 4YٹyK ae lmt#w.xmq,0U8 gͥ0 D1@}!v@>פ b0\d Lct=QIAȡfRK|{`?2(G~=k&mN?J? l;/Ӟ  `ݠuk ]T amvb~[.M}m;B]vzUִ{;HpMa"}DVs{FrtL<_&m%{q'Xp")I&D^_DfEfHk>s{n8uY'ZZw9bf'8-H ǡN(T*{&ZqlTl֯hS-y6b~.wOv_WiVRڹ+H|:qHl*r˧/vZLC p|1l>{[=ҤXC_T0oh\?떕R{r%90td^\6q`!$;31cq>+H6oo޹Y*EB|,nMȅc0D$uZhRW` _??{br;njt'o;jMoS]tuHA{oL8(Ygc92ՖY4qHUVO"V(}!`-SJt2?RuwOweb7@sɬrb! ֋p:a才d/T"]ҍ<ݐ/nԯl@-KMsC? 
mEXjƤG%{3UY^Qs'|kBK&*dB}(A˫ԱGMɳY4k*~%j龓'i )xO KsX5зRfs4 z~+l౒pE &ϻp<6QL",$(Xa2wU Ơ|5njjڶ:%ˣ(JmMԕ a9M&"D䊕-ն쌞-7}$fk4D_(p+MDll5R'Pǂ'uI_ ?d9S8<\p~e"|ez6< 堰x3Yh)ɤ2O_B~flB{w`yt@o3rthGv^L9qحTy/ ]g=hɑer7 <|5}pӌ;3N0O'6Uyo'e^i`*2  vra[Qh!K@52HpLj u%wf'p.bݺ!ᑫk,D9;bȮ=쩡9%~1|wnkpZ_uR+0"RcdMFĘ(2F3U9# ]wԴaf?Ƭ";RNLwR):Lv988~b8K&Y`E[>jї CĚq{e||KO0$!tdoy Q ?`VC%'Q\d3ÿnC[Pc >PqETA~ؘ2?s.Tڗpz2U$B@Ve{6/|82, OM sE+#nR# dfBw2X%5iQVu\έLkIKR%?ճGjb=GX{A 7ja[E=6 )sg7S٫!b29LgZ4ۥ(ӱdU}jK{R"@ WoOa诫uG֔6_IN1,<,znm>ёw.oAOcVT$DRFY׽<!o/듡o'Uzڬ@6*;lĈ48>2&L_\"}Y~6C_R1rhػ^X$&q_Cs]iY^a@c ͷW|.f!ċ'z mYLj^\o dVZϳӣX36J,{ih,H# Ƶ׆x߫?2x B#x wiu&-\ zr-sd,\a/OIA򑊀ZKUD<cYG}EIfYvz ksm8v d@0(҄Q`ۏA<*w9ޥ`vfm@ZM/^;1b2-AA\Z jjz&xR ɾCA|/C1=$7t}OՊ=) bɤD.[U^/t;DNR[k)E 6 UqPܸ<j|$-̋gQ 6iB?f~&L,|}GSjY~phw'(pFP^w@;m@kS׽@d&6[.=P-%9n_ihٶ̤C@ܡ4܎)Âc>E6*4EEaPhX:JH+t4e|Q28> Mc^G{-nm)%ܶwe[4łA7<,2(?e+ ĘoY)2? PF 9.;y=UY 7.D[dfm8Ԩ=x{`4=RtK{f{&^ٟfa]4sBtv9FmOv3MՂT% zTm5Qxay8<;#]]BFͼA San0ett*AED.k+#2!qOS)sąFpک x#{T1aСA Li!33dÂZ| MHJ_ ht@_<@J&w. D{uEcXvà P|űm*h3!/BM@~j+U⛤svHcV7=j) :!SP~!"|E)BPI(ޮOFXF'z eh;5kjԶJmbH33c3C 3Q'uo2eVxjɼ/}ˀQh,`p'( Nn:ҍi Fd]2wJrW(nkkMk6Z|N 6&t4prO#<,!ˢo/U{^?y) E{/huUho褼ֹ@OR5B`B7zu3tp뙭FԒHc2e:sIeЁ<#n]R2LL%"۱D&S e |b">4ҝN@h)d /*ߗ\+>o 7UX=i7E%a:ww,grtb0C=w#"-}t 7U=IlWy'iL0ЇcPAC9b sVynXӇN78[z`}XoKpځq⺧VMCC1u\<иr+*ƓՋx> fias\}slCn-gt wf rYWL֧J\ͧoۜD1,R r꿕4m /Vx5ұ 9 d 3.k#U"e;1(C$Z .k撷5O GϩUff탫bq7{ %VluO WYx֛7$8*beM J04wOE>%N`<%ɇi_1j5䍮2 ii.6 EjgJ-x+iza+:Dh`D8Tmf$ [$jwɩfkO:fԎ8Jjo1ӽ_C&nOVHkl}7E3c-˄zәDK; PQZfW)Bbk"xؼ" T!|%^N'KEe|`3Au *7ׁ|?\@̖HU3@U!0;9&-:4  utn+i&ZPV_Ȫ5ߑAki>,^m&8㚈׶PD6HZH_|IиSGB))Rm4g~PP*" [' \m"E%8B1z+jb ׷XYLi{smRf6eBTT}I%YX a"7nMWlBPaUYLߝdcK5p3|hQuZjSW}U$U7dքLm3!m_u4 洊H*{󺓚n ot|>Ϝԋ!- }5awH5Dy:6js~Hwd|`\DqO< a$LyոoQ"׃`-[҂QegHq]+MTrv>QYؘn? 
I!hһ)9iKa @D|`hlAƅ!NLGMoP$pqKlV] @X E$h^^W<ߐbB g= KƱ^R_ݚK@ʗ.U?ze&W<3)TY :jq;#6eP *;KǶ0#ϥU"PS1)k( zlʙ51$9o݌D/ CjnqS[Cڠ Jy$a>\hFLTWCvodV: w7 o\W.\ L_C3+Rw]$ YiCV/C襹L2-&p&(ڔ> d,E j29}c>ygx,i͊=29qGݢ)Z+׆ANm#J01zQmT=C; O'w=$ #psoʆ$Ƨ)b~;,3ؖ5xv=DߤҖ'9ڗBmQhSeLJh㕣%7`^uڱdI#03 - mRfMtn "Q(Ǘ%NkWJ@O0t @'C a=w"7[_#Jxtz@9z߿&YԤqܲ}(lxU|Q"e)'s]JmFW@/ݬ6$~ "z$G i]D.GӵDV-/$W ̕.N4mbC;zZBSM42mCZ{x~GE' 8&~tav16ZB3v/8w[=)&'̧~B"aTB"m_>#-b!Vrl~4+VH8C|茱j cΤHxT%Y0}Nc[0ifALWBj܉Ywgi'i^m"m2ҠJ"-M: ̅}z%5*^DzUJ7kt2⛧D+FR OIwZ^?J#scJ4!](5Uq~EDG)N̉\C[eQνZ*U{ITTuUBxHomV!YJzktncqba {,Ľ2WdG-=\#&ǤŭgѽGr؎q!lgɮ{oDU彖L1Yw+Wb<BuuSt%3vz5^ưoe aWaYdHڮ#-"";*eǜ@?mڤ<7 0.뮏4 D&Tm=l*pK8IT̍*nβ v Ap|My*p596$YbZ(Nj`7M tqT"r) \J)bQO |PWr'( /Ln;oM(~f-}l{U4S+Eϥs(_[`! Ctn: tWDȏ!v+JqY5n*ϐcܝԱG78n3s!T?,Y!A0"MC|@pt#,P!c҉M3 iQ( @)]V.K`R8Wh@]("2VaO5٤+}p/l/V]= ,9hQCki;OphHfkX+VPEJ) k 31w0k^Ul]]kvVpj;g*"9-_+7ҌeDK솤-1%Ez;Cv'sQJ($zB_zG>idC6)HWmGBdۣ8Lm!/1= ,wUa)10ByXM4S'5j sh…2Zz She"KE_V?ߠT z#9G t}gؕ68'@!mޖsFcLsi#}\i58<1Z뵯6 Lf^1,]&Ǥj.</DGqm1!ݨ6^ԇaȯeY[yiG}2L?Pr>,GaǬ+;:Lox_WDY RZOb9?]G99)O))rZIynFYyp:} Art@㭼XwQI yJe01PAy=~& 8%$fH'{4-:a8ԪI?e{tþ0_nf8d Tq|R2Z-T7ޑeO>D8x176!gznO%}mcG g)}L) 6oYpi}Նx{FʺqM f@䣩VOX(2twzP9 :pTwtowVhk]!Şr]{Nv,g[k׺$| 1d=5Y}Ip^ 2Fy 9B]іl{AraDt&a3 Q0YXt^H5Z{2Af˶5\@_[))haP:$yv0BO%ٰL6ʔةnu ^S{8c|%u4P=o- *HfVDR=ǓDZYӥ9pV 47F<9OX%=$`ke1<ªť@v&cAR 5mL ٖv0XbnmRwhD[N/9L0l$#] Nrgg/ E!j.F+pں!))l8cjD[9NOovB+Ҙf6: K >vK>>`tꑔ5+>}8wXo.&Xj9߁2MVgNw_ߚR(" ? 
}}a e,A kKI/}/|ꛭk6~ {a!i9nLxkaL+ IK\B1GgrႯ0ap.țuf^V f.l g:FnA8t"BZ3bnJ.UXgk*WKBC NP~]2gZ&>yG+~ &p^:GG7P&[FH|ST^SCP$QQ)ѱBoaVo{ֽɤҍ4iy>.elJ] xѓt *BA'lh'e.Tw&#SuW Xl0`\ŷ:f҉87#:4`IMdžGxcksGPhƖ4Us;u05o)2龚F; 2Ꝺ[4;f2IN"mm"=bZ^wc <3+E=A`^g8=M* l2҅|$eZ&pG8SNV gyq]ۆf 9򁺼3b܆ˍ?yp vG hFV /@(?"pk~wiwQI"> kc{.&JuFaҿmiYs}}-Lchs"ސdrdz€Jdv@,&&f 0&%~-Zm]^$U.={pF) bht ̶ģ应/{J8|#=zɻo(W`$dNqhc>ghyЊߙ^(߸@ΟC44, 9ТUJN'ef]R[뚚pӣ'8T- eQuGޥJE7XM‚D)0 ] {]qg!Q\?i-{\?}'/C`2Zy*oYP*d> Ԇ%t/֩`-Z쐯Gx19 pa FYev\%17!DOt vb~+V%5A  >iJC$8#{P(ֿnyWrøʠ ߲FM VS_W \'461Ewz igP&~PQoUzy9jV87fos~˕* ݷOo/*p3 v##QCk_MuNDY3S-+;lNҨ"ٴ 02Q7XT1$:IWX"q^` -}HqDܠH͛l,nuO"Ӑt% C!06;g'5}ߌTj0ݘx%|ƈ[bCbRּ&a4mq@Lv͝\ホEUgSҞ,;P erz|Xg3i{vL1=8 `Jo~Vjyu3.vIJ#v+RQi3j7DV5)2 UPB+=_¾{ [!pJ$w GGyY~hM]-}h*@Af|_?r6DEy=e20GOö?K5HT?np.QK;^jSp՛X!JBx[1Z꥚4GPUxrj>ק%lM AUGC6ˇHJDfx`[! v Mhyu* bgߞ]&aE l,̶k.i' җKHa/Jj DPpH'j#4dG!a0]薠06zgz7}w I qKl&>6:bu$ďK#K3TB ([Мr7(bI=@j{/*Hu^`[I+B@-*kl0:?".~5/(K?p0HpA&z;A t0|aj' ]Dh}*yQ^n F}^6l4 Bл$ \t Í7kNˑcʺJOHŒJk%wb1("8L`n`nE7[Ń) &%f+|JNKMΑ ~8{'h_ ['#rd.V:Wwх w2;,i`=81ъv v͸bD X%&!`Gś0B< Q]UznX@h3{*wkxDx2I:"'ܠ+*X5/{tFvH1cW+nZEA??,uP0Bc+v6c`5ܨ5/eϨDCbxnf%DB;:r$4 t/UQä ҏds>s#Kޞ;{#dddzl=KPsl=Xxc@$Ġu\ pIiw;A'n8 :aA*Vgcdan*s2k ρQBEЁ\jnڷWpe臭,h,ߵvTB5 \lۖG2NY7Eo|ls1qJCZR-k﵎%8|FіتB >N+aDkH߁'sfN0L9zFaQ6EkN}^-\.z(KuP=GinI0w ? 
$pqʖW,dvoՄl2ڄ5:z7/ZXӵ͍PDeraD.I|ݞ/| <, ъ-Dm!b^ԛaG^Wӗ}M琭@=x8Wes&oƥg]7, P_F&[gy}771DR22~s ~էВӛרw=?fg05/"W zn| O=Z)6԰C*"}tY }*`,RjoܵLۦ>=V]7 4?[UsFj=MV!nP2R'E_/Sɂ<_z.͒K&'8yǹ "K4Jq k~vHir*'rlÝ$f}YR>ix35}D}iYȏ@ B36RHƁf1J>ɐ.9eV#Un2[HUCpd*T~3#S0 +ó^24{}O^gKdGcGL[v:պqfZv ,hI1׊е6/8lGv62-">EYA &̗дr֋+b.=xL#+ҒpK"z%'Db]oϟU.S#$p QF pʌ"G;'%kp^WC挃(:QD>Tcje ZYic Ϭr3Ѡi^N۔F6:_J9KHmrLڊkm^tK z];5:kp.)AQDMp}f0W\=lݽv)Aq7YW sPչ: vPս׫N8z' }!^׉{:@,tP~h#l6[$6VNgfIL{u+hu>S59=2$K/8xbZ83fwO ճJH/E;'z#E~cl hW=we^Tqgq^ݾKF6kO/ -邁Yhnp05zm=%._ê1\xS3 /=ut]Bhc47KM"vl9GV.35`>q oK_:uɐRMp QHYVt>{ߴE7lX91hbCWfHg6'AHZ<0AB3Ƭ"aBnHba u0M[GH7FFNrݪӼhU:M`/_\E@3Jmd }[󂔪YlS8VrVr+o^^?xfe T5ƮFP [fruv KrD 1G k{:z7oll~3ԊSlWbrNT HݕWǗP~hMGgPz1ϝ)'*Jcr`2=?e2 Yh mWQɦěbu}-bHُh4wg/6n.*.`2&N_OJ?P'Y^Ae3)Ea=eKvESJ,Z VO8AK F0“h@`jIiNFxTT/1!zsh}; i0'Sg)AWfON/򺷥W ^.'׬p6"Vm.t2ؖz}H Aq6NDQw[LSG{#59 '|=ƚd{$/'YzG(_am |O35z9kg9b M#՞fg968 0v\bչFם8h٣̙9)QNm[DcX:́sM"yy t>4.摦NDu: )%NzɛrQN! 3VQET}l#ӵHfLmž%ެqO4n1fR̦ʫPTw֭Qj1G!Ͻmq +Yg=f6U3 Z~)+,5 u<[?Q6HNr5fH4x?ޛ6KtdJBE!HY(_s5EJa]+D˱ýcjb ty U V |OhxYZ6ē0B@ \^qc>b\K,{%Jf̼5lqԛ=*kDG._U31\6㚰)5w bg%=`p 8&-"4t22$uu?K,$ "%XީAW͸-Qs86٧m[Ą:k zO Bd \]rSdatb f6 uXF(>hQ<3*˚]_mbA$+-pĩKy\]{?9dPt::ʬ;!% =/E抮!UV"Ȯ~eXm{Y?X7DGl8fpDžu-T bN6+bTM*Wa%4F(8xZP,G*ˋ}k |^F!Kjg!|}o{$qG,'>/.+oka+Am{P6[*Ke.D'Wz2ܤB/ Gʩ|.Ј}vл&Es*Ȱʸ09I{Of邏b PVP?Q4m2ѥZ0w0ȐúqZUpG+n&lc$gW\hf]ch&h-\Lw&AJGwF!"7Ih}twg[g,gҧFDW7SEc(t2 t$-gKǓ]m/`Y32R/d}*)K o,:1r 51ifBHV\&Y}doQS`=dW*oYx- H$(!l Li5 d8i^"KBjh 9D(JʡTPψMڼ^6G6 ٶDO(vW?@(~eNϐė_U_C{,j͡ Ufxp$n~uGA^+e,/1uЩ'De vg>Rݱ ě8Șcɖ b(' bl{ܛdSX$ݕ☑ffk:# ˆ=AkP=W j8+f%+ rCϽ:u;xQ#.F]u# 0QyOڔyd[u !wpV i39~$4܈D=aT%,SAq_G`Э, tS0[fOMuu ";f%3.eB_z _* 0"t:Ӝ,l?o [Pƨ I|2O[Ts:ﴘՁ37X63 u"ҫC%3>~Ss(08" /_A>moj %z8̼wTٰAd^]TFGJ3c^~d'{Xg5kS %G@^Q޵iIm1l}(,? 5Zhxw@_3ƌ]{0/l+zt5@IHt0zT$A7W11-`_`g8O\WK>T)1 <g/mToȫq.tZ6"^M7] O^t ְG$ֵקly߻Alm?0Ѥ.:U\ϓP0'MmMkc[..Rb#9:NB%" ÇG^d-^Џ="УII Hw}LXݳuL%9`6l݉Ww1ŻPMd(8HB9]ZZ(ϑz[DZ:{8l((/rWe17 6c kO]cKs]z~-g -1'TA)f W GHeT.F6x,/X ][E{dJQ-ji8.8M2HG,vR:ϛ'-!Vpp_쓱jBnl4o@KMϓ6WluPWݱeJ0)ϓoeNu9دSV0 QnZ_7-`n1ɩ ʐPw! 
#Gb@|hww^ 0A@r .d|k` 7t";͐oqZ]{kx"HĆy{P "T3CpExxhj:ag$*qCe޿96"F`:)m'T 7̌Q1˾`J0#L!Q'lᔃ%X k{-Z_Kg6ۖ.#I0`.A_,,jA۵C$|?'XvEaA^Isݜ'iY6V\njTr|){fNBbrycp&g<Ԗttz?JOkaVet*/6)j$M'X"ETA0N x+B^lYIe FfwX2Ɩ97IvۢS`*9GFcx1Xő_oz2WaSm>9/w *}qN,쁛*9<ȫxHQebvDoqfSA]i@n7HE0㍈ҩq]1(\1yRU@-|z,x޴Q}3G4{%ހ[.[F.YN,Tj) XafpKzl(?@<׻W7F, TuȀ(0(fIb+2>[ׅ S: QƢ1.xRVK1 ȱ G)u|U 14j3ɪ{`xBo@hL{AL*'J9z3}4fV01-S4tK]u]h]}uPAaY>8X\5 uM -~rIk$M%W"J /dQ_B51&O4RB\y\? (ODCjU4P=,PxDOĆC?gX'8G\2ΘX#g% VSG$)qBPIքDJ*;qé>\vFEw}D!h' Ɓކ"ްOkq'ȸdcL)޾hDnIEo)/ Fɥ **(0ޟ.BK(OXxӟgoc+h= X]P5߸=1{.4MbcSyq&a8>WU(BVggW1 3uB)QmJՂ`R3aFks%i[QrP(z%c-ZIJ 0/1!fSO\מoufB A ]0{s'C9cޮLBgوִ/=AB\#ʒ!ùbiWѬ.Zc+ -}H ᾂsH`9R^Ku*٬Ȳ j&;WTi| E7DɖOgUQ sb!1RV0!j6z)n4]bǜ98mOm1%b) Vv /jrDDPVDPy:(SȁP !ơ/L Qѡ50!u,][tQ "+ƿ/iy[pxSZ'A axhǬ/qDZ.zXtCGMD` WeyL/| Ic!Hq-ZĚVQ!0Q}5+ Ҹ絗U zmr`:a#>@B,ӻJWOh8ħ6LS@ښmވ VL_cphHѴznynOrm< _39<ˆ4J0D/=\z%\cfvzG ֧ 9{^#q'ƂS$wN*7=܋Q9(K'M]i5%8\"YjS47J4Ib Lᬺ A]\ 4<)ة` eЌ4&¶R%.9ً Ha_ Үt mćbY0d7 |x̃tΥU69]8ޢJ)ȱ!?k[_##$ Rs@_WlԬEȒd]9^j76ȜM?vRXU8@R6H6+K2GQE6 |sn^H2 {^DJĴcS)p|J&[s 8zSYP6#Z;s8guU>b$˦Ҳ%:&v'ȜBQUFehdm@$ъ:eZ5]Tk}X/Yb߿ԗPSOa(ETf =MD!]vU(`)tqs6m濻6Ӡ?8>3 :<$Q c<zLan>pxL}~M-*b{L0B{+ˊtKu8bwBQy.:dU-d:}9UjƦ7|[EÒl2@ɏT|kTP7>@'0ݡ$*9)9w9:=cݑ//4I,ˣ ©qɟ|b`dVӪ,1}q+nSŤM\CSFg,hEb ycq7 AχEEn[\UT- LC6Hjۑ uA4cJ_hKEMb!t.waEI!oo搞ι<XV2[AʕXqxƸF;ϞSGSwnF,upeT{R vűz1>)Sӱ&N"yUeAR wĚ$-o8BZyZ-Ne !)*%~nkw MtbճLPP.Eu ?4RtM!R@>R©`E MRxn%Taw=-`o-N 5? 
4B7uGw{ZuNlt 94+QM:^ם ;RqqŲ䳎#_S]iU*Ӣ7,u Uyv'6V?B1;/FU!ZAz:BI0hNg׸>^F;x6p{~q6vVdЋf;yNy|i5RSB|ř6'{ WX<.@ە!lXjQ+H@s_^@ov T""qM^ Wī7g",ahs[4s$'w/7 3tA;\(t{I+-)6zЋ=\SYGDe,0$m2"30pBh6_u;"$v6&W&L2KTl$'U LI[1lUl'/RRgAYX ,#=L^ޞ{Di'-3ā;<ݤ~@a}_$9ۣrk\a\B~ڬiNJ uKYTbTmPis#ď"}, MxT'Bں@Ա pa"f6:5 -j hSwxy#{GlzDbZC"P(Nxrf[b9$?qLc-c 9t_FJvh4d Mn׭0dv>_oRRAOt?{ )mI3Mn_֧VT*XaݴUX+H̽`(jʲ32-NljFҊzi䛒H3-ŃKx(5 0L8Y89|{^KwŪ&8S/g+>M&!o/soi{g M=-Lx|Ϙ^@ĎKr@lu1ǟV/<;H;S{SsA*l3KN)m72$1VO;ϧTBS6)C&<qE>3rxW)*`,a6K,ΰDϤS%߹ir?t ^Bl:!Jv/ {&sʀJvXXnpAyiջid5%׳m ] i*@ÑH4ŴcOzpూ!"NA7ﻥ`9J|.¨a| YEscْKGAsy&H΄ gt)FepANVګ•GBШ*M+ ѿg4@u0h&L8ΥM` +G錞.`?AxO@)h_*J)q$Cg826"-z}#`S'NB7IEhGzkX\p)mA"E8{EGLIf۝km Lz^E[FZ5h4DWГ)$=s+#tP! ]/JW'2p +G MlV\IԞ~,ttO9[FĴ9},0Etޫi,x7yb_s#,z@lZI/[|ڄt B8!'E{D'VvG݉1No}/l!+kc<(HTi8?|̮K%ZJ* \f׺R댻5{ڬ6`*P-;Q(G/a[1;s68+le*=fJI[RՑ{NK>"_{ boQ?uG fg 2K6Qm<E]E7865-F>$.d"Xٖ~9WrAT]qsәDfu2}5G>M%6C¶؂s ֮ZzF☻W59@ ,=*?j1}蓽=w,Un^69eR[4=ډ&u 3'Bq^#ַutQ ea꿴Y@Z2Wj`)T X1cZAw&gwϘ\(i\!yAba(4~f`ܡ8/3NT:[]%n&W {/4k@J7 !+< NycT$F\]P~+sPyG#_3(żO4F,kɊNːaL:3Nh|XQWEo}>D(w'_A> 68:(Ɋ@QI쀎pvNJ ڪ7hױ_7ϥyd,2n]8PM*7DnjBUsq$P U[- 3 {ཎ '\TF!/譌5P mxGOy_#Uٮ_lI2Q74^~BR^Q1KzygS޹}*Kq.sYħ9+|9i&g:uWNV =ev^lrDK7_e"ɈjZ: dN 'Jj-)d\Ӻ#wSGF󾑷|6zWR# gY#'¬QZ˿xGus}ɦlJ:ޗCwbp\(f;4rGAʑ=2ki]sAťlc<6՜,DD?[v.(p-mmA8.[ꀰK0ǃPL;K/V8 OsϻXb'1)dv8Ӥ{Qo\&S:ڂ]#8}$2&Nd[;ş+IJgsEH[ޝD|P>&$P/(bؕ6oPJsHcϢ7!rX:i1PmT&t(mUrTnLBiiyF:m7/P?G7b56ڭe=A# fG= H&'<(w DIJ`* E an,V&]mnD`k#fBW4[-O7a׀ L/uڿAM{j5hla& `?b $۴j`ԘBmC\3(}k11)~2l\zitwq5kDߪdc' oۛʄɤf8m,ЯٺDDPL!k2@R; O5R(!e-eڤۼR^%Mzv-tIXs, M88m,!iȤJ"mI>\=Gٍ-sZ{fZ&{Mߤ|uE͂L2WϸIyo2`fX^[ Za;_ۣqDti|fu ÀQE71yCERg@{YFB|qPA¾%|ƶgDr uZ=qk_[@9tO4مtJk2):K%dlcGZj>;j{фxb KH@R\n0ZDmQLD : {q~A)r&ېۑڊx: ?ƛ7ަϔ mS\$%NoV^u$ü>2W=6 }>8%;Q.] ⡓E> #AK{_ F,q/5…u!h@J$e0h(2FRFwg?$junĥ,^|[mܞEœAql;V9 0E=!ܿjse_.{s7Exu(Hj#Q1c&x45kÄ8tknDN*•zϽԲm4ؤ0KaT$p1P#16i̳$?+3߸E=HgYIkߝѥ}#x`]HTb\7V :|yE & !gi? AA:|^p 2?GZ2޵GfkoE2F]8Uh.;vMڇ:)U˝rmY1欴{k:XǺީfn-Z;po1Sa?+ᱵWTrr*0Q~%BsPj& o*c]C= =k[#⺬FĂ gή%t_z)탏`?ׯ=iW?KEbo?# ڶ>ҩXVn`b,H';QfuۦkW@r$6eRV[9 IqYnz?Zb@bDr g)/>P꾟OU-.A6. 
K3#ˆgrW#YWHb0s#;Y9Gܣ{gߌedMD%DXd 1芃NqF)|D5c숧5ǝ4g}t0['k `s,!!UH[aے{HS ~0lO쌋5<=Z'Q Jۢw@5U#;S"_$us 7'< aA`-1N1=g uX0@~M5w49)tJku=K5&f蔛d}^Lg^MßXb30w& 3ZN X 78& ePV|#I2Pp~pC?/XO8kk]!E N5e b'D>nM5>QeܛZ 1Z5WYF:j$@yq[6dHXMfSyq@d\H^(`du v:\_{y4!?ݕ-D ˰\/LENˋVnsS.k!3a4lEi0-ZDFDF`ns #+Ag@^cngFw0?7rYY.|'+ tx&&nSJ- ih(/L[uC EȐן7x>q]e_@WW3$6gm=8WUe&nx:p#pAj5gpv٥'~=S0'.{&K#`{cY\0\~ҙVF,IW~]mR5*W)ghxOu.dՋG!av[y `x>պ@H I_T,eĄNZVmq-bt_$ y̮fI` Ô1QtɐB)vï9^- -.g@Dax8rr kR#!KqmaH #LE36rGUr!ɉ@Tѵf$TaHApGi*&z'Pc7<+:#C[uj"ɭj̓pB5 ogJ1P|լ"gUh,lK.Ce8JV[EWr06l؉r+NAGy#r"9Xs`| A@ RK~[HSsGfL]FC5U^@ٟޏ" ~SdF{|gvq W;b&߄jޱoelvvYr辶 T/wX>WjۙtĻ-YTJNP'Y1HjǣY=?"u| :)uKa-!E-* 3L֦R~!CVHfqJU`k*`6&mt0eɁ$8/ ]d`7B؇\ }Y +]ziY҂#rނ4EG S##VEÑ4RW\!MS`{ofXN:QyZ9WJ|(w&}!DYEQn:Mllw.0ozhWWP휡j8[ :v3ePSV$v U:srrrar} Vg=? +u h9%&w= eQE=oRuNvXhZ-Ä@ѬFf<zNucϑA)qU1k3,'.bw>6w@4ӎ0黨EW J;bu/T}J;+جA~ќxӅmdF_ {B%EUyz5)J94_#lrfb-md~Sm3a2߰et']5SފwK_||A|0--%S@؅gn%,ZND=]-rlR?z~(C w7?;T<ȷt]X=N}S[ԊO ASN¾zqIy {{']kC*x#B%- Wyr _o r9}`A޹5BÒS^m;^-H;\}:w?н9)6 [!U"' I%gqhW1-xR1_#Pøs1p)\㽞|K?`4!&NUb~kEG@h^Glr\),tZXQ94e#CٳR&?cb 1uHy%b{1wQA $Yͧ9"u2hgC԰E6I/`RTCYec)}rDZ/ %ĬDp^s%`ب4򁭀c@jwvw*BDP*l+Se7h-%vFөғ3dQȕ퓋1/%Z^d ewL]U v:KSlŪGh܃jd[pS ebk1|hpwEhtXWlj_A9P4]J ݤ5\KES?l"g :~n~­~掚 l%bDpO/*_af\V蛀!cA$cSռ4geZqq?}:tl @oWqY?p:7FTF`Ȕ@$VOU-')bzf60$ ix^qː=PrXf%+V's)&"OK{ĀJZ1>)l9 tb:1H<婤'†i"ΜKڑY"!%gvQ[Spp; ߎJďϴկ̯=!'ms"[npܯ*XΟ5XE*ХBNpQc/79+>h8ab!_c C ~6_HpJKͣ1jn^ňNTHIR2m8h5("t5Ꙁ/9}h#Ei4U!I>$P %%J!?[zl"꟦Ģ0mxnNpS99'=dߨI]91ɣ|D2i>nX[YFz/c/A[ېOfO9MVwLQ3ʯ(CtQF"n+N/_h'Y9V-t?;d4 ]ǨlYhZ2G~ʗrݿ 0XfHCzq:ְ 6 aL 8xyPU#ū RoBLSn4Jo] 8tJQ5` Ho*‰_g %5EQ/qVZVkٛ&dkCΨ =dF[3#CGav4bRCŔy,0R}6z4U;Q±^{.Q%;4(}?B@n8%ӃLKx1q$+` I6t[Q7P^.6'Dw+s# Ns9  Vs;* -C`eyay`8_\L~/[|EC4>[]]|&j| Ue-bW^_J8L(I눲!i ,IgAI 7PE3=t_ZݽWAfp[B[HȈT&Lvbx*mր,t,# gͦc H]LM坫 ,v=BʛEp͗IN:&W`ܭ+n[,H}̐_ :Y.Ȥ5c`0o{֕ 0ω鸔Et.˚\Όd$qV\ E<՞،p6ąeC{YLV$ąݪs.!b\#4`YG"qN*ctyITh>Cn6x i' L}HlULsڒdKu͌%ȱ#{QuR."bcx6^^}+3,XU3dg{.ur§[ϵ2XKC4=V}'w o!p6qKutNA|2BU&@I_%ο(Mk*-~'b"KrH `UjǤts5I]y d?Ԗ ;bAHGE ~<, "DrbgJ1&hN0W0-XIlpOU|HeuK;'VԌ6 
ct'Lto:8hүLQ;om#~C%]L<Eci\1KQg^}_f4љgX$v1Zw[Oڭ zʤ0>X)o7 <)4ml׸$2fcU(Cx4u' vO%s->W,G#RPoaEclh^عL 6?)wiI[8Uo\8Pm$*Ia , ֲd X]oP_3j"o<)*6~2SfMdоKiy|Xv-v.2$d׈_BH~w0"wZ. 5TiT |Mj/'bP{K& _REȷA<KGl\aLs5YthqENI`~ ]tWmkNʅlus'NE-щ shҷg|fѦY5RZU)Tî:A,Gr.+Ec/e\?`fhGM,ĚHǴ!cB=Zm t B;w2,%w&YW[ gÄ6W}2H#%w\,""m_u =Ese*uzw%^Nlq'{6/Ɓ`|Mi[mD](dft;YEA NT,T=>L_ ":|s39 k2 ӂS|ф4/n5"U7"A,Z$r<ȾmLHȀbWfMVGF_I)/^`y1P!Ao!T\m͵jHLFSDIK^"AW[ʧ7h[h8!R S4A_!6IdIiKJ7[J~yc:"xZKbw@EMMxKˌ[܅7)5XI#m"u.˲&G`Ѡw0sbcbßi*0I{m-߶ڷwhp<8\ue "<֘РVJw|yQռT3]S0&aAKC:JݡHe3,v 7B!_St XJ{s+I >:2X}~m_C 9_R#; ςI)4hxVYeD%ʿU{Ƈ0)aVkEW3y-a `jÆ^yK0E 5; j8"6L9,N~:Bg}ﺂxIg#AsJ6=?f iU6Q-VIIִbsLm*#u/,A^x;Gq$k'B}S n9oN)7T4Y4y FxyyY[KtZa,}nk^,2F#b'^7^v߫LFn!i,8:{ɹb4?Xt1gQܛQ2}AIn YFdT8(qv,۷2"ޛB,hС8;o0V27;J{XfhEE@6JCF[N(aA#ǒF~)xc(DFFݯp(J!zAt'n:4y}6,O:B-FL7EAY>QZ[ "lUh_m jv TAVt#L.x8zw^E!f$'|3n)bA~1Jlo l6qj[vh>bci9b̛]&QF6>cei?߫bN5O0BD##K$f=Vw3ycEG6j7k<zRFJ:?7GA=gaWIWtDꦬ:G,K[VA:+`jZo`I*ݰ"b J7˖XѿLqYG>'o<}PK->G~ۥj%sYO p/uXUy/􆅥#W@??7#c)?wn$TmѡR [[rD'hƊ< ?Hֽ7 2o|_57j︞t~J<;GWG+uБl 9e7UtNQU5842L3{#I@Qt^v-κ8{hފZf,j h( $ȍ3ǘ@W}Zuyn܍VJ"b mݙzy`EG`hY€RZ6iC/nyk067 ⧙{m:0,{u^ wyl]]ԩn=ܥU8f9580{_|?;GD,>Nbн~Ø3fC2\-.љ(;b]uZŮT-m`6,'`n:tK=;#щ7n#Np6ܶ Z2QNsP*Ù=^=%T~F"d9ֽsi w\,ؚ~%f'8ɐ䙳K[iջ9!S"m+_~YHa\i*O 8[#-4]}ԉ%Xtm8_ &nUz#B/#Vi0A%95Gw L7/5bHnGʑ0gz yoD|Tz Cہ4 66УhʈwǠze)tCѥ]ۨ =]sm~QC S2ϴQ>D4~E <β._T%3vwk yB9an+{y"Jmt\il<]H*uUu 1c!usSc{[ulm8?gqwWSˏl.fě 5<85F2%H̸ْ<o+j$g. 
\I0.yF" u.aﻼSsZ׌Dw)bHUêq'Tm?K3bl5^?^ʹ6Q`6h!lrƂ}<rL% a a@iuoشZs+4uCjd1@ շBea{ L+P,0SޞE_-smBvG’nA3?YmeTJ9#OnA~ ޤ/e!q.G%<,pg481)>Ͼ{!K'|j;Fn-`;kRElN4ۯiZ.ÜS|;U6^;;ޢ58&UDTK{z9w*dmWf/Krh\ wde]sǴO%~dG l4t)1$>֢=b*sDDM #Կyl]A`'5Ies#J lYF_ܹW ":TpP2 aDLvWBBm>ѠhT*O?pIub cRt>sPr~?9lzoį` Bi}];YsRa k;F# !ٮ:L7Hv|?U G!t7p(m>ҒU \yxgK!(mg\ʥR˴+4򳖫6t6%{VK(Y_-2u7 %U号najxxO~FTx_;¹lk)ߧa HstԮHDSqb4FD\%o˭"13"HkZ4턌Tzx$iCz3n;T\W}n_+8&|&}CjS;5c~t/^Sm'Z3 #w=IMZ%=^Eghny!AxG ^!'\yrrS6G&+PI'd\x@a>mxyuϦ QL~D|0&NP˅֋4$,,.jq6=˽HFK5u))t US4;B-Da@,W8q<+;BѯJ?7AC W|^l*z{dIXD @ZˊL&gMPE#:-C׉6gl\leRdUq9FqlJ V0l8󁲅WtX2%xҤbe]L4^] a's_|(;RU #5_) &A:*m'@ae*䊻"9TÎg$-#rn1Lvl 59{746P1'09f5KH,ZId#%DE.]ClӂmosUZ|}%H lnM4xZe* CcqD% N٫ƶ)4B3!mWo|X6Ry5 {A' kJu,%|OT6b|+|3-7촻g2xo ␔)T/{QzI,$Z6gHWWxɍ]ٓczF>Ak+S!uz3 _(*nx}Fre=()Nobu 0V;|Qv- _gYd'&;~N3.z 5+_B^STP#eEZ0sJw |Uf)"½Jt ftkitSX89l疩iI"w)c"?Z4bElߝIYbq:kåx57Ts6It~ԝU; .d [;J@v!ijoLbaX5.XLY6KAYeZczlg+u r,iDk3/2f}nhfu~lTV$D+hZgVW]\(WŁ:k ߉|4=j4~d8)f>&yiQ- ]Nf0is_vtODf=k{nGWdQ>N@͵3^ ͫ4Cbu1x;"_^F##J#c0:lԠH U Fh5ó/ 5&t1^B|k0YٶUohqvUn[VCg9Kbygkf.$.:I\xR0~ChO>1aehJ$>]X9 *LCko2Dɢ=d1"ֽ˜'36.=_B' ȭ'\8ѼEtvVS{P1 HY[ͽ+)%|$PԣnߺSWb+%Hc\rT `UhB!/|=PwR$S& 0 (% aOڳv;p>ř&ꆿn?a1Sh9OHc1\3Mz,A yw 3ǝ@"=l;]a- e>8 [o2a)]VodjWv8pplsj\p7#aLσ#YL /,]f m[-, qF>f[;HTv㞭D/?x0oKA^G6`Xi-{W@'3ORܿ=NU~GR(Et{I 4"(ξ)}_wh;\N<}NwAJ']oUyUb=,/5lYQ֭su:uf*tMA3^VZ\N1COrrJųQ,/Y P\@6VaF_IU3)Ksn&VԀ: 1,1;xH3Rs.d|4}hQW=zt /~Hg1:%mͬMHs*^ cdgw Jًl;780?l7 ˅̊R.'AOe\06ofѯ1m5S6a{6O>LG~611҉3:JA{ cY2Pokx-Τ,{~_'LC=6(YFy| t\!a:>`_uƠ?\d|u_< F\VS:c+6oH-DVFcaVI1 ]߲5ǑA޿aӣwۍ-fpT0U7ڹ4P5 @P;eq ަC`kA1j h/D0NTeeve8VO$3-o6ݲƌ[IH\rmXҶbKV¼+uJc`~'Qm^~+An`I(vٜz_7:r_ҜptfRy}g`QrO \FbД˦[jLS9װxAÌˈ(e!شP1Fcۅ"RRU; Enǜd0/S0l ]lny|ǮJ uZ'DŽY:FyH및zCzEґ5@8W<ἆoh@[gqX{wO ؕCD)}2$/y%W򩕄۴[G T!sG+(h f2&W̃۬f'Jа\J-lW4&0lw vr]2A,8hJw,pcWuO5▌G D!͡mn?q^m ;D:|izem69KӞZdGvSN?>oN#fU8$)Cs"m*SfoVuHe{y953u:uCܴkmADfcǦ&k6f.7Oe0O5U㼍Y5<B^[tN=f 6ahO{a/B 3Ä;b4NHI)?[ՉW@C_cyQgYV'tA!5EzZd~qK Em?h&(vpMdPN5DH5h=`>0a nd>M;ޑ/1h}H!Ͷ b'S׶I(O{Y!܈ 8r̡GEr)}7t?2!M.Q%uE ` 87^}' xkIҲOVO 
2vpIqUk,<#MQ]i}Sקp|D/4my c.7mCB*Z/)%{O'GjQ+*0-eǝ:8D5sl Cb8'!i8UL4Z>k `Ba֎4Ex _voWErW2f^<3[nd@}awTorRqREe1*1BEG7'1bvor `|彚NOGskJlxR!u^n33,MVm ǫI0a)-)BUJ8nN߉FTy6[[1/]B p7@wXY!hd r\8ni0bO5Z!ʌ Ӏ^^Tv!(BބMTپ9T`3Ž;{O 4uO ^w 7 X?pg8]I73z_u:¿]I!sټhY*&fL?A*/笁j3Iҋvf(Kr-fJezlE;q3;P{qKJ+-o15[֬} KoOllqm{ˉkɿޚn3\RTþ"K ݖݢª^J5[ǿ`96K䳖޾w uOM]DûL:H.LMix} "TB1u޸ 6 pX " I& 1Ț5HR7cg^1h׭<@{J'Ok` 'f{`E4n}ow^X U4yLO|PCIX_U)G9T`ft&bEmNq;Ǿ䩮=R=n[v^`E@ -&A^r)w0>Z|BM6H_&Zz5Wff~~kg:A*e+2gYv^OU'n2 qr0Sod5|;~6ޞR5STA),~."u`1+6[n n6U6˛Va׍`"4^8#lfdY4MdWhb}CUKl']]۳G1-L鼓Iي/DL;ڲQPT\'Ҿ:WV'ğ"ưܗu݈08j&Y܁u#=Gf_.1 ZB #τh8躶2y'3M9cxp, fn=jUΠ%XkK YC6ڂc.XSH%&K1mMϧNi/C!eVk b Y^.]?Ld(`BsOT~ zz]] jM:{"LeJQŗau^ w ,U!Bf}',NX:/u?#,OPl^KZ U~BAz]:LB {!{hml;$KF'O둖KֺAȜ|%Hnz`ӝd?Y&Ash!+?%)bOg2&k!%X(9ιKyH\btg ԓ&Qdc wǡm R4fPV&ӵ͋ HzlAq4`O>V::S2a R[lj rsU8)^jqq2hsUKZZlM{1'W+ _CEtm\/(YQH\ZmN퉩BI•,֤&Eg(ńra$<߻PӬLkLU(5is#$Td$l-\P#C[L4SOFdidîlSʮ)eѱI. .Tso;Tgܺz G`& иYC h)@\c'gk e 4O"v8 <=[ V̵x\H^t7K)m‚ y{n(Z 9`+BFa Apk]x'\R(CK;gyQLH۞#*=& &||d 4s!{hDeƊ[bs= u`,RsS=p٫7#SFs> G{!XMI!u=9!5#aobڐ/#o*CEȊIe[yNSvLh|q9 C2eʻ>j[ 7*U4QOuLF$$'Îڤє%fIX_vp6m(IL<3 ?&B :E^o 笸oZ_71GmWB2h+hQ.@CAoV!@yNbĥ5^<؜eP7c!tXyȩ@Cnub-caʎߘQDu)yyN#mIxeA`y5Yp؂ FyeDo-TŻ4HL +68].{<}8-ϕx+u)+1o΍b1TqySV*BϜ,s?Lq iZ`thOlQyʌ!0ءEI1]4 p6?3䇤ZJJ6'19&ҵIj%ȵRil -Ǩ;*{,yjX~fLmw^y*^)K"WyS݊HRbGZ7O1;WG9\bޢ>_: GEb&lQAof{sF;58e*yPpB'}X$3ez)'w=Vi3 tIW!ô Go=Kx/s L%P[.ͩǗ}QsP`4^",AÚ{uBmhɦ / pXI0Bъ\pq6d;?\IM# =Q=($ݪ )B[/oc1kbIj"+8`}kSh(kw'\p)e ++RUɆjn2LBapC՜:/`o~u:[-G^[9gsL!I!ӡxҐ}M>w/XH:n//Lm>n22w`)BotV)gHHJz8iW oձ}uj;N`UAD&LZ6$[ʿ,`#T}%bB S*>PB\]ː:\}c:`0lMꄅV(Ӕ5;b/~Wѥ?⬇t,DX+h:\5^@xNcG]Rb,KQ`̍4/H>wgQtV7Q$ XJؖڳ vt )[DΊZ-CiiI tK+y~UK~ 3Ð$$v ]+@.+F3,f xILm&fȝ.'t-,/Rok^#)0;ZCBU.P5>ZպE}:? &u<3rI=opᱧb*աIVO0,j04ru|6u,~zb(Gs1?%9u n>MTgg2q/`mL, ˏΤQ+AadCj]5=XB+^#3V"!5<03$N)h 6#<H&kpycf^nQ~. )X걬ޚxO X R*~թotv&|Q!:Ps^xCՉY-YXYY|V~rL!TG]=0Vgwv v: g_j) QJa^/PT3cz7׉ҟ@v~Q735u$(aZ)eSu,~ԹAל430c߬G/9zmj-ֳ:c5)w[$f23Z }YYi=,OZm-~5x^x|-l)J8sceHw޳+}M.:#[2׮C 5 8$~4}ndD 5K^EۢR! 
^t]qűO*PA}jG]Gk`b;Nt4v=c/1AxuN 1s"IEz`h5&I06FfCu3ϕa?%ɘE_ԔN1p0 77\q$}W|iTqs鳄2s5BT KҦģJ~>3>I!M7.cNcsK!lWH9>p$EzW` ?1m c'G^ǵ_Űv m$1Jԡyf0`?M.Y=wȘ蹚6*+ X9j=ai5忢/5 UպK˖PGt8/h[3Rގ7ʢY6 6WI #DY /; 46UvwQ6t I3{T͆ +ĢdQLbQ:} 0)V"nkqhr)Y1sI}jiKTp| 8y~,2|Sk;n.ͱa} G:n/&UTD4r_Mx;& l4XlS H7,xUz+9}/4m@Ԇ+{>gXX,=qJ7Y4?@V`icu} a{_O䓂Z9+Z18kpWӖHGcv;P¤6Ṟs+9ڴ(&x嗠;^{YK&&I_6CivMƙoѠf48g -޶p=lblA:gbJI{FDf{^W({sN_J(ո Q B+8C_?KzCxwa١ 6 ޳~x<6QZ$ [.<ճD6 Bfu;RF xV=:@:R!>4/yX.Hr䙓=:)1rjzKA5W3Ŭgβ斤P5 6 [Qu'V#W(wV s/CgQNud.[z7o$9gH`Y͙e,f),Sd$dke\CoWɻ],, kL3uN 6RTME&smvpM$${H ^j K% rq-3H E2nm }H-sH+ut6㲣'Ε[x =$+{Iv}{Zkz\ML(#@u#2(Il_&Z3L!)Xʫ^řCF\qĬꄒgf![ұ G9^& ɰ,gYëqhDMe>@@+/e+X{wsQqyڳvJES~ª}0i `Jz.X}QK1X+ShXAx3KZmh|)}(]ߌDc4B~+^qeڿTaOVIgQM]'7UǞrs D1[;XQՌܬ%Qx yk8=l`alb@Q<93^NxJ6w]T]{"ʸV2[Hrkc$ S9(T`QH֌n1 :0Frb샵s;ukf}dtcs#=R-C#mƧ1+Yh1TJHfutZ>Y{G[ yB:N9Wr EÞRNTg-/O7?;m,Z0@Ka4 VI#Rj)f0T~ +VpțApAi)DğW:ۤ~x!겔 o]/}N# ԤV{Ї>~. ,<\PSSX< DOAjIxX}z8 G#<7ULϏvX5\{PKiaY NBEn#л.""'Pwo*S+TRu6'X~`{h:0lK* |}ϜhhTޯ`OL(Eq qjVH~!>[I[=\D|Ef)vWXG;Hih+OW aȸ y "3($ ˃D^{:rggsWLtvĬھC]ZjA,eM']OdiBZ*3Saj`CoP"B+SB7eeqH,?Rlcӄr*gPu66:4y<0O4`1cO'n+AӨ} l`8i"~($ڗǏ$ډ\*ox+N|4|.q)00]BVt=,D۫`eQ<{Kw7IͥZ)%@Qq sfs2Q&e$AKu;4Ҏn?1駿") A`bvx]<m.3v[ }%jh@?(Otew: Ēҕbdʟ1x]f!RX[)beDkq@(?{OEE7]QA /Rՙ%n(I?;7X]O vϼ6!Zu>Nᩆb>UͯiySo=*&VE5[o,\?sX z32H&1@Hi-bHahn6?BeXd.{$Krwhs:OA::}ڌ[n~k#7wStIK_ؖҌO w4n@.yt&:&hEy;Sr͛&% )L>T`x'\mU|GpD>HP az:ˇ:)i8v M~4TҐ4*!v؈'.i0ӏ\TT&\g7ظ&.JVaE/AU(&d> b-b〔8]x"^2ӱDsD ;q{3qԊm_1=HEW$Kf2+YR~v3H#kx[*'C>,^{ ;a5:_"*'{"\ 抴ÞH '@;ϐu #S;!G_ҟ0^˝f*{@T=D86ᘞ[76בbgA1J#zqdIdK4tBAyc8c- 7OeTnMϖϼ4b</ˀN 'o6;Z8GJ`_yӥɾm%؀Y:4;VS%]Օ,x쩆 ʹx O._?-EU҆gh Jxdŭ&b'4sceε&>>E?#'D7~#jzӸ7% Gci/ |;9BmCZ.r## MލWR}OZhJhqgY_bx{y.$,MA0J)+W}\<786!2;L?n@,)SyX[|sҫi|K0F s~,K\n(̌嫰{U%]=&Jm8{JM9C'[К[}L\7r 3:Ī;Kym9 &m!n?q ^&C%7''!kR061{uĤD=vݨ0X<_J A)*qoʒ)cU u*%% u#f'%K|*ZjB;h)r!x/e,՛}@qk d'_*}"~*-Uޏ$ȋ HQF>,?~xHTv9uhǷ.u|e|ȣ E]Ȭ.B0alK;ڶ2ls6\E?evG%R RXx8|++<-z]BqtOX}^3Y@'ٛwf-[}z"?X)9TbEoAp@W6B;ڟo ðpaќu0ȑ [$uRCNM[["%^NN|>hƔgXtt,NNpbb96_ ck3s4t;rTL;``k$v[Oϫ-E`q u>a΂ q>WQ[ŗ^2B%F<yD,A* 
,?.HM")J[pj}!g`*z{˚VXm2D#\r0cZ΍u<ZTL1{ӏSFTL>_#5uh3L\|/3'nT Ǚfٝc7MQw'0}n}ƧOS 66K|]Ҩ^G-Rk%':ʄI\j-$Hm>֚O%b{N?Wfm#{+Y=p/zSFB9tCGa.09r!@J?Z_mk$8^]VHВĮ\d?3SFP V_ΩRfV {䝷t//P!7LgT:vyNQ jzٓanm"U WyY.]?'2 DI{H rcyQ y3]-t,*#b=E鑹&#w9 PbTc-LdYBPCaBg}@pg#Qt_X9oN&|P kȳ_J|pmWӷAHnew H=ftj`RI.ѡ0+IQv^OPm _moXK"IE 0 nM#K3Ո9GDlх8tDZ?Zz<_lGv /YMps߼ur5yQPMu<@2&} ZOk\m&q'CN E]ff"|~#KU {i>< 2BE1RDKTfJ4VPA*.N̅! RIE{Tj]Uu6#M ]4*swN2t+ʔ YE-a[k qт Rf }kpt-Pd̖ X:l@v\t;+:|rحGLa&xe Qv*S"Vczrr, "nN?O |w5YbٗWDo6E;NQ:0qЙ I yT5Fc'lmp&KZ g<i=\ӘtG$ Zx$U>cz+^+ |by] }ֽD tS xom!XS2OQmMo/&O GF*wV^NklZ!2+Nm l2{+I*^-Չ}GjjìT{i) ncAm" &cbKfT!}Ř]ʃƎ@:E,u j8VW1S V- @Ֆ+0oTW'vv\^:C&i)K)-J"䬀ԡFFN<ȝŦ_5Om>= yLgv1}u뫆VտLJ48j:BN*q߱q\mB!B>ep䬜79 6j:m(l:6y pɆHqN.DK N_4ޞ_?fsA_ bh SUZN2x+%4&H29OQn%b >d"aD$ҍ ɠ8?ShOd\8՛u3Z_Y^]^H0,#ȭѢ| -,b53 `PYpph̵3i~ p:y^ V#j=2k؈-*h,9uafHf26;KK ld}.6JQYLvK \Rv`:0ڻo@D* : (;vjG `ZAʡ+uo@ƒJ$Z_Jx47/,]&$Ϟi$>l #[Rλ` et8.%PMP!-0ѧB skN*rS@]ߪڃϗ'ݮ@mrmg^1#} ^ nBt&egǁ ܯ'cl=z_AZkI#w4^sUS, V]l.~琁!$e7# M% Rc{8h|ںVfw4KpQŸS(%ޤGLBJLLa,,0 s4>i1 rc(23EQԔjcqZiɴM| :+/3{8&P ݒ0݁fA'f9(\Ա]Jv2$\(t;D9_<{YN(eBX._Dp'q N^l G2;& 4NHjI/A.9yyBn)4K/2RʐsQ_;ǀetQIhG.qGT$=ܺd:G"@&hю$" qZ%Fiۛ37Sm@X `ie:{m_ >Le6WhgסAjMwDZܪ'ZWv[O’)YZzu>ekvSwi+2I'Щ!xEsԜۚZ5,19P6hsڋDk]z+1yU2w`ҦUr $:]{'_7z*M_Ђ?qhRbZG-l$j^{~}9eYʨS[>0Nrtl`(>| A=+pC33F(K Ei4xcrFB6gI)je+?r!v1 prv?0ej3L Bq-(Xhp|AE;eL}(¯~](\9os$ y;9̴v5 .CHZ u4P]y/+W/!mcmC4;.Nq E.mͻ#O0%8:iJ Wc4օu53jrBHI7`tzV`*p>3pDXiA.*r@]H>ުdV*P7A'76 uEݢ̞5.k0$֬FK#eQs|g>\tZoO=o;4Xut7Ç{{'ǵ)r\hY5REʰ2n?mxhi1Ѩ(u@ a%퓏j0ZCŧZ4- =fggA| t<jVO´2rpgӉpi> h=XKa $hC~td-ӷ:u u ._lUJ u2ذueO`/G-[X,_x6;ڽ4=([Nr.jQj'yL}lq8 =>DpC]4ěvU-*}PQr`Ы*.;rվQ,¿\ܷ68*5Cumx5ib [93Ǟ9K7p_/t "AL i= V}-zEe7h-@b~lK5kU^2VsD~epetQq`9O 2Ö6h\xr!k5l.e q8/[X ӓzEHcxwh)/Hr&] M&#h'%q_J}JZ+~4Ԩw`1<ή+9-!+gii-ج­ *@rd%ܤ3^y&"̡&T=-523PtSQ[5R0Z`Cu ." *a\'QUi7|_=9oaw9(WeFAm<0S&&|qȘ&6*:FG2wC;? 
8YPedzyqwJhJ`gH=LjE3h1ՠ^GJ$VCy-bO,+Rӝ7f5Y|U!0U$="Sm<qIQ:1},|tI>%ʱ:iEj7;K][ъ~ mݴlzD5x|k2p#rGϳ};d>07(;}H1@mܸjH̕4!?%&:~ێ p%$ZCf'Vs G`iuرu<2*tḴin;W@sJ.3vѬ,!xS2FIcx/"c7%L)ӔkxB#Q.tJл5ό:|M#/> :y@Gޟ (NiN[OjjhvPvj"us@UYD*Ue?TM[Fo1#}gK0SIl]Z.?L#ICT- -YU®UH[GO+[%}TQ`Cԑğg,&B;&CCiJvWm$VF1@,X[9Uke(M>qRB'o>lL2GBَdϰ[z^ɚ2t+T nɿl;hF!+> $2x,MA͘uV{1yl&ux<•z'MՊE? liOƽ{l2s[)j KY^5G&χh ,ԛY)uN,u(y?SZ|J{~'E0+=:#*eҿ_Gy?H3"у쭟V9Eد@hO:dR텝^/Ey2Pc8 w Ze΢9XkO-&ctS+ދ@4wm숑[LU!]Bn ׀GC|wt"<.e]xi$J|Ǵ|)݌fDON(Լ g!oIo8Bj6cb")H*PǕa~1qdbqsj;V:3gT|,)u'1ls>-'^hݫ;@Q9a/XFx Cʖ9νEc *5 1h͝5{=(KvP<{fȻ.^7kKkL}6=\Mp{ʥCB{}}?/Պ1 _~x67Eex UZsz:E[f( %6\‡ぽU>Q>wL2 dUXj%?q"OEjrlLx;TrӐs?I5?R(hDrfƆG({-ƕ6k QwFX_\BaVd2}t|pz<^?*Y\[D!yJ'&xF2ɂz7 nJ0[(:b,@ݒ$w#ڽaX0@]B2QkX4l=0IFKAYr!h2!!Lr )FeW(P_W*8k.D]a3&ڐu`qkvȐ1CPz, Sc@U|p2Rp;NIW R֪9 C> zEdz~ee7Y-!@!ܔd)u71CD%UJNF v:$ޗpPn,=eXzXmA];'ȊqU{!h/A38Wz ?;w6]{ʠwmJ;V8ܞ9΋ra#qT/Wi6PL"*{8nuح Pi6^n'AzF?EB?"73 徹L)D:z9@_JErG|~pdt׮vf1QNe{/Akn8ːD* {[GȌe׾n%UMؑP}v0dt._4Xp Ykhb vߍ&53Tb>i>?N2nq9 ODMSخE+!RojRHӃ:Sڤu ϓ$e;aWtB^W _jml-XUoK 56`3L''4S-~ Cfw!MUƔ&>ܒ3i5aZ^&8,"Xl}5nRj>EpW+$5EW-uy~˾VUIC .Id;!EB?Ƃ)}'a(k#̼7H`g|hvM{ZP,%-_yvc/X@61ĪE.;FҒQ_ڿ:c74耬lefgt|otRIn61܁,(duI3@TZ @ H`=ЩP-' ©aSySgلʚ8^]j;gN7nXƵO|sF[A],0x "8+FHXDEVf~Sm-.5ɴJ>:k:;@2,c *yY@4U0<>e6X]qs'Ib'~]C2HyYfMTKN]:p}~F/'-^ڒ8sa%d"Z};T+ F^Z-+"i)Q> -E&K{5*Ώ.Kj?#`oWX2//j\F H)Iy!I fiBӖ9t DKq> NJ?x} nG_rxSmNwܵFQGa@;qw[؁kE'z n9]^6wOLdZHED5Ȍ3jMVyO@!io5!*57%%ȒWM}kb&}9 ðT1/C†4##(=~w L(9MTƦ;kjy= tN4.;#{7 VOjRX21fY:+Q:FwXRQt^IN M14-LPz!EK%2qGÖYpޣp2-4[<g:s,H΋JN^)6Hvx׹@ʿȱzb3+|ђH\]2MQ@\uV.]B:먳T}j=7J(!bci/XM^:ͽ45#a*߸Ff9% ,T ,ҍa ~3.LMt}a{khY8^ސ6,!G8i7ؒKwX]i 89'ʛ3(Ҧ+/^orG;? 
~(8;Md]ٯ?&4l̛bʺ&u fgk g7{b:qYWC / PTY%tk5h[bgˆ`*@0c 껯/bC-LV9y&E,PXi[@ץ m]"Aj{_7ʧ<^tL*lRmU*ʎE9= Fi:SK)vA%݄FBkr d e1 ȁ1d+zڴC=!iLbq/ aGJĩʜ oɉ!M=oxzT<1ymbn ^ 0g {ߢG=K6<_$(}pux8]X <N(,ܼ^ďخ.0Yt|I(yV3T^3pcgST~V=U"IM)&XfW_뇦D7 ?\$A%o'r꥙=c:FJRL˘h0B:0į;'Kd>[3S,/$T]8fB uQۑ>'&s O/=F&dDhDF aNw!ǣS loRjzB5:s H)>̱+ksP+ |Zg@&4۱sEVCv"p;+UnJc>B7!7X0׉:^l"([Cۗd!xF[}-n[U@=VzMg$9)%,!d,qUAC\' P#'cM;BsM{\zV3X] = O-%b =z$nTti^`}#PF7 TUL1?EWܭ,.5P5uc\2qYrZ |?y>x_D l1ML+N}zN&޴v7Bie^-F]k;R=u!qN' D& !_v!ձ3[M1ٯ}yjQwˌxz WH P5C|XCqZNi$uy?f5iWnmApY^kylH~M7%LRqu {}`\$T/?l{4ݐlݵ{nO{ 0%/;bżWWá3W< ǵ5EMd[MBQA̝_o[n$Y{jC+O l)!DNCLMO"DZfz@@0P*hT+ v 9jN .d#l䯘+*QcR^%S*ݙ&#&? l-.r4܉=H$&țZ(Gb/M"֩v"IMs_eHF^ =~PGx*Q2#OBZ/؆+fa`{Nӿ<ɾ6j .m6 WTx`|`?BzjXY0hݠeWCҭ 7!Znbѻapw:9^*AWT/q?KNR,*Oi\wM^.OڑOe-WcfWy0"u(9MTӢ SEMgEǀ@aR6 V"-- LL+-h ׁ.` xޥ-n}`K.,ȯ_G6šD٘E'./$TV'Íܖl>Sk("VzPA#dĬ힊GJ"|J,^!Z\4U_G=!0NM-ٮp1b)],:Hzm9_ebdqtt&g%pcq5zק C9?UR rD 9dR?7#Fx,ov, zkYG/[╋4wI&:݁3,p]ՓCgw6? O'nw:Ş\qK*7>"^gJ?QE9  }5ynjPFo_| E.kѯx )TNО=BrE5ca%OqT*._}Q9o["Yzn(kuEi~CZۗ4@Rg|%yKٰDYRT Ө9m,`΋wgb ^YfDxSFwWJP ˡEK}8t~+۞#消ٌ_A$gx _xت8!5*M-E)vҏN]0A A sONBǽf'/nhUe !%*N(Ԯ+k?1ܙ>%}UEt6`ors"G(a)s"{3 CCE `BطC*>^B~-ABʢsWIKY}?s< l{{AG"p e6Ph!CȘsL(˝?} ~2\W# nౖ(f_P%+0f$HKjD*l0~w7/墛;΢ttz˲XIrKR23hhM!% eL+ R*)Yz|Q. pB2.Ged:ɟ ^w+%\mYrUks 63hȢ_7"i2ԷixZcѦXH4 _anFl/REA$A EvZ/P+A{HLR)}NT @K y`PDxJ80y ^Kr6H< *Uf(|OT*Tq|ѭ`vZoߠ_pW'!ckqZ/oؼ(2J[2Jqwp;xo;8>DLx 4q;Xn9jEWNyn'ca)E$n܅C-!Iºri.z=Py:tޟަn&^N_QCڜk pE7ic@^z&^l.?aZ8Kx+ۉK]TMFf#ܑz)~&# a,(ۢ;q&ϋ7Բv93f&#﹅r\|exj@H&"RX[psNj ˶(dA0*[`)iP>J,QR)g;7*?KQ}#.i %+ iA'bMϫ;7TXH*fM")g~kƙҵQ9VD>V:p)8䑹  CeQf !ImЃSVY'ͥ({ja nx狵$1E9S)JL|~>kixah+fi<0L92$#E׆*8S+Q;ntα3ldtJw$;MCW rLh|(%^k8^Zy*&؞L*M>熐kHJ+&I³;V^o']^E2~Cظ?<\ hA@]M\p Z^JyiA :Tvep `cpm4V4mRz*!ي {w CCc h"YO>u53~:?pvѭ3X-vjF7ik"$zާ .kdwPrvI dLMS1'd}[ESǷGb1G>%*24j岰AaU~Qq&f HmZuqjZT GO3!ox,`ög!J& e@6wR2GVh%Q$-57Sq8Bs'Q0_vsDgYZP }X+!9ϰ2/3<$ڛ_ aΉR܍(x?igw;9){rbCsl$`Ac j)u􄻗7}rX 8XʂpD wlp RPUޏڡzZ* Zx Qx4B[BBXlyGv*R5T|ثMV" e[ETւYu J^"ݎ7nea +mFQ! t%;,Dfۊ}ۓ߆Q<-$u7 Nzl jR)KY}n. 
YUl<.޻Rئxr]ۺz"8BkH!uu Wz6*bR7>}Xן->Ñ^}Re"tQXTF!Ǯ[>˹dw l\D"#YKc*XV|\I6@&P)p[s~Xk'W%hnJd5on4z`C[TO!EG9S90~>!T{&U|<+z՞Pǹ!X m)Ʃ-37M5F 'ՠ }Rc`_H@)0(\aEvjb/1F?ŊL(#=ܴOCv{&AP6K$dN5J:YYA  /E~oڻb e-&Ϛ7ďv]d~l=KZMP8_7bf>ڋ#؟5cxr+kxz wXb%e7iqg+T9BCMݨBt=#+\ 4DTYz4ught˷i:ZKWg'pr*Fx‍}]<0/ҽVU/?ԲOŬ~Ț }j%Tҟfl PՎ86Yg t%d&0k(K'mܬO ~c ,?yтCyW*5uW!}v|c: uT)YUiZtB}u/"L;?b_^k=b0I^n^/\̈́KȐWASڜL9_TRFtO9l[=t]>-R=/Ɵ$lyCbqh~V3rb' /XY7Y4˳k׆6DPZB&uoD9LWyU\SzNJ`~oALֳTJ)˟J,դH{&5l`IV@W 4a;2uwQyrc@BWPJV9 e&+zA2=%NN,Cn P$LJ_vyr?KoiK~ pxI<j>aZH[\<$*4g[V|aDRUk8Xo/f:(.zd"b8qvX͎NY/YmTpp+zo3@sZ4I/?QDJ7+TO+:҆T$W csfl5[;jS ~Gg26K( |dy{'2"DONgUd}5_B1o"HRԄ4ntj9^MV Cx vܬ^k@bN 0[ AE?*6]y Y="L +!VTJfNJDbm•0)'W8/%3ZFuD W>謌49W)>ⓖo+W9g][$#&ŅաwU-7IcO ׅ! e@/_ c]7i35QĬ+IZ던>)9s0*$!7}x;zU<`YӸV(ԚRsͱ_rݶ|I6n_$tf;Aบ^e(GG'͂6qqk̳ έdyV>{3/$䈖 7z^Ze^0/g T~g~^< e*ɤW74?chҴk*=)EfǭҚC^lqC\P4Ͱ_^~ʯ3d딩ogk*m}i~lbM\赵$m(~ٙQTY1QsaxQhcF4(Z/!d9tVLB8}HWVӾr;. \toXѶQD*d3dj^L`YxQeY<=0vGVB-$ʢ.$L&*̉4mp PSyZIJL04U J)fqRd%Y:U;kH~iD_Y\8XQy097LIU"'`U'0_dFg@ U܊m1n^ŗ]+qiCyIN 1{hi?cyDzQ?Ô//l/#EHP"Gj"-G^O @eK2]FeWD8unq2NTXڑA(^T#"E”{q*3\jN}ewx.)m\ X,̷u:C׷@ԧhT F-p$0<)Nb4s $yhﺼ?ף?ѷvff{^$,5T"*`r49FQC[1IП(۵Aږqd$~J}C>tѽKR<[{pet64^3ļ2H9QR,\ܞv)8o;Gϗ:G0 rқS@X}vQYkigFLƓ,r!-^B>Z#4^l2H =B|o'Ӟ=u[3C]ٳ(|?#+a` 1  ٺDMR/ǗuEDqFP/\C@| S`GKHPA Zjڂ̉TC&F= NYftP?mJt1;L3oK:i)4H\*Vhj߉`b_~$S+I\XxX*ʒU/n%g֭n;ᩝ`:M;-LB)y3|/Mp̌P~Gi(!thrH+wJcW˼j< {O h:ם.l{xXAޘYi⼼:;Jf>H.0Bdd7i0v!{8}@!U +};s4d'Uw|zM{V8qKR]?ӽp qq o-AaqnEuLg5~e^>BDO⾔Yd谚;wDbH֕cVX餺]eAW,˯II\,r @I.婹ICD0~8(hYn%CGƌ#D'a].=zDQK*N`(yKN o,lF&|G~! ۏ?/tH »eOT1×43,"[4ӈ; rmm*EGQ9/y1&ca9 D? 
|x*pQv] 2-{>)D[ۦ?=%"zz7!獦8V{18ALzNm^d *rE| ҵ/o>ΠuNvAnVCЋV2q 5mskdTk@ظ)icl[}_i,k~{<@QoYI Z0ER5E]٨g^{@0~KFXC6s8 UatkS_Nh܇OVY':PxUrAfbPgȇ/NߔkћOTNZwn,rAgs0[DB8>P0{PR;~I4Ǿۆ\@5MGScG]rj̀tx` ЧPo^4 7/|h 7Qkh+hqc5|{hS7icDUCUW {d6*#ـGe S{0'pfsa\L¢ߞy\@^Tx%J6^AϞt]SH vɑwSKwev4"8n:|h# +T!n{pmڭ茾EdwK9|Q[16h&ezfӁL=дcc NOz@(* 遒U@W\"97/G#LOY9a(i 9'-=8|t!R"Sj+ [H^_&ɩ-8 ;jvV5 C1~(qmJhsD"^ { .bĜ ޡbg?.\HŽ΂v>LdX*N_4*IXQµI+Sh3*gQ޾`qD/W ?@YTV uA@)mvZPFaxC\J]&G}',`J5cYD_/fGURӗztq4 5>!S#ƃb`P،tڥgY.ςMNF>V6ǣQZИ˙ UHH6! $GVuT/t%Jg\X0cۣCP41:m#ڲdQ3{T:"&rydySYgPyKDipЁlgu;VIbr1;rQHz EzLy4iIJՕslwrpqJɫf גGc<*ٸ5T&f&)݀Qjb7PoRda BG߀̞vN[&S^8TL- Vv,a#} A (x#XT̍\=|.:zf[`k\Didxbo#I-uÊq1N3YFB!` {B`Z Ry4(OLGIn7 䄧y[0H3Yn$ΚV\(S&:1 Dx*H"t(.@ He.[?OmЌpjlw($sn?r*FE6&:6ٮ띵3﵋JLq]hM H4+h抔_ߟy^ܢz7 qP]AlZaE2)HpAv 溊VZZYUg_74&>FNHK'{ށa<2y\H_zᤱЏ3wlhݛZ%y{JWIDqx__P\IH w2c`$,5+e8 5&YJ"}{MGq=M6h>lWVO`;8.UcPp)(=vjyzݥ%UAjauy"U-;N?!#RM_"TBw=Br |y^jhmpNک hha7့Bb"oO6 Cr]UVɯq45$;?=StRY9U)ˋ6S4RVic$f vY5`T6M~OGk<}ApQr2U~]f 毣b_/?wpHdUnqSC݆݊nk&; }1;,)qsҚvFʪ)ݖdg Q rG [ %Άˇi?i SV>V' Gݽ5΁0nM:x3Jf&JFӭacSXHڱøˇ <Rs]b~E`O4RI~PmD3~ .KN~,iN0XsƕO]OV Z+NhC~M?s>@3[ȝd٫C_PTW㺋P]Nyׄod(.V$7k8~־<7'4łB2oa`*[eF.j5M igi*$l(+\C_U?{S .Z9’󕡑: avV`1Έ4j>ӏM|G'q]@t/@LHPW^իsg2Wx JՂNaWö5` *ȉQ 3dAsw== 2@ӧXmHTUSߪvɓ) r:6 8r"**|TU }íB$?Q6+_k rwl%7x?ɗk]>ypiQ ײ"r5^O燗Ը}f3ra^gJ3^Y^OcihpU]eLcs9`ߠڢ}ݎ{ + `5.4Jiԁǧxג (  1:,SUpw} %Um,8dJ_k%V]-(rQ ѵ(RSnzR bd+4Zhʇq3%@o"?%(H^J*ޅ՟A ߆!# @YGnAODle%N`0tD)hYl'Rq";} 9nr٪{[.hiKa|z f&_ıJ9vzf&.6̄kJ?Qg>e:v2SȺ테IIt\H:Yuw8=4 T٦3ܜToɑ;8z.jxGb=KKDw}DK.ՏH!=r4E7% 5X "3WdE9쬦|.Q9y@Ϛ{[KR>ӲI/dU)G,9ro.K -iIğvH^^m!= Tz'dž TBMn!د}֤&l"L4R?ď' y0ɸȎ jё~dhETH?l}t Hc/NQ's:Vpv-n~jzwnSܮBKdzpE%&:}}& I$/^[ԆO ՓLYތ;z3-rЦ._\=]I;Nr{r6:2ձ+.JF*EH g42x!K.w$ Mbtx1o-,$ Tw{\K潩L1\mws:#Բՙ=C2|sÀS-̪NK>ttC}fS0B3"|oHcgpuza(Ωb;C>fi=iEQvNXԑR Ւ$QQ0:(%j x7>f$iXkg(c |g7u'ڻw}f.T+k=͌{ز Y\:X3]ؓXT-z`p|~aAT!t _FQs9_P*w.g /ǣf s#u։썵OqϋFNYI n jQ+OJMB(T8]WMwLoSP&Y66kAڞ]:M.~{uP<6/RPx h23}? 
.eL6Dр v\xB5Wr8mcX0*`s/VLܢ2/"D~W2Q͏*aQ|k(V>`Չ(qKj歏lBe@E-lm[ؿ,;F vaX=N!n닪IZzR-HRQCTV(Uqz8P~0w毝ڦYa~OE޽UT g6n._>seRðqo8-ڸQ7*OZ8WPPR~2J'$1 ؊SZRW230t=<\ Q@Tm.V5&툭3FB)h3$P0N? 1r' [63R; zBvl]HEr=PH*^UTUSWJxTq93k-P|MV^u'ZG:OD,`q EI(z_ Z2=T{h3qUX/ ֦72 ̥Fzd$I MD {GώRzDqD{<#z֖͒ ójѬJ9hO8I]j03,³]]IDN @} ibEZf5lK8G4b4RSk )90\Ijf33#Dݧѿ66! c4SִW%\\8fFNaga*9G=AiOh@sƿ$hO$AypV"z:U@E>!O)mܪMM}Ƌ؞C=LaAj9=L($A5s=i,9|x~l%9j fwHȕh d{)0Yc亹z$)Mg]'};Z3 #g qCiwiLH!Gh^x/90 o*vPc6r24[`lMw q6dE{|ϒ~kg%צo0 T4|1{/GU[f ̒f,tҏ[8X)?=ƕ^jE63ږ.WL?X1_meem lԿmg/ѕ1spB{mwYWW&ؿbЈ6Zݯw^h@-X*g+g8F;g`MɌq<{l^92z ϪFrL]m|Ѻ<-S(Ok `t}*7HX׎M`1C#׹T"&s9nd^c3OWošz94ySr"9#ɷrM^b+^ʩZrhǁޜϒh =Jj*GQ%]<6F]yxk\ܸT"q cZIN`A# ϐ,^3& NC"[pƱ ?סqmF>FeD*͔kK]F݈ƕP20ﴭn~=H6E7V4WnѺʣ%2Tӊ#{]+h/asO ?H"yM3r> RY82}myh+Rx?Dʣ@v#hE r>N!GQ/lSj$#/6壋;#䖀~$ gN?B؈4*w k/hR5]8nb0ϻbzV}xN=t9^йþ)rRvZ-YƦs_Wrm]NV' t+NV5=uyy۟_J8VE6dW "`c0q Y0NjI<v} 6puW?8, (G" ڙ27@n Ԙ N_\6>,< -I];bT2vBٚjE` +:}Z(`1,+S$D)} Հ=rXWtƒVŋ%Ԉ$٩ m]>ǡp-쟴d"%a LFЅ0v;*jj[ c{NC3JjH@hMөq$O~j*Y!1;&zw\d2д -N4[g @4A(!ǝ'ɁTDyKCn гFAH(iGȆ_%ΉC ^N0rFm[?^҇"*h nH_D[hDD9kTc4Tx/)Jwķ%n*.}Nexr.rbz" *s7$[-JVy^# xrXb*<]׵;<+zE$=Za׭ X>uMMK%(.˽cAV寤 5 .o 's5 5PJ ٦ys`0\kaFkg޹A !U:sgӚrI|`tC,Eߺ6>3PCDajE6'Ô0Sal{K6$m_R'#94f!GK P|׌нf>,iNes 2XA>zm"G+; ֔㤅)2Xr9Qۼ=ػ~l,x(sa^ңOE&"k1`7N%$Tb~q27;`@nm:QT\I]8Tzv&{S=B>qt!jrtI7@?]߲j~%Ju68c}5$U75zVɱ~dR'tYY]j^^,.YQk;@ݚ\ h- ]?i N]>Ҙ}+"dL\̀VC4}ʾ*ER/eWSxq$Զ+tEe,Ո[-]7|۽K`^?䀪paG, ҉CVgYzF{kFjˇQv u܍>UD)ʹ?*i6Wf&(2E%Okxwt`]*nw|suJ.M@w(wJLyr RK=EZ|)Ul Vm~na^۝(l9/FI_يR+u$uEwH&H􃴣"Z)Ϝ wPno-iHQmxЩm8~{(\ŢQdN\[ COz<*Zgp)R,ylLP8e2TMT1lAkxPb5*&xͪf,6A"}cf)+tv#>*]SȹE*pQ5Ed7B1Sq"PD19Zr%Vu&$$ !+ɒ3:A)76c+z4>c׺ m%MUMLbL߱ <{y!s#j41Q~If˯Y;WoS~XDKMZ TJUl+˙wXe#Vl $?K ?))[յwq,j+JY ؂'YF1E|;FR/IwT@Us;~Ө&i<60b Ǚ{XUxd3o$ ,7RH\Ȱ\jT; F#A\zySOϭ0F;K\&Yr6cS39:@TƱ *i0Uu݌9-O~5&uS9F䄟!!hzQfs!Lrsib`J[?:iKtq<&*Jޒ(}8\6EʏL fZ>s4*& yH̴Slu51!WSS`!j}G!5MQ(g%HĮ>J-S=<#ݽGeKKAJ/:K_Q\|js7p3ڷ(}jJvB'_1O)*FGoq^@jl1K26s5"cf\趉t/\A6yƬ htP^⏦6czU2Kxa-ԦCjs,s$C`)Vr| \2Dau4pA~Թ>f-OިRB''_Ͻ#z-SeaOKUUR m/ɏ ~aJj7הIw%jQ_C 8`pau/? 
QajK6hKAJyZtIz ޵Rl (R βXM,s桖&ŰMq.د 5XsLƟwQ{ >H!ӡg{,Nb~aY )#B@0qH0)Ps5n;4^Bg:tdr#X@Jidpcҥ0SHa!q, CNpS,L8L[}`gP3:fXK4־/%m@sHUz6O4G1i*_^Z;diE_]ڻus;DfytuӒyJi"~in\jE#J n;yMb3eG *i6G cgSKT4&}0IBcmG 5WDs"߉6a~ͦ0B,W:ȳaUobly_zU?L˻qx0M8z/(7@~l}V@CJ:7tV'˔&vG> }Q]v0`h jU{V:&8LB2ROD3K'dQ(}bi+I?L_ƠQ\gź״r\_(z*SV@xsxJ6C"We768=[Yj4Q OS%Z=}K BV%MTpHP%18(\' j*[%_iA\?_'"^7r 1z0.%o!پVsh]rG%`*Eï {kh^6S|Ɉ41 h3g}>BE$ ?GI@o qNike(Q#Z.Dޒ DAD1fSG%wG*V>g\}rH}35hfH0E:, X)؎)>iF:sXvZ~ICK$\·RF>W!F@^.Y\8~(Y qg 5HʃV;."WlbLձI_rT;,:gT_VcX)t?i 꽊0-iP%?#4d}Ѿ~Y4A-Ԅ\ϯU}_CMv ѯ&sZ|Bgg@eD@9cK"k8xޖp mk. ?xN@Yy;%O.rԘš&Pf)3D6"}QMK@F(gxW2\T'Vs_8k8P vc_=K֔K';"죰!onFP?cN5it`jyZ7F:RB"plLݏ}Qi ɖJعq&9[E?JnR4 HXO㵛d*rǴafܓZ֭F>·Vp*an_,_nyX~zC0[CLi~=hաsx(K ",}2%_>7;)CZm `CK* h|ѐEa(`ٰAu-s=p0 4G2<+Ք͢swT#ff$]:"=?hŔ|*\B_7PU /*# =T NZY0j)W§?nYלnRD*—]U S,NIof##]6,Vq+}ls<#;2@ 86v1i $t1n(< c 5jW3ޭgS` 6kThdMx}a#*yX?0wPUoid7{qo zd5hM|v'yܜTad-[ \E2a#w3_'#͹hw)K A({h!pb=kŚ*h2F^fBSQ]A4+!(A6PZ1|X򹌊^R|ZI)K@`_}J1nm+ 8Y0˦(ȥH[u)*shľ3#fŊX(KI2k1?ȄAb-Е3ezFوlm}[?sgz|k㧶r4夽=lcKq0w( r'"CzC\h#{W~GFLk+i^U՗$ [ASuY`ܔK`P cvSY>2lx*l2.dy#*XTm߳nK@-loK'uétd(afS!reT?:UeD!)K/5J)`WDƲ!+2iOU.! 's`E<_qcؑ0zOV+k¡mQ'Kӯ u5C? lyMӉ);EEȐNH@)mAغ."KpN:1G/I^S) U;T7YMro[Ta͇U?YY&TnL;@>9YGMqg YcǦns5:w\Octw9"1O[U&%PjIeKIdw;};,>2ۉ@˿_tRDZW4S-A+iZԼx-&&둺v)p)Ze 970,6Am`dx8IuOJl8⇔'*Y<7 hJbNdZaݲZgf!>)Uę"?ϕݸw _$Ղ)t߼صoM1cE.lc O}E3xcJ7Jrt]=N~65Jg]J!$:pQ|*DwQ`N3T߼!9EGnG_ӑhMiƟssMQA5PrhD7:^8SQ#5i\@Թ]AYҜ8kr<G¨lіN6ο ABcH*v|jpK }FjϮ~q}BcaVru 1mH*L\E0Pٓ4oNCȏ0;I ? ѣ9H5ރ]wgܡ2"3:'n[n<<vI^ZPzT}?LӰDea"tf˽4EOތ~Lb=_N$B U%J1(n*nczpwxuTZ\w`m\umuZ?pW:*U{6}b^SJRLZ;qzC1jbVA0]${)iKJ@wfr? 
bZ:.UC-|&dcxP MPOh=~GtSRtsǦID3$[wc$ఘ|4*ކFfn`EӠ|%}jo<}pܑ]?)(m6OMLC KJ1̄,mʼn Bվө=BZ4N(}01Եb b4F/8FR o^ů"-G =a.{[ S):z.r/eI+4>ȝWrXًVR*<#,b٥!Nj5&FY3A1pL8_ĄH'.V7 pKڄvJ& %<нQ, mF@:R-7ȖA)WaC \Q4}(1g[MV/$DRX$b*SNTʴNFzHR|A]J.OŌn:7[Q\j`Wh#I{UuE$ 5"*G .; ڈۿM t̡s;`$2] IԢ ^ҳ}ZH x&oruS9^8^B1)q^|陴Bvja08h`*~^ٔjHɐpՖ:EϻCwdG$0F{ {[CʚcR^skK}WܭĊ`[)^RdE;MUK[$ 3ǒP2ewPԎc)EOK".L[l}v:TXpҏF[|'K Bt4jճ~'^"wk砙&myWl~g/o`_N5%#e>p'nO<ϯ1Lh.OwLZ,humc:zyh24ÿLҎ+MJeҎZf=* T:ţ؟]ݽ4bgp7 w[dՑzU cL<%\wq ʼn7v0z(,l&]: Rh Z(mwS=|2Im )2`g+J U1?xlaИѡlψ~5lg9y#7f Xˋ4ey/ p$"Z@? y]Y[2Ҥ7iĦ Nˆ z_Pf9bx?2T&e7nOF`HF] 8{Ύ0κȓ$\ʜ>gQ>DyvsMޱ˪jkҊX]o|{O $MRV 4l0BB J4d5B/&@.Fxҹ *G@,_ []2 =8Xy@LCA$;^jt s4FOAW<nᶆ [&䝛q9 d*0tO2NbM+~.H;NglǁΓ ;c;wMطOXrLw-zz !XcYqRr-F3Ke/sr>g)jx|JtKX.SF {ϠoWN`o1G_, |m*i$(toR^H0G=@{_FYEy-Eq /a[ =gvzEθӽA;qj8 0ٯ$N0u}us))@R ju TcRS^aF"v~AL6-#)"xN`e%Kڠ"2^+oFh&g#l{@YE$?:DRfS{4){"W(d"&aqW;5W9K-;&?\t#PMe3}aٴ:%i0|lC7˯ y˻ul_y(ŧUeJdca0;7^ }r[eOZg0R?}?…7 T!`NPL[> ~hwJCr*#- Z)`H{`[h>/q|ވ1Ϋi -D$/y)F̌a*/JT@Nڎ eFYY=+ <1 POHOU)CSIETT#4Bua\ w^d~bu! h;gEt'б&?` y]y/,>mg+٫֢b]PNjHBJ G$u'%HfUUWrzS/]M^9Prو&덵83B.0kn&.{QQemQOcr1-DE0}=$Po TV~ jӒN.}w\~q_vgqK𠏸 cng\p6$7 k>>tnQSvg(ԓx1*~E@nA֮(H.TW_u7I6};Ky[=Hkz>(v m1\~FI@*#։:cRc}3I@`Uo'rUenk<|h{U40ieǍv@alu.)M{ifRs.&& 3l40_z\ˬzSſe*Zt 4A~G|j`9 HV#>-#B\>i@ Y zljx(xb_߈S+%Nm%c;WOg _|n{Yۀp͹:5e:39dl${ƉnB;KcѪ}: j w[Q3' Yk(3B¶Z] P@tʼn#80|Vnh[+zK_? wL+:[Z5k4kHe :^{\} {2ʡ0>?0 Kbz H'߾ɽMP䚤f cz9-.vre/(JK\t3: 4x +~>Tlz oR ez{5NXq([,OdjaIeT*4(] w6deE&Nher}؅㎫. 
$v}\h4CvTڎfUź4s}:%vWj^')}VO7atW?7b4^+䀩~*E/rxn?z$4##H-MHv~p}MXVZ=uq kٷx{,p%১%OV"ouTOݸ Y'u^=*}9S/&QǨ,)E,%;Lc.(#Pg0p%k`=I] KLCZA!>R)&H::$QҪMZ)I V z֜wLo/hDq:~G:}0_c7Di4-O$b%t/ĶKd"5 VtV^E2gMy5dlX45\gNѓJc2Q1f?ÁoRPuOd3|>zm;TJ uB| 7U,0jK[,_5{{ ^q yDQ:;?ʛoYOao^sO!+R.:Щeh =WI5d}CN3 CSO@"KFg*HIiF!u#tRum2pm)F $xb] Up ^ 0QzcjX3!ot=\4]D9#飼”8;Ўxj`*ӊ/b޻ Ak@PD' ގ+Byrn!YP_͵LbrETtʍQaR#0ʎ$ߴ<i:J,%u/ :w`ȶZQa$I2"Z}U-;;EHs6g뛮Jv[U+8?^Z2 5nS5cl:R=?޷QAi7aT |iyՐT"jFxy=m{zEdvod(}'T@6бk2nvs¿ҔLaI`y‡d\sEYזZq3W="BgݒaC/55*̈́fyos*=fI-sS5[hӝ;EJidݭ;\@yN rS_FKh25SIiDNl&>^Ztirc Q+/7h9kWj0 |Ģϒ[Fѱm`;tPس0S.r.o_Y:jb<5\ʢ"]IE>wRdBLJf0XG'z!<$h^ ;R5=8PzJq"EŬQ|asuJG\a㊱zVȢ<W!d1Bd8+)aLlRPB훚4!fعGIlVbJ/U% !gn-$b i`dnv߰2X,]ي/}"VPcRdALn4aƹ<<+~9:ǃv΃r)j.m‘sAD&=I ۴x:1pKkQ1[`a 5R{P7] 4 Vhb_@ ,fةܭlŭEDN.zuoA?ﱮִ|%mP[-1ϯpܟiUvU=ٝr-hSP.ZLj/7f0kK"ʯgRý`i'$.{Mn!yV pZ6'S `b.=\+ qWˉcjUL/ )֊=n8"nĭtP2{$'X~uO)Q *e!b< 7lZDVl3):uw/|~-IoQ(T!D]x$I(-j'#n +)4;2N\lE0וPmK6>h7[vY~-u?Қ+70oMCn0`| u'%veF>E}|Y!d [c1 #J]>wftF J Tt6*‚vRX8%xdJDTNX:@ E3TX9PbЧ37tz;{.v.w&\n E8ѢHcJD=2J(:G_wJ (7[fv6@~x$pn}d߯TIqc:3qǼX!@u!>Bc* PAaen@Ys[3Y?!,ķee+L6iE٫ŵfBk_Nϰ9So6Ԭ뮴 Q8-(&xOx,TOD+g>֍8 I$Hr97l|;0Y%.YS5 Î;795sX! a61%J/ꎺl.|@h,W r"w F <ssǬ-GJ%.r$fĀKܛvz 8Cf9!QHM']!ɂ<N79Z-5Yx*<@Mg)I&}ĻTtM񧹈qse<KyOd1uTJy``Bꡍ`p5)jڕ)t&s=s$qo)۠^XuOS_Fr5yKݵ~}0 ņNGRA>|1гdC”#J H߹%^O;'4n:~ABgV`t莥`)ذQB'`6~0| 6rbAELe@ sGK-U=eG##,l\{$w:_q}m{4WUiv˸o^[16r fbz(5(EߕSӒ݊2I139܍D~ E+ p!gP)xFB6R[ _zϱT :UKZə~,S1vuE^6BX|+qZQ}% @͙b\kRM6˾gXPe~ QCQL\[9GJ 1JZY_!B~c!whs9S#Xì+h̟ݞTbĕb%>aOG<&ߗwn* fMj?$%gX^t#Bó%g3ey]U;T>ކa.9cmWp)Kvp`%&ƣ|4*j$EƏv]1wbv .$J]G։wA݈Z8x1xHtކ׋G.^gp78|2%<~Ԇة2 lt0+ ~ cNngrpD/WH'0y/-GH688gK,œO~@LYKQ[&J8C_Ihgc?D4TjLp WxV<ǫϹz6N$Rnio~^v5W/Yd"F?xW&XmMXw.8JDg{7L]7s3cM C[ʷu}"̖aZ,Q:Ä4E,v8z_!F㱷N8Ք X 9 cf/5+:;1|எZ8Z þ_ZVR0.@XEƥ(V7`7k\"㪼;YoZ1ڍs瑔P}k[I҂aE-D72aȮuO`ȶM߿&H%0x+wJ`P׳U G(ïdkA)02póI, u,"/>6!blۺh ΉE(6"5:(BBt6oa z? 
MvQI>47Bk+@k:#St Oz8+R@Q'BpR/^eaY03Aُ;6h굷ǷY]g竉:X5Բ3$\n$/Ky}]NM: f(e+EEOdupj+ABoi5`2zF aU';\<x9UP @ԧ DvQu Jp;z 8@ߛVWv]5uQ@{챾Dd פ֓&=1C܃eUb1_Q uL7k Z|LH^WCZֶ,;'}j :-Y`K ` 4=iTJ3]U&,~77"`X. =kv+PSqKq`} (5MA~Cw7/)JM1JH5rו wu6HDqVރhMm}n8)wO1W%rR2iT džS S2O#q],Rsfv3j-I@q`o*MO WL_V9DAL/[#O|bEC|^@-- ;Ǿ~c'*ab7`ǾV1'EYP }u89n=s4X(s:Fݧ$Irv+ò\@.~LF[; Œv@) [}Cx3'w\"VLq˷Kw'p/tE?;`ء:>W3k/`M>.V'W&; r` Sϑhǵvc 4AadPFWE4BY.3WX)g0yylp&(mJzt3k^N&/MNd׋VVmHI)GX($#z3lN'wc&bznsq /9>-< ZVa0ZշbUCpjd&К+ TpɎ/mEam8f-, 5sRƢƛJ!Ir,QG%2ɇ=* O c:NUݸEGdrѯ(*RX v8E Er JgpMc@:Q?ڋS eC!s : ^ )1jA2B9s zz[K>*syg'qm[E73F9aۗ8OznEa1/w"mP܃bZzG*F}\!u(H՗rin=s?¹Hܡ]I}% `qCz:Yp&)Kޡ\ۭ0 0 W5SLԬœKzX5 ۿ} [ Ѳy- 2pLO#8jRVq%:#V%17X5.VdHǶEI*}ʍoVhU?g:lb |RQ]ux=u MWX{Gݔ}wD|$TMڼYz0e;pj"ĮG$pmDvYso%: {L$b/s^zi}m+Lgoat'C*X$^aX_~4q fŮY&MU|X׳c~vb+ Tb>;nW"k+/Db6϶5#,שګZz'&_x66e4՝ aPSe /Ez*ĭ֗nA6%/<Űwpu8WoiO3nZ0.dqP YlT"<1>᧐Er! ӏr$P*(p~_F'l ]+q+G7Ukb >'"Rb+yL M˭ P{4g!v~^Hɇum{M9{ƺ.[Zv+!9Kqn㹊_zzoaCi;1$C KɤD"e?[}F ("_3" SINw7 s<17is# YfIt tc|0>NZ^~ ?4aCWf1m-6C ۬:ʢs55塗n?Srz$ EAIg^lv֖t9:$}8k%Vc ?cmYO q"ݧTЉ5L!Dtz?4?jT`(!}ĥdn<;\ ҦӃmvq*bq#x[iETԘs7'q{hEZT$Ԅ1Qw$$awʍ_Cܨ_g[r#&85m_*PKIց(K-$$% kշhysx@2-*꾂m`:n%$VgoMv;"чoGFS8_žHN+w3]sC8+o;2u~LicPK:0)ףPP|DL \4` \M 7J7ᑊN,Vؤ,1[@ě+CL?2{\QFb>c5u쵡&'ַoYKTQ&&--.qOv_Pn.!`MGBd$Mr֍8 wՀo-x5CU`kӱģDZ܈g"ْ2jy`oDEyK'>žTiPV瀜'JsW{vhN%uy=N'pNM'P8:J~ER`p F}DIqkm۞imrA-'Rٗ/G-=*NkCfeGCc;HIO@=5}Ȓ @@x/ jk+C_4l𴿤")9%Ee8)k\v?#/t^;en5SB>GAu뷧yNc*wj`_&ݐ lTO&pPk0FqA!nNJ}^QoGj^qgɢt  ѩd nw=K_1ܾ"?O :ix"R7;\3ijt\(Q̭L1j萐KW"x5>%?\]l}qUN^țݔ(q!bOnAG(̆SiwZx9ZY =Ndg/\d;GuTk=jcW28GkcUQxCR!pzA%˗\p,+G9`m&\Qn{8&-<yi p\C-׉fb{N2)#ML6ڔDyFQǷ&+:˴Ќ7l;i.8 .87\iHKm6e1 e`=f +}ܮ?z0=]"8*n .Qn=PūGb9ͪoȀs3`ѐ"6mi%1;-,ÚӍB,GefOJ(:#oFe@8&m)&YR o†:[ЦR(lO`,lUIK 'Qi6qDu]6$źC ~pgDTahR9s5ђ?^a"ˑ&]ۈ5ƬF91?_ Ec`M5ϫ)+<@1P`AƂ'Ltqe RJRC Țwi yx'[SJOϭRb2igK>]@F.ܳ> t@ ќ'Gmt<>$y0ҥn P 1Oj 55 pO,ǧ'C^HeK s0|G2-E!v=o. 
7o eUzdKatmwEj!x'.9BspGyU9l5G9nQbM40inp@*532%{`&{Ohֲ7:wGf#9?;jAG_ \UOdbi1W&Lc8 խ _n" uyEUCƱ p{yԶcP9 x' t<yYs!9eV wY!ʪ3*}ȷt=L6+-D ?nrm]^T`O]'@~j$.WpLq+HTq+d2dxK+3iR`s/"J [A46NTaLIwׁq&ؖ<~Q(hv`.Alowm#KB bRRpM:uQ1 [}^} *3~a͝C}:ï-yxRʸ@XBhtb4\u `#x =իieE($ ,Q(:ʡeI QVϜ,Ij颱5b\2c*` |WY/W-+;w[npцCߠ)zrj'4h轸ZZBՏ6D,*hWm0;w-v9Z1`;>ƪPom[WX`W;bjP퉠Ԯa[1:29G wQnE J6]'J{yˁ>=NPDwsPɩu!>Q5+ kQ䕌3$kߓS g%e)I#Q8r1&pb^(d콵GK 2)F$5Z6+w8mC6.hV ISE5Ba`Uc sj^G^P;sOUzu,)2";ROq6 Dxl+*ewn[FGNWsAVu |:k eÉp} "C)0lAkn u8 +B`'r:RʉCrat}r5?S*AP9 0v'! 1-&F-{Q^%X} <&8hNT^ /#u[ުQvvEڤH}rOn' =P֡ Y}R_Cْ`;C#p9$}qTJo7A}fx{ _dbskfL Mb*ٜUo2P"7ה&]T~b&.;),ˡF&Le9GvFHZzRrǧBI\,Uy"i'o`oU ަ-eN֮sk?n'LWĂ^HI&5~Q"_/$;6ŭКk`p.w166t笈`s"Y$ F^Xky!2DyzɛM;h}c\ֵjiL,8  Xq XwUp6H>1F a7%;>@ƭI˚> %>;9Z~?K9VPxǗ&JYC{GEM۾ \댳vZSZI52تv3cNϔGr H( Hf;kEBLK`lDZ|³M?mX,C`yF\XLs%v- G2hvb&2Hd Кp,/%tj )@f 2۞^ňp8JD{8wja T$ϥ-Ԣlpwմ͏RJqC3]g(H]')OFhalyyS^=DQq,.֥inj:MUG7TF+[}JӆO{VƫI=1o7WR.nL2S=ʣI~{&9S[]&*!nZ15h Wu! \ eQ3:4dҔ`.]8(]y8F!P1ۨE)\ PJNbMܸ\K0zc*ikk#)ZOaa_Yy-kꝫ,C $M]-'z[x"G# VNMvY/.xjE N5l WFlwwBtRU9oȗlYHpBNB7)w#]vDzj ڵMhŐi74@m]J"h 2f/;N}N䇋X%A!؆Xwvv څ2q JsQ96\kjuwKtSiP k\1"D8;X=*HfVk`CsbZ}} \^WJ[KI.zN[\ёH$k$S۳p¬c(.ڌbȗτALx|1L/G~06^Փ )Z@> N`D&ɝ]e) ?yC~X905)a] u\Hx~HPR[otQ6_eP`};;]ˎt5q_5fBm3vq,"O@A!>0Ϳ4jᣀ D}I⩂ m<«G׍OwsV^@*F~\+3*RU=78$.On" o"Gƶm2KTch6O=,nؾ'd 6H@m3dNX(H۟`xİ~'/R~Zix6νQJYmSuq'g p1FtK\5FiV#.]ZrBZ@R|pF ~C]b|/v&e@YїfgZbOڔ6'a4\ 94|%t 9-V_b54%D-M^F۶t@L5gH2) x>THh$'Xh = qtcUM:W j)4+Cfs;:`Ĺ۵9HAJ"AXͤ{}򏅃}P>ꮀ; *3gbWtװ.+$+۩VlBT}/#]fx|>g hwrmJX(ye;`XT7947Icf%״ş e% @e^ v3*rM]j2`1QVه>t=`{rL AJC3&\Xk\]کr.];z̥\D7)+[-^diqZ:tV MgRxx)r>`gVL-ƗȗHDxV6 2?Mh 6,kՠmahvM,%$~c_D:܄/O= ipeFɥk>&zŸYH ]^87P`0OưKQݪ2gZkv9 O&858x IuCU]#@B!݇Нf)u%|W(kT .ǥ,FUFyh~6;†1SF:&zaȭ\pX[ mC#a iJݡW%ՇptN5١[T2L$_?O@\8H'CC =s?ݡUC|fOh;B(q1k[Sig h<_2|iXǠd[qpV3"Kz$ Ҁlg|Me-\R".Kz|qm@3r6y Bw+i034z`g6ʢjaJ:<s6Ԟ"1z"?(t6vr:c/1tC'$0߶de&yM-E]kdi$z3(MOӯ+fۘrY1#&!-) ɑ7V(EJ|ִu8Ɏ\'4@įē_F6Vkp$'bkvG?thkWd,KLɭ6ԥgp*g?Z/rݮ(^4qr"9טנ q%ϴ?{n%BW*Sh3̎: &`aAxnzsPhp6,])Vz=$ }`k^0p֒%Hl{ŎDe8q&"یR2r Jc8A/nwmm 
8dS#yL6eIBߗ3^6KkZqT(0K$beHQ&}O6;fu&F,mF5Lmd &桉7aѼ+>7=,YHDuV0:KC{{[m9^d#"1ZbJ1ʸ#&V`ŃjY)Pj#C&&on7H/YIcO`Ư- ]"գ30Hk4;*KXt)dx7R4nf҂9:Xv5-)@fp+2_Sq+k:P+N7b7ȯy7Z')~\NK-짭"jUc*\aސe?av;_.%x'廭wN{Q~VF/]NƊ G¸XN1u$ߚ"thpT?az_@Ĥwe+kJңXHT1j̓1LkƆ59Bf}@ݽy(-rxTjCJ@$@Z8s)?sTWGl/;[OA{H.f+kf%!©?gs<>ҦlkvLhJ7 @T"i@&l⭘=١x:Ja /.p_W'6˨J:`UY v$5o<gmЁ H)tA2m/ Fx6KhŜ F"\x69RըG:f"b%8i|DFqک!كAl gyVjPj?BdWڏ$+ТK'aWIY487r3mCOu2֑bCaDpT)AGjq.H=$4,cA٭sޏ0/ҧ{<:o$c؃rH&e&P?Qg"zns‚ `PkRd1!7DXa([8wV.ۿhH*b󦸠#(AϢ,Bi  [ms+~KMz̓Kn%0 JD>{9br2`v[WbE|Zvŋ)b+2F]+vȣ? Oyq:t֏:P4DI,fH|t}'Eֶ7*Dlz)mc/h.؈hȕA\ -p/) $5M z$p PNBؿ^Txb,vBwU˘ SrЩ &93يz?`%ni8eyy(8Qeb|ϟ7x]MwO+ ݜ"}\]!Vƞ+b#&҈4yc~<!I[TeA8(u==QԐ!)7)'K:R|&Mu~Lp[&-N>qCTw9Rr2Kq06f!H`^nv*sw};t þ42w`34s4M1G_+=J?EgcC/7~:E0gCѢ.@ynJm\6whh3m߹kpVq횮Ӝ;b&*j^{z"y~Ly9BE IQ%댘_b5ұ*/"Cs"[&۪m( =,˾iAc FgK/zR}"h]]cF hA%׬idίNu٨4lnkxfqI3UBlG#6GT5qUцVw{A!I99yŏF#ƽ^ S ?7ZJӮxF~rTLy׏HA--EUKcX+j`J$f~$z= 8A0#`}ʣ[w^F݆A/Y2,k 3>4ۈ3H!2CT@4IʄDt92 YmZdʵ %~HWx?>W'Ju= &z-P+R'o~7"d98iZ)3,?˻?2xl. %XOD! /V!EGVMC|0œ!I++e[X&+0k,T'0j aA 0|\ȭz8UH"b ?֥}r%Mi1Cu~_Zεz+1 =uBRpI!LI3@#ZEPyNVءxpc:vf9oCav՛\?1:ѕTz' "cjaqN0GXЛȚs34ۑhˍA]Lh$vSgO,QHz ~-:dsFL]Ӯ,\ARC$X+ER$:q\nMQrh&$0 J|@ `8hTK[Tƈ*Wג"%wEz$"}r=ϕ9TӲ%3u :d֐:oJ+>8ϒ+2FAh(}S 'aGi;£D42 THL[%:}tc%yFkzN"$w+'[?.D8lyгv$"(d %䇪A Lc k0J܍K$]R_hPMDnD٧xaY 0MLNN;dytV=ȓ#Ҟ#O4th2[#5APLW-.ss "PʑlaLܟ3J[l%U%HƢiABwL͎] kbq4UUx>VOEZPw6%3UK>x$ ue`ay:r8;!RDՂH$-ɢ~L$ÔT$ QnDi7d z=cєیxq*⫶6"'T#[UOVOBei^uY$i v`V-KWɋa&YAQQ!mQV9:~ AlwjS+g]쀫Sl6! { tRy^H{H_!l.@L%X8%g~`b}N6ꗉ.͎̦'6,p*v_f>;jpQqu1B["@dd`A_1tt6@=-Xn܎_l#36}PX#_FK bt2Ɏ\'KQ_9:ΉZ*Z}[%Wtw,?bv-FOM?f޾=z C${zKeGL!6@4"u9>Đ(^7>\*hB> &FQ45\g5~::N{(K|ZbDDB,AwK:{SwZ4__?"ok*|C DWZy%.J9awl(W0~/+8d/qF^Tu^iYa/BwW뤿МnJbwYS01v?o8j o NNk4ofvQBA;~/0=71f%Či|H*skނa֝=+lk_u$O+6gcāL[6eYsh^ X-7epRwHs~irR]h6y&o̓h\Ql1eIctjxZ&*RK4U%~˩C7ǚr3HSЇ(c_0h! 
l:9.=D)A""*?iD8]4M1La+h-wġe ̶þ}{8U> g#CǟV;2蘦N1Ww/^)ј ȩO3Y?L*Eve2SZx򞷰 S̊fD1l۵PQb)m\ $нp!Si9PUiev8*i {-P6n lI~U!O| \S0ddEq,+ꁒ-b>lK) 8`Wğdgyч=O.V:ޓCj CubX#@Q+{°U(4,͓5Ӛ^[rODy3G;L~WWq[LFlEy*_U[M+e}wUtud3"[t{7̘w7l,W#S[ڔ yq+@A;g{R)MlJ=nZ XNO0D,o>:Z\y>?AYK0pUm64dJWVFaKfD /;N?C2V  F넋(ާH= oTl(Xm&cWNY^ËPM[1cy-~Vr(- ByzX0%R:ٶ6Qmg7ֻ|! ^0-H'֌Y}(f:SD^l!L%f9 f} J&To衅EsMD΀_۔0׆¾=!MDZL[f= gM5 jqθќb/r|aXUI۟Mµ$ΑŢ-+Bq fyVH6pwZT# ! wp],Y/;{|H"!DP@+h[ =Q9bUzq7Gn=: w5ﺝ4"I{5QD3#ztOՀ8 e&x,ms/Į#kV+t߱'Ҕ^U, y6R|RWi_M\ \z6a7RWQ'gC .Y>=⎯b%X#k`]ҵ1sCMt05}Wg.KCy&vb|>mbg wlC5 vV?ˆV.ţ%9.&}Hj ,|.:?[C5m3QW>YEF>0 +M礆} M<|}.fMHZ*B[*r6J"E,j|YX\;'݀4//R!`+c6M6T^6Fe;i';i@ֆo5X [q2Mz~*^~2ˤ̺;nO( -Y&/mR]D}9'2O+kYlP9{"R"ƥQJc(J6%lŲF+'^{Ո0F1׷َ)J[ROH-d{1 JA:fqe mcKBGI)["6l/ZxUvˀ&JBz_g@6Tgb>'".4l<b1&^WUIۇ3 ,01f[bZ  `]lQ?ZѷQ}xR.pUI]ߙMqSyp=ѓF7T[Wz >#1is 7խd\GklV$jv0U,U͞ n6fK/0ʉXnv!_$Lw7 Z}G J9Yv+#~KJu[< vTamƗb*UiȜ>*0\&暥$e_O}j.QJH9,;wQ#jvp w  0P& l-y7}YM:|ʶƢӓ55m&߇':G}q(tNb8q+0hQ hRcp6W7*7K=2\SBtz>~ q@@%2ZE2j[!79zKhEK^5H5hef !^ RHRl= ѵCXd&fw.~)hӪa>R(r54+{&)`Jԍ [q:)C@zsLMOD ^@/i޷ 4n,}̡rk/e<`$ }Zvo5[.!U:)AkA|XX{U:v׾OGdQ)#y%A6L:n#dHC ^y=[ʱaguW4mKSbÏEc6k'?ӫ*۱hWx_H ѱk RmW:'mcv {H,]1Jj皹*¸6|aW/9phV9?f]ɨ9T»f=Da<,3?Ĩjk j`XoR~8M,Z"P͐4"دwײ!J_ΜU-WGf֟MSQ:Jgҿ"<@Bʨ69eh˜+6x3mXPÂ6; - 2sܮl‡ʾ.(ɴkѦ25b]k x?.˔vX wUK ;{"3G2[~}=8;vrw@sY`\j CF>W6lNM XEKķ>o<-9! Nd= 5:ki5yuOOrQ@YP3kZ@1?o҄Xf}ҿkeAO.|Ohunn)4<Ҩx)5*!#GJM}yߑTND׍߹T=-!J-S3c׳/P/8xFm.YK*oehӢUOL1By KUhZlgϡ$7i^2ѧAr=lJ 쒲&W؁bv?KN p8ǎѥ.dG-p7qJm @t+FN_.ԭwW NQLyv @nK绦wώe$V, P*]uK8jxz",{Ni)a[6U;yT؊FKSw`[R\#BHMV&YM񾂙䅎$3%bx5:G *bXcfYK)=$Y-Y6*r!F;x'. 
ӑ&W+n/+iV{DJd^nLb#UK꣯(ݡ*7{觗FٞbX|k wxrp3 }d´DUARύJLCɨ Gew !lGlx(&EVJ NUE.pc;'`WQ+[[(g%.%ˉu_iZnTFȢ g^c#n2É[n9H]rZN [-+fk2[hl$Jn2ը"!'M+( )P 16'q/y^Zj\XD7>aKc29)Dx>x0ى܃ G!*֝ː)?mMu'& u-o ol{W@R*1l\ҌHc, t4/:K\+ʒ IZ I,H*#/0al| kjΟ_"+>psϭS]qi/(2u0dG(E6|vgJ+Laذ &eS{+%e3CUvKڞ`fdRp@ዩBN:/0ׇ't|+C3b3BO'8Dso+|1_XAs.UseяN*Ob7bQ~2(M5k|\)0B| } dCa*^r`G2y?!,@/yB,X0{zWuqO%K/:aԹ?h>f 78GIzVmij!D@ePi`.M`ĥcĂ_'9q|rlűlt)G6xjôDp1!yr$> IK _]Ug+mHm^`= >)0cEy`EX:XDVsJUc&("+;Fc ᭯g瘝̧25JQ7-}\ xAMJ1_,ЃʝwGz%Ǡ؇nAtW0UUR., NB簈cuVEM-ṭwc\ފcZwm񏳗Ba14R4x3lΡs%)CITUz1+ ubc4?sf Pe:ۜv4uVQEG\!F~d}7o@u7EByaJ38~hNzwy), #tںf d%G7PõVj4*IhJ{!c94^s4+:IA*$XI$loo'd5}ʕETfO;QB`  Mqݼ|+Tz-*10qUP7-K|N VaCtZ-eA-kRfvs?2;*y3c2xh ,͂!b*aKeT&gv+uj4lnZOn)>.ޞJ =g ee|m5?ze6ㆳhYKJ XPN:%i]~J-iV/ ޹~u +&t!ddĂ8qf{IQIP-x#XXӥ%+Ⱖ6;ϘE舆sEhgU*v##ވ=Si\<%.nve!Mk}@m'SFL TMe] i<şn(up #p(%Ԍv2޸! %`^T~vˏp=0(с6b~e`q"]<֚o0N0n'~bx9!Tnӆ;rGnT`rwt8Cۧ`7p#,"ުs84/"sa"x ޻(qt2- ).% LYHK A<蹀㚩It\Xl5J?PH߆mH+GDӎҌSt5$9vc+*|B#./&<_qX'R!PZ0'.$¬YxrntT07yJ0RbwIOr,WYUP;m2gI‡aHAGC2TnjO:mX=d7ڑHUj <9ncӽ|Fh~HZ ̕PeB#>%?V=]AlBApSJO1?V,e m |U 4'&lh3KM&$ OݞzMe}95h\íw:%'ZTUzD bRrd}o63vmtw :v%x+m{5M0oE\2=HF|ƹ1{[$N تGl`zl@ pJ>^ue> 5$d!2hBßݺ|u AIsUZ1{" ͮYչJmZ (@@Y}9$U~Z`,o—&DzW),gva[J޻QhKaW9l}Ɗ\ q O)3Tļ]e 1D_7g׃V:E3+i8#A.wSգε ϡ38{ԩ Y'7XHAOUSO8+^ EcG)o/FsBYbO7w6pj }G5q<QRc:1WC"i>hJl4,7ewPV4KbL#A(Iݷgq` SART>0Sj̖-]_:zW,Qw"U+[oOލ>JṲf˝?ދA1MS['bu(P&[nFެ|(nr T,``Zń=;K.mՂCYiH]t3} 38$SfX@"u/ {"ģPp?i?9l -=<sAɐ}f9 Aߝӱ<'Ђ؟xdSAG/f_/rIAn ]6|k=5avw"PAˏKSԋ} bzY݋vh# d"]J˵ , aû䐄XHԞsP^GX[vsy=¦'xCځ(&ai]sw%Kl YFP"ZR0wg_ ԀπYcǹYy3qHXz% DF6);o6cH"h4r $OѢ"fI􁧌D&lVGgܟEotY)1`?J9k,&c;LWKW&mχ~Qf\ZX N/|."~7{GrU.5u&_~?&N`UCcp/fL iKk'_!ƨD}\F`>LSyNPĘK8+:Ϗ% N7Ąۥ%O QQO?7w*k\4㴅OY)@#rKxOՙ`i(ۥ,k'.;in2]C53F23_.<7rW*Xt{_ x\?mDzڔCY[>w3I5>TƉS@!bzcI ҥdWCWXbn8/AffQTAWc0_M!͟>N5(Dn.m^m1PmnZ% QԢ0 cHs f@:8iFx0`%7B"ddUW<,+{ÿڙ'wfi ȁB{ c_mKZn*lm4y=\֍#ě}Q_`PFLy!Bkfz^rbg*@i|֕v lOޠ4ěv(=dw+4* >2rnC-j2 DER zq,]$:vf)],m*fumA f"{^2ŀ> D0%eJagQBT.ɗQ=WHiI,6,z!m 7g 4lTͣ/lZ򖙥ъH|p|mf!$V^78EM[~]ES a /$%$xXhԕB jƵ2Idߡ+&.qGŏ]*"Iq'-;ߓZ$mpۓ$C֒y2] 
"e~;b ۡ\?Re( (P~PC?.QⴌdObw^2:W@7d#sx8Эcch Y"@ E/bK(h5JG 0b|TY0YdXR.{KW4cv:t. #h\ j.,O}r`[YdNq#My (43DNσc'V܋&_ ǎ@ٗJ4F W%b Tġl m֩[%:j{̔CuI[D7ZZDmRnQwruT 㱠(4_&{#1Y'5Q>QӃUne5|m2$k7";jUh Fe=tΓdsȞ7E"w}P5ƲQ~$mؚ,AQtSʆi&uW-wF & JROL7(Wg:/H! L+:),j` x2!h&%*Ye6E.6$O D[Fv Vڝh36xRryswJ4٠2Y5^"'7Ӑy#-¸9g&>q(J gP 8$ZLSNRʿ} %ջ|pGЅXI@ ju(ʏwGMZNɂHYS7[*94a?3Y"5t`R C[ydnK<S^슭l",UgQ42ꩻg&~qG= wg`Z\eYmK(ĤvA|<8 LЉ0I\$`DKT;|!DM?^KfEl߁]/NP8mS;vQ ( ]^YaX.Vj 6vU)_eĀEP;-{} q,R,'mڂ?SBUeio njKzziPpa2|Qqb9B1~2`JAh'I %llJFa`AA6v~G'>艰LJw_Г.RBّ*Yix,,H m=wu`w`"@"ЕVnj^|W ;;qoPj1z|׏u(74X@lZnf66\oϰ9p8QN%zNzfXA׼[oȕńJ\x.oWo wHD)ة*u 6-+i+$@-FXu8x֙a9G|ܩH?f^P1xAZ;ҭܮeLD^ 4y,NfMgʚy+-MVlBMO (~R*;QSKiffSqX2~- ~Wڎqg? =h+XOr2㈈d5YbKQVvOk4GtozP rV!:۩[Ҽ!8}A[φ܌PX?Wg{dG#u0@Ʃ^~ :>ML0Xrȃ  wM;CۢJ1o04bMqP)6mMCf&Psy=V =(*{ <-uB_9#21Vݰx4T%Ϝ[4' I^UYPdk7rp%mJ<_?dbTCBxkCUIqup~csuPm&C{{q[*E/?E IqHg)$mA%n[a!\֢߾DOt$Q iX{<[cO4_s_d?c1o\yo|)B翚مIl "~Sѣl?`B B%z gMfN压 !!{:6vlQuʼn/fib4 [ÙuT]U0!7jх Q[-8m@S2 K @e J!SƒCVP'ːlm/_EEW9K/[鴫 wt)X\-`򲋆ص]=$J^ScQwI'-z*~Md'T"5I3'D}tQ >tU HI=_j8,-7<ՅGXZwKYxzXp ߷a=} ǿW8 &Bg&Gѡ} G!T!Ɛ!$-AFQFT,>7h282Zg0D D (H-1"Jc yzDD!"b&:SqrUzvsUN,kR՚ ֓4Bm] yT%AFGSև-.^{v&{j{ Ej# OGmL]@ne֝'ّCtHʦV_A6eMm ,a3xaڒGJY޺#cŮ{w}W5$/0$6sMS8׬P8+p۴jYb}TJ io!Δʨ׮8ZiD0bྗyKnoi}Xn%Ϛq㔇`hG=;z'HH[Ui]B`$u\-}哌.i[=|_d7is5 e"l!3KxYk64e;h͡ nɚmaO܉)od]!.!@6 +PeW"6QxglcI@fid R@WamÎxsk%AfΫG~?֎ t{OaD=qΜ[whF&L_s$R8B݁JqG'4= NhLKP[vkXٯFHk iw NWmχ.qbM*Evr>Օ/h\(?M̻rKeҀv'XGAp2XTu_Q`B1Li/HǓ,^RXQ#떬HiYan%rNҳn0?:ЉwrIv$=wF4=Cuoc1r{H~ULۥ`yD9)/sm5>;9*5!sHD>/PWܷQ.+o&RZCd+< J~@Ds˜Vʉfc:.18; UH[8J<|sDbe^"iҎW|gQ|<ľY)76,2 ns7aslq;[<@rDoÇMnZ~^k0ދ9C-m#J&n^…=aZC.p])j1La%mBg<W۰/ 3] e3w-*bٕC-3L!͜e/oj\_K %3i:ϗ3 >4 _c4E禄T{ hI}xV{ΪAg(7`D(AB[ᬧ IҩǺN8}@h(h:`vw8wl}uN9;}@ʀ >^&{w1AbXгB]uZEru6KPvNCUN˭ZqfL' ϑG&} Jht@Xy5~Hx`qv#&!? Vˠ_# Q)l=>WKʙ/C~Â:_q;iJ(H!T_X)`zuH)k#7B\ FV>luC"6s ٥G'md'"`X ]:lwHu1ZoֵUI6H 1A0M'5AN-ߟ?"tJPSlUcןb tj@f2br)|F^x/-?C'xBu W-SʎWi4Di%{Mzۃߝ7̢iI[vji״vN.[$%'dMԼ6E>rӣ6/S$g܏&,4܉%UD Oʅ3LWwju̅ce#2K4718oȗY*#[845@NX ϣ)Ww=tn8yj/6G>9!`PR8 +uGublhì:Q!7܆FIwSl8y_{O. 
:y Ӹ*A#bɲR 5꺻È=|\&7{Дcb;{H5Ɵ  p|q,Af\l\|OAT;z0Q =p?8a8_ dmi85BEcn۶XUm(ӵʊOv-j.blciq)7֯-8GmEQt:UbKEd~䝧DYV֠(pGT R!1e<MYQqN=+'CkLt@*e5]P̽?:Qc;"=q*@/DL[++#s! Yh]co%Malc&+N5~l0(Di;5)IUl7h$ÏUSl2rcg;9MQ郩y2'M`G9ğc ݫL! ibj!OhG]qEvyZJK1p(|rB If4UtZi|,i16'SNJͤS7RdKYz9 ,`>ц-+,E|VXBҜ}EK)[;LNmiurs/Nj sפOCY>z) TP98Vnt(\v"B݁z1·/}x+%ur2Nf(Ƕ!#e|(pdt&I;qU ѭnB*[) D e)}6WTN|,3z2SǕ[U|PUhPt;ᣜ$N21(6A;UYUKc8;vL 2d-#IXL H駕%8mYq@$X m:3^ಞjxq$uG}`'w2Ѳ@ k}sȋiH}%C}24K)vǠ Mz]?+dL7*UU/>2uŜQO.?&: n Id{fS,ʊMd},ZXbk$k?fd::rSȆY5Ԭ#,QOm=-a^VvkK^x> )A=Ju& G{!߾Ѹ |kN+!!vA*O , ~3kR[(5R W2 RWO r rr^Cnʧ[(Q> ۻw茞Zfec\hXV=Ƅ8.삖=W+}jwF01v({s&^u\õN;x׸l~)v)ґXQKҞTO(D[Bz9M}?`Qt\^ɍR Jx1; S/*M[nxJ˹5C(_цD),. (PmwJ!y~:fT#jŸ)97L" lPB@?(@]J>z[[!)?Y"z13xͲ\ͧ&)g^SCL+ZL2rz ɾ?*ՙ3.![O%e?q$7|" <ɞ\Q/?UE@ yU:)5~$|O,T1-[s'df#[drhyS7W'knRTwz9s r!֬<4 BR: &IϤ59P.1RnkӛU Bw;uu;no_%,x Ըڿ.0Y&fsۮJ9ۮg gtgм.B@ ϕNn3<:j.F׵HMش*͢LjxQ.+۔m,`d#XWq+ggV,*HƄ H P a k{%ھiM+@I1t|k̼מ'0{f'HԑJH{RH5󦛨,ԙKzQp=dHCH*ݘlҤv@{\Z T;Wjyf;kG9vN6tX C_YVgŎE`9l+!, B|e =NdI7HT1mYt5`K1vb4e [s p7d4uuCȭJ3LvL  y7HpHxT37SKu0h7֫=;zhtΉdQh~Q->.ƂiO@IYm!EPoj\e $AAv~o-sD TN*5 VO Ͼ-#GR>AڦXa8K@}F*:uydD&h6M*Wg}[qx$"?߳3)s>yfJ2 ]Ay ae<׀D8gǖ0KRrFbZai=F&ǸmQ׺B͇npk٨d7hޒl4g|N>M)UM<ra$t"lv]\ ܄+k2?ޡυV#_dWneR1̽P5)D}D:mA҆'׶pNMi;V%h P|Q%,IF+[CyxIB$QFN]P Z҃> "ԾٱVt&z ``@uv`^"qw[DeeTR55܁]SF̷-F^.y-aМ`03F꾩 g4'OE9pqFu#?-HuE#W;pkfy;0@pVV ^wljX7藎=CT2̪!v^{ b՘:dgW6@/qxl&1VOl%{YLM8qKkC%ea0C7WfV{CMޓxmC?%>(녌V[>xHuY#[ +‘+:83u%x Ē%C>ƛڄ[E|"tY{fH}kГ֊Ț)?Szww=HMR-!x&zdK~i UJ;IT7Bѵ f6C ׮T{2&Kp7*g)Uݘbg3wE<;j^>)ΧKY}߅rCNJ/6|=g~Umj|>]{qoDrWp *^Y) C=~,i\1e kpHeDo8GRd%HJ|*J=!*qk4l[fRKOk﫩HR ڎմ@E=?7G%-r=Rg(M٨]魲  P)Ym|:]wGa]l*b_%IoQWZ$;=kY Dk=I+P<%%0l_Y>m%XP o54Rut7[31f72#L w_rGOX1Ř۩%LdGJo yLZ<vG_ 9Y/!UrQu-M05Ĵv̊ErS C! 
0 rPT}!7ӪG ӿܴ?i~2gfIB"M}`;g7K/{TL$+ ֛/lI?f|l^MPgHGٙr,iw3ةԳrwVE\/dx)\7Zg$&#=$m(}=3#!vK4i@b T(;*2NQM2F*?fgVEܼbjʌ0XRwQRE`͸7lWĝZvxWc31o=d i6si,[w~ME=LQAb;[ V< bKGV;t*m]9Hc<W2`_[1tͯ\trjQ_!Y;uŤ=:|, + O];Y1e>^UCudpЃ鈈:ޱ;vQV'GԾoT#4%xY\֔lvUʞf&ɚhF1|Ͳ*ŷC,9 dv߇`3U9>* ~ O&ӣyB8C?]`WJ1zdjmz t=qE5Nd9H$HOsVe;|'}:g63T;/El(*'_dڧP?ߘƨx㙯M^Ceye UjM-L(ZƸf^FzV5hl5PiE33S&7k 7y%0f$lT(xcpSr`C9s1=a7d0U{[<ۖUhy uEc4Gq\|Aqrv 5Q:cVh롮SnxLgS*H;4:uBL7MҁBU a3 f(+̎;@eTbEIDJזBݏM#yN^>{ PZl\_ٸځ9(-aY<`/|) {?8d x2rRp2-t8JH0Tv#T^S!j;WO=b9Y3ybEY !OdLGJT(Y3꣝71/"r|WT;.nɃfZ3E&jiْzэ>k8u #9r' ௟(y O!aCm1{讕Pxq~&>YS9t8}[ANu2xgi)Od(}WkyS1|u⃍2Ig,\DAg $ m kCTA{)Nƻڡ3PDz}! e%Zk"Kb2_7y=ph/Yo<(BOS_ˡطm6- 27o\^f(a1t NLQ16>QyhDQi/J$&;J?}x+Ii)gѿ k5[})q^qN W M>E`=[ ody12:]h.t -[{qxF!E;1~ڊCL}/n9j^D[=SbBo'>8-搶h(nIZ\1XI|V HQt mAt4l^D!ř[)lmfp&`ai[%JyUsAs/31lι@JKf֋:*"YY05%ӿM ϼk7%Jz%R%[cmB$,(<9ݜ2}}g|f>PrJs7<-^SlXf 1D'uRtpiotH`lS Ə(^O;+M˵z%4rheS[D^hت,c{2]}3k.w-WT! _&AX@҈M7A}a8R 6"oŨKO[j9rUFiu6CڙazD-:D10\Tk0e- éʏ7R}&pf .\;يU\NA\]ک1![^@JǫtyHgA3#Vjݪ+ 5j"B4QДw. P(^8bi.(,$zA"9-sl7Hv*M cOiGG~HչL&I~e{@Z0zէ&|d= p2eXFY<'t~%\Ezʿ }h'D3Ze+?L2"{8TjrU]GPզsGKp!7l޸y$5fTs@#(HHg/=+R 9АAi儴3NJ'OeE[ne(PH)R_KCIQʹ'wtZ1a4\wK |i ;t% !6#2^ 6|0?X9̥tˠSTZ0ζ} 5rؽLc<+&RO$[T1>GhvBo8[oOzQS PC)\!9%`B纙1?dZ5I_zyZ7lz#[&{\_9:&uhC d%|&?3]yD(RfSj|H`?gE {Xٜ+Jq+32i]RoPThVvq(_~R8CXj9h AgBr áJh57=-=P@CNF6rqݒoK͆ȾPںYaZ{hXS2[-^.%w>B_i0RqƷ#1CL{;a/P/Ɖ3D !PuRqrRk҂F: ]9=йozj.L߈PķCOp%.6 D&!'01'ڃԿJ/x"pӂœrvce0JB W'ʊy+zzVvw@JiUQ9̇srW I+OzڨM`>e# b4@!Vh+SbmN-cC4ubg,swnˋt"H9)5;PA,$ge6~T@e]֦c u :ȁƳ3թYV#SSǀҪTa&د]6` tSj28i`e]ngv& W=Οp=#%UFb=7!=C8Yy2ʬC'}@=Yt5)+bQ/ϑq窈C 7c69"ĨW/ J쎋o &%`β3iv6٨# f(e' COљcNTqn $sed5qy &t!ZdMUByTw6r\ hGaMGd/ 68wXM8^}'wv!nxHD?Nϊ諿y/M`|%N:cpz-:tLl!.qtxas^q37S)RRVsV-h: c)ڵ)H uRޝևte[^h}n5ڣYnddnj%rZPT"*p,,䢗ОeWD+NUnLYy>߃ה, szs:ت*rwcO~\K⋒ O_ #\g.Ytd%Ns0/y;_K;L c$@>Ò rF&i{@#Y@VE;Sth߽mQtP@4x} 6y)Cw+aqBX-œyIV_Ƭghfh-pC.o FcIH Ss:X}0Ǭ.mH$~kr8V0 uZ|nAcoDc੡lAJ,'IMy1Y]ٲ8~/U3p)kT tT&Ex$.sDZkFu%=g:CSVhH@⢞66Hpæɭյا,MǗTǃO{}ײ*O՘`L5 lU+l n-嗼oM4m'? 
,#s@z4˭OX+nOP[k$g LUݘ_[[QEjTûj!0,UD|UPMAyPiz^6q3NҦA;Gݰ+p7>xg9!oa\v`AaIl R:ՑVՓ&\Ӿ+|AY*X!Cb06zl^5zQ)B9KxH`;S8>0Fdh9`էmA)+Nezb.4ݗS""M%_WY=Us k.3x <{r+R* 0ǣQ(#Qb#=wE~j缙Fϋ8T{r8v/6 vI>^p?tMwӏRF+/pC/B9Tkm.ފJ7]c&[فۘϤj.)`G74y [YbT]p&ԣE#p|rÐ3}OC=aHE1 P?`2^,zy >8⸿^NZIh#I @uɾ+_4u@4%3Of6eԙI"w#Ճ#6B|ѳiweuqٮ X%KR39bqiuը&*9LFQ9KNCx5A!QćDω[|6,w5VbUY( XNXu_l@,4,ci A9h0r!ӷ3E-Q˜bCtLgWezYTr'cXpDċ[! BKႠL/p:Q%=Z8Ei8l) KhpՈQ&db+WUxRʼn("PxD6}&kxD?:{)T+֥MHk@!);hU }me|m|+Y+FׄwiCd FzrN^Tn@٫@ faSR]FC{ǥL ڹ[~Հ:H]4=v҅Dfܢ!,Q v(:Ae=5z"Ee“XŠօn(_ $8߿~>-)aSϺ^s}hlM iȤmnb?Zc{5Oߥ7dZ{B1B= ~;Lɗ?Y*bW@d_Z)oOfWd;q.SΏDט_\,;~/mm2Ӧ9\`] ݖ +N3TLPS1إmE3vZ˥|ؙ#@2N@=?@ۃZ{b:H]U|neo+3)eP+Je!~ṍ~&˗uDs}0XS_ tT\eIŸ) 'BoW`ڰu*,?yϼW<:< ?O٘ɜGhSEFYb+YP=]VMf#opyٌٖ>: 0QM@FMs ^ oYd%WP86<ϸӢ@C2:Q͓ƸmU +fF--ekS9T*UJRA.egtl󯶟vKvN'=~˩HY)EAgM8ͭkr<)[bw)J(Vy\:y/[vaFbs+gQQ2GB {҉iNG]g :.ǢQtȂި.'cO)uUTcA];g (ׂኀ~6cF>;K ?}PU\peeZ󟲧VZ}` Swo/S`T?3C@9 6Rp5]<%𼾠ruI8(ќ+zt%q+r4M2 83̜{$Uz2$HTh WeT^ _C݆tIl+p'#[ &?Ii9JV(! pNqr'-Ougѓm#[`S&c;YR^cwʧ&Wl.K 8D>+eGOݾ!A ̻ܱku1EҙE}s;x@لyqNjK"\UDn>^=ܧy"; a5kAMP#&v_јԤ|0v XbgyU*3̺rQ", ;.+=mQ]|:`͜exʬ|a&=uѡ6Ѯw(Cx5m`l̻QkF 'G$X?OwlmS9}qhK#z$[Ryy؉l>>վWw$y?UmL1푯K22S]#j4jSo^ȁqΧf sr򘐰Ko3̶oeTRψNț`lxGʾ3";,F',E@k@Ra+ ![_$l8(ݦYD*ȐZ^ b=&*_Q7bBKBރ,u8Э~ 0.M˯(QtYCv:v>|`/'ZK%ǐ8hǁ4Ѹ+wMj0,^qxY͌(; ڬ!ӳ3 K %[[@4AI{Z bAcy\j +uFu+1A2(Z]1:y.kwF6ڕ 3-( Chu]F=zpcGxv0C|HȮ)q$F"wԁJ yBif@%K#f8])W|i4FS^+KG˨>8>ʎX{;ؽ/`2=cj- є6tQF+aqCtr3 bŷ T;JAHI˧2T}FBkY{)vimR93&D4IoxjkIJ%GRimjtJ[2@), MyFD-?Ī'hHlg_|Q,öM n6G.5`Rk|IC6v!3J:$V~XcXj9Lcyr50a-{/blBԕl-JJrgD<qM%{4ϸzt>Yzsjb jX$x)嶔 ݠW8&A$dVP6-u0rpsלIZ {h,;6rfNx춦yoGJ]5(iJW%\1+[fv:ZVQwQ)@+(UoJWpzaU>dfQsRߣ#(3bnFwQR׍)Y?Bԟ5bM8Q+cl+>fBf&xFDux? G6T7f2BV)R0.49qm,*XSe1R@YL >]rڕF0 tGM&8}I]SjL2Sbz;-Ulṭ+lLU;Ă%VF[mq17Qnqa.^{OU +Vk+c/cfpD&gGǎVK2N3hnd"13rH)'L-a^ځC]땙r0lVT ]G's_+X&sS ,zOrmo[YMc:]$]NAp^U+~πa'm9|U ?<5@|Bgɻ|5r{JdPu:n:b` 3FMRS+y j ~ʅ ;j(]e r_A/,pNIr.;h`O͊@8QƧع0<}$!<ȭH\ xcsDxbŕ-_!  uVD _|dyI 8@v7$>k}Vf1] } |d 85I&??%j <C+ۨLwZK؉^06f8vk) B%=v A:6 ƹ+h:Z rhv*~&OuicF>~+HAWm\( QGcY !y9W9ĕ}W *e?˪_mj1? 
W}IaPw&0bezO^AZgf IX䙧,Xhz8߉V3˂03Us((}$Ui6utsT_;E2sdpLjP(rn3 n괕Z=29񋄚Q1k l$bB!v7oxq/X.%+X)o_LI J0-WCrUNqm;)CBǚILgz M~#DY?,;D3?6Jў1~w*iG0 |o$Dmdˆ%,Y?Iic ~ TőNńj WCzs\jJ$ Ctf%f~H|fAZRt;s8P3CzKd녾u)>I%w{%4/R;*}WQS&Tه̓" œ'K)㰙|\x Aρ1`{XINδ Fc(wxFqMfcA 'KdNUZ1ʝDQQŨ9FW$!Kɍ&Lh4߼kǽL݊r,OsvW_iiW">6ѝѥHFN ̈́PQD=IJPn鳡AC6 *ᘇ {!DahpDe\*}xBͬ9\bh[7iWNWF"YTGV&/ ${r72j@ ,eFP_{5rhC]f9C- 7ʆemh9t_%maEf n"R -̈́DJed`)Nzݨ.Rmpf#.QqJq|wK^`I&<њO,O0G c,&clč/Ho@ sLFC Y;8vt} tΒ,b2kҮ"KeDBv~PF]g+VE ɡϥ;xtSN[ JG5|or?Gp1-ChuLQ~: ̩BT}TEVGE?( D"OZc(wb:nd0''b'C2M+^6Lr8esb5N}\QP6inRZ9._&n'"#<#3a13Kg-喢lLD#!KH)'S7%ڂ,_gF@|u*A0ڼE Ȭo+iC*3j%m$zGg2- " 嚿z+Mhp[bQDj@CXmQq4^GA'Q,BA(ě*fz4~n!]H84lى1~d$RA%8 r >tf+@&a/f 0tܴ>e*tr5OtN?X+)ct0l3i>ݼjܿK{42CƊ҇oޑd9,t64U,' :/槏i|k̏&xXYa|0PHEej{FgDEo}h,Ju,W9+=ReK!-Dr%T**?vzb8m(bt R;" NN+-TF5MŰ,_ jVz677[:@Y"P=ǧ,C*Aٻ%4NM RV A!m.GG8^V3ۤ roM$-290j4r=JcZ I'.؄mx">ֻպ@#H]3T'3.}qYZĵȳL?RymCucߛMRYGMD$=;@ }cߥ(ƀ.a;j]ƓȾ|B!BWT=V> IX3o4ԸXfq M rJ$^Ou\\<^t~ay=# ?,gTfw::|+p|)WKHewBUFCɜ|/2 <]DO\@3ya|ɣAs\u(䬓6`|߉eaԫ }]$hyRF\-6H F{*y)F"&GzcnU~n>rY[(bw 8>ˀXfss#u_nC=.5w8Bs ȗaYc+ggNjBˮ =ɞC ;`$O/ o#?Bu-!Aʹ5QLo|dLqi-gANeމo*:EYeW(7!RB"ujU)rGi08P%*mtՂ0Cg2/`GTV7yIt @}SڃZU/`1 } +@doDT!nC ȣQc7}F2Y-irJ42 F̷lkq{-ǎ$.I;(%0sfRz3q AE ߻O-<RtxQ8nƩ1%^kR-F3ȼ_|ssM!>TC>|_Eo74б[ gM@:1h:L#"N9 k@6w&s jaR V8Ðg)wTO&IQ|+f9W%"V{pg|gс0A߬o5msI%j qURʂkZDs~OEb$%bSۧ!Z1a+ȶ"2(>^o-x3)xj@ЖaJF_szzm/:6%k~Jp0:vҕZ&鄮4}͝~~YcsA]RDOi5SSS&OQ&$0iHXr` g{I<߆%w{"-b!wyQ/`ߩ ԔM+byDL"u;*مw:=,9Q%O]M-G\[VAC(Xߘ3Hۡ5Pa>hԈ| dy0Ζ/d xINd3b&omd8j7nido ?jh6.鬕Pd;/\vb9Ȑ!|X^R@rxȫġyح1&oxId8n\j#壻bֱ{xfddµNMM7H;T,\wI/ώ獺T#.Mqć,&`-}aE8c1T;A`. 
#c $u8~(9(ڕo6+=xV'7@ U{ *pn `v08#ʵ&4-.Y[CvQG>m޶c%B1\ћu)^Y{4B?_s;l|>hGlb Za}wmCuBv밆_KF"I~PҲ;xUN1V%\\a lLs{~G!ߡlLSt4Ep6SP ֲv{GN j9~d1Ouf2,7EdM+7RL]_"+y*0d#H/9lEGODb_a(E[TOfni ފ%jba,Ǎ:7'{y ˊ?Yq1IμK󡼀$metP[ğr*?dWH!ڪ*7oFھ"lە<jEzgD7χ`TzC96},t6=qZJ Ҹ8æ"Tٮ)fTn'Қ&XRkmuq>z=>D~|9{Ӊ7}zb.*j3N2f ײCBOYV@%#>qd`IFOP&u Oa c*h!R;E+LIjS`HOǏ2w ([,%žDqK,݊\J 58Rԛ4p/u>l&?ss*HZ/>Ί0WѵHs&Ff'JLc7IA"LAzibbޞUv|.crl8$@wR: /fAQRb><~YQKhU:n׳|Rv@mj aNl;pr9M[Z '$处Fr֭mCd(BbF6$$6Ezhpk&#/!8_ ̑:fR`FNnx?2x5;cX+WKhOAZ"o)d^˷vDŽ%,B>YvrKr{nZIQӎW"iސTr(cOeRkPl"ԽL77xO AL/%Co3ȈMVF<(XZ|y3~ 04ik<Vn_H03N ֈfX(QQa'S?~R_|вa +)xBvdx,<4˪ P%WW?]\ ETt!Yau. =cY]j`)PN#MtHGvT0Br g @%kit2DQwDSp/T'p.8+uh Xk:CKJ.pik,6VaNDU +S vis#U5["}ՠ .%ToJ)!]ְxþt9TASa#tTfkVm@!%pgtl߈Y~jY"}}(rthbi&`F/Τ?|[W EI=(ܩUHiq8x`f~&N٧)VXеم/bmΫG憺 Ygrt^gU~߆$(wNϑ^~fT3pK8Qis5NJQ:B?d>%_γЙ6c-˵Ǝ}קq `AEb= ?Lu*(Zȥݲ^ij͗ȯPUWO, cT5zLrPj*cM>ݦ[W!k(q lt.:NH pIT--ϨeO2$r3XN.,Fa3Ϻ=*PA|l?IN^OM؉tpZ}jW&G܊LUպ[`S*\ƪ lUVpZ_򍺙7E,圄QfATUyg@+n 9,*0hfM4!Jl*xͳMwMՇc|%r*SM|F uZK!B|3qHX޶D-zC|tSeJ8>c\S;Uw^Xn>է nb#ZcL#0?em +$o@w9kX= Hֱ"?l ӕS{j_5'xwC9&u WP dI}Zl=a&`Xg543dtǹOcl2h;/D,X=̜5FqRSat5-T)b* L} _q` gQz\ :tWIà݄jL#63Cv.\pG՗oۣFeCE]"4w~A5 'Dip/&YB5G股.׽fh xX6l*!A? ]f5An1@JVuS]&+ë@+1  s$!sP̐~/y0 e$w5'Y;*_жz /,A]SwE)k3( hC|%Hбg%ҸV8t0t:kx޳u]XRzhyGH7F&sU[Y<Cۃ_dq͡sGn32g08*=P ʾ:1uƮ wS8yVT S;hp/O.FH_+:14i2 |1&K -fpPyF2AxaՐW8Yͺ)1зˊ<7jpϯdNH<y(z,1U 4s1Ǚ4VSĈxs:UB7e204F}lԄ`jo" T ȗ,0M~UKP{It cč8j#w8V_˰|inao%< @h^BW?]tgR^r 6PfI(N:Mp滭7q+{K-pYb_0i0tSt&ƦBjC0c/W*`SX}Q$7F1`o ejQɛ3ߠV^W6Sx)%_'MLrFAl-;Y.cw9c(Cp94ij`w7=*:`nc<07ADoq5ce"kҴn O6@0͛"DSe9LJC2w̹vM2z$ hSpJwm@[ܴn%^ YzNUʎaQI} b..ܰ|l|fVd: O[e@ADrVCξ8$fdD'lgoqWBp=jhRe8_glHa@/ WwHzìۍIό ~,c/nv0_޾E|;t/c:ǚ uu:" zQlJ}Ld*LH+{E3CG|v8SlȺbV J3{B/-x ҈7\F*\LU9q KKI B#oŋo1'g > 2׆wis,f )oK~eQ*,`\fZ*{Deh,\멵gi"ҐL5o;@@#0-2[82* xV{tɚlؘ`O",Kn㢰] H Ez`uErq/ r:F4daYP4WCM7m* =D\Wr. 
yM f,/I(Ǩ$©s #kLrmW͡ǠC+YLY esVFb=mrc)`9&smOh˔x]pJ%F0TjwcZy`R>Wl ͫ]JDJ2a%]>78PMn+ :Gq.^\a->YI\rYV p8K2Mz93z{yB![X\#@=%ۦٓa\tYY a  NVtc =m%煛ʶba(ߧRz>3šP*]'awXyDĭ`ۦFY mCW8~SqX6x.^/rm^>qQBb n 1rKPn;l90-.4 IWnyEZ>>Ͳa_9(Pa#^BHN #X}5j{}߹N!Im@)6QfTئ&r&CK02ktex& צ١#5qe؂]^!R~5pXOy @qQy)z`o4-!;&1(sE?Kjݢu'0Q htaܰy!w"}Ï|+6j\z[ßWSG3} :n8#Eb>Gut6yɂGW-Hg*~5 њ .a<=К/<)7m H1똛}>&rQUpAoO~m w՝j\6="Ʋu]2.E$.oR\Ib nd9dv8<5^K[)mM[pvzf >ONJU#)~%3n nRYm9 ˚[0"5C]h+0&*Q;pǞ;1{~Jvnq a2Co".4,-E:=ߕyc =J3yK s#|1 r2z7}Yf>$HJX*g2P6bS~J$s`_z fUp$JBGW'ic*oCNciȑr)qE;/!㲭}+3^̀a?g`#Znpy)OjmhD/u0|D0T 07o1Ui 9 /0~npM!YlS#u3v˶߳a2ݽ o|Iu1ede_HT^Eঊ4kMm@\Ǘc^ߺ ӟvB4vw;/BvH`8-So]h(.DkUr}'8 :0}s*uM (Nt%ᒜ=[ewVXSrsqX1Fg2UBAW+Nv!@쉐 VP@\Yl<,8EP8C%}8'Wخd <P#qT ,y 41"f<&mIIF)d_T-$UhAVpWsN8%bE=p|I z Q2 m'9ujT\lcM`-[܅ogՏs:{48 ` nBt6'kUZw魻_Y EiTL)NӾl춦 HVg>TWTыEdEA(6T2;.(Mw w[À!e.)Ɖ`fp'0T|2HݰA-~h4P7=\~:N'Tj, qqPqME7{۽Vֻl_0Q+{ĴDXǤ:O2= ~QUW;r#k!SJ$ RSA7J4*ERU,{;@9Uz!5)&e4|o`wIz F{n^ 釵yjəD21AK/5u 4&by@ 6m2|XHm5xZ l/Q8X\BX3xK&s0 B&f]Z yn7‘]|>よ2K[}ku=wY69_'AU>%}CvO+N t0,7f$Q INg>Eu)[K )P,x%V=SLM3> "o{E&cil9ч]Z0/Y4oHPvjkUlS2mz"sћǖJ(ήd/}5?I$ʨ3 }85/Ǝlw,& ?>W+\^e3Z ^7; cXDMr1W JxnY)T/4te01xt#iYX7àra0Ӿ⾬4bӵ"ؙX`. .dv|k g2XvÀy-u0g[?`lhPyS؜-ik~_vo=}ĆHVG`]Er1K m*: r uG5!p3]ik||mIsc@҄ ׽hDsi4"9|CH *o@-4߫t|rjdzċRSVT5e$-$!!k \U$9`x <{ ^dkK1#`l,<Wqx? & B*}{T__Sp諊 ! *bJB|w`šү,YlRƺC$aSbhRLFPpPObiԚ;7^8r9tPNz?Q:<苎YyH&db.̦Nɋuĺ+C޵$X~Kh*\p8Rhbkf{TʑC#>jbQTu#sp: ApUwLOe$nc$04#2j9ӝU]z|P{$ziSM>EX{1~6|+ HmX :ۮ]J saX ynfER2/hLfSqXD}<ވspp)_o4j;lAFC~HD*zKwn'妇 6tׯkXA2_<`$FU mTHђ:ϋnU=Ӕ Se5oI{*BvC1jTV98O2r@">8\jڨ"q"Mp$YQ$42IFPٖuM"YvAre,hd8 qTTħ3 o%`~V)Q83!GbYhp͹ä].H1gsw♌C}g~)+I]xo,]]2^ s;aR[]W8܎k. M8j(vY0q?tFP7rB2QDM ]:&_{oUеq&i2Xakj)G^Ď=@aP@F-"1 =~%e'J_ѽfb[@n/̟.,,9MrN^8x;zst{;o<uZN2Gl`(D`wu(=p 7 ϻ}vAs9*mO{5Vl鑻w'dሽGT,zP4}YR] 52 _ AH꺚_c-SA“BĤWY @W^SL^ֵ Kނ}j. 
4YpȦhs"SB**d8t+ f9kC򱖺QY p0eQ!z4bfh ݿlW$-$hYu(D+ oINg"*1k x_F+Q"zus6.Z֭) 4Q@' -xrtSL_7!XLܲ0hB[C**<$<3?93 ]-VfA=s| ȏ=YřL}[U *[|̆n 9y|,#{3>Ɖ}v^/V=7d8|B^//mmsMt N#~ 1-GmLYG@% *Q_K Q4Tb) g"& qL=3}0G٪]Cg D~S\9u k}]'9TCaPU?TKxKVk'gQYȡ3. 6vץPgqٛF4 DH?;BfNy!3'*xa㓩]]9eఠ/1-SCiáJSw0?\a;~x$r W)(ۮ8pyU/:# G6ޘuÄ;G9)z'V^ 7~~kYav-GH-]J6F1j=7;dEu.šW;+8 |˰]S#{>Co6¯lel0Ȗmd|e&=)"旬eQ,hjM`01gsU0?o P ZAl`4Ks$L#g-~0L};@ܗ`8a(`+nsɵj'y;_7>Aq}]<Ahy2С3J4OElEʲ33ިP)qeyvPAcRJm Z7?K%5T#\. "n!DxZ3;?::|Z<۪qv|DaȆl0#HL=8beD%&clf!s:'@=a % e%uZ5atM {l6ްh.pTD^ʱiB~^*ukA \]lAQɡUfzM~OG{<؅hۇl>*|u”T `3= aW Y^WAb1€@ď\ų/ȋ@5ۅq܈1{ڄ[ o)^3-C `xN S|4JV&bڷ7v2b>xp'LVy7AΦ[I>>X!jh{ɻ%{87,q iYVR򄷘]Fsq|IޓOAsZh+ c\J]:#;ojFsL=bGTx0!p~ʵF֐Yl_i ۜǍ<aәI6$hka2Q"˄bŪgo?:rMȯ13݄"[\gdjC!NA/e+ 8t%9' 7qI/: bJ- +guz2E"5F P(OQ9K%`\3%_Y \ GӘ3;IB_׻XLr5fKpHbTW@j=͡өy(xAgG]P]J"=):Q';502BXZ5)u3vXQ91L#qg6 n3axw qޮ>AJ\E zDZC96fZG "LCzܯ~O1+Ydf1]<Mk!S䈲Y8Cpy5hC KD_2t箙5֪%L y53YQ]+>#薁Q᜛_:hAG4ӏX‹`ȵbuw0]pX=ddϖy5M-]fi=t* ^~?049G, '^ɔKS? 3.U{BÛ7s`:*͒m 7U e#X.F#}d|B(9:SL)4V,S[\zN٩&mc!;\6z AerWh:c燲F_s\oMԶo/rÒ^yp^7Q͇GTkqWKotX* \cpo| 8gs"ˤ)!8''_~V7Gzx2's"ik{'Q5Kn@M=yےi}EƬvԛ⿶)0 ULԘB S-[/8̯d-US_ ՐD_-!SYs4eYFc*ֺ{.<iޅ7K{R˝jeĺSaZZD0-0cqHaPN|"/ a▂9=xӘ [H=^e!P*fuy'9`!= FL{ƩhӗX[{nn#[KZ-%B۵Axm_`ޞqLnȠ8щITbi:bsA wPң{3m5dd|5R3f[veFߔ0e[yk} ' 2&afy΃ յ7e2i'A;a P|/ؖ|*%!;++?_Rq9\*ȶ|iϞÏe'^>hX_ݩwd"x}9@~5nO=jq|dcW|eh;Mr\tqDg/{ $.cu%d_c&&=,:5s{>&Uoy8_Ux9FUA,Ӆ66HɏבM*6kD'qȟ٭ȻS$O]:P!І_"L:Ŵ)<`:lgUJ/۳Xi\I>"\ěI킇+A$P.*t_6zWטD$܎o(򙘴wnqnbXϢ&&RbJ~mQ+-iݧrF%L[rVD7DR MɮWvݦ*LJmٗ,@q>!VZp"X#'0=JM/b1_ p \ "0Lm>RΣML{;=v|?U`yT U2Bh ,Nmy"FbD z!kh0*xb0ģ%_b=dq<6AைżW: -)~Vk5wCȩe=b|\KߚVbtP9Noφt=ɞU_0/I*/zTEf#} >ZPe8 9NpǼ]Vr A#ḄWQ"Frublpފ+sG(# .T4F\g`|5BZ_!1ݏ cjANq,?%O( ٌoY|u#@ hGm 1̪L g`uE7 3f-'q6lЉ-*eNڂ@֤'`/65,KR1u6!OuxbJ.ͩozp&//nL,^qo0UyռEkst>z7PJr~@/ FVZ A.æ CMJX5둅Vh?ShD9~ؗ? <2en5cүیϷ.52x.gS3?IGp2nΆFtu"lX"] }? 
xsh{vޕJ<Uooc >:HpJimZfB(}XѨoQ.i <scNs,XM;۹Gu:k+6Tynt}r>2 ,1RQHz %ktv6(dZdSЀge qڛE8W'~+BT S_#8eĹ#UL.NuhH y=[_11 p)mP7Ձ )L7}QkXi !5 ث̙̇7E`JkV#Qtl0ܩ!&_sW*Ub}`ɋ+^CA*)/ԧmyDxl.ͺ(XHNM|1Ӈquzp'{h"qp4l#i쐛U6V5cBtn0pD-}cz D?&Dtk$AU`a' y=~J.Q3wA;P;5ޥju/t Z/0پ=ۊ5 9cð@bb1C ^AUR[xh?@~ H#9_b1׌(:; jmKQS1QKT ?JӲFC.'TuNMy1U_"?!4[[8uy5L68LQFKYZ+#o}HWXྫྷGF'Y)f.40 y3|vJ;ܯ/ WM rrQhl-Jj]`?L>̅}ОUZjlVQ///l1?Zjkh)̮+}fľL̄v>vk3>xJTr " fuzlUG!D"ms;'[[iZݢ,e.uJ{cr4O}E56?7nqJy+P]&0 גro 8Q[KRSԸ["k1N/G'%CWzչh7}o-9{Ҷ&p7$ϞO]V6=-Y}Q#,!9zuOynWŻcE2gk?_c)6̔fY* Vrfa<`\e1# =Fd8gjt()>'FY&L/"#AהT/PRj'(PE/oZ;U"bEMywEHqvݥ>%*ax95YWh6.0'O)=r5PbB߰2_i[ 8=SIbE2LjL;="A?QfktQer9uu;?2N(_ÚPSJ b]%2IKoCi"\:cHIͮVJ)3~!" wdd#VJr,,!#oZbh&RdIh .G鮐fn)%=?%y:Mj|ɗWd 3dw60iwǏs!؀"o7PIѺ<&f}$/7&z],g(º7eNRӀ FJ3Ib܃zYQ)uɯB WdnT߂'ƽ)<8b⧇#p8-5ğ0@>#5-•FXuOid#URtQбZG?!(}&T)G6`x* BX껎P$s[)GUu2]>,y`G :WUEU1f%ΏQFX/MccŘ~n'IS2űfZO=dFy'j1[!Kaxur  p^{r1mmbֱGb#oW#:o7(6|nrJZhL}8Af z `*8ŨЛz@z&.K/7::UXxj*19HLr{zoa<3i[z}QS=sug -J[0G:p/: |nJi=*%\$:d&\@y>qP]Ơk[3^%vk0%*AqFGߑ KֳI0}xCi-^y^MZ|m kӹepǥWLccdAY0.Tf~`."K ;l˛\vE2ջ}xh; ad9-w=pjC9yI/+_e ss7.k<>ąM|_2FAD[_׋\MoH~ID|{Ѳ!b֋''LnD tԎ<"V,zUy*H2I8lSϳC9+ZQ6&2 :j\W +{ny RIL7eRa<%@!W]P'!|}jR(vM:4dY$O-M:S>=\8v3d;;#1L~ )Ov%rE\ pyiJV*3Dy4<ZR+~NM}oo\xnj%t $yVo8JJ-0~Nn |ne:7͌h҂<=`n֓SShk*>sQf$1;gxL9u99D\6wYpOUsa׫q^N("B@>-'%ө3#v u$)FXvJ)2eQ  : Umo2wb+,\E,\Mrj[e @\=~*O|NR.ׯf p46sps9?z3ɧj[i1$. 7A,<0ngYDg_,(7wVZ$|(,oe[3lQz aFmNIeѪjE50rΥŎ {ExIF {"'JoKi@[W^PN1z.̒zgA]|8{M}]"| ֺwa>E"eG0~]F ]::,y0ڄ<\I x4UW,Lh8;MϘ" \){dHB jC@m5ɕxçc ?mi*j(P 32 QX9iڠ%>y D #d&iQg)y#y.3ҝW].:[0:@f-Հ'2?P"ޣ ;'@k+N,"*rǎk:j]*&EwldhQٽ#iU9Ln2&&F !>u={kLMN. 
odԸk#ӏ2R-GHr{jW }AFQ0+F' x}>Ф 0PJf;/$iԸ{f9{Sk(d*l&H>& >JLA쳰8baFAk'SxπDEC±ӑ9R]נP "bhqHUM!.sXݞ^1;0_S{,]ut}g$IV].vϋ;${tL߿HʛQ`Jt츚d#AcN›t/pZ /^Uo,3G&J{/$q'KcFDοr^+y~otTgUߏb!tůاNz* _9+_{P/NPaxGiMa~\Amo6n*Q|\,B``z}|x2+(\a*`0DJA}hueĿKGxγa4 0=YnyZL'$ ^;00SwnM(g!z㾓XndO A=tBrWRKyc?xD߬8 C G-Ф9hf4W4IR֓ѹ$;-(0̣U$#TZCjDH9_/ޗ[ztM-LV }X\I<S:AYJXӉeK"C!=Q Zol*yhlLI"̆"!jG\ް{Z~Ub#}K//'H VҸfCzPo.]oYm}zdCdMcI2"Co H'!A=(950 `WZeYSRkϭԋRyl#sM=¿(>)J;##I5`F ޑ^\ r]|P[XzuZ:'ȏNvӘʝ+WcW0<&^i*4e0Aİє #c;hoY' Cz-PdK]ɺO%<@ddWF4G֣'QJҦp%?9*kPǚu llyqu+nEnNniP" kEufU U=c'>Vfaka!ý+$<21&NJ%| '.uBf;מdc\Fs#w".Շk}nűǛƩ!nYO_=$Ɩc\Gz+7F)>L!$VsYI JS('5vf hiۖ%-@)Ïjs,IO t>lQ DS'譲؄|]Ik=j I4TbH!d+{7>~HYU/4yGNړM8wOYv6){@2n 2ANѭ¥r5?֊ř7. @Jd7E]xQy H7b%H"Xz愗2ԧ]EK4V)1arHeYmQbz^#jF=3;pNVdޥ8G{@]q!r֏<दa̢hA˅Bc{;@DTW{]DTbq.Ђ:t=ΚOGq!^[7MlYi4Y& e d&Dxl˓0' \⤴]-9>Y*qSƇ`PB&DbaňuC :q~RQOn^^ONB4}1]E28 3 ,ZuC-Oh) '9unR`2|He?L} in$ۼ!t{p`xUa;G$_Ց&@7q]\\1= 硝F^|ى9zPmoO[/ 3M砶>#8F7|]*w`6k?>b 3IB ?TD-)'-[^懧B/M1y,EyW G[b@ZuW%SЮl-p"6ߌĖY,2K1K=.]}P~oNGm-_2-/j()/JH3bcmg80OHs$CܾI?ESY3@68v]eN-٬\ Hu*nxDld4a}5R0Ԉ3 !(w|9$2A*[8W_pv/ ![W[2(,SW!L&[YӔ8 }ͶQRKsNеҎ. ]PS%omělaR|nA[(oHhV0 7o1 4[`+(p` vU`D}X T  & I%)C&1ׄz~嶶Pnōs@P" |+#>QUYs(' %(UD.LGlQjVǡWJ!՚ ~աolM_?K%{iAeٵidH>9ȹfݽ]Ub$A@`O~J7cs|_m4E>G2HzSJ̹]+onc2O:Qul&RZ*^!=q/qOQwM5As(TҀG?/U^;o"o`rHhÌl{f=sH;='~BL@$(i] ͽ?L.ɦf5@:̴;A]IK4.r-b-|DеJݳIJbcwpt= dŽD`mO DgQ80RmPR3v]|{BrF\;9ט zXJ dCvaICV ,يZց9PfF%8+=*olaw߆i".7Hmp4x\cjSz/jLIE^˴$*RaFj}QG",1QRG0<߽.=zLvW`-ݷeÈ 1p*o9P@VSv'1]R\/o±gq=e u Dښڎ-Ī$h<B;(W|ַDbpgzod_ 33]#naZ 4x ms#J9an$wwRS17R5ml  %-[k\89W7~ >S8VPU'u룤b+_p#Z^iZW©g_u ϿF+î3Ig%. 
*BfdP88aJxm@pf-Lw5~}neKև me*hQC9%-p9F߱Q&/3Vf\f-O9?;XD΢M|/1zE3Ӆr|)+= ky6G{gip$K>> /۾,@_\t4y[͒Ծ( f {8ٔ=a`v e2]W{S}|eZA(ʏSH.Wt6eRpt+6oK}2S^j%XU٣$!hի}|m&R|  uC5z{<3ѳVGFk24eFf!xv.!v$dY9E<[O)s`/^d> N]+/ZƒJ5dv9ߟ3)fp˩8_ISUFg) vTk5 #_M<4kVٲS:E|\fQi_.pn5ޅv$]_v=WA|ƳmU36A!Ik& 4$z7<ןXA &eCWd]D9upr8YS#KC+ MHP0U.W^TLL\T-Ct-" MQus.v2gwPNμ+*@hvI3`y:w P/kЊk"q[ vLb!KFV-?Jǟ9u6Kl)ĩ (b#R tX{%iȉJΐbDdy g*h6˰/5mr.9ML6Y5npքMf r`!-Y`nd%mMȥ0Pc(XY{t0]n(vAv |lt-t$}dBrlS $qϗ%6k6J;ՙ$4Şt֢whQ8Cg>00X_丘'лhဲgIP;41O7i^VeϽn5DM榩ɤx ȂJMj{9x B6|rnۣ&Q>(ڭ/sT)/XXǎPlN(ىu oxlVvR4J!+T?,E@ZF DW6&+ 9@,'ҾoN9>FſY>xz~'BdCVEJ&~@YIu%ҊrrʈѐJ\&S.\W9|t _5Ʈ"_s-0v^h=ƲɿB^V<]IKơ2TtkPW~hlҼuc * ~Y)j߲ZQ{Ebmv7Rڢ u:gˆ OU6SP^ @<1s/NoPPq +qAyzJx8PM|HBm>M<y`c==yqI9C3ipZE:_BJF\ 3Zߕ`{;dDiՖѧ~p`Wdxy"+C{uԪZpkhmcNzPO9WaA}ȺoDKiݍeăU7Ȣ]I~92/$7jJGTI iC qLc03 롡<#2"!U^Q{wTӆ"RԣwASԠzF'yǸu# $]o/8ۊMVlUbMDvCsŖ^ iŃ*鶯#Up%`Ffv:NZR1ev0KH 皡Yh1BxQ[a/w@3t#N,rfiO}D1u:)P\MIT bK/*d)@Dے]%t+f# X++0'& z&dMXx7Kz~(WK+L403.dp_5TK -[VU$]C GN,lMbHMNzTl<=:}B{;7"GpN%k 큏|8u'hE`JrsDlAlz79| )}Y ͐mp>J$h+cN_Y7Px@&lĊEw06HC9V'u&[gp2wFiϊy˃C[R,۷0tFV{Bs}"(Xc PKaKr jq!1֊w>7v 7\wK2͙{{Rg̾5nw¯ %\firG97H#eaCd׷/'uê}Jݗ.XEW C(˂^PNZH!=>&]& lP!UxꛬyҐ(Jk=+YqEjƽDl;"44s럅VO2=ϗ00q]Ӣk_!LXqշǸPNH UfMVc 8R G !d&^yk)dB/4&ZMG +!,24´"CTZT!l` `!A-瞘 XZbx/p^i [Yߝڌ ZvkW׳2ԤQlv {>GIm)c$Qe$$),}&q_[;mdk%[ s?pXkhohb1sK8JhS!%c|nΌHx,γh1p 11sb&N?GDz 0f{{ ? Qyg}Vp;ĩy]K^~DN_D$x"%mPN0ؘ'|B69x 1jt\ҾF#xL-K0V[h >!L,245 38 V>Cj"_\m7+cL_Z휷t&Tr%LT@*֞ͺZĠwC[_#q޼4! a]0v֌! 
I.\Z49Y {WV02 ؎ 8Enh0>Uw29<) \d^2Nx_bv$c'~aЋ0Kӿ)["4#בl:x6µݍfޫ}K/Qo-a!jY|aV6A}%w/,;1{ `>TjS 1IB"+*j;T{D.~4l!]ҕ4eg]~& nIFjl u8($}‘7=: ʹ\ {gZ֨b3#͎CK!ZX /5\ Gqz` n^;%hZFYVfNu b^xTxz.0alŽpgQT=~O0/DA\w} ,ipd!1څa]bb=3smtP$qe/D1u>ȳf8XSm.4 QiOӄB%ShRQsT 8?[ROw]!270fN> : vU8.apѡ9l姬:>ER3t.씣E>!0b5_ k~MV҄IFp}r\H D):f`NG6p9X~z`{eI5|f6CzU{ʶ@m]tğpß`x )p-QfqЍR">=+0V^PU~j+sˡLO& t,ޥu|s A=h$FuR;J{ң 3a (T:m Wiσ whZP9&$Ӓ]b J 2vl{0uH]'gʼu,ZH7pJ4B P"]MA?Vm~ =ʸ G[:-+-;v"ߤ=`sIDmt|eKc9B[_~  ,ΰkkqhU f6AnFqN3=DR>'` mu<oZ7Fّ?`Pk^\lo_+CKQ_ !ӔDW ]>bNt./ڻpb |DUQA-B6c|nq %c3{%sՊSu.hK%` NVcEZ *iZh#=SBx:LE/F>柝z[ǝZ|z\AP 'uX1-@HC sAe/hF80$m"IVq $B%e2۾+LVքO[}vFH]prx#[jA iE'Gf؎ր e۹}NJ̯9;B[[UkL1)h D$uK*D/;ڨ)êCC< c|foIՇad/̗n@p6 jHD`7вӝn7St|Vk 䢟{bZ3 >6jIuV\RĀv:aJ lRSI!j2v$sSQ_Ǖ0E8٧"J9@vwd=: Iv#hiꝊd"Mo&'`WFn{ahćr]E?I$ :EA_rK-"%`J:Ml~2'Q$OeVZi$7d.+V_x|34w`LLA!5a_,ctyux­`F)ƵwTERJ1>Fgpuǚs$ 9[Dn[1ī=UA 'l?T $VwNW(\ScVLA+5hdaMf\sH?_ {G`rjx'Tj̿_\,Z8xi^fa1حX\TQ`9>B0FN@/{X {=hvJ?|*Vowꥳ] Ve5 q+NvsS9;Ocs@Uk"VT, l3`-w.g;FR]tb˲}󩝪TT [ v͇L,II,QO`-P{y۪pxcK\u-,ux[ 1?nC`n s`-^\;w\?R陭"eQN^u-Y-PBveGTWX72ձ:l!_чӚͅۻD@3*vb-E88  r]L8*~?ؗŽ3ƞ/E:NZyyhSxxZn_^y E1*Vx s?a*UՄ\ `0']W^ fDU9Eis$Ǥ=dK,1L7q#R*Թ\rXSFZV 0O|=՟ $'Zcu3WI&gv1c!Hٺkы`9FlD\Oafϥ1ߐQӺ,I6ږLARiTxt" XmČB~7H]!6 BWjRA)wXaPe4Q11mM^I!TR12u*?, "om!*g J9bwDΜ3:ud~:Rڜs=g%EbCzO7wvX@`FپvPВ,KYl0]2ݓ'I?.)zJ̕!ZXWWұFC81qQAvv)Ezhg<ֳ7=ӗ 5HϽh%_ʺBU6+(\M#bK%'lٻ ?'!hU?jRߎ)DUhJc|*@)O0>VOB`ѝǸnFǽ4g=^_/US-R8(iO掽~Ps|T{V{$:興Riq~D#[' ,B'E=9c݅}8qL6C!tv)sgEzkHH%Isdط{?d,ZP?#ans  F^f E;U}閽&NY#Ace,hFh^KymH<= u*[HQ\T̥C!tzs1;D 5ʩZqj U^Y #6j!S2SoS}RsBbrUx.~!cZ%+;[@o(Ut$?1cg^||ڒJ=E.fS[LP@pNnO9U6Pn>fp9%VƱH(,JD1HdHߜ EjsŴm0Ksǥ%E~M1w{Z}yVp)-2ahĄV_tGz_Ǣ~ -o6.B48C%\C#JE&0#wd#nY } ;wʷ 8yP{G`RALE.r.Hp;l@&WƑitN)^q2JW./V֪}m;Q֙1L_XrFĊȿOaChP[(,/ʜ%;sFh SWQF~gCȴ$W}\ w "{%&P^/@4e 4ׯ&&RjuVrLy=KG3쌧P* ltG{ 3=o HV]C; -)dr%teʜk\ot%GY+#k`嫨bTQgg/ZCj4iǣtp^O [||~P=BGޖ-b9ԖRjPɅvno"Jmgko41fZJR<իӛ}lA;}{$KS +W9["<1._ _oq$X\@žw"A:|'u [%ʵ2'|@#͏mun,ֿ ;AD_8+S6͑LK_EfC$R'~.0}.۠@yĹִWÁoVVXY^ZdIyϐ]Mf΄i+#U]~K[Jy;w/*h_W.c\Z8_FԑƱ:̳q*چN Ԯ 5;iM79S!fz~ 
lG0JwoW'qWD@af`Ty+sUh [>SВ5f,*,&;ɑMQ]-?$tp 2t&=tlrҤԔ8QP L&XcUFeKb$1Gc4&ёΎimJw.B*9? , ,KrV 6Cx zC9l+Z6s3UnO*R M,`CU#,6s3Jq ,?n5. sd=-3R{|ԛ|91"_P ӅGKӚ~.jnHo_PCķBJāQJhF '5ۻk߈kC}%VfⲠՑR>j*g^t2i%_v`xܛGrA+̓vc/DZR?*kx%ڝUHc~8ܤɾ wƪ}pq+lan1]$y^`4Q'ӯf9 ~[Bi3kHۋ/76&5yR-9e'<IL=H㦎U<N 6c<@;B’]oʳṪ;Yz 53}0Ԣ۠˥|L4W@Cr<+[%A:Bvr4aDC`La1~)HXl D*&}%si&R$%y5&)YxN)ey,>~GK]ORy6 :#N1c^$VP M*/{2D_\Ps3TK۩ 4EG홓(>L{ThUXFFdPP)hRk2% oiZuOijv/>G%oj"#%L=[ő[6>/֚>Q00po J4`7[{ ^ݏ3w a/:`l5v1'W}ΞJnNo)Kex0rvϞ ke(M5h12Ys-I}4t36a$+r`vأ =$hZwjCZL,KS.ˠa\37WC(1~c|ouZՖfaagPۘM.D;kK3e}7?L Q`8u} ȟmtksdDNx5˲`(~тc="A05~kt3Zi~I9hG:sV|)^z"Y;+wn^SV> P"JW;FHfk/ p{!]LH6*@cZVelyts:]܀ W(͓.6$o+\#pxڅ˙ /ݼhRc@n"KJQoQΔ5/I1(u^wm:hLJ%&q^0wj yr@V#W NPMnL? L{<8"+ 㦅SEPPz4^2#98gZi ?r !A|>cT}K,lc-WЀ"ᅛ/yS A~UjcKT5:YȄ0Exfwt #/!~( `MFG!Dy٤ Rw>ޠ/UTKot򟅋!>efB-)&,^CMw-twF$մ#͍Zsc3(UȳyBUp2ns$kٝc SA0%1 ׾F;nRڲؚ 2"<9XĒA^rNBJh >zn47) T?;a:r'&qK>r!:dLaQy1qxQ;GqY-e蝕B26ƙ<ǻPQg?q60eHg|B7AW.՞zPE﬿^OXV+\~aՒR$x7<^`U|o>TKq2mףZ<̀Z s6.[en_) 'GɪM%ν P^sX9oDq/ޤf3x>v95 Іpjxԝ,=^o]ȝWbO: gkhj#~.Eܡ}4T};Z>2&WU567@tﰘ&R6i+h]5S:"r,|T*w B\;u'm+<JQ#dڄ%Պ&~UB`,\Ae/;ru]3ڋ?1b9\->o0$~c};htT-?# 9*sW(R k1(z${>}Lv;ܾWg址W8 ?qdࡱ;\m UQ`m *܌12|Iq4=*죭 |z21ܭW<\On#BU >!~׳`HٰĎ ҊAjGyPIF!Q.oۘ^f4m#V`V$sVD 1N q|^hrb~ַ9gwsc$͡Cu,J!]zX}ˆ,nE݁&ы_%B@1VhG6XLBN~3gy^>J16<۰!Հ)K0EgnɎzU(4zpv7% o͔DABϡO % +K, Ad׆LnPgSg(9xGOۧIJ 6&T+8a~0Ci}3`t,/|n:W!̸zB Ԯ i!4kz>C\_:ٚH [ :b0z5˥dcf|LO\k)q:rj"~ϫiP7AA>!)(C$ǽ{xų!"Z;d>4XXwPo|$|L#3 HD-^cj&M;L0ϔ,)ϘciʄІs ;hA"oql!O]ěf'z=a=qEʈyzii0fM.&Ľ= _ԎnQ^Qu;aJ/-jO=tgz`ВkGTb?tN4e.DX;xl%|Kq"|m?pN^6.40:{ DB>bTyS_xqDX]34JTIZa\܏$/8P..-  E L ̩}\yq5^.8)S͉Nqʝ̐sl\PY3YjCuwT]?Q]U.٬lBk?U}ŭ5jZ0IwX`in*hsdVEp^7SuAC`lcm.'.(Nj%S*XGAX4=/.Dܛ\ ]ZPֿמciTxs r:ü҆!ɀ3&Ĺ&fFp?-mDkZ3nZxs}h̬`XWivO1) 㭦/"8'4#`ؑ7RXWtzDgnO^< O͈CJ H#}DhL_N{ȚÏAҞW0%clQ?i%) |!uin6"#Rh/ߐx"L}P?jp1MX*@S39A5jM Tҗ0%e={4=JKg^i3N0r4jr#%{sd2>HUE/b);:*7\`ؿ`׮  y8Ĺg/tGg]ZoIuH2z DЮZ .W}гK_qv^I挀< ]BӲTu,Z yG&"PMT9B.~Qthm(m*xI9Fc$`j0F݈Ȟldl?o*{,69="oznYjnvI?A@D!`MAq`oMYq&(؎(SȽ7˯kX[J/ +w,Pc*1- oxN[Y 2jCoV/dgQ( WU& >{[M8=ޚ5 "0B!,؅]|fC$lѕVdlTמcO35a(Wg 
5n $}l } S.Q6N{7?״N:~$nQ ٔMO_ڸdӗkTF؛w)xYys~{AtEb08p%(Nr^Jo[PO2AFtq#%]33@eؼ  ";|_Xo8e䙀dvH]搇G{X6BR1{rQ׻ta(SKJ{R]_G:$\Ȼ)+NBT|gLN:a_\ mfNG9a(LbP 8D;I=Vʫ '-k կL F5qY OW/SþSVt]bdeѻ9^.ִx"s,'wn])BaӁBHu^)ܱL~TCms{v1R,vMW"`uZV|1$k8H1*{\>wBFPn~'[t̜հ/K%;Cb1/K ~҂;{{Hs3I nNipC؉(l[H_ӹ(2ݛ٪p*`0k:B` eK=Bm c;my3"ũN.}J&Y{_GT?u: Q ]9-Rɭr= lM8 ei&<|\"Bj̤24=(”>~,3DIoݷVL~*jLseP`@ (֗5eHN[sԫ7 j;ђ%4T9oWtgbPՋ'k~ФCypg emHJC/[R~\dzXcp(N 7sc V^|z\/L5|#EE(^i}0M|!XͣKjM0\=%~tج<yQ\%0r,vL %i r͚Җδ&݀6`~@2J$`#JP/ٺ7HM 5IRu;(dٚ;Zę@8A5 Sgkd47J)}mTCJTZfYu G:S:ͳZdpg{[XO X_SGJ|]9OȒ @)q?)[S_۪k)TKܰ6,mlkՓxaeDq%cB*!s_#&Χ4]*K!3*gچ18QѸdc`bͫ{?<%t{u&40'5X*v)(-0 XP `WܡDfG__5]L^\D`e/hzf y#7eL)YyݗIcsˍ䞈$J4StIr7` ?嚏ͪ 2Oϗ^NŮt}/l.Z]pOWGc|٠3xMI&+ }~" T {pNOh"Ӂ<57fgcfc/3rf4 Wik1F~۴Ht<},zF= C7Q8듗ރ/!\EF@$=Xjttr'w2v# ]0@׃T2tϥDdEwW\bz|+Rå36kd|F@6pn҅(-,}Ӫh68kx1p|DU _u~#$dx?gAړ_pEn{P&t]*`fKϜ"Pq"iB_^[];^G#ZxenWH3ީkzks8y}"ʈIɹ-ńtG?@^rQO3aqK%w&lMzZjI{|iLR~)+ms`vt#AW#Tc HB m 3%KX u N"A?L%9fɂZ1&Ȋ]Zqۣ^lZd=۬^&uai+ka: ''њDR(,Gy5f`|xF^6MvE,9I`7wj BFM~r{2wQ(Ϥ"ݞ%̊Aw̺1wdQIx^1۾i")˘Kߘdk~EpO(HH0ⶡΧ쐆UKW n4r ]2uB|I~QR K Xtŧ{f84OM( xtzթ!}|z>DqKy03a}_U(\ p>w!eѵ79\ؽm?{.̰'I@\V1Eߡ0nXifRyiHh}ӛ'^3O\m`6I68M"˅43V}Ɓ>jWut̜.4ku#al$+-bn?edW曟`yw L 4aa-W ~̩=?AV:F8r-2`ؓ 8 )Cs6_(TЕ X \cs:h7okLtm)%3ٿ#{|] xi_^J]C϶BpVP{楲 I!t6ZxuIԼﹷ9j#(6L'E G`muGl/3dH*΄#K +F@Ŷ1 MGL# GyYpW#og3OE!ӓ!g4afcf Mi^:<;Ca1|M?N?ի@Lf!<#[Y{7( O_&ʕe>ht.3I{&G=^;s5p03MߍRaH $p;CԖ6]Gƍ?>[mDT]3^[w.YF,y,[YM{q{x1-SmKXM11}SZKSܵ D)ML:fm'V[ o5 ijP@`wL!@dnTG a?o"iO͑_ĸ]>TDr?SmB&O&2*^#9Q7_w4~^C7`IuO|xP1*(i.iC^|k<̴|WR iËИ&,q;u(\|-z)1 QGEy6Sŋ!p5= P 9n F7fu'{<PN7D u']>n+q`!\< }4PW)$' 9)SvY4ǼWM5d%YTҒW2evqVB0gW4t_X٤'I\q]^L =  9RȐĐafq[B4ց;1.&c `aĒn׼J>de4 y/,{`ByleHFAJQ6q*(p^dkELxΖq }GKJ\1p9[k ר|E aw~i'1K } vmJDx8: bw9Z e*YvLJVVw,fL+PX@A5e|iX=Ps^`" Xj Kگ<,{BTX\e$א:ɧǤ5ie⓪lj;9$S8{*t@a CG& Ξt2g: .\4ZT̬ؤGDB[(e!' 
F'GTiOXF.[tșR63 eR[')N^`MO> w#LQƵj*MNw\`oqyJPF,|d z*~6d\]Ba^eY L2MMt1JԌ["'PtX[R1dnc>.X .^;IbE>]5L2X94x=H 9ƺwJ^a #j; Jrmh ˰J"WRI`$)=^p Yo%1jj`=ZMzbqiƌ})Kl}=jԼBvۡt봎G¦3/ UxIT**`(7tNAF1uYvN98 H N HE*af-3#[%qo~h\Xţ19BJJ"$څ9%e΁3\/;SzM0܂F0ΐb^78A")wΏBĒSByG~G9{ZM%:!ƛc@zl̯gQeȰٜxεb~G0ևFF'M]ib,Ʌ sz = ͷh鐌?J?;'rej75,@gNSXUE5ZmDq4?KL&@裕RH#t+JS,ybvZ*kKL>"IbŒsd)7M@&5) EY@?1U2%U|kA VCxj hF\AZKq*snl`ތNu4ԡ\zG3ȥe@3j. }83iPv$V㴳JPh{Vkҭq+ݘroOFrHNSUEOQ۟5XuZ;sQW?MjGy{ S\ԃ[J{ckQ>'aw !i!2KYE; >JyOK#b|t(Mfś" !wr4 zFvv0]v! ۞j-/Ovn#M؛aʹ*SGTo\o62 ,H%.0|i"k4)AXjh Lsk|/CZ FD`ӱ%+](CrR-e'S3[O$] ^ـh3+Ԙu`lV9 ^u@x~?#_Y{{A0RjMb6ɢ;Ab `砮ll)&J[sJsϺl%ۻs(}VwAr8Ľ3Ӷ}:mbS!doyCZv2ZZB,AX9̍~Z|Z}Y Ĵ7_ ;8ڍtTlJ$^Jb$ PkXpCЃ.;z,祟3i5}0 zNLLmk5o83 p w"]4Hsm62%f?ɻ$ݿ@QZOX=r>ΠG΢uM邲vj^ ŜT! Iʸl ʑp^cjx7؋ ͫa5! t0'qSF?MީV 0HW!v.Gw4l u4QOuHCeD:UbTr jF<8d@iiMp7rg9 am5a{C0WfxE֍K ;sUcśRgH L㺲ATB( z ]=?k=3N|#b\SERl$0w\Z#|e >C'9b2ta.$ "Q|T芿کG5nJ*aX=|yt5Cs Jwp.dC:<0.U-^]Fc~D/+<urmS~gj Y3˜:FK@/J"/,U=^hk{ƖW6p t4! غ(`4HڳcPA&R 姕=K𞘟g _IlfJ'd' U.9#PW*΋@FrV}ϣ*wЋ4\bĔT{.BuY/W1>Zuyū}=([Aߟؼ{Րz">l:v\ zwHwaѮ]Umjyr[lf#Kz5+@fnYJ .M{fɯtw|66uPxNM̵$AxWxzLz܄Ro}Ni%L sPjǾsYOY+wedHu*b|8?qs +/x+Ͳ*x! gX3nO eOp^^ro 591 :v8X>?XeQ y05XvO֢G J:y%E(-$&1Y?8-bR4XCU}dT:RҨfS,{&6Fj("groऑ +{ H%ŪQQCM+Kv`W-H ñ5Ȕu4CS+X)-*2hυ:[tYDK>p:)Z 2/21pn=5c1 O½^O Mu晄_}c]ڷf4<ΌfqgiHГ^F5rL@̴:T`'RRq䈶ͦRƮVbRm硳h/($ae]-[l#7TՌjs,N Cv =C*z_T1(/wGHXsH.\|{a+بQ9:⃤z\-|膴% z )dꠠTC+dʦEêˠEKn]#L^5p՚Q=n^&sڗ2uTL 6bf6H6a7G.>eB=VSYrpˏ<0mWfu2`3}^窔_p/c[Cc$a=2Di'H7SWpۜx'2ڗCt S,tu'@u6_:nNR_!R,s0 '8 D{[`#`?4F~d"aR>j)6./,jSk[H>cDJ>NUxR|ǩ贶d?Vjm!V65=邧MRؒnۿ2ĊӻyΨ@PxiW2ޒ HRjVLXI쬾*+Ze !7q.58kOOyunNp\-r}A.\I->ݡ8Ѡ+F02e=>1[uϼ{ȉMW}:ih 3[:wK:9_9!uA(_!! 
ςoZWu}k;*Z7˿Botܞ0]q_D`ޗӥ?]x ~!EE}JIZ*%Q}ʗ )e"ܥCtjUT tD`8U•MhƼp(k*N;:̉}i>,xa=ڀO6cytjRhgZ蝲q]I҉|w~Ɠf4x gm..߶!Bn8r1+S^"_cp}+I` |xfZ!tզxUk[)1=-f>7p"|`nTP`C4ሕ:BM 䵚 R`fQ046׀C es\[6Y9g0EY)."iMgȈ@GPcS> zcbLL,|hԧܒN*,$!!hqw>)oqofvyK< h.~팩e>>GbfRl]{0+@tyŶ.ކ.RVmFh(OTҗ S}4}GW,pQ wU[jןz| B07V VtjRC"&H5']16E`Spa -ȿ=* 1,".H.u']v0"]ԃ 짺`W2/|uo{ڀ7re;$AK$YLjQn\4a@"_`ܺ_MaR5 Z,],V5!\=՜QRQV.p\˜_79^񄐎"J*bK$lF>HFa 0#s_ {6/{`yCD bvb0jX?DŁ#./Ocd$d/8E)i\C J˕,7k9]I,#Y7pI6yje=&n,V ,?%x`kF6:4ne:7Fѫ_L&u;D},ygcd]9_ϪP4_pc.tͬkS:%V4*2aX< IWl앖^)sSQH7(AkyPUS r%]? 렏)p 1 ݝZGte, K8 )x$BU #cۖ.Vį_ ՜~`b8 s Qh5BFO[9*zzhACJ1y9!s - ?vd$w=TەBY5L:vGd$PlT1֖GLPcӾ4M;f~/%oD8.%I)EdLpy2ث}^#>(u*gJz܌$칌 ;5l#xc{]M %Ե0;c} ͋c)%؇vU:J*U" 4&(VA6P4M )HWzܧ7-W2qHU3?vIlv|Zǂ|bJ$qKMO9/id3`{C`n)ߘxP}{6<. 1ӫp?m\"l͞XPuH/U`G?~A3or?9fh]&yœ'M>jvVYhCQbӒjX7w@("ˬ6.§kOO}%nP?[31ɎF+CF @G{jsP(4wQzXS2sXHkb觾:=4QOY{*FWe oMMAuLUb_nb4=B81tWl|]N޼f"KPCyTIHWi^zY\OOލy0>Qs C :pL2ژ6 A_Ք.ͦ=uf=GB<򈳀 ln N(.\l0M-gG)Ybd@ 2Cd>FBjE\w4AjCI m/8U"d2Sbސu'>~o1( :RRҴ10 ?:nT1 C Ʌn띁nvKXٹrt3Ǽ>_@}Ă}] X1ڃk)2"P"m%KFd' ZF Fp2y&sa1BE`л?I3c_LҌ Ui7`:E:'Tǒ4J5/D7}p(ft[>[ug%µМSwnV]pI$oA]ȠC%Bɭ)aŠzsId^@#eu؋Ct0=;B|~ 7I!_*J`Mԕ2rGHL7AjfV_ˍУeK?I9 Ua.UXlPJ4*fqGVs1iemm w$b4KDܪn؈;K$=<kC nO0)b% pÖA,z'iW;-\iےۀy/OaW6p$[ &{LZaI4g+yTS0ʊS7_VDgf/Y-qEH$ >?رwX 2뱈]<`jf} c; (eŐhɓ.di$DPӧ'0-w0p \)$7^UէFJk{|N=sڦ"_%PW~TI 2V\ \j bw0Z!g(aX |:XP;4>Zx@6rfLgJ]'/bK[S,aR<0􋺤C9Kk4Iv"*>nP [ԽZqKx W,+^ՊCN\7VCS;dm٘ב0M7/{bD}!{B2yA7K <]9uϾCoCqIOرOPsbO,"* -s>ӄlAG-GyK3 f6oX|:R!3iLT*jN&i`kBp$%mIq62dCurZ&0#bTH#ǍI[rd\U qR2q_$@ i!E&K\ ipJM#2v5/7Vqm.|FG5a+zmbl52?u&=E-MmJ5pHtb[. 'rUA1y RN3ƒ,eTxl%{ǪטVo vDTV)ڌM_Zaj`9ӢGZ;riT9p'o:BhֈM1/~eprNѰ9zhK=> 2~ DbdO3eB^(h6A 'qtuS]uX_]p*JuDiXvlvo]Ts=~Ue61p s5kP=2N-Ck=E'=- # ɘpRCl[ x5n~$,ӵE$ x^Rgui,Sg ^U߇ Ηg(= ^9#QR 8wYoD|w[ ߼kv# o!K1uVh vY8W? 
])s(zY)Uj>v}:v?;~[{%П#j 05>jdgk<%\0K5;턗tPe0y_&mMs~gN 6> NMrZ_/|VL&e1g #sC9A v6eHv,4:dB2Ys>cp fc%;<2Idsb]UL~cZ~jh )Jb!lt0mAb' ᏊqyskD?Oi&V摒zHNU5޶mj'tj_zjyM W3&8=Ppx,ъ%d]ʣVPJ#ci3]1_1=Gf2TNj&> :xuS>`q'Lk17?]]27.:TA7V̏e<,I n/* }PT_w[Ff}y0n!abIsHq}W(ohRNHIWvKMz:_A\ xqmwWZӹ] <$B|=Qhj<YA=GQ#*(&ư>-a˩ pL,iɑ B]L,d(ǘ%FDm,<[ux0GYۥoq.Ã,)8NpԵ]j0lxR"\&mp-S8> ^;t9,䋂2bDvt+r}sw8*.̌* ;s )"δA< 5ͥ>R Bt^"_YR CNhՑ]n@8;z2bRHfc[AZ6-j/Vk0u'69Wmleۃ,932#Ӄ`'j# s_ʼnʂeB5W˩cg&˳Oeu^Tbb!NzE ̥\%<W8H1(ǻZao&&*=2*OA 'RJ! :jPM9Ry6w]ئm\ZtVBH޳^ز0޲8(D^ePw|E_\$r}iw^,mD UqfJ^sv}AwtcBpwӿZ m/e0ϵ?hɩbjY@u%Vi켌o!7UoJ胻xZ GVGm+4X'V|y5Jx ~V3& 6ɐP+kTPqGΣjDc)>Ek*+%_LO*+Ϛ@u=2R@?B'z6c0~ T~%e&,47EЌzҥQُh%(xC=kDKPҎcdŚ%i5T} OI5x@y`7P2?vsl\"CS,/iaG/MkBA1M?^J+\ycDQ"7 &m 45s? ZSBZ>\aE9vۀ("#ī5]bN=B3p%A5<n;U:X͛udϣ .Nk#+|*Y._=TssaF;tF0d ׀38A%rG%Χ#3JܿгTTH AyNwZIK4-}3*Xul6/]Vy1I^F3̼ Јϣ0;[]igL^/[Zhӆ~W3Rp1U׉h#}{Bl5wЧ_tEo+ jf} |E~6jVuk8exlfB&4zBp>WF! f''3}ٟ"*k݋ z7,ZLY]فFɧ4J noEpHRZ+ ^QH-=Q xJ^RS_TqGʞƲZ~kT ҴIyvy#n~\hbPe&12zNbǶK M(UʍB6\ݏ 3nx+npe&fq傏+}+G֥~#D6Q&,!bQka1Pa͹ٕ1[&]4؊v2֦{9)wƷ1Hfi#:lZW} Pŗ_.wG =. 
Dخki ^K|=ru[\̼VWN@C?2n 0o-۲ y ՏUTu:MN8$7%_5~+G{ZiHy~o"HǗ?Qhhu71ք||ϰ}Vـ> 6o2%U–1ao Ŀ\RTN'!f-Nsqf a9欗g E|-1*kW   Um7[baaa bxS,;$fBWItBk&#ڵ/2ނN?7hEpwCڙTwwcڏ5m>K`uQe`P0l-A+׌-d>S%b$t3"3AD]/;- Rkf>Vg{@K:߿ ò5@qJES>-#BuV:>2`݃TT(tT}ˍ BRh^Fr/ NU_)ШFO_iX\4.z0UauVjϚq!-ea$}lUs#NoEE^+;AEHO4+wOWߒK8\Xo$$@I""UlA< JlY)'9vc7B0͵%M ~umm,Y]y_k<ԹG}{h|gm0?jn@=gPcjM ]v!T`i{gB I}kҫb hчvomZi2 whѾ3"2c+Ó$YJLW?LQt p&[4i$6oq`Gẁ.>4h-II}ޒw?$3"r vݶ1 Ut$+lAag1 |tfړC =)N S 2\2ND gej^9d4[!v9XlвH$ L]F>yַ^GA'EǸA>x\#œoiܝ󎵘R3]}"CYN A7vwsU[p+쥲{sNNdm;G*򃮉Kٯ2.gw7I]H??5l"{U9 #{%|PZ8&ş D/ KNXR*"#yw8튃YP= - GgsmT\ڵ<-b'ZIR9"=̴ܽéQ[lC'\#zbA 0r)gDˌVv/_ TW.i)d xBp(LP`t/Eş8$N&j8j`՘(:21um2jRMY?I74S#&!Сvl3ajڡrh3Z5*D"Ayte:$**]c}yqnRhK}d"}p00kUb2FčB3 e`~aYDJ0E,-L 6Q1Bx&YVcV,heHHT[zN'c_:kkkabb o/H*YbQ&xu!]gxZaɮ$8r= V36ÝETe}a zcb@g6fX2 kG;Ұ7 jJmt#^ 0{5p*NpJOxfRs Olz-lK͑'f"ΊK0]|KHB+Dr=LH0t,b |Jr\1wjȀjM J=Lė_W\,/˒9@C=B `)wy5D#Y P8yD"mb ,VAӌEPr;l40IrA 7Fwv⌗|16K Xe3MAm^- q6x؁l2M|yN]"=T_yĥ#׃NԵ36R] Ј!Vu뜪%R_|L,"\% ۯ77ꝔfnbVQ[F?n8\Y|'Aaل+]fQ vf^Zq}#‘ Xԅ>Awֺ UKߡ$a*dȐhR`2fZp,#Ol^φh=-MVghY~Cq}E[Hފ@M4a%)Pk dvXpg39|fF8y 0E h,v7w9G9Aóg69_!@*j3HU-R̒>lkW =]<6(/@%)1nNf /*rNw?=v7֓i&p0iԨjꑅHboνBAc8acP#*Xps`~sqO/c`%F*j'Ђ~e'YGxcP4ۡaL,x4Lmi4$\z 1aTR@Q||oi%;+4^4@^ƛĜ~Eǒ$cS-k;*BZR6 McAvZyT8#hg2~>~|Ni3ګG?푳A-fX`?=6[A -O6 ߙwEdJ͢M93A΍e(q?-PF ;v €)#( ~̲5*$ăWoW&$P5zZ^x87MeLv 1wmvC=_ů}t9m t'@U!̇$~ogH%UVEQؒwdD ->xO>IdAϽY8ߴI'#oYT⚤g$fҡ'\UOLM`$$.?c!́R=(:Hs3 E~%/M%傥G=]ƱNb굮SM$<} _,P~ad)!$;>Vƺ/-s߹P7|]C~aDUr}[cNXwT)߾NF[yJBT+n\LjO('@h PRP?% ML+I9u)NIt\6Sе)g Ӿ= 2iJӜb\sLMe""Ag -%RpT-s&rK8*"9C Ol"A_EdL"n-Ei2Mdp]l%gՍ᫶hAJOO+;<,8c=x'T.\xnBR:ؼ)קוB,?r J-0h[3@cXo 2+w1ߛIH ] Շ^}N$Ԁ5bQfr$(MeF@{D<"6im>)!xtB%ZʿY v 'SQ?I~7@]I=OC8([ 9(Wՠ0EC!Md7v|iwz=rWOւ^<-SV5 ėF{)" UccQ|aO)]O?O^[jUQ#*g $%{ksrdP-һt{B69i'JCo.r=q}2wN G xy/&,54P Vn FFA<8\rȄ=8טZ}랺PI6"eCG4]|~YۦH1Δ@ V,֩Zx¯՝ q5aXiWQ ,)N ,3! 
' 'R.ԎeV$Ll99f6qKpgB'Ŀd7Q |.[eT|x'$OJ(z{ɣjG3 36hqwOץ@2lvDE C܄_GL!ib[.RnT#^}`X48yvwk0aSĂ>dy!k_L;Zʛ@Zv e122ChJҧ!ڠ׵hOɥ'0qI%եhVT{tQV^n$NK݋a/ Q !t/Ȗ8?e\=fX$8N5cwYN)iL=PTO8y=dzD BLSfM4SH u<^2m6mUOgfVDRpY,qq8:؏f<x1@C^M8xJ\g|ouY0g$ĹW!q t>h[ sg2^םz84(ՉH}!=.5x2ѾFw@ bKQP5hgn2N|Clj "&ޔ<0UGƱ2 tMCf+Yd2IDp/akʙT8d+oI:$lYGO5dL1;lo`G/3f;jg,H\VV`d5ڧƾQ~:p*#Te!P7) ؀bEn[ ovC|S Pv֋?U~}}A{7a٫N h&xjAI^+I)M(>bO̯'1oV=SO$T'6D„ÂDvɲlםm}Df7l ,aQk0 m?7`c 4ZFNp{.z6CU#ZYpEh:dJsvBwEUzb+b(nݚ=T(|ޣLz1JՍ#}`Ъk_'!s<9׌kz hw全džPgK䂬xCY]}<~W*V;!_6X&dO~nfBIY ,%gɻ++JYpc E/}SQ^fhYM|1rRm3 `hYA5@YhKrz;๐OŌ AAҰc`FS(1L!I `N`fMH6pKwcXQ98fxMbCk@7|-|^4 } ߃N!tYk4oD] gV4`[]0REH.Ι~KxP>U2#%} _{䀾.1LTe\)SXmÙ&rbDNJ6K&_got 70Yԣ^VVZU7= &Wn?eo ahQeA'Y6j6MfPSNDOCGn@H'픬;[ά bIAУHs3JŕzPohFNX59yϊR?v*c+bZ"V_ɵѦ璎[%<8YiD#6+ gεrvb ^͹jwD; )/?cdW7%5n[e x"K/bX*iC\CTt,,_)qQ(}eS9) eH$ȏ./P2u21<~++,u烣a d:.92KIg.FcD33b֪h+VgB%,/-DsufТs"3{<{1j#OM ѸȬaepZ 흼b􄇟8{02'cO[:3iOn osh3бcLʕ4/17Ia`}<G @3PkqUޛ {Z}qmLߦ6TH/o+ L,43c񿌰)MƲ`?(iIVSn퐁V4Oцyh RᵘHneJ)}0 9m07w[ 9'+5`@ǝEUJk h- L#goŏwUXL#NSFg7j_瓎Iw̿ 8cͻUz!̫p1,6;q U=Cxe%J:|6c%jdFAr 3y߿; >lL{+ADH%B)DƗX(̬.9. 
%L 8v;t09׊$i!mJ`hop&ǜgvVPB`# ޚ: TH+r[6Q" B4LVe|8pfƚ Kyin&JULr]D{x'~[I>*"42XvĆkl>KSR*\fD[taq2h!!L-(OR@(pOr^_ZH͏L{aߍI!^ůT#t\{(5Nљ}˚k79/2QxYH>lI'wPJ*Vl4ޙkiQBbR_,GX+YH$` 0s57{CWm~Zm?9(I7ʮEoϙd<cR ^fBb,(BK0@^ g9]gHT!4^')2&k—Y ͔L!C` D+MmlT G<>\;OZh˗R7,$&}!ed{K lƤ: Ί𒳆?d{Zu:.O( džUFv9_I893ߋ!5тjНa NүT{=$KۛN;BI!LzҢn[tA#_~$XuP̛"8򏾞5L8!(|< T53c1/p`X$M zpnÿ< S[_~ OaDz_RҚ ZUG|oCRȦ/$HH"`R@txQtlsj,]R #ݶ@G<]#$b'.H^ 1U :L(ih׸s T*y*R?*T (&aoDvpaadP+hl`L86žʂSo]ӾeB3]9> R4-QPfY`@%C9?^v" =^FM^&>cVGQR"lUIX4[9Vr 876J>׫ JJE@l~G40P^q0 MTGFRHmt y3Iث^x^ iQ<0O<.rI cw7^Ėd?KFP!#i7L,@EgwUg.ܗ!6/Sj6֑DiM315pw5ޮE @7o2zB]=++ 7WMkk"皫 x7Nx=>5  -qmb8 4ۈ 64.2&#eyETZ6˖Uoj+ [t\AjivÅth̵P ۗ9~gǧ߹MϿS6'> /+<<@{JόB#"a,ū9H8-W s m#& .}#9谗ڛˠjsHw}UAȩz5uH#60=[5yɏDq:%!TQ0)61,dF qU[Tx!ҥCu_j%ֻHF Toi܂˯D@cUeܝ2#w"Ep4=$٣j#+l]K {SAs=¾7yt^B+,]"(6jɊ}؝ٰ`o-!EcRԳ,`$bZ5]MzSs*5y+%\!S;`Qo8`-ag458 Z7FG{EQ6ߍHt-_$jB$Y\kTs"@IT-][ 6S1'tw*ӹDdE(| 7CS灣&8D"Mebq((R;W!-]9NF|bA=gZq%Hbsz0@:g襩% A4)90МW{%"*7=CA kb19$XmMeu]G$OiMRa|꘰C vr-qg:oR[1n;GeMQIT" wS8v84Ԋio z)ZK{.-WDy{xcxx!֡ {@93r Xm,azkc+YK 22vMp|)_'T.]p\ۿ5s=4h8P ;-cil)=8 Ma ]a:uROʅw>*( (X1p"!*iJb:mTWDD؂x1=ε sgOO%[+Aus#&8FH|O5Zm*"t]N7HIv JCGZel =PV󠑉=ՎDSODQ:26P .h巻O ºGMڙ#񲧴ΛnobYXNӳg /}`2`m#~qiX#kuI/ZGו0lc\//pl2a+z:(`]} QAamkl*/vhMRh'PΓZ79 q \95?VOZE@0ޓVgQ=Ԭ(nӣ / mOӞ1 cp,͠4QN:Uz{W|ΐ ^Р#mTuB;0+ŦT'"VMoy[!Ix#bljTa80hB74oQ),d&=Vaɀp(<( k[sx;@Vi%ܢ'p!Xy2bE:uswQ&:^FUҡwOc*#"}^giSpi{,nkaGA=!v *oXQl}8 N7FL,ߍ`m7뤍rI8{7x̀p .F?bN~JWӁ*8+ׁ%>?n$k;F=\-G̕> r/bM7c2Kڳ%2"VG|5(SxqS\5b6qL_p8{iWq4T[Svkch5TAB^U i;3z>ϡ?Zgp_X^Z垐N% *+M b~[mExxlHh0̬3H2ﰀl,"'jM $C{ 5!%ֆ.'ih#ekHJKֹYUZ H}+LFX0>;巺 .b/ rMޓ-`It5eM4?ʄ2yIk&zfN,%y]wr~#.XfmSۋFdkwSFh˗DuOi~NgFIm%G2{tѼ+IJwcƢ{SpXhdzC J&(0$֬0* eHOxbf(O&;@ 7-N$|%`Ԡ%rpB^'Ɛ&yH2c{#&^F]!9XĔ6jO) \u0qX"e=FtӃ̝d86M?ٟԯ;,K zKZcRU{h=ڣ!1Y^RPwC]zS*D_Z>o 4''*%zd_ oP@Wn/_YҮ $Rc R'tɞK*S(WFfrHܗsڈJVԮcfF"3f[Q]\ւ0jBn xb&šj|=SL(D{4.\AywSp@\U*?᠉/ aP^ӚϨ %$KNwqԇk #7riͳQ+q *Svٳ5S%])<(S?D;|o GR\NhcyJH?×I["/^AmW x 4FWT],fڰ L2Vuc~!Ǡ& "ui[s>GzW[C(5һ-ޫ1hQKfx@7 8 W^:?K%l‹L r\rOC՟eȢu 
R͛5߾}cVo`E#]yG|;VRƩPJK+~Z8]/4=&~y}558b'Vg;u'$= ~bey\>0Z0~ <,f%.^(W:5YT!@?jr7y"|OblٿcU.FR:bujtSό3F޻.VQPw60|*2n#^gX:7'v#^#L8gyl0q]J vsѸ}?eTziʖXnj\c y-gWݷduHdgv>CPOM[LDG\e`bOb)BFs%km3\:)4$6X + C0ӭ* YoF3bIUyQdA܀ A*@e. NTx`1*`<PHyjHqg1!]q&QX.'.=hBgՀUyIC$E3%XO4ΣDv6{ I]B:/ "+/ӞGuCʒ9o&kD3MD%̅&ts. ts.!8`i 5p;Kpy*{ 9Nx"/A45je ;@("xw˪R{@RǴ bt_uX @8, H}[|[όY#ṉ,KMXLӰZDn~.. diqwm59y+ĠUKRR>3ԶiIh8;O#Ep?^E,jo#vZaednлk`+jlkRqߦ#ݟ|4tvUYiX<\]_Z}R%3IpG/ę)il'JE AJ!zlaMB y΅/\]F\a_q r ?VayumCuEL陼PYF%l&t^ .pdg{e@SXjU߻e*/=.^}Vp^}ԢRI:k@!(M$G)朹˂æw/O1+'N'Hn-SďϡHn]:V[O .NNu-M 6LŰab@4%`BY~E@}^]Ħ% ȹiZ':=غk{6<=_)NbJѠnZG>oH7b~Kw+? @0ۺWQɆ6cB,g~Q!9[PWWP \B fI1DQ7兹TI{n\ݰjJw,喧E,L.}TA ڲ])^J3a9EG.T;12. !ATw~%&B9a.YɄ̠ ME->'ݍ&?!Rߝx2-j/ h}"Qo7HBY(OC뵫Gf0p%V*^~(E)?Y#5Ap, Ѵ@ڍϥ\H^An*X0BH¼ 'l~g-NR*<.L('yulW9!;dENV::0##X>-oWp~?#D $g5Ji >`sp3}`H>VEFLLYK#dI *V;8Lv@aW=6wFFX<+֝H W'cE!ZU G\M\EGF(~|p0Oi#94'3ZU1s-284Ǽ+ЌX`8 2艾MK{SPf#,Mʴ񨺳)Ts#3obP7Y)nf~K*9Hi& F\!#=﵆ Lh#chV1T2hel؇Phؓ?w{eEk,fB#[jy $`tD mqB֥]9J $ԏZJlaBe*ז\ʞbLc&Yagb >WG |8 nL2;`Ied Ӵ)@%B,l@UEw@o1֑3BA61 B1c঵@?0h=N͜DR$CMgm&޻ٔ+b,5cm0΃X@bo|+t HOR/pPxH3 elF'[ (~: !o=/d71Br(Az5ʗoI K]8;x!I+aLD7ظqE$WQG-kǘevuX씠ZݮH2Kקw)] P+^P>2]m_guђAohB^I%$,V5SJŔ * \5$JT!6bg!NA.yt=U5NOhZ<\\7V6T|/S뇃@N#s)aAH қܟ4HIѪ>h>pBiX̞V{|V"i1nkLI"!L)A:T S8N0Ž!-; ˾A}ƧΜ|]?+aԯoK76d\= <7dqe ^Y  ~*Bz)b*16w7D~'tQ}'00bWg=ƵUyqaKOyi3lcRNm[rMh6#[#H=5m4+ ;*Gmh; Dui\*B`e,hnRr@ v ׭A%O,(C~!j2Rӡc}@.S\Sh+1Fo;!M~#M.ϗmE[;ɥϱ@UF"եyU7jUZ{!$7*XjH.?J- LgEgn`Ȯ#WbJGO͑,lO!K5ufxo srFVu6Uv7r z$8A R.S7!zК r@Vs6.o'B4ر["<:Xw"d^k'ץ2/{W8h|p߀y"03Ojeq1gcj Z e.< W~t'RUZ1@wGبS|)j []RV'akss EW(*¶(Jn#¨2XXH7)y DR]r 5X,ցѯ2ᴐ0髽'9 ̀Ox`Mz;_(^7\c|麰(cT ?fzD7&Oi_h~1IYBR*7@顩}(:) dMȲ1#/KCjG*t\B*2S9nKeq?_5y-09}__y"Y11pE@>%(DGAGo}Q 6QK!1Lizrt͆_ujVdDz*Cqug R]; \W֋*H?-K&I ה{Le*` Y㭉C&QJ!/Z(jT0BMP=s32̳n4rJ&Q(s]πQz|PQY曲6bUJu0DVIh2~57eR'.6_eu)7907N(:0\lX`&r܈5m]Ҳ1OJ3KdMXeRCT5/"V01ITF Fݕ_~|w`Q(R. 
f8oneFelAco jY0)c35gHcm AC4πFS}zG )~1PPkNOd:YJE1a^<Ռh/py7ۥ{lyߦDވlTsG;6/fi%"]hD4i%2!hcqYx%% Ay6ە`*lՓ)lg,ޢ7kX,A%'_Mr-hW3HzSEu츦Iwx܎رT6bNBITST+F󕺙Hv"@zgW Z rP|PfCX*u\wvAsjU (W>+Ge HLilΠ7h;8o:ҝŸ!5ܡ?.!d3;T*?i )~uxWYSXiLzΧNIx=zU]9>R ڨ">s2\&}]yi9^6 6XLeI MjH{^ h{ ^YC6y U ϛP/.3abRvNG,inyKa7qLD|~ ^7]ANepӨd_yoQ)SC{pLH-xZhF_X=ޓe Wz pju+I J -/;o7uw8HJ+vU~C 9 O޼:0nW iûb՚E\ң,R)EVy.W]v ^ xdw 5 fs{f :;R@̱1& 'Wy뻠[I-} uE B p`{ݚ,)@%1=y2w#E5p=@jݒP:6X^ U[Jb8B'p$ߋ#Rs>&MT dlv[dtLpfQ#D^8gPeG¶ $͘l27B+C)7I}¹TL/Al8@N!*D?na qю=5{ߡ\,ۥ~cp0}i*;~s7TRG[;e,Us096 \7\ZemҔA͔NlmXk{7i'3(HWD:(phQ_0^ J{pVHsu)wayvad5Ǡ,4 g2"]Ӓn Sj I|PL˽q`/=h$Lh&%6+)/ K(eg "da26]Cو"@@ E7xE6&/L?LHkq+8 9-(/ʢ}>q3b6J`HiZyr!E_dB'_ռkZ# P;FqjzSvrRD2]-(Rt}%2؋$=kM(:f1y8X\G­۟$sҼ1:S0dkS\ 19 }7FVv:;oYDglR,[qcY)qUکoV, $C~Q-T{,!L0NC `fKz&`6@U;5R>qq[b FnNqumu >GO'<#VܲäSOHSළL;-m4 =y4 #$4Cj`TO?ŊC}?0|_2GogJH)8G"dFGBEm6Dy>gm&T!D>TYdݦZJUC7f/Yfڜ`3{rv jou ,?IާۢrIE{+! 8+0=&՝՗,X 9] ?~R]Vpަ$KEѯ )XY~y8@*۫?Le_U[a-_3"”v+{0'_JNVīZtE:ft O3jk}(e˩ Ji]9p؍>Dn;@GøJTF*^ֶu.=)cԡ$ߍ `LNԊh Â$^ pL%4HӡMHₗC3˔iIG9E֠F(B8[F(]*ScO@"$8m\9Sp;UN=+{+1s4'>&ٞ1ê-u#pmHLk! Ź#ק u(FVK{JP3#TNqzr2%|"UgQyB.Ÿ#%럵ƷN'zd^ԀьhNh]+Nr[=YbX%"\IzpjF`}a_v7DO44z|jqT/SnL-GY_s!xa OkjϞQ 4; ~,6BC? !^F5ߠ숆V*WMf.+/gK6.]]pMvn^xJ @_U"3܌e:|MĕD)*wfAW{LS`4Ҧd "& IPAQB.g2ZwN+FK6Jlڙ$1/*}=sph cT}U>NIQhZr iBB eD9Xd%4| fѢݺHF3~zsze5@vtvC+0&eR-c;sJe;8tZV[kaE0<b]YUM+!-jGm;teHhvd>!ɓ1;yvV+@},5 ~.~]QMa&8q6)fkrAV_)zvAkL`8Hu|Ć?πs[wruB|̷$epQh $L@(I@e!y 236p̻<`iN e u NSC:~EIJw{":vY=x= *ʝ*dRh}{Y+;ܰ^ \>"!69>A?Xeh7ZG vP#}Z&:awB)IV SREm.NJGNџ~I6֬Uy\ 6~ Az.5Mܰh)Cd)-UG.A0b޹=c?9Kv-AR2[Z#&=)K~H_A^A~BZ1尷ڟ1v S$[aVeF?2n\87 nmFH +sZO_D&Z =O4 ǂ^\j )=Lqљ@݋4/ %q6ҿ>pc^Y;IBM)J wT\8uV ѯm_%ȋ5(Xf$?~*&=2fFd5<+ׅ\2RO xfu`@;<o"FHR/@.aFv c‰.namh9~S .z厇B ,csNb"IEaS!Jlxug9)wL^JKHCҠU6=gVV}V9jh5oy鵒D9}&J- 0fct\V4[3-Lh(VZ%)<^ Gp59=Ȋ3epT媷"\$(~;-ʷg;90B r:W^rs- 2ZW:E lF&v'g/9X^:Vi~]58?f 2*E&\d'_L7mfӜ3'x -K6km. 
-q>n0,rw])ds.IZ9ƒf;KF Lw s1UGQhtS9->A~m,+lhE Bnˏ%?"0erISp2h G>IzZqZ.jgl-pBD]Y?<vN^mmεN:t=3za~|nn$3*s9wFEy=aÂk<+:§ԟxA &O]E '3u&5ZAQT~zۉ S/R ;)_!Śd;;sRoj#`uɳR_-i =WQ2VZ E"?S)պIv^''F۶8R $uh p qz'3Q:;z6!G|sIE=XkgU%] ʃ1Hi]npCE;+/9nd`Џ͓JnVnZ8JҧL5" )y k{a يhHZpqu]rY\1G?z)s,8Kqp.ż9#y hJ"xje5%C^S1Fi R5 T\OmP/tzԯ~ä@֋wFEG0TDo# IDĥ`/g*Q/kϚ,!R %uW0|ϕH3x͠jpzFKItXp$k'Q S;WfVH՟aianoj>mwP1 2 g-$B /y0`=iPrhde7Kkj#gNyJe;}օe2`a܏)S:HrVl*1u&AŐ)#-&eI5o9 8mls؋f_WcƠ0eXgOai o4/4<.r p P3.ʦⲋu}ҔW3 Ɲ1ϫ.[5F椒Ҝ\+`?H5cvGdT4|͑D*Gots2iAaqrSb*~ x@ln$-Lj)!}+p_S Ĺ1jإVqrvu{:ITsX`>(I3edD:ڑ7#_td[ 3xt,lw/PGEģm]/R,[O=~ JB44WB& ԹvDZk^cF\ Sb C{3'{P?5O]⤏ dLr+sm(W캠?i!JRz"gdDѶߚ7gfJ0|Aۆ~Q$yZ]Р贇a\QyY+տF ??<3.":ajYPQ~(RW5Ŧ ^[`$˃+ᨪ A$aPjz`=~iHV7to6&ܒx6sx,N!w|?~#ejEţӋ`5]Bxjm%|!^ʎBd D4W\"7eW(Et!}Y0rv9vz`hywTjYEwK.8D$j yQ-p jKZPP,bƒsD}dfb$wG6gǩ+=C'rQȻTBvU4AJדW^ fhN>LXVDIl[;Bg<[h P+?M!̉uld+8VQ3d`Fykp,0t $üEI"KR/QznG"ss;yґ#9g︗F.-$R+D&޻/ VR%i:eҕ @DԼ6FsfM0{ GbsFz7,GNh=gh7>r:U ~L?ld@+Sr7LNBe1S4ݼC]J[E38VK^%006z |Mk L JdMKAܐّ@׿ZnD *x5v/hAO!:mRw#yea2@h+*;V:(1qX5ӏ2Kr0YaO ]^d1稱G2aCB\+EZ;|Go@roRE֕Gl{,pu9cG{IzBSc¼Dv B/7҂~"ꚦ6/\eFD) -:z}D` 0Wc/5oMUP81TJ <}E/DmdJQ< ^XW+@M(+ R#G|ƒƦ"~1kft{>r]W}E-~:<O{C7ȏ>M(dN[`SEDxynQ'^+~`8P%bf qTSlu7gfǵ-xk<2V9q}u[:ͺfV4*)W _12iJ1|!`AEu&}Km#ڎ3Ju- e~%M'ɲq4%Y;Z~#[R$/.@T^靍Pbza=3:)bL܄`nZ4kOKa3JikC.h{K (){,u{xnRy H]VSu5K}*Gؘ{ܼp`yeFE2N}ӆ ^>W'8&q?,k:_]yy1fkhN?ߥzuWBqBJfY,6M!;p;9%s9-bK?D$=P < w@br(Abt$8$5IHu'l) yiZ祖2'zyl:ݫf ֩cQ|[vx.̩߯'E7^,YDVT.G}W-"ڞ2'VոSf/_=ǒMf: VE{ꇆng4\Cn?|l5WڂfSAܫN{#\;yɕrn\оZ4)1.F]%ژnN[yVW8C{XqDoUzeawZ ~ÑBPN+{):ChZ"Ii-Z eV1'˅!_K+Y~`vRټmSU#u?3YGƒLdU`>.S8Baސpʲgl.%xBli=x l@A ϑ_;ǏY47.+gі#7̲dٟ:%$A{` ^kvdO.O%*WAOcjkF05Co񐏭dUlC<;B_(~ڹn\x fq|]qy|4\:P[)0+7QԯG^v=W099/[C-=6CMjQxu\M1OƏ'.$Pab93,~ ;śozҰ(x~om/Ds3rh~DS4]e?/W/B{Ѳ6H8t&YVd.!?;8 aXP|K-]ӠI'~.Y-ER:R2`mL,BElD,Cu7) M3!v~xQ)zk3?"rάJ%v bf2 ~ǹըz@t-)FGk qܷ֓%'e;ĈJ U_?yy12B—A7S}~ #lF/je5DXI[ASG[ %|t|<:r *>SQkdt6,k=}嚅0Rța@`8u%>%,uZpD\s0'e2=N|7:NnV]~Jg ݻ" ^}&d utA,̐9;Cdϋ޷ӿ_sB?J|坻\yy84 RjÉջk?)S4ρt+{PG!Qݜ4.j!I462Tsg] 3'*jI*Ŧq@xyu֥׏2o1uI/CRf0nY> '~vH6 r( rAIn/O 
9|(`l4S;RDm.Xk zxx='@3F JgȒf@ 8%eSenvj=PAsHEy7G&y?;(6AǽBh=ψ?ɍ@+TNXчkrLT RiIRUmnYdje)takY Ht@rxO0NN\{u[~+pCt.K]%)#RPBi0l0-) ׀ F:NĜoěቖ!yX0>-;Q{*,h QN-#iNO5<6}Q8 R* Zv(?8-L-{D }L>* YH jv OM_XRP,Ĝ|}cކ@#ʷ; & nTԵQ#U3\5PQ#&oGĭwT%u{8%m'%9X`ϟ_U> v#~]qktR$d4VF /xP&AJx{wQ큧^ZT@HUp)mgѣeAj NEWUaRzn^[/H#>/Uhb>>LT;'A4}]ɩ׈n~:I6>'fЋ{=Gp0!vsCZNkFQUA52G$9 /-=d Q=2: #&OCsAcevDI\A_l6obNVⓩ#ZpOWq2_J8+d(Wق\:OZ|]γF`!! XjE7$ʗ+=5eveR3YlR˴K7Y%̝`/' ,pB ZV6.͊QIR5`vlbC0AЎB뗀#eT\`dX##$Rp/{fmw*GrP2=8\X5D8fGs)qD],4xFecإ>dl,|᪆%jwI <8OG3(9#mhI0~;qϻ髏P?A ]]|TM'g.‚%ۼz A_1%gH+ۄVx#ަ b.5uA&~{ FBS'sމdNvGk>!F[a@gM: D> WaqNjzԉZBbHBᯭtmĪϑ>zS[n^_sB RXIPis0N`ƟA"sڐ7~ee`Y^F/Kb FC+˓b!6:Pc{4M+7K ( 凸9d^YnUG)qb Uh]gJ=(nҲo(S X1DC׫Uz'c$5 ,!K}6{t8]ʳcF7[DHW&6c2` @ߡč+4."`!KXgT/=|rJ9%m>.10|9xR63^tQ ,țV4eƾ&NO8}&,#aۛ6R(2cuAVY`bԖB_mț./J{ލ$;H h ceH51dV`ȹy!Jdm=0DKDVK!`> _ y|%`nA|VwulY+T{BGٻfvXޙ#rkِzS ۏxU.x (gD\dq()`7KPKV \Ysg*sߣ=&u^-O?!h\/i}\28Z#:`l#z rV)S3~rQA䊼  S7k4䥙b[ 7#)%䃯&w?Z73{pSJ[6(*;&<,Ja|n"E.! aD[3MSYԓ7ێ"bb =0V7U9X;;.LJʚvV&@|ۛkGX.’X,APYkKЌR< DhkZ_XNTv]^{zDg_[g8-=>ڭ0`)c)_i-) m"7zF/T}1klWKk@ 'J8{Zi|MLg3%cHz.|Ć%wx6G 2N0hfrѐhlqS3q] O6Ë9Y*Éټp_-<шcz Dyj7}ęy !㧩kA @lFBuӐ(JGJ _wxbQ "CgcixȦ+b5*5EZߦd0!JLqer6bgN2.~6GqD{]eu|6MF5'ߔ׳a:1[c#b \| I*$l['V Vk;Do%35+Jxg_}E0|APQ& WwV.QiIwr7|U.=;gZ_uFcͷ}#'Q˅vgT3m5z8{<0Pr#>A?|G⋸WN"A<p@ڳ係Oلa#ބPx ~nnEQwWc@-Y5?U Ph Zp7Jms`fb ҝt9溎܌:lO95Hz :w2p* Ϩ\O@ VVgGE0عLf{/P mQ>QxXKՉIetHt)|F"~̊+\RZTd eJ5 i T>~5p+{'*ԵF}ٽ!y.ڿ!9UJqmHҀ›}BDf ߌp7{[1ۆer Ք7Sy"L툣G31u`b?sDa/(˕jT7{"'%6&s G4zFҰ2݁W|(!>P^2_&3 }[p/ "9\yvU$ K,G<'Ւ+G+J>oNt|_٠d %s,zY/J53X!mf׾#055{Et Jt1pOWIn a*cɨREndcԢ|gʷII W" qzTDZ1R3 y|Z i<6V\9TRSQ[u&u>V`YGUj8y|4ڔ?wlTvm`;DvrsGQ][b6*k˝: cQ 9B<_?ð)iOmGldЇECǯ%xn)FWϞXA6yѻ`Lu.*kYOFcS#HM>zU[*ssTX_++RѐHCx)p ~kaٺhJ__uM0\Fa|3?UߍjR8ىv@9R;.1޲gO&;LIn щC+{YWG1j3\ɩC WOiQX_U|d RG<!]>i9*ô҈hԐCTAOI+ F};lwřb zٜtQŗc- DҞ1fǭN_{V!Y~ӹwCC?)_$ 5cNLs{`eY <<^ֻ2Qm`v5a0^y-5FaZ6,WǘdFF5ZtkqP[Vm%XʞU{T>-g9ia'[qQ'U#FYQ\/ދ4Z0yne(,3a.{].9 L7bd&= ҽ#^ w = ޠ6-ſ\c>.MaP$ϚYd ףTISdV,t. 
;e>6F֌X]XIsdtp6kF6^Gp4O ǞI63?P㞵ob;蘈@4$>67ƕƲ&OSR1"Fle.b'<–8#^Ste]Qa( QW2j*u0éE,=O˞g̔J % >dfT2^TS ƫKĝ Xc&{IHşkO'61P d/s߂r^/+z_O4;O\65"C(vRDNʠ$zpSF]VDwxRiqӫNsUGhO3/4a|%%^vU퀸*p'BlaΈkD)GZhiUQr+EՋć{HAj'p2mbd_Z-|OMwʭ9A \u]Ta1Jr< Lonx6>M\yFY \4@Σ.:N]ٖf$vMU;@G4 ~Uy@zOnyÏECFa0X%VKq#@~rCZ\Vp*,0 )]8is00 fP?sy#fh+ۄ0.XOl&NѬd#)w_u?,O/!uH&ϒiv"0YȊv#wh]9mF7CqllJS c6S Ļ}sW2nArDfv'r£⚩}ARڒu1>,1Ryg0^WA5;X d5z)U@hTXg,x)ćSjIz ѻSQR޸E Y%Sj[Z겟S|$s7o?̮(wέ2:KN ; x^ɠcF~8 ?gC}5TO 838uui.>T4|DҰhY-*t̐*CǼS$: Epsu"(FiRg%qs(vbH3gg2:]Ŋg ^ (Ȏo*ZC[tCBmWQ 0KD-~1f,%|U 0ٯ^_nSe Nu,,ծ@nptGzrapGM'AG|'#A´Q)Q3p<&Dŵ A־`NWsvSQ#52Ҕ$3hzCӬp6gC?DTeWj! RZbȠ" t+aQŨ$6 μMQTiRM }y+ t"0cfutw0wLZTlC:U*^~KV|ctY[>QR -nQZ6 B+Bmq?v=g+[Q;Γ3?+vu?}}/~HC,vk;NGpSEP26_8Ӂ 9H.<q?g/2ۈ_N/SC5+'AVMc|4yBgPRT3(]a^;2Ӓ|ġm[&p~ YEBR!X+?&Y_Gk&-q2Jϩì0V0ko~YB ZNʫxe׉k[F5k:Ȳg>y`iM`?鏏C TbKEDн`G<׬@.sAqM̓Q9:K Zsn揸jBuO̩Hdoq黟᭞9@a3^b 2pɝ7BζAtѡyV[^3JJ1s,^uħJVZJE3&܀8㆘{\$Wo,g; |{\$I2rJ{sXJU5.D^i5)%yHUjE%\"qCI;|5:aܶr7$t+9B`:P֝g>mT*'T=@<$MZ=)2XBv:Y#T"O//mbc$^O2Kjv_jwkb&{(0| "W\.eWX~4sxO !G:s"ZӸohxv4"[{+-e@|בыn`O?k1VXzERz:ْ_0BL H{Z|H~>E|QKA|% P;J`}Wt c` n)v%VmOj]Lx~e*t]aYT34ʍM#G®f&;QɋW {IL^3DŽ"oaN_'hUq}9od3 pbs + ZDC9'+ [Nfh5Kr^N?|k:4pwƩ(G0m2ֽ$d &Hte}]vFcjj+V2In>%C4ڼT wB₌kR N>@`xXA 1}.-sTXͲrϒ> -dLagi6I٪! =Ϫصl8t'9VpP2Ab},dp[+tάCG+m[ATg a V5z(Ǥ =:ߵr0"*.~iYC&u:IFTXBF`Dz# tI%>Xl w":e *C0}i<**Ž:f7HhDg_vdV&2x~AAF\ƒ8a4 ]X4 &?J}_?$ FSCѧ6p!bnJ0ٵ0& !N#W$=[C'rpY:z "X\ Z?&WjWy`6irSf*wb1Z$ū[J:;="B}FQry)y"RTkxvq..R\xK@svF 9p@jc_nCP^Ì=:'l33!4tQ[3BcʄhY-OsYo7سˮ ZkP-[Ϯf}xI1һ-S0Bm>fUfVV .p&4F!H`JL /^Q+̣?RGb(;k`l< ~1 [+«OɫO8hF7_Bt[#^ da&~>N*v5,W*¶F_4f"MMl4^W$"iH:]XC)k~ALcTq4RyεY*%(5qٗ؋P-GVqBg.ZЩ0HY4KN˾ͭ_O%]/YFgLFђr> e WٴF9O0?/!؛%1G +]b5R9VEYtB~}w{ 7ia>2^`o\w6u WSysgkp0l}F5>; <)0H5Gu D/ U'4}o2g֩c}x;yk9iwjqCGj~gd;CKceZ¾g 3`}AkPNȧPe6e-+NZv9XXuҠ$SMTgĊ4=WN.]@ ;:12&aY`2n3#˘TZ b"w5^q{J O{"-,9 uB!y"]fepC5?#\P 3&€6|@RuU{:til_M NFΖ“_\+@l8Fn=q=bd {t$-a;:ݯ3yT!;V#?X{BZ-PHFutFouP:0.ߍ<':ԳRPk_yLzM z1GղItKTc (-Y>A!PR"!\9j(Ls6FeھPbi<˵x.gvN kcH mwnC*BNƫn."ZMG Y+rƹVF&? 
d/<'Vco0ė79z'BD_C[_ÀO!^p<xPAmK`>x5V?np Yu*2Ofg\~ r%_27oVx,d(\_h;U\>77^\\N( 'Gxpm{0L7܃7̛ui2 ݠīa};.RhQr]jP#^.18gbAx?z2 W%z=rEg|CٳY8*%s2GegGM9^~yqGh5Z7<*<)4#W-{qR{ B*=T!-]=]A}, ll+s/5|ܰa/,i2gFfi#Q釶&*B!a`fYC #sخbif37ePq3TFsȅƸnKzO1>ɚdzsNKٓ/K,$x'0 ?SPyvl-w=k% h;ƊBWVGϢU1$AB2/xz> .8zkWVG}H6P+ǎZ\,R@MZ@em>O-sǜh%&<)lbܧTX0KR9Z3[,l+S˩ٓGY8i#Fq1T,51szͪkd kueۛ.D kzZ/$Ȓݔ%LSR7* c;2S .?t}Vs KWƐ-p֋D3g^1(:})o)i u.n.4KU|d"uУ#]鈰hnfyʤLbτxzhU`s4b7ۯVP ]͖#?m̜ WVD~\t@ENtΫ[W5B ~ÎöU'+v6Fz?׶hH~4V`sim"YZϑLˍ fz {@vfg%%k6Dw5w+(y -!V'v$504u8`0_?ףxEQ' e;8|o*/_thK UGIKΰtN+UBZw`:Ҩ&jOb1xtŦt"I,7aϘgt zm"aİ)P`eU`V ~ 3p2i?hޟ[nZ,/wF:6\@)yH6U_Lt;oET{[ ̖-pWTC"C- 7cCWX[-ª,[* h!DIBa֋ 6~FRMfƗqE5Nוb52r>evK} 7*tJ)B-%hwIMC{KǶFY4~tDRVq.`vGAGHͪnUiUcA}<= abG̠;u0} ِ"2c1 gY >j$._r2aDU!ػ2Y!7k^H"2j5+87GcEHn `32L@Ֆm6 ~]uC1<{0KlӅP3hF'}vVkۥٺNou.QD!Mb8UAhӲ̉20 #ݦ.AdJ0 0IP$*d?U.G'ϡ쒴a{3.@3DzTX¸iQGeE:X 0RZ(!lo'{'gru1pQu4`z$noӛbu_3jTpn [S+,;ג !)C0CF"O 6Ec ZSӫ{k'qVtzO m/,fJq.uiYGaq9񭉑EDd0ƎE)j"\nׅO+s ^m3!O8cpmzY4VZDV|~-}iQ$^M\kZdpOCY|_'.~ԹȀӴŴr^0>&H^C,?I30ь?Sɠp6-E.br-G3ay{M_B>|뷃B4f]vDMS҂C#Ь=.,@u:oپ04]]ntu Sە<!R5wm9GNJ{b>;(%5f?o[W#Zb"aR-BFVW^SQ[Oe:Sszm!l~s7;!AqunpTczEA=?7 ydǵG9RAA"z)SJ=wz R(;>3MAS3Vr}Za1zwwfPŎnwv݂Xf-'ب\Ҏ#Q{ET`EqSNGtĪ #'XAin_a"KIn+be+߁:6库1<2<`‡K.ξZ*DVeֻ&Jܙ"꬈uM"BwQ&|h^`լ",X2 ˌw$v\Z7N,nLP?IyC3v7]9ߩ (<1>,XҞkԂ9q )>1pQ%5~.2#IB&q@Z󝅑tXu|90R 7|o9eٌj%)C7Kg[9 3nW[%/V˖T5 -nhQ> h; fض-GP)MME"lo0n!餽]m&y3`xb[֐6QS(rp>w@. 
ex#}B{rx{c?v&mIw"  w`mm[NVX+ts@/MFn5\fAOzmêgVkUI9b(Xf ,2mψor.^Im&t6uWyrܿDbClTY,b1Kcp]So~V@ $rF9zk13p#C M·yv)|adQv| j}.%h>q-<|?(DAul1-[dI<e\]$eg:A cb{|螼tc~Jp wH~-ʅi0}cϾm.IPeW(Y'98AZo:F- GBȣIxJNHB@5`rj 풅&Dż=YւnTy|NerRVF,.tR`-SH_t4.m lqqdK' H9ZBq w{BLJv+Rј6B~?};&80#g~ g/LWmu`%sm%GJmJb"fev[R0o[Ji7+.xk8ݮ3Xba͇a9#݊;΋ݴZSĢ_ V.v>`ξn"!XOd]E |m.eMw h/W Z-Ţ ư }Rf?E7څ*s?x- JF*Gٔ;VS'NM,% 8 uyHϥwaQ"vȉ>aEUIpu]) 4f|0?B*,ڪ<"7٘T[%w O?T7wJ4nhU!/tNR_T`Nn|5ORk(DbƬȹXyi]o_E6U=>$eizّ hޜh|uƆWFoZ?$QvxBd9>oS%,\]KX#ǻgQ2MSIfd}U9;ޞ:[ۤ , HTm}.XG `5T߭ >]F8K)ΥXn,^7=]&s$SO`p R; z \נy+В.;'}]S4[`&kEˢae_Y{`p`@9X'⟞{Ht:eԊ[ C=3s51ZU1MnRrx>O E5y~ өIXq4XRL(S v}&{: hQ_ 0&mfr\ s%UY9"xV~\?Y+C=} "Jf 8HcJBP[k_ ۊG&xq4,I= Pp^!r1'`ΨޗQ")KYi}ڳV̴Ar`T {Z48D瞔{tr 7L=ͮ@Xge ⦲c kMJՊ.#Tn wo޹߅0vF@?bFg+="ާtJ]mDFЄݷDcFLt*w(wΰkE%usd(Ckyqxܢ#Z`Udl]xI5Ȱڗh BSF*Ue\]5LݖU +9ALGTh嚬aI-3𭶀 3H%R8<[iq'G,q6p2^,3)K /OgHmK[q`F`dM'ÁѡtEWW'3nG b }ۍXv9R?s`URQ@ M=f~zmE:tJ@D}'&nhS PSۖXZo Rӽ rt 5*Ք6iW.0\o.< O 2j5*dBi4ؾc C/SBɟ~5&s"ǜFn ;.L@tH0(vԓ8Cn 1z?X %@^q>{Ktxu7U!^&{/Z3NEp(4;H%rr'xP_(isl\XM጑.Cs־H{~$wc %PZV/_Rx 4#AwAOd| ^/Xñk9Pm;YIJ n#'TZ94>+cnK[S;܋ɼPMȑnĢ@n.y4;,}ֹ \(+: ,%}HhD8"*W ^KtdP=Z@- ?)ohjJs槌B~e|ڛ.OmkfHFQsDA1x"zЙDqC[Iy@rDv|QZ̛Y=j_Ew%–{k{W& NY BSt M!hCW c ƧaS/?}Ztjn3/5;-X!K4x;z0JL7'=IE$ (Þ3*O~4f ;*+4JU UeM#7a2Sldu%C€K"}5fh36'Hj:d#,t cc,5| oǏߗ, f&&Z&<N_. 64,. 
_jfLjx⋊iŒs~ޟ@V]78.4)u,;KmYfK^XtYT8 ,wLmaC> zeؾ&/@jܢsT_⿂qc="q{Yx/cVCaا2}h%̝ CLfob̊m_KqUI/ьYJ?fbkmq7^&LMP+#kǤֿ݄ĭ!:>"Vۢ^QdN4HkNw$P m?a*3•YlR8*lӕvVEJS'6H<~9dkv5CisKjVU@Y&,ag@6}7_cM2sHvpG\ R8 ~ Xs"Xip) g"Osag,#)S̈́M6!{S@/ %O[-5 ǍST÷ v5g SQ|QS?=0M Q lH{R.L;9a; ɭ]6W-7s %Nq/Tڋ7wDSR:s i1*Gi  kqXAImgb0bQMk͒'^>{ EgPp|aDNG=BFlPz CZ>0q6!~*-pO15efۃD>D5?梟c QE#ws^YUj%:[z?t3P|3qlَt\ 2iX6=}]`.X3S\D)ߝXip~ 9 9Y){ (w'$@è,&UM5fȠrgPz?x\cPqiKKY|UjizI[ׁGXN7=ĀOۆ=N9A#2tw-6A0")j( @$ier;"րhg8 ܬM$Opc(#Fj Q!h6|w887vx4EYdĽ#=P4l 9^ݦ' ,|{7 %]4K!נeAuϏӞџIǔ Z0 ie+Z^x&-z#[<פ'E N?_LCfQ)]i .[ o2L۩4%NA%1s̟dމyMWd/M]uBUbVxN$OQr6aXj_jJHr ؐxmBEOF E .FĬdLg.Td;vj/bB^k/QlM?:q'>!)}Q|HҗM!SvZ1 {lNJ٠UwftWoȹIQ>(i6L[^>#ӬQ'U`[H9w]W旪a;oի57ShMtuE~mVkMKPy5D;G:y[=%y9MdڝsKLeۧ?xJ~|EymTFEHBpR2'6Ŗb#8> ؆}lj$941'McXr:: <# QE'Q(z~/6Ȣyܹ8=]O8!ޏm'l6Vg{W&Br`Q'DZ_ݏoZT\"ttAUI{sz´  2ݸ_(i&n}C4$qz/_ܣ ؎ i$Y~CxRwS @=œ׹)wDޚAdð8 ]5(0 YIj|ᢥsGl g;b O𷠀8vgT\7uqc8[R߽64xOi@ȭF?msW(е}c *05? ̓2i#Cj҂iȩ[]6 .y>L@ Դ?\s|HPhbCE41rU ߅VGZ*AEE|mZ_ӮRN &āJCr_6/~*v#B8mU7Ro-Szc[-oCa`ČM㺤8P" |Ol_ߋ^8y NX:6LAKfA=S$J˖ps~\7oG%"\Ә()Р:5ͻZiGq6%_* e7F*,S:|,jTXo>/u3,9&2A@ ;0(75/ &'Zj٠1dq(P Ŧzï^)̆[@}8bLņc_q!qIH +a!k8fȪiʟvר㎟<hjۧ[9a09oRHBQ`GUa앢֦V߹RBI:^rȊ()&%`Ep_'Ξ0oIh!PEu醕fEj,YrIKiQldr:Fr}Jhv&%]7hfuųrg+-բQsW/< PB:3`CI,)͉qD t!]/x2h s @ת7sl}ыH@M-NI^Wm%猢rmjZK2icmt8Q`ΤZ;9x Hb9ޣ +Ð` a:?oPN 2/Q|3)4|S/3VUx!T/vt\0E…d;NC$۳E PDo9ij еϰj%<~wRyޖrF7M`H|-J9q邰 Lۋ|*kjw MOCpkP@D?C?fG\*[(:񕌥) >c0 +c2EfOG+ uҲ43}MjSi>2u"9a\!QRZVRTh_j-b];-܇`_9Dk_T?b+= '̝u֣AOe"vh t9cnX<4WSrLg6O7`j A@ߑUnŸ:A,gfO@HSk9=;jd Ƅl2 p+8&T_ܒTZ+on MV7K~Q;! z:Y5WkV'V[\*̬^>7($0cZsobN+qm9Mj5ga L;R='SW KA$ l9ء#}#zD@$Ȕ`A}s?#T^hpqC8 tbTDKr`FuwYWK6Lk7d}-r;emx?oKݘPug@n! 
|hͻ#%\}ϔCjeYM4AS@/!*|g -/5!)RXId(^=asFGXv.v-4sEP]I,ֈ;Dـσ}g߬:^,)+eI6'M'C8|ГU8'RDq= ~@~;M9(ph>ˑD5J˜ o՞-!Qf*Eӻ磂Wغ>}MolE(V<&@ &5q%P !ȑZhFMF~O)J_GZSH{˧3"Y<n;l=p'<*K<+lG W 7L3 ArJQҹYUVo&*iCl ZLV+<*~6D vZIsu_B ]pFtND7DJohS4(c!eX!#PJj,dM N0$5 jJEF,C ֙ ;0z!~0ZQ NTbYZnlVLx7ѓ|8Ա˛d=z|?a/MF~*$uV#V\tBU#jĻV *0q?RH Pױ Dt=|/fN- , ̮y[k|fwL)}4`ıQdk*IyثeD28{XkTHK^( hkXE u6c(>jv f ʐ GsusYErw ٫>$q{WtF55)YGdQS!,tHgV) G؅EY͘Q*+:= $8aO.-j|}pjnդ (V w3j\d0 I)-XФ# ȑ9^W|dxT2򧵶e sȑE9M{I&Lq%PY/x{PG{vq73Hŀ :ч 9M/K"іl zD#1e {ᑊx{`_ Ɠb5?8z;c!q`X5.VM׆PjO)JOdS|h4UGȮYVHAo];W7CY^OjOD$lDWHF~ ˚ ">=H-!FOPǻ1iV3Gif6ÌM'3Q?hu{cٻ8\:^tPMXٞk*4]l^赉SJZ, 7D\\42C9˯Xn䔰kM~_$}6f.X` #Y}L'f~> 2P)X{ɟk5Kzw[Gt!ZѨ1%Ja;;C5wKAm`35z EkJW">5OoO8 GoH:9ū-WFF݅1OR([mSfq|/]ML8 NzX-rru˻K;_<nmO>0uDi1to٨9Ҵ=XXĐXDGOc ϰn %i˽y*q+o\&@5]Y΁u9njcS{>wqq 6͗[h nԝXKk`\8B.uOEtvٰTNY9 MU=dz@<;ZHU:ܰ2=/FCr5/PU)m*^ZM dW.޸\:h$.e&k)0ߑ¢#nU1}baB^:Cp̾XCT٭M\}mk Dy8QR+=l2;#64<~Ykmi.BE~q\gyi=!J[3:9My˱0,G@:քUFzs!sN)B&z :țxfL/Jח\Cq02HRN@w6z`7Uou͙v>#]{g Z_'\_]@ =-,F BSm$pI(0 f1Xoͫ*t+>-~wK&1|2Cw#L㑽< ǁ$I1;s MC"r,WP+2V; O8ds! OPqc(nLµH:>ڜm8P2p bwuČp?{SL{O #j\%Yi*#)rguO`qeI"r*>76Feau.s"p6 >c:%^^f + 9㐬}Xyq[_SFim+o{k{Di. f% U.aڡE1]E"5SY[^Rѱ=t"r؆c-/ > )O~]ZM`V;Yw78kgR-pj-TꂷXYjn=SpC(y jp~(];od&NkѽxR[Jy .tvYäTTR8D@q3]@ub.f Wqάw=\BEv*|V8/gid Ǩw94_z.'RY4EXuCCY>A/`OXY9 ZMV-9ЁOtų=/:6؎d= uwN;6h '$F RKgtF-[%c wC2MVz>GaK4- :aaeMPw \"`{JNjgiЬ2hM*KaD)<g/eH𨓊à.x%Ew 5FL݌H5ʐ;`@k"o񔡲n`_)h1̪KO'A&ABB\fO^/)Otwf>5snHmJLXF’AՓW5M@) 6xlWQ@uک.@#m&=X8ӦG[tH7ts5ŻTgkz/ H)) y9!>y5@?pFY'A:}Ch`ݹ`x gեlPd4.S{GC0kǏ^LaMywWȱН0组raQ>tIĢuڱ oP1Xu: =s+;5}RќD(YY?MB@ b!"=~1Y ٰ-xZU)rRUxhӤr5" qlw fTi>a(X.FMq*B((dMfqsGFaO*1jjb|MA_wtBև@) a5;n(LEu .`kKUP_f]pﭦFl#r RD-bg8uloGy҂|ʶZ<480dҔ58|rZ-ݻ;x\XI^?Ig|pYr.]5&I!UJȤF[XVVm@GIP O,)3'fYnc'ru>bKu| IbFß*oF: |O*t7Wz \ `T,S _}#zx+D,UsTdũ@hc0IEjqhkC>{G,AɯdP=P7] at`./( ieׅ"OQS lQ2*MΟ|gvE>DX6IW8ws3*J:RܷsnXU$|H l͟'xCrgœdcpV.YpN9֚QGW1]i_cV~Jfg g6vu*d.$&U? 
JbL;Rew&k ?e{ Eo-7koJ7:?HpeBJ6s.'obEKqy/TPq?<oRf#יbx8]YUֵZ7?9K&%i6 ː-cBty5S,qz;j 4m'Xx.~T:mvIŃ@}$iS۟Nt[0UWnYIiȝ8(a%fkv{7"0*سMc=2 f]n0'boe4>{=zZ>-%XքtĽnCg7b;*EfLˏb~AB h,j?&S~.5Enn#&Q"9!nua/Y¶ :uQ 4x'G# X8ˬI6s%1iG!̚ +k$j"0FzmX(;#[3ǷHC2kUGǫھgʫ'XY ӼSTlbgTFzE&BZ91= ls>K' XJw^]reIdcVUm}ѪEBnb S<٣ckYc^ |_3z0'=aNr}YFJD{g Pz^}|~s˂ O{4]4&VY_ %t_(uROb[Fk. X}%Exj4nٸGg8hf B|r |15T |LV#GOGAf9]NgnK|/|Зǰ3(IlMqD0 ![kjԠ?oƘ#HEˊLduA^ʃ[_SuTI(g2(_-<?G:IpE;=N@Պr% AG4G6"`s7SYO%V{ux#Y6foWJaQ}e"x]G;Cxu'.ni@̔'wUWOZ-ۅudzRΣI0mLŞeIh`Jc. p!Da+:,+(RUwgfUK?In'O@ ?Ei+ X/VK =CUr|+u#L-_o񵶚{fhy5aғ>67Tݽ;Ā5 BZ>uқq>ڭzq) Ĺ> Uc!iY$ XfxB21es&/!C ֈ.J[Gh;5ۯ-p{Qiл*1VUuBcזO**.<6ҼZtwlt&M+%.G@2@'A46MK eBg.[]&JmPHAμJW+^y%X\Ho0L F*θJ7<>I7)J,ȘyiM^=yeo7Oʫb@iBl $K~&/=h-_js`"a1IOcQ1qpo&FZr^NWrT^[$lEuvw׌T|qU*[dF*vsaNLןAlTy_2DZQu2s@dژs{>Lҙ a&8L.$ya~J^E# t61\aR}E!FjlJcLf/ 8c&#dz~<(1{Mΰ[9<|S7rdt(PG96Ȕ3-~ 4SP:3@F[U=a|׃X߾똴]fgm7 6C m&X ܓYh][ؚ "ΦrYbj-r|Whpا ,9椿%Z"QZJV!+`b{M _|$tW#_8=AE=&3c_e ]#@=#"*JK 0z9>D6=DW1 [@9n3L5(Xt~ c8MUիO(!ˠÇ\n. -|]?gPtE~=GT#O4d{P!}|n*yQl%cg^lz,]`s=%R~e}zb<`EVu 4?t1m4+|'bŢl:;V;en^:BpV -nsaʭL!z8uce}HNm9SnP$מzYTv{c OHC?}R n.V ބM 1 t+ex`}d%#j.d%cU3;'ɹx~hem)tKhV7zoXK~b/GE kY wYGb (~jKa}x "plHWA/V6ڋ]$/VvfS bgP-'mwM] v/~;W_n,؋:wAdL#ݚv nhvMܱ"ֿmx&),YQݒr!wys }tdU6yhfAħ0o SDVC4Pvm>{Eϕ%F Q 0z(JKH)/+#܀1B–{0V5F_M3rj:q=!#Yg-ZaGD1@~ԬlP8Á]PsM$ǎh$;&NW 2ο[0km3*h:V赒ZtJ^*,UJgc.[W%ĄNk0 : *Dz vJ&ύ\"sj0ȏI;ppݏ)R\8J_ΊLL_H(Jظ;whzڃW%6@pIXV|vǼ`Wc5XLvhۢ,x/6$w"}(p>/enͷEЕ{$a`0luCL}z±?_ߘ6t uSBKYR-U/^K?`\lձM#-_|y%/r"~Mg?m^M#aܳtZb8Uuը/}@Ž{B ^er a,VN `.ҵ0h. ufsqqġ.Ыd +s!4j~mj3 aT]O@bqJ}LJx7{]1a"G]6<.I=8]3A$RHS$~R-4O>F9w/1*7ƞRb"6!}+e?WȨWBd ᥕm6>9J><-˝jr0%h%7 uX*GAy&懕8;FIRЍlYs(}暊@2dec80L:735P kJz; vZGsCF\}*Z5X '2?}4(X)7^ .X ΰ%`:͏qU\lH4@b*nn%E »"ڳBv{pPD}QoGEo_:x`dĻDl5Q8kIܻ  `G2|8þmKeyr^ߜɈJVy7p~ 9BpMr? D5+b3F$JYpCDk ,qq@o?:›4 {<ٶ4JжvUM!2h06Uǡe@j2i=uo7 p\hGoZ=Np4^( i~5n)`p) JÇGa? 
2$0R6_ܺ=g~b 3Avm)ADZI v\>ۺer˚نzB5>h>5##i3NyBͯujӛhlw_4o\;+!?1Ntr*#{PӐ>nOQF5(hi UNXrL@R0c Hdu r_<`(ޗ>5!1˛,^:($Ӱ9 4 e"b]9Ԩ?֏g|JD[@}W[^?mn%=^kUEMk JpUwPekHnOzX άT{z`I5U'7rڏ6oڍR*Kּ@u(aafr_w@ߌq^Ϻ'֬}8,o2m"x]HB>`6X5>G[OcC@J{$(CNb ԯ%\EM 4&oĸY?QJUBų<86 b֓Ig I@{M9T=Z;.jqݵa\ITD&j~V6CIiF+S43~o@1)*F]o(h#lG0h>sm7c5JS+qtaZZ6ۢlk_t4 2mWfLZvP= 4m՜qOY7\VR=iA=1cuǯd=V^gx?-KM-2|R!84J?QJGPSt[cNYT'Z~6m0C[gB=_ BD%i3-0v)pWۨFvHw9 T˪KgiLqSKb̙dss͓C ͡x>t(5ZuJǢ[u$وY1JW?'"F1VT4f?0,^ [ew9Glfq Wm@~9dA&tHT{K.e+otm)$"h JEP>܋]:Pa^*M˩)9JOz+e3v|7nujZZxR#*L Cd$xV i8@3ďfhVr9Tj]x5J!ťGq/mFxb[ȳp'&(mZR6 G O!G:7 W }XՊQ(dReًk~ϮžJ2>cy;;9Jƚ wiyIBVbִ\RcL<r KsEKcS(2M5zYH9T.x.O+-ug{dU1YݪhRaUu#lx9-fҔ50!O'D&#VQ"Ceֶ'QNC &;37ݜFy7r#kP;J,UŭRN{=H١L( >][` !vvɧԍX4e< 7k7(Ngu!Y|5~#e4f_w[?V39$fp0Khk^[ ]y>G: yJ[$X ܣm&?Fb`hEJd;9КyO4|e3+x;UYkD|G[T|8 ~XD֊Y!0aLRР#L_׮ij nP}{fvyWe0)= c(iOkrt mDS20[b9K]#8 @FI]̵lpKuw};썌 .(|c&ml,Y1%pϓWZ&GytEO4 2<ьCs'>iimA%J,MqLic | $ӌccV7BD,ျD*տoiTk^S)sYf \5K#ҘRanC'ѣUࡂw* ׌h xk廁TQߺ ~/[XϠ?R2rq혖sv]5oD2č4uo 'Zؙ豧o[=4c6$E4HΡ !|6o.漫\ n|_Uʜ#c<|Kx;4ϴGԼN[ztjձ {P3t)aI?vW"(Mb2Op9mF`+9k/{H ?":U~q31\ fxfR0 ؋ D q;: `N.׀X?;k^c4V3wJ{R15^+A`7 Л|k gٛ|P9站Bȫv(>I¼U-_? 
bs|}Id*P1ۉ:E=T?OA2\ ~к(,D[ۉ hoTLROx}<*_;݂OLZ5$(zuʘ㑛3uD=e@*E797ZHnZkaBA,wR |e,lMB9ϑm-*'k,X֓؆.,V[cun!yP*1Hgs`ﶛFNp6n8IУkUB}kϗ+oc}÷ ] B֚S6`T%tWC0sj>{*'_ xbU̻G%z4K;,LYc0!8yY)!)7S}/5Љ>O{]JT3!@m}:D 1mk1x0yXmqFaU N4숺Cl:2nPV溗5=D Ɨ<_Ad.V=|99mL9Bb(gÁ]/K|%*z\.踑n thR~cj]| bKk7LhTbĒz/ofV Rf&sɤ"⯤d3[ZȽlEeePTGeԂoHz <~bwePNu֩)qV5w'; s~K>Љ%,?_-s>y ¥.]VWs@v_w3Pi ED+ʴnlRjlB / QyфK(GluQ;&5ˇƞ\*fSb'uIX ?;Qn*2[*@$*YT#3%[bmkBwB os;D׃d|Bw3>FmwRATd%`<^+7O}m<-ey VcƋ#38fO3Qfm zy;&`<@b9U%[,QDz-"yV>(o:P Π/DںpG]7ni$A| tQc+qzqژŠXʔw a+@5E&-9@#WN ko?.ጠ=K3 ;E}+`=xMm4,6zWҊ7s>V`䓌 AqIo߷ZgI15ƋVOj\, }-Z)sD;o}F{l}@UGsEC|VK5જG DdR7KK&I_cBɿ,'?sCe$$cQ[t ಟ^)MUSSOGa yܸQEj#;X:%KE|CGsԧWd,&wer0(0 ̼A}M#8VJdaG 7bOGΚoT HSj\xJZqTN~"˩ZcCoIʼnc[N-[Lq$0"J dGdۭ4Q_}_"0X^)AyTwr=:f`ndD,=,}\-- O3#w!1]虁GgN%`}dCZ;@oS>nwGk#${=ߧ)Q9gfs/i3uAK6/~j9\;G̪W' ̉%Ǟ@ov~| 5vk!@#S( <*Fφecv: |Sml77р=VsRUi\*Y<Du;nxFӷRRk_ bOOu T i2a7B¸b2:!6tVzHH5kli߻H yF@PV :"Č-ԲVVCH1F " JUF 3mL&ϛK~ߎ+w\ܠ&jLj?v @Y;  bj,Z-pa!D|dbX/'i*hΝ$S2a ճ0(=Tő4-(LT8_ q{ Ҟm||ZQoAf ⹝C T;ρec>&ކ tO>'y쀜x+ *o="&=}c[: Z_OFTZXZ&uʁ){GK3̻Cjb:lo45?PӜy$/Ox" Dՙז< WNijFE3*)hijj[h+lK:_>Lt["˜i9h Ӊ&NX^1)&`5_ ê]e+t7It:s6{Y4<'hExR|i|=2ZLC3`Lq_w 7sd(l9f+O 61SL thyBQ6% k{84~WGE.wEb]lwb=' &nL$ zB5PU&Q]Vɝz޼CJ^#[L{zA0 aȷ=ZQzc xE{{'i\v NLf]!0b2\ʫ,K c0a/Y}:c'i*`*;M6I h,V =i*hbVa6ON/>Z{eLm_ p!9\]r$W]l;Ǘ{V${n{m(A֤iGdA#9 G2zg7EC4jO[*:@o  G '28L<}p!k95:(p;]'DoMBUNt )*64 Й,V"(OQ=_rok6jv&cmd)cl .vwb:me՜K"᥼/z兄aa>#Ι{kĤ|_97*[E ߥ3;3Lgvq<L'؞L4cTcJqCp1j_ }Omox0]^;Ee,~ nD= jSGWx t{|5YyHF{> q*>{FKND 3mmjQh֘>USx [\ "$|5PF.Ni^h2y| F) C`~N"\!/ Na fkMZO%Cܪsn18yLO\)ZCEزVdu-S͟A`7=QKdZڿc`_B[o"f v |ug) .]aڃ넅՝8( L`]$R1ʲ׋DYX.D߳h\_ڕ$ި`8<5#lW$cMjI cS ܄k= 2p]@%:vDq S;v tBtmXaɖ^ KOM-BSєU!Rrosqj>go nXIEPGľmLE:+gzM{ v1 ѧQ}YBfDCAcp @Ri#c vXD-uE5;S>-I~"MuY><.[`L|7č4 SkZ-!~o!kip,-.s%zq*e)ٳw-z(| |OUwI8va2ZP,܂ ]gÓ On>lI,ѐ2`cBUU2U/1u? $Џ#l$PS>PGHk>)S!R|VpX5RU#:, Ziy i"Ҽy-Me^v؜6sLN@.&ebWP&<1'Ѿo}j\&}tbMX˹QGBK- ٔ@e.ٌ]$E0n`1 NS>Nf*!ߙuVbb4#ldk ˚oAT"j b dӟƳA_7lps7! 
6֙m th -̶$}tEuV{x7q(5[8M" x3X鈀h!zWmSѳD2P msIO(R03D% ;$֨ҪrGR|d{ UQb֢:~6h<%|5[ 8RgAZneOjhAqij 0ā'9 z4 $h`c)QVXIac'Z,q-?̮(DRHs*4xFSeptq/|7#x'Ó/|,|/j&`qm?.ڒەV޿bqa:$a3 e *&Q姡G$Nĭ zd1dq8PD ~|T0]D6V[ {$k-R'4%))~_8pߜܽrɐ1 [\S1MlL=uJę*Fr {Y恻@KYQՈcv)?Ԉ*/HJs-U <=%=d=n @8"㋍6fƲ5bo+S좾Kܿ;8pM>ipb1;#)߅NRXw!B®}SQA6ZwwKU6Zdm?>p~qsX=DT$V0ߏ55ϭ/r최GG>FS/8x| v=IUMιvU|}FU\z2:Gyo)*h+?S,Qp9rei D {%υ!fvA9WaqsU"eq߬,9PC"\y<xW ݵ'md"HS6ӻl( 5w#ح'/* Tߵu'. \ȮDk3*~5fh)vpBz$H֪'P޾G8M=X3RՒ<0W ^jeLѳMJ*JSp"86>Bށ jvxg5rW} m@ )5-/}g5}^WwY 6hy LJ痘?GB|Ym60>'v{[eՏ&Es}?j E"`(nBrcP )><=U-y\=Oҍ,raɖg6֯:Y# s \K~N#KZ-`D(\zwa@llBWdt?USYl<Ǐ9~i-R%A]\5=OI؄ =Qj ćkZͷJj p颬T.$wҵղGi),F;Aeu2=o7 c}E*ٔ97ıx_:]ez!̩]x$}ܫWro ! ̟lݥ]5!/'1ڷP5cC[/ mѮ)j#DZ l|m3^wDCͯ(;}G~UT$]K8cF8|A1(MJozѲtG"T֗=J!'_Y8#ݴkZ ϗ⠙Jk& D xb.p(`c=ÃNLOݳʗDcN 4eD \)q#8l2I>Lj[ŻO) <0;b}ԩdTԼox]Yimɐ)MluBir~Uݩ S9laĩRJ:P7$Wn:;P:H ^oXe'j@6;8wNU#swAEHkEoY"K@;^a3V2 rbD$)$S ݂%.QQjĚ 8GI9_k_i4хSB񏙙}/ÅlpY1?e7 nŻHR_ľWQ=$` LVmat儕q} +5y.:44\J:]#] /+vvć|,W#>:[ A~Zs31 f8 z,pPklG?}Ȏd5-MKpx֪0;*-"ʔԉv'>v*p=Yɨ۽4>,@ABq5ܟaك{bƂúdc1ʽx<`Rr+ֹ`ݑ]C|5$##:y. (7 ¤Ma*[ap:YGR3&`ؐtB<_Ų`ʟv(dvfw#Ƞ I`G- g~vڧl(Tԃ;uKibZ{!p}"LiEHY'/OPu̚/i [,7&|C 'w?!ӣ>j@e0y$Adl-<鿣8tmw,U:]Т_Xs:ߡ;'SW*t^eY"CT3wq6 ߫`)bV!_Qe)M=W- -JcnP+:39-S$L0F"M(uHdJt#' ?DNGX!|Yˍ&B޴ĴHZyhl+xt&[02f3Oo 9m8Gtnve2MYf;û4]GܠW|Wyv˝A-jǬD -zHCy㗾ȤKt=?*|3r QZM;f>`3>&Lq ҿ*R1~E~|f3~k@U-UvHB'UbOd-}SVcxk@F:(|K_7ineBJpJc4/62-QgnSnSkYឍeDsȡP? 0&VPT8!^MQe\뽉2kٰ%"Ϯ\B oʑV'>ċ Cn M-ԂdFYZaC%M\=rgq4hFw4"6/w8]vqM-1w=2MNڎiw7!W!h)&GU 1g!UfW*Yڭ[J@!i=7=Go 1Ъ7͟~YڬUC#/H5B ˘jM<͊VΝecmRnڭC%u>a"*e̼|fh JFy%JP좍a⨉P)_ wf!9g(#CHZ"PnA34+qtw1 ^22!xOa#̼4 U#tOwus̮g? 
#UI{N4iFr+EKKbM2߷㷢eZGl"\H^d9|~Z/^ģDfm\=h-ṋ_^jfFwLs+͝@VPY4<`nW V[/lozd)[kDIlbO Z_|;1XBti?jt:V=J29RO`:>(+kf.ikWMw]hR[3vTE>sA/\TJLX,.+ʧn!@Aa`^$ hl WpiN;4VI`$r{-kfFȻ^Tvu 467G:IOa1uJqs'EQUTh,SDRL",i R]P᱑kZ]VY=(S9 ޫDm?QS@XSޠ,D@t=)A\5hNe׶pbljZY PﮔvTW*Yw]DմOȹUMt#dDMv@9tR/ KwThtlN4lX՝K:=v\3 \︺ ԕx=@*huA;\V6p3yC&%> Vq?tZQ87g–~Q4XC gt#zf8l3vV,`9~$v E!.E$VKe`"#"NHOަ/Ңλ|Bҷ8MJ+xp6l>wmM+`a&q PyDz1p_jI^Q@<2ϧ!@6JӶ[EINJPW?S;3Іێc1K4qW~tg7Sngcn` .~ЫiU^zI5S[GG]+ZvZ'=09޽e;?;MCiTFᧅ̴ccܡỠ~_@Hv?Onjݽ3 >i\q>dp2I;l_[IIU{J\W'LPi\VGD\ĤbRs(Y~Zݐi{ .ϳ!gQϾO~JJ*@._Ǡ}YߞÑ*1$JʖqR=1 %yѳl*H;0뷽VeP È1=sFkQf"\5b3Q6g!bX<.!= 4(z̹pts}­] M"H!k:R+Eyv$+#[{o,Yc7-,S#C >6&wdIӕi˭;ʶ^XntĽYExqvhQx ,Dkqi7?CC~H6+8%q6k It4O@}YP>zƪ/ }YaC#p(뉛͆o }]7^5 ~T.~IϟꐐؾV 6~,ޫe>Z҉-ن@gT >nMŹk® -Z~ZN9.{qb~,#Wo6rMdrČGf2eO 4]2HN@V2Q%7)nuW8VnKJ%iArTP/ pLi <_mMl“F A Wv(Y뵡{(5b2p'-lƀ:*NEiц%Pe_pZYBbGB5+poF/RN%|v1aZ{DKEz)<<"q`(RtL"*=H7 )cm&<3uf?Vh`Z 課 * EAKMEUzmQح_c4#}y5ku"؟S20).Lܙb榪_̵ڄ7J ntowVE=. MhZl,b~iu8+7$;B%yBfOFnju#Sgb("DE:$:3RʒykSXMN/ɂbvokPQ ދREaϫTO{vfŭKJWnr 9!1_tɜ6IN2RMr֍Qe6- +0lNh#rcn>~d$S# "ʘ=TO||XOB᳷P?$s4~"WR#Lǩ"4{{?Hws r.&k4_/|gl+->cNwv)mzNVu_|WyPFW+QQ"˘{r)QJEtḬuL O.ϭCY"Y(o#l8|H/IP?0ELjKI[5_>.1N\qLHɊ}DYj KQvHch1  yD:(!kA'߯ĐR+(؆_Ǖ.&ҝMGea2Usf[^E؄MHtgkoy?;GՆq4&\ֽyǢ7jr}K_Aq}O*ĉ(_L/t{ZMpG~FO](Xcj gm-ӊ4Pr7j5~ZIznTd?čF4&Ep:dRjɼF]"V&'B%wLS >rƸJ3ǛHrKk}Gɚ߸ fZsqx[,_2F4 =CU\+ے٪ʄkLGH3!QOX w׊(6ǃ`8~=?FTt/ ȿL߅`>s~=Ƣ蒄BuQE ςFtQ{Ⱦ(Zq DoPL0Br#h'oψ! 
raO-x֧ X Xă:5ݸ^\0\~!LilBN}9c|%L_xec'1fqlB7nmK4@og^@)~i3y餕TA}{׌j 5P d֋>M*>f8ۻ% p]bU&af3=&d?hy۱VR Q[YEO:mۆRpZJ[BJWb'5Z5HuJ|jYU4n'+N&x'P,Jo3,),01;s+A\ kl,p>bPFŸʾQrq-[oI15Ǽ.pl=:N[@lvfSn}lO> 9W#9G9/{ЩNatI(0ޘ0 VcFzpŠJ;4 ^s .NE< s  ϏYeڝFOd~"3 qs\V()܏n<1jERܼЖO'!y2m$B&%XxhgW&k.Bɀޢ5vVpz|r/|%5xS C& Ur(wZ#Nb`\yHR*AvEXJl>^K^-C$t%uOOdw)KDM?Er$w+9߫ fyȏ~b!Ӊz$-]c.0!e&Lr˶++By5Vi̧ٓxWH[f˪ pYDofqbU ns&ݤ_.*XDǣe̪v&p`1rõyO>~2n$ ꩤFMAFZ:6z)Z*]"  rjn}׸96G ,i z"?Жu vUҹb͒P2b뺇~*QB0L[GiBdTJQ"4EJGV纀5f, ܪ^i˘d#.*$SNv4J[}>@1#Sk:k4OգrcYt۟%F>ɱpxGRxL4<Qw!V_T!䔏&^I^6;yc RC67jJ)2Hсvc3E8SR0~7lj#X#d9|cz_ ,0/3ڝP"*֋;±yKn!(>oJemұrj2z2O޴J逡[OTѦIB&Xa} Z"ruWtvx8 !/Ϫ XRIw\+BcέEk*}Ъ$RBLs5vp߼[D.yX>`u>NOf%/?({ڳKsD!@]] 63xʙmպMf*}'G!\ec1̏ ^;Q ]]N;SLӘDݑ?-ʉ}hC_iGt%ɛ7ud,@mx"a&C Pzq;Fi0pM7jjCL|PmFHqB!vj:bQev+0-淪vdЩ L +a4h֠boEP⭊`;!0e%v{O`X\( deNt6Iʐ`$zE5ehnFcBLj{,m.@x%7]!R1r۠F nhaΒ<_702DH?3EWJq9eP1u$ط~+3]|<Ŋύbļ8Ʃ8n">l_~Q4"~j zZsN]0 <Ɍ2_}Ni|/K4%Д~kĕ>Ԣ7 L~ &e3XV(5!{+l{_S]75^~h&>al?}ɞtgO*M12/Dr^4lHH;t|P7 =jFS ԯNro1W(?Yep@n'S~q< Afa8_gk Uث֜.O۽$[pl2!4vJU_ \ [ky Eauuls!Oo%?,pY̯ky'D]A_2dR. v?¬@un>F O(^Chr&[䔈`,瓻7bߛCXi4[xf)spgh5П%Κd(&f~ÓTmbh× ؕk. Qu|{՝q{ry|OGEOvr4qNtXG6t d f}&l>WWt\2t)Vn|wƼi,W?ϠNjY,jH]·FxTͻcM.m6>!}{Znӳ׿gJ:!6;HWPWМ.U^ Hy~5u.ɡ2p8֤ta"zz/6! Vq ?> }6FI4B[:8ژ!toOJ%6C,EI#Y##u`C!O֊\67I^+b`Eīxj`1eci.-/X<,qM#]U qNM‘t_?DTP< ipslj8*rZ9q-զ9S%eB}xyУ"J/Ɛ?<)XU`LG8B)A °~TnLZ *ˏ + 7IXREOwh]\iN5xf+(-IM~0\~NKEK8*l Z]QCQ2b)#q"|WBQfFK' ?R`ePAoP\!@ \HJD,<<{"ŀҀp+wh Rh[Xm`v~b ]`4iS#2OGKT&R51SASo\/b}r5Q?KWCG2)|ahRIɴp'o^ */d2 K"Q ch8,Oi/++ 7ɡ*؄A5փpap2k썼x\󤼼rkhˑ΃]MIskw" C~G;a i],$.N8EK0jP.R mȻ[M|=㨿 XeUeC]Sg uL ;g7#hPzbdV=ޑT[ ˌ vSźn?pAW7]Q<r`!9x$ۢ)UQa–)R aZOIJ<+. gsatcd6vН[м*] WbˠՈ̍c=LXHGT*m]P?rnǡPb"tJJz;[3.q`XM)TS "n'}EH:}?>/wƼR|=}Ċ ۏfnhw! GUm-{73\)q'ReE,GrՑMs*6xLq{@XbFnQr<.zblj,,o#r`~|[ӦE) MX)fJ6"[bR{q]ΝxcoۓN t Ã1 h8qrxI.L GbUz5J{cVĻZ! 
lMbIpjE`컸O띤˘~ uapC%=w`z^^Z,akb/h2H yƵJI{6Q:@_(&rؑ6 ]5 N|Ls"#T`@ Zܺ)yqWF;q,#+eKh}G4xb1 |6rk7 VyZ|hRچ@ '3"^䍪*ʈ{NqxΣT @G/tGmn&'f@US*(Z>=NjHe(~zhʕ6̹#nH&ȳR@ <ӏ.'E+S@r 8dkO4LBQ27?iOqm1 @SZ*AaP>yVe;gSBf|,L֌޶ -,XSaRC8`T1=k숱F'frl)]j(Yk %Ep=j2kd/= 3f7)$K-.U.p'G^ 'Ajsz5%40ȝ(x1ӥns[c%{te0xl@5o8! #n+3wSP'' r)-p~v}wЭ2~H%j.Ķ{g`~Km:rړVI0ȁK72xoU/3_8>)#*,.ExlD̠E=gބ˞æa7vqk$ܠŜjh &Vd.8Ϯ-VTz_S7QQBe|&TX ;!(@)O(NF\1jւ}A@5:kK5OIŤΒdVSw Up`_V"#OhQ-.(]xv.К.9jnz@00p[gaШ*9@/к8vR Sy8! qB@϶B'1^KL@M'5 k4WSfvbY킶+buw̢R,v_opoGl?}<;mDoK^g,h=}Ak"'AA5؛6$O 2|ywUs"]r+b,rR?~H$ "k[[ ~iXJ.+KD0b:u8*kS >z w] z!h 4i@ĠmvhpDtl [ ertD]"S8Px" Y:κOsIڍq%/[6줘V1ck˵L>7Uyl$!dZH< "/_tu/ݹd.廻.,J\y\Xu,sP;h9ρ~Wp8RχH_d%|?EW@VNm/yqFIn-!5u Ynxlui-knY#I RdЏ:SMZ߭or}ӶB@.ASPV|Q,sxHvtjpR"-~BTգӏ3dk+1X63s4m ut迾e X> AK9Yv#ÌPyK" ML=݉[&o;{lcX*A} _,;[vdzba5` \0P>^sG.hZ=A[(2LHiAbujZֹT{JUİ"g"dl.\D^9*X˺SO>DvpɻEo$#/끽)h4-0wm$v'o}+d]jY`{}F\>OKvↀ7IN.EhXxɆqC]4hէ@-s6aς@ѩ Je=/8Dj 6O ih3O;j ͍Vjm4-v$`h^?Yu/>d{UUʓiW]2[a+:a@RQ5eҬgA]SU[$&g Mʗ e^Z5׏T^0VN .?8|C &4 罎ȭħ)JTC :78+Or|)uaW@"K#Yv*0>'}h&2q Vu;7/\ ֭UD1/JѰ. q!K +Lq|Gdo~&xc %UDai`-GR ]G0؞e,,X?0ؔb,PHKSQ~Z^:tGYrNR?vPVQLmzSr+OUIc"4G${1U#|aIuc"c6zXv'6fseO{}ɴD>Zٛ$&s%RRtOC^wb/Xt%{Efh!ޅ0J*\aYద>J$?%A-yPE01|#Xށq˽߄wVWlNhycO9CXo_/(69Xok Q%ɈD(. MH o" Z{Tp Eп~ hUPK(r6/ jUS|# ~$oȐ8>(F."rM5"g T8k=XJ~E823XM O_~&#cu![|P~{P1('D O^%*2\W'Ѯ,a^e`HS^,Q=dtOb6}˚]4ۻ{ udWf9ڶ>x(LgU˯?dhdzo4H܉W"YH;VuA^~m]QSc#`]/I=?9pǡ Iӳ8YAiEASO2˺Ug_erp Yb6upnȿ@=@Q:/ r<>ud@/)bl:X?B3P4Lu 2p|/3IkH>20U0̨$N_Kꗜ2֔f7h>jƏ2<|kOLhb`-foQo8.(z@j'C'+R+׻\^I/-w~F5®@&^|Ϟc%\0O&Ӓ] l] k r)%53GXSū8k@8tcu S.  
G* T!,)~(8h l|1q~ҟR cFzcmzJݼ/QRf˳P璇f |n1 %KB8Vc`x"?eOGbxZh?ỏH&9~Ħ )euuRfz;ctwhRWAxJWL3 GwM>4&o>|m gs|HS?5zpNvZo~-6\tźjȩϙ0[2;BrkV'Ԟ*\I"S(?Jfh#'2*5rf^I'u[:-bSGFo̒:i۹@ӄ ?Xp'h(&c-$7T!~L7AB)i_(*B'͒ o * ʟ`q/PU1wW%Óu+kw1y0)CulGQ pnێoOWc>k*ӑJ)y#-pCQ ME=V`ʃ[ۖO7 OӪ $Ƀ[ &gkR _ *,(/NȀx§ Pm:ŋl,21Q)'UFv1*(Ne(tJP;$UHdN :ab"m=,wN?›<@-F2#B%%D$f(ӳ|ja +t3>Ͻ̈́M=[YCWIi\5AI%P/r@{]EEiZ hX_FD$eMwq~'EB,G,O KLKYUžىPge;Vd!b@YGp)QZd*}q&v6/uHV`M.sLH 9_魢O"^Q.¢¾J˶\|cƚXKJRk9L8纠vGf`e}"i>8@Jm|M?.%lzi%Xļ|/]Rhz$#y0:@$&)P0gp,EDq#wf( Z n~r"Gt 4NIv e/,n$*TT6S]Jm+!Dw cK@T>-FqfQG&jC㔶3 RrFĊ~'Y'JN1v +>#rH ԰GgպpmN+R^:[MaScY= +-c!Z cHH,▣ṣp)Iy{'4ŹАn+tg!7KWlgD , )GX]fi >^8P:H)\ ^톙En䕳QO[L@"Cd`ߘ`Vkȅb'{Љclp'ꎕ6$p|Y{&UKǍt/ ג>ܔ;@` ̖rtr/Y5~/=ZhZ^#Q}>w[WM7wp! fîԠb6CRj~"vK/BV\p >RDko aܐ^2SᱮIjzzȬV1 jY\㖺C}TZC rAWۄ-芗#|?8pGmaOcc H"|U{@Tcle14vuQ+a3W,{dEVux!zRJ\=;h)ta @=(pr3xw5yڇ9G❤hz4q[U%|N)ByQ쿙:7$o&ESڞH:-lhԸ$.s'BytHkW@/n՝'( $=.P} a D:U6To\*k?,Fuש~[9ѺX ef-έznD/3 ,U`FLJ??,4쀰@\qPk+snsbga/&h!8_hț m,27Eed[*5EĘ댨+W<{\.FVFl >a*K;V}BۏmcXEIş{7d@ds6O'9L^(jf䶵m֡97"SW>hVB Nmv?A0ugE)Z6}t-erøX+^sJ;0'9I0g1;|SPd|ÑJwKʝ('V7mAoR:I5)t!6-藸 ?^rZn mqt2ƹH^)ɶ-[ך+s]W5$ Rǒ('ELNJh3%A&{Of#BkSL4Z ; t̙O^Yu|1tۀ`0u/$c*idIA89c[}W(xh%8wU4ʱq y|qCvkew T!:֬;=\,dvNÖVtL'cb?c OZsb(4 [oo]' N: tqw@1WtT>;vOUޝ7ٸ?U!%}i2H%@ed@HdBM kI@Sp3cRvBN~%vLjhe["?j:Dy=dU Dˉoh>Ro\{+ĻJ%y2%(1̅8oDžn>QG1 Ƌ;O2pdX{xB-өC`DNUTս#++` 678KC7Cټ(%#acm1T)/tq6SI6L ?E h@nOά})w.?ƺo-*9|;Fs6}i똣76BGG ̮=v\IH;[1V]. B8+i! 
(͕ӹDžs/g/~:f,]Ռ3>|8!]GU ?H%K`ί6Fw ٽ]ԚÕ;j.r6`榘*g{ێ#cɲ9?Kh f|J4,,XgLh>F=vt27Յeoy$, mzeLUL`$X {:?F88sg\nsZ63Hi›i; Nj_DKW38:y/%}&*wG纫[(Krt1(p|HuHB"ɬYvg-R.k(L9P)@<pZ}ȃ?OQ5LJc!wvSpi͗Re{v>g|%Y1!RkЁ%]@ 5ul 2yT?cĄ7W|MDsh p:QU[50> g+@"DzXlI&_B0&E=Ǒ0M|>x;d2R9uřSĥV̺6Tw@t]}J<Z~c8Su/$Z9H "gԅT7)h 0syL JwpW1G6 F S~@8TqY z㾄[}Zz;w.Ұ3JHF}pG[r[5R%[ƙ; %l(A~A %y1?Ai:A.Yrg }J{DgS)`*{C4m%NYy p#@c Uw?} ~;xY`B'Db3LP^<HJPKa 45`BYuқ#7'X; N HĈ)@Jml ]m\y"¨Zӈ@(&mDf>I{RB539tT~#?vMύh7˪83> /*iU4Nk`$1FSC;h-V#z+UW^{Hj.8{{ KVV_v4M\-7Tr`lk+{SQAEޟD[ 7c Zr),ԟy%)Qkeø]RkLLݚ$̲mNŒ]~:Ei1tt~I&`O_npǞ`;sð\Lt{Bط7em-YFRf1,hin#R9<]B;AVPK/*ou9Wy1-bqp\6B ЬW@ 6Y"тLi;W+sZ .h)Fs✧YݑU@ęe[F"f@6LvMԹc%QF׼\\03ըxň].!{&kZKoT& oN"7t" gqi];-V$`Q$cfذ(VӇGb iBȺkeYzaEKy~Oq7_ "a PEY^YVb; ]5zՏ ~~M)H( x͔1 5n`Ypl=ْLlt#sӕvֈ9tW<$!9Lkwn"hGJ<5!{نĖv<7@/[Ol1iwmXD$ahU qm5{uo_reAs^&L'G kݦ9D *L35xiTl+PM֐,)3 q tSҏ|0x 62 R)\BzhQZ$#N|+@k@ T^:w= <൩حi{jgyN5z0s:sg0syn}A4;<]G$Q܍oVE+s+.R0F]: 7RqU^r/xJ^<^J>G~xUoK4=6t"hR_VQGy\9QJ?ᘕ덎gaXHT 6:ecVB3` ֠ٸ5Km#O3C%_KUigҌ>4$/'-w.1'2 !ei fOKjw )|z^NhBJ/@*H^Y;W'C$JzQ\+jࣁ'ϾQh*t-g"SoJόU/k &^kXi΂k%Btl ec6 i}jJ7uJ?A~@N d\꫆lFbrW[d[2v%) >i_?:4~d10e{~' y)~/ 5{–懩EzX>$VzEjXmվ:Jo'W225LZ|8ڠ]^u$Nÿ5U>$:Į/2; ŝ^۫7s=kE>|!GR0Cn 0 FH!:9Ad@PP4dJQJ%bǰIp龀hK@9W>*N1<'-D77EϚ*H(vɳ/ +|b]w@ڋ*".NI̐EASo ooyrr7XA$SE5.[Ɇ[ ddj7 EXt^&rT' v/t/Hzȭۢ@bxNP'8ak.] Wo@h!Wuq lF>Շ`d. Ŕ~4e 9B- Sea MJ 8pPM1yߎq{#^DyE#<XcWOAvw $8T|1n\*MKJB3O}tޓjgLpe+b'SG0K#XSO֠ȫJ4)s%Nf1QyvE YmI'H*=@ ͞zp@eS lHkwBhͦia!AqT붊)q+06@+CPG*boO Qt$e"^FeO/l/;lj?O萋1v]yH^O_?z mQw13chq:y6,RPujYo`2F'K㏃3nQdrr+TCgŇ!@B4%cgr-`whZ+k3fio(>bؤ(c= }TVH>ClP:gy{-_Ǯ~C("CvyZ7`ETPSDPbj"yÍKt<3X ! 
B#HzUPEN>|, G*h%{; BL0cT ujؤ_771 ^x I'$ܥ+L xeU ,3aڀ,C g]qҳ=>e S+(nW)`䙀bB0ڞ0i}p?*4R㋥PK/m)U- ,%>"Q\@bc$ 4!%U*!enWZ`dR.mj/^TJɺPf)0ԝ;F](ϥ\^~A 8J׏STee~bD!EG4!:5^kI|wPk#zBҿYSF>ks(95e2O#h5B H34զ9iKؠ>A_Jq_`e.l9y:tZd6W•UzIsFӂwCzYq94BԢ*9u{7.k!1щ{-ɱvؔ9̆#w9xsL?#@,Ŭ(ɔCkܨy0(aE3ӄ_Ȱ1Hu%0&G 6 LUOGr׼(Ւ4Nx`|Vr [c/*@ Nf=t\nQ( 1x=qU%QC+-tf*&/dtX-&%A+7D=A<Hj_RL!*uqBsڞء/{3`$!ebDWY~tg$˽%A ½`TeRy3.LH gEDfp; o$q, }%#F\/F)&4 _೗b ya7 heOd>G )\Yk6a ʽXByZ̲36 ȉhF`Th }U.j{uZ'CJr~sHTUzsxq-hC 0}rK!9|!&LoCUhJk°``b%Owp%-&*qaq 9(-Iɳ`I%$?_z&2ZҀŁo@[G0;cNyؑ=bUbW@q ]RL Ϥ^Z ByǙa\Y\~TDt:U^J->F3?f<뼀0AӳSB"+l!n)i #"9 q"LP=T"r)'Ą6V(=󋝫pIx$,^+ _Ɋ-rpLe|+RbW{m:>l4A6aV C_.aLMCQxYه\y 5Ј[&~d"@Z9`yL.6*)IX< }Ca sbzpGXyd)@q/Rz%i@6m;D%{A)M镊6-"PJ5z7U^12|P[Ew{hMo(t-,"K"+lߙD,q;awTe$GN}0:rz8MmbTsGy$_-(BT72GyTK'"Ѱ6Vϡ;<d=ܾ\bӎɘܗn46FDg@yk@*^{?kvUjB_@#K;<õ u%hk76c,F**qI T{ sB#PD5/I,5̥z)f7%E8 f rJ$ 㺱B$yˎI+KQ t7A ~g`Pn,LquwGzyHV͚Eۃiw.'1闕-̬z]' 5Ary)x:9}z}jz@IY,v]]1Ʀpg @"$ uxg;PyFE&O1tϦǬ0|HJY"=*m]0&'HM "&c8$}P6rMT5<̍҃Werr קh;9 1P= aG?3rE2 E))J6;??\5ǏB~/ќ.cPxn B 9CZS$){w4KevmtB!tKq[Ay\YuDq~VgC"'*QÞL6nʫj)|̂Z$E%^:ʻxd:pVA'aހ!`\+j5쯖'q4&4qd wyu>/ozI?`uM0;y,؂BS\ANcAw<&&񿡲Xs,YHtIHOH3|@nvF>^J՛ڀ*jJ-y=KE: ac;a$<3a\Pr,uQR1 IכXaf.%_ (QA)2a> z1RD'͝g"P)hvvL+a4<`\TɁtgz.bFLr˨b`1q%+M/~jT:8G:\p:@q<׻IT/н^ ;շ]{A*ϩxqA t^ ԂRACn~.IqoQ݌DB'm"0!V؂ɦ\:V@i7]oD]lo_?މ)9zRQ)"O;fhG-К OA0p$>Kle wDY0&1)ü=ISSz@l*hBZ|:)"Y=G͡&WlSsYP|մZG(΀̮>(Fw6Shd+>u;Caa+smM˪x_-u/BG lׁ-:G@nRr^~5|w6s,Mߞպ>ѭ";֮?)ɶw8yį3zzU#I2w3#t{-д+4Rp͝-h CV/ffwjLzu=.t|)gWRJڻRq=h/Z]mI؇~n@'~1U1OWD6 ЯJx;Kc ZHjg0wH.rQlmiW!X#K Q &,2K`JN@KhfKxyJl0h#޸xVU#ݰ ׊J cNņt5ӑD4 ^FdGDrd6W*=htE {D]T;~\E~Ie=n~ @WH<ig/tA+̷C+l~R\g!U>N-dQ%[`++&BbqoJ"py|ZaR!94vx0NGfIϚOZC՘\H+ ݔV!w"I,Fno<1MqA/H5}w]gĬB[V iR+:c'kVX(,}(;\yf>$᫐dy+S:] |L5uLGn:p2N3j77$/H[yC*+;1 X~qm%78gRIYN;|/XT:=<ƦK@IckEv;{܁0E9wRgm!L7bR*~9!d;%XQE=~1hI\ǩY= &K zHg鑩J?[KWD?RT6Z['f5>b_K Ҳ_y\ &Խ@#W2)UCBIF礄XϢ pLg<iqDʤ=F,γr6q=(,!rQ;p4G](>ӗsV]cu5Ϋ@~2>% <q=$O!=K20PsueرOC ñm^f1op_bp}O-iuR.t 3'q8]>MTCx Wa*h(:#]"v[z *?x.&$["C1n((+tR/RO5v8/˼sϠTAy(ygM87H"k[k-,MO'"v+ 
D~a/>YԪg0gN6ù93zCQoz/X ."Rcӹd!@ Ӡ| v6`lpdWWJ'G/nZASU~]1 gUh()QF){#D0 ڒJHx}wX>z BSߨ%|'_,O+Yt`=P.S`ynt:4dR@4KHɸg CVBNcGnMA9 6N<2(gz PuV]~!gtdAodmGmN?VRu^YR,2jO fP:аٓR 1bgy%)o 6ۀjܭj$Uܙ%9F~ ((!a/b<]8 $Bs\Eh VTt^b>;1*P}GrBo2'4;M{ XrZYcZ{xqoeW$T:wCX{goe,%۩JchK9 z|O(GizvG 2<:K7c8w& ? K#]$ịږ YX)J&XCBT9(~?f Ȟ7SnEiO;x5'mY!|o/zUP7=w5s+Ψ S>M*̍c 6zܒw ISo9c{s&jӡjo(#ֆ$U͊HDnGĐҕ ׷nf-Sn-{-'}/]%n6x]  / \Ŷ}zK-g~paOUђ},~I6 i^8Znl|{PVySk?R S;͖f]F/3}Ra0B8!TXq# } dBE_  ?">J6`}o?3Ssۋఱ)m}ioX6'Cey׎YX#~0pUA3xI-n-)yS]҂Tvs 1 +Dc=B*ܣetdaLMZЦu=?> HՓu{*>2tTFG2jp:6=HP$8п&vH_^ߩI=etьy!dNzK 8lh B[ɸ檙eaVݰ;e6ȴ$ 1l|% jɌB_ k`ƥ'yG #TŏQf(!*˶?vk_k>~O.k[[I~ɝEP\7VEPri'H~ k뻺_`^fƖ&B|(ӕɱFmLL}u`y|Lu3 ${WmsI1Qbj^!ѐK =fyL=CŅe`@&r+}8X!ћ{};Y[[XYڭ󵥗f@z1Cj0CCKk YY}xk5mO[QNCF0N{OI=.q"v]yGepm ܷ-65[%|;1ݙi/8Ko^(7M+Aj8>$ٽƐl8ȃZϪPR t^5b,OY6Vg%g,My+֘nEVr7%Y6"ӰRUcƟew!#sT>oYPcn0=={sq8a)<_ wuLkrZى1p&;)1?Z0TUJ\޺sq&06nM![ÄkfQb}>KjT5M})\b93[8:1P}uUn^LJ/d}aKìY($Nkf2/ Omֈ&ZQ?ɦ:V>T;;9-)=4Qb]dOTx +/?dw_b~Qҋ@ػѤdc Ln˜ ;)vp\(Wmq1^}sKW/֑sC`?ˢUMi<"SWz9֬K5OKQX'Ґv65޷Tr([ON][<5nn*i5Z ٷ&xLKVMe+dabGEg-C Ϫ6ŒH&]VB-7`tYb̀WU[K!GD|9 Z(\C (m+]ܲ3#JၥJ 4.`K&(,T q dnN8e\5LV@&7y.4֪hB~Խ7.·Jme4 qd|}2+M1Cb_QWѤ.冠z@[j(!]_R#*\5QC#b6Z)Xx>DnrtRStUاBy" QĐ#\9ɡ¿jPJ6suS{D}Ԇ TOӒN(P[,u2|8QNh(qUP qJ1Z^q85N-dT*nS#yba .(C|gphTO*ՋڦiBRg-*HOQ(Z{6,!v@5o+vMLpz3JOfI5$t_Cb\QS6o-C{K)*B>;W _HeAYtcK# rN2!CCe`3/%+&u9|,C$҄ nAOprD'u/FRxa:G"XWށ<XS5FkY6=|ZcrՓQ7=8,(ћABlOzZY>ik(FM}UOy[ӠQ.I=ǂK/m&ytC^O([l0;S<\͆s>>Y>*8![X$0Шb1 w]TB;Iξ_M7cypPֺUDB` '>BÝ":_Y], L "tX=QP7Zb+zs /  seӅUxtd^'NW{GIfv"Rm>@Vqq?{>k=ǹ Q̷Ɏ=흗ix31M&b-I07YW 2@1MK~>Fbxs؀}?0:?N*֔2TD GlZrn SDr^}Aּ2)i&6(r#0m9uʮj9}uLgiP"6;Co`1Tzk$5jPR8@kI M+ z_0{B*Nm.lSXQߑCcXgF()'èנ4Ak q;omlw'5m.z6.'0gBɫI܃pO<5d~Y!SWuyrQD8Nr6%:,)NLuFHy"RݝSb@fe+(!⣋DC|N-Ep=|k =?.n*h?I /1{mgjؼ}䚟L>UJ䪗Y5&d;=@iV19P@ݳ,[Gt*. 
T5~5Qr/XBAXO "Sn|brχO3(gJ9l;nC(pDgg?]OX* G1=~{^+-s7κ˱Wq;yewTp[= $2/ؐ51m0=>UQFjlRQ kび# !J"I5{6uKDږ=+P 3DNN/޴7`w w1 dl-8 NK0PLϣNM Jv1P`T6 /Ⅴ0b,S4rDxzn;G;O鷣Iaﯾ`úHT]Hys[Ecv/!qFEשA|٭P`S[hVtL77~pyse̬`zjfe]mW-ٵ!rvHF/jǣέをptӶ(Hs<[QЈ3#R' vqC[dz[NݭMW5dċo9d o ~a"E_族e'1Sջ/*h]0l-&nj6W'h-Y٬[2~T`PeG4Dq| M{Ev^* Nd@N/{MPR}꿜o[&Gbz !ZDjge)$D(M)^䔓* **syĄXMaiju:0L9'P(0!H"ιY? p,>AC|@ɀָC!5;l-"+ /UOpgԜRO[)54詄 gجgԟH1{k{$Z1_&%ɻc{Ti*mw=d只4kSSw@$f$c-ɧ4ѱN$Ţ3L(j }8d<_œDC8bwbW%]=K'-^ aΖ[0#֘5s3$O+tj*t=/#q3ү:`D&z#3]nԻ^r@e*}kTA#G.0[As _3[|9,p)Yܸ5L%q͙e?RUH5;X}iҤHl,!+=o+Dꣁs9`d|gN+SȩЕZ`w(HWW[] H%a[H%`"Ql-P~"lFfRUiRV6¬A[@{j`ޕMȇ) {`?a{Pk%|!$@EǮu#$*Du B5gz73\WuԉKfs>`[f{wf_09^Ɂr35|lq7NxW5 u*f&y[:de>ϭxxΰbuޒh$z\p"o` D#URHL7_~–7ICТcBa"EAQUA&}-\~ȮhH:kTiZ^QUD%Fw-W-m6,n+~XDS^a?N;B(T]l֬&KxM iGL|XJp`*YhwQ厵OERVRwh\ -K(3&!2:rR;d|]sJZƁ4r>_1%C_T="Z([S$y@ԆHHzs'I"Y*&eCK:Iw_w`'Qf}~ӝÏɅP=N[2U p\>H9)kÐÖ˶¢>zm`Cee_xr@Yl4)ϛ"b Fڬ"15OM D8KqL'&f vln{e2諂{w~״4Zev$WNTzWN/t,pXf5c7X"h Vy Ho ƒ9d*xQlrWXbYJ%]^l_/z=[fl2fWG**AB7DFݎFש_j\)XAс7<Ϧ9Ƿ{W,^cI,[ ` &8( p@-foz uL6.lZ9\+$jYՔvEG\W\CZĈ -sjӏX^PNѸ4 ezIGRFnqr}b 74*U.*{/1iLrLnT ~&2*ep3x: mTjvK>8 Gjֹ>}?,z[9{>!cª-K4o0[^9x,NDo&TqʈX#51PM(]lMYr..-huiܶZѽ0\h#^f'NR;8z& vԐ00( 4~[w3|N!8-gCN ɫ 90LsgA#3-Iϴ]]*EyŢjt7u!,^Dow'6;L.C6/-Zwj¿ݜ[r?ب^_WZf2`Ͽ:͋g_K \+S,}Cٹ8!g;@bRa@vrfIQs2AzbXNA]!Kއ*ŁXij+>\ikpR"7|HEŸyU$ * imډ9Tg-R׿r:=1tu9;}mC%w|ۮ =QVv&q>!SN`j:§Y(h6^&Ь%c"x!vjxVsEQv{h_mD6RXE$,MdU2֔lǥD0,1#uvs>3`ƌe !R@e^d2Nc2K{UK%o w®aֶ44?#)>҈'oDdȔ}s8M슾yd˾5:iYW)XIMVIqr[(urf)B%SVg|w^crn P,wV Ü(4H:TDaedhU6o9*)Kfâxj^ҹtɦHX"Q0A*b|#7\0p)8jewy Ӎgd`tj`ȿ:TʳXEδ!{? 
L{D #1ei?ު==O m#](!j0~Ae=+jcon}#;E x-:#T፣ 7O_ *ͥ/E4ږxXW3E`h5^|4d[H ]-L 0@+8f*Jw# GI8EM)#٣ío*{/@I' X|aqǚ(ۉ@.3~^IZ8Ԝr/ 7/IeK 4LtK6M#so^)`/]Nr?x,*!#t ~3؍4_y NDJ{s|wi|z;N0yw\⣒_d'J$K CcҚ1@n$V!F$d$i4=\$MVGa*:xӀ'no)jbǴP$Jt#r틲IQ2kJ v.wloЯ;ԑ /i×h~B.ވg2ۗ~jXI7$ghUjw>DSۋX!fK݄REOMPyNL5O 桔nofG>S{SuMs\kvwvu/˞h :ҟDqe0!v158O4G:aFqw˛IaUm!K{;&ػ$dYz0 L]ٳgR:٫zF+CE, h#8{6CXI^BWtIPrNh7j`>u >Th $XX:Ю*9 dͺy6@qO(Hϰm #Hb 걨*n)ѧx̺PeIM$;g% Y4kz !- lBݮL,w <<{35oeѴ丆p/$Db1B:7^*oK0wE:l]Xo|ӱZ[>rǔ~؛*Qɋ~)ics]'i~ifD;fof S[u()hxѪ>Ct0p׮n9H/wΘ [ZTP.AZ7jĕ,,k2k(~teQNYg87@=̀Q!P<2x%',H&aMu[qThdޫiZ|,h(:)yM:{?,U}$VM#8rF /O ƭH{G΀ܚEdU˔`\*L??0|qlcg'Dn]ؾYowVr[+NTVm8K~`Z>T}1=/Z6j#3I_TJCCh`p"w߸@o{},Gua>#:d0E]#2}:;w  9tI-5v Koϵl9Vϧ3g1||Aw>rydh(n%8gwѸ{ݙUɍ$ AJz=Y7ə|17i2@kQ<6+u&(fs$!J uQHQq z]c.#d/ӃPT>-EmjI >uՓqӥ a3:pwedXZ84_ mP`'eZ#ļI0 @g=IM>$92 wGNUi9MhǦ=!md籛egA_\D:C & \`Ձעnz$h}@'ʪZt(?9 utݢ3}$M1S˗rttd!tMɻpJJ vKZ8`HQ͗! j`W"pO,]m6q0cګ;!49Xg$k4:dE܏Yw U/VswHbn5,3mgXѡ&!͓0{j9= "\t`ܳ_Lf'8vV w% 7Dyi`^-@[hU_p VU;#b;(5+<>1GM#zTIY}z$|||S<LiWtfi0D3P,5*yM2%9%_BzegŃMYL0:Qk==P̓sy S#%KMŅB{I/7<̹6 gbf`k\(6&eB`f^ T<)iCTAF2Sv;UMo` ux[i6m幕Jɻnhm(yQ8͹ owy gﻱqH?HaDiϴK~Fwsx~L[i zCWV<0oabT`2[L%ewJJ-&LDWt2ʘۏQwtOjF)E5?WQ`DTNx= 3Ħ|e.Nߪ|SIkҕBZx[p[e!uےqKT2ֶ<?w s<5K~tI.bUL?! E_nvB"@zTɛ67085qUQcq6˙J(؆2{^q D^ ?mO>MFp)Zwa⫒_F:o l rx3RXK&us7bI0L;޸زLĒTA"jˏڷ-&I.tYDA.8x'ַ^"̶6v} ̝?þ #@fw(VTk$6k4|VKX;r c+0L: ೻[GyF{ ÈC)4i|Ĭl P(8c=cۭa బ_pw^dp&gf0lC6 Tm?{j$#O|0 =J$Azݰ?L\}RO{FYctTzet5>uY ~P$'Nf$Dp t9<{^z&玵#ÞKM ug\/ZT>,ըK&h MbW@s,!Ce:YW찍=yƲn wB/]+.sV/Hjk+G:َ$%?Qd}[ j_=aU?Nǡ `-T/ 54HOcOcJLd} |s 4w̪x-\ybc5KPg{;#i1PqeF׈l |[~NVЭ=½,WUrwߴy^ѢǛ!4B1>iF]WPPq8a6 x7M_Γ0Lc.{/h.j0x 䣳>nSX6F)!0FǑ BGC-Bj4`zOgާ͞n}1q 5aO3ΪpW#$N0Q:idp-&W9a[t]4#pS fŘ<\Xnf,k9E,!KpԿΛBdvCxf. 
+/Sf ܄}hIjW>XЌjo3{3׾#b,?a,泘AK.!}į.H<CfV{3H8~ʰBGHㅭՊT2SG۸~?^=$bovJ00";3[nOn :&OWOUP".+Es= DXuXl?gYKA\ )j 7b|};z6 /-a8?v t' yX/vcܓɺaag]l=E 88t$x׉$Vz]I3I8 qU#΁PXZJ6 *.khY֌^bF.v;kUN%zʿv83*F 'Kǝ!+K+7vbM̵9FfdS5 bH }ն_F'_$VvNWST*Td IQ%xL9Ja7GRs2$W_jkKPg+@8%/LIO`בeM.>Qa zo0:uIY30twYl^F9Ēp3_ iLa՝r $8>7cHfD[Tz  u޸CID"/BեGerU-8j8p{#py[8C j 8zrՇ+GR I (ۺ2$yZ֘dž>JUu(KQqС5AH5dBIto=OE$'ݝpm/OYyь;ϝu} Y CV܁B~K(>rfEƟGW(u?-M,~'* Si^9.ѿT?_ZAwgqM+ȦU&g+ +f%ߎb_@8;ktBf`*SϒVye~9 wы܃cx4U,GV&?JuCŽ7|y"Q0F$^Z>sz̕ /שV79/_J:d 0։_Jy4ĦSB- y{lU[&5FэL6A!o7j'iw6Ќ&[oK`uUSO{."U/ƹ;4e:5%0A{S@Wޠ =ڥEEHѭ.[:g k ԨOsNn3RR7zԌ-Yc xTI:;j7 v\ `Yca:b@]foJde@OǟJzu۫Nz*Ba1hOm G "IZJǿzY cG~R*O+NHISzQE-{EF y`/#, f]"gfn2LggPQXcK0~_gh0>7oe}} :r fUzFN8:Hx6~b7Pn8u[l5(*jL 4#1~L&"X"`D-2cÁ.!t t8qr>HS9Wa?}A/qR&nkH<&آY3Kȥp6sV%>ljsn*% Yc~D6{ʛ=:wD1-m,j.V,ק'$mp.&OY'.3ٔ FL@-$<<Ju:sq>wBk˙.Y:&Ndl}āAzSK)W۞UT^[@lG4s}`+eTTsرؑ,&"CхO1:/ˮɖ} ni3˱8~V%Tz?׮4Pvi#`LFn1W\XL\a?@b ~|sa 9[}\Tj$ݡhȟH?e`>t.,Dl[$[~. w֨(($&4M/˝T#s7Am+VCc\`tTb͏sV"wQi7?p|ӥhd+" cG?8c3*/b' tqX"aK_"V/4z24r<y j 6z]B̽b* %.&JRHjՌ) PH}ƷϴȃrxL6?4D!{OJ H=9ti[K+b#P91/S,bD!k^hGZ+[ٶ [yhPB=_F|kd4Ey∿7#) y ,8Q5~ 7/28_/Zm} O6IF*8ȥ^6e{ {p2LrvÙX )\7UW9цi췬ͮGә[ (-?R/F?]r_h/-2 <KCkXZFP@#"մ {Q63ZZFE-k gEL r飈pچi0iwz~ujYshvs-If/m7JVW#D ATXhACЯ;p"y쟸%;}{'g"ß?>0iQc ^!o٧tRS Fc/ g7'T77eU =X]dSݨZ^=@F)>؜JBq}ZM1RwG6STO-ת)waMgۍnBghF\x1[9RC@G񮀾G)d3T1 :yT.T(o韙7XW` כ3+, | F€m2ڙ _ZQj׌:),PZXr' 2HUL qX((&ݮ=m9z:ETFUfiOlo|hĥZy;@`;6HkNZ[ORLSH*{*e-,=O ?tW=M2Y#+-xlA-A]q[ uUOH=ĉٳmC4zf&A0^ԥ5HJeɘ2`coe|L(͖ǥX'{x3fFP(/܋EjNj }㽼nd ̃]!Kv!#0}j\,"r,:Ilg\I$iK} M?iB?da.0q_ [&Do 6u=glFh1d;v*d䧦Dٹj;-os~֖bNa C |lZa:#6e6T:HfږJ1P>|>a|F, s:`Q2D]<[s:*:Ck)oIm%x нѵ9' #MQJ['(Qɢ m= &^|"PG#ipmt.ՒPQƈXscG\`D%h7,aKɌzkxk^n,9`:9$n% Q"z?[5X['hۇh {q`^Z򆦰oBDQNﱂ!#[8bƌ>$ ה&hTۋ"8y_4:3@.Bw]󅀛MC4ؽ5<*42. 
- i{pQK=a}}f6fv.A_Q(D3)?.kӈr4BnCؘRJ܍@tjm)K5v,Dp"W$Qo)|c96󕀔V/}X)Mc3Q7t{^uUҹr;0Cub#HŽ/y5"Q k^9y{@F1>^GRne&@9JPƴ Z̼kGc 驄6sEy0eBs+9~ pTt][ uu:٪24ۆg<Z;_F)HHR<}}kR _л;CO @3\F|1Kp4d0q]N;z;SW[Tz1e饈׈a(y!,Ȋ\t8٪Z?,ٝo?ѕɌO+kJ# F]|䎳$џ YD8N!8VT1"|,{sd[BDwRJ?]5:fdSf)fLJ>m#K_>0^y;۪'F&lZA@7p"bYdP#9Vڤؼ\F-]Z@A˻5 8Bm VT-gRF :6yugFDkiQ5:lOR͞=7KDX)_. t2 Q\d3b!ET>GoPBbHpgIS@4*$ߺi{;x)H\((+dֆsF)y|L6A y-g|j(]բ$6sP# 133zAQU qŽmzJ+o|gM4*@<i͸='_LU2eh!c0-G6jx1f% ҡոnWV ݙ-eRrr>QU>-Ϋ _͓9x?E 7aNb袿D'/`qy6u5Q~FWtc)JŻFyI.`3V)j~z#e$*܂'M1eS9 Z@P٤޿zh'5qWeZ2#;3B*{6C[).S4pU{:WQ\WYd:"گ2'tg~d னV0I <~r^;@j6LI]w"!{( 4fR?p\6nlIv"/沭$}ԗAd\ ;=̀v E HP]\Nw@|Dq*`>|IF˾1bVMM!#]2z/3&3Сq>I4, K,)A6]خ >辘H4m9//L=wkǕֵQs>tBø :\I~P_wBh +w+8oҎ﫚˥˄ě֬,&BGLcp'gqqk䰪2J:Ĥ_lL_q7Vkpi\u !p[\FP kٵ?IB>ъߪeK*ctYM MsY{s77)QiN|V,@W`U ]y{ّ¬#s @2,pQ}>ԭN;iܳW|^3O~@25Fd*ACξyƝY6(xGjL#itcȤZmSݳ1a/ Cq4Fav [-U2d.bZX Nw=z['9 w$Ž \ eNlDSFa6]SR@\POI9TsiڟlWEBDڪ*e}> DSWDEl7gV.>)9՞Fk_ˢRԽn S(GLjlNIJrqz3.*hav] YʁHg+dcm:TW;e<"?_Av!r;tD SgitgPx`o5_ҳR6L;|jHT.tQs**1V 94?|x >.E12H &R47.T|zT`߹)5SԭQ^^}c6^R~l&xM?-= VWaVx1nD{lm'(b5 ;<-?' 
lW%kƇ8P7Sn[]=<-~h5d<[3nf:)[ x038р RgQ Ry5YsTWX=Ck/#-|y !Y~b[z~*\E򪨼; p=ŁQ_Nd\^15$F*s]|EkLnݗ{3wm5;jyK|غ7$taC̤Yb9"y5 Α1M]j"dwޛ޺hZsA aAޮ^^ȆDQ"H"O:kUywړI@GT(az`uD: L'p֊ͧPw& B"~^a wFlt^>o tIb{WN:̐->b ig4#;q\qbCQ$10O^\߲+=}J3#ݬi|"5#ˈJl )~pR陕7@NWyADGbitAĄ\*;ѻ]—$MZh LuKc<MŸ)l%7Ϭ/:&s#&T5`fv~h%r;DzLxo`n&+hcQ+^Xߤq%aEZ}Qykzݼ[}u(Y!:af/P&hpBh.3\xh@m!H]C"Rq7BWv |pb?v$\2|1=n7=_ĐCWVw}BJ\biBs3s3f*OC~Gʄ-`Ҩ4A%]bSI35S/ =%sbzP^f[ AJ=^Z_P܄VOREٲDp1/i`m"W&\c[,7֤د,1Ŝz;Im/t6˚c'eAfΩSo]p?o60v1b(%#I}NibʘIO ҔCJEkw^q *WzZ[a0kT0QXsg,RQTPVvpU\[bh5_jOyw(j$-q Ȱ}S`dMrRbI8ǫL<{Pc]Zcᡛ%1!Ƽ5d=h=ݚH(=:RA/KyMlaGBhPo"o6칶_xB 3ڿPa+ˢ!cH:>/0yWuFs]WT 5\eqw y/uGrTJ0oq 9tx**L$kjxxmƍ\jA(9R45^ۂPvW;B cwKbNwit{>JtVV &)H3tq#S W!?|=#M*Ѡz+^pyٗ 4uq5VydYP}j ()Tjص6;T5O\L[`^.:˩e1&-Bt"5x XJ!g:TډRbx!g]Wy%T0?DW 0Wő 0YXxϱ.bm!Ԃ Jf6&zCe9g e; ͡IarY_V.i7 \_#dI܌צ>OTlʳ& 'M[GB, 0gu=~ۍeޯac۴!7r[+8ci*dwdm,r]dpA|FuX)tzQgٞ[Tͳ=!C܀x7ZwzSVmpW7I˴Dlln۵JɆj#duZV]u&g:˞<0I \o$tߗ?i^S, I&pK yt?R6 ms쎮X{%mWvD{]@(RAcn_'N[#2ZQ,׺HAJmN-#pb> +z jZvWS i%SC[sHӊ%;yUI)hl1&:.?M)0rLugjwՑ_TpIAV@7RDo@-+j;DyN#$g<g$-v 5屆`qSXDz[e~w8? ^| 'sø/ʄהhc[-IS]N4{ݯyŪTTJR} Ŋ5T!PDVp!-?XҹV:Q<9Q*`j C*; ބI755iw飲U~_Fp q;XJaA5=؊-qRAaug. p D+ aZyP ^ r+VWxB(#?=ʣa6IBNYH1m̌f2I.`bs23;387xEۑpqY&2vvqOOP+_72= ^Bx|>2o1+RbZ#H8hDr?SAP>G~zፃL9{+_끇 ÇC6+;k!;lw8w1L7}|q4̎w" KxE\+=}ެ©xD!Z400KKZ)Wi$4U=e4g XDVM7=+K] Hv#"a6:EӴau8woC-\Ξ tE|?9b.NimAZ6Tw녈CJUgvxufꐽ٢Epǘ,Z2IdꈍUX(?|!Գx+hc%TڋpΤzqy<!^Vǹ v}gC(mdf^4i/ЩH%P舔qWJ*_^ 7N!b2#/|@bƧ}4 8G(#_znA# \}WBٖ8f>j^ɢC+N<Mqo5Ar+j:tJˌPgҏto v@&B9w,Ip027WnIа&;_F͆)t6_ ~`YF\gДS'}n$zxDٕn}{T+a<,Y\[8Չns2+ Y]Wm "/l@-`Ra9tD BFSog/gȸ{---g>CX*J ۊݠ80TE^1ؾW87cAdp%܉"-U!ky~dgvuCdjY$|(l8 PU)3kMڈDvw\N1RzN`# I`I2z]qVҹKu_Kb}иqH=!pL }Faa$8y[JE $OuuKjP`-=NȘ8$#TMQ7i S@xl(Ÿ*RXԚB)my{}Lq光D
\8 bEDvt OUM8=A&(3ǜ*7ף=ux_"iIeOKZ2(րR]PQ7n@KnoHQ1(=E:Wnq,{I\I3I14dAP/9wb1BUA]pt1;3<#vv`qY6Hͷt8+Ok9l]/T8!6d4_2 #nӯ zDŽ{mnҬO+Xt[)2G$s1d0gk9Q2Hš;g,6b%D`[bHmBrbd1+ X.2' ğú}elxRY l7~{U(lcitt&Sr5ϊ=RZh> \s jۥ,'{+ެ26l uPvE` zOc6#ǬSտZQ~JŤәܸ1 =IEPMs'8 x-+>m#]mmq}JɞU Ei66*Rn1${?f+mu> XrJmpU`d]ڽK2칿{tYׯxDZJ2t4/&+Q.;H^7ݓQbx:ZU6fI\}"q3<:l55{ ;_g~B9Oʮ)vxt8Q>Eٴcu'%Qne`UUqv 3F]i @BҽUh'1`Q^<$w%]⛧%lYn=[I6[(?˟*C,3{&\Wh`wPZ화 ö'}`9<ւy9W.lWIm _KK;`F2I|b/K4-6+{sSVn%w5Ka:h1x.F:BLMzSG=5Qs~^IrQiYR}'-2`q$d[KRu>:tV.w(^hS츘z`xd+hyN'f$C]~ʤ2&W)='om!4󗨛cY߅"%9(Y %0au6y9>əe\)4\İb 0ԁk@xc.3Jڼ,kw)rN*ΜX0-w}j YvΎx0ZX\D6-LIAJLAYTtnʸQOů ' L{`!%#zB+^U, <2W>y., $A?p'>$ jy\vНa{+{c3PڪHП.nGZWJo5FCEx@m b8qAaB1.xo@ݸ' ;v=Ѱ4~@DL˦V7hD,Oͷĭ%/.2@M~xc~㣢Eb'< 1v",WA I U~LfE6z'Ȁ/V"Y=XOF|Xza>IQF{>GLX)AZ|Y%U9z90jKv5/6wljk-Z->lj7| ,Eg5Qb-9Kܱɇ d9l=isϷVƘФ=%ߨ(I˧b'إ^ʻ["NAJnM\!2#C4h+:4TҜPj!^?T阳yHtK7j*~FiV{GJ¡?$M;fick+i mT( Oo bd3[ s񸞠4bѝ;:S9c߸;_XH4SHAa&il}Rn\*u5J½pL~| Y jGCb/Wd#D1H]?@c8kg2rP˅;di%CR˸*W&<*o=g9@DyZ̶TgAdE넪<] |7;OPSDSB%c@j~@kU [՘*F^|V,w_x.5t%8L@ j cU,tNvR.J{h5v;p$`7Ym&#s{WdB6LiǎvauoYqEGHp0ܞ,0{ J7""GFp$"5 P͝ Σac!Qa/EXu;iF?/`@,MRPU4I+ܖvYݕyh6}Fp7h?;S5zIkڋ@W`*fe!X]ƫ5&5yP0yG PDf'\;33x>^[ (j3Ԉ YN Z!Cr.f-q8/X8,B )4՚-z!"/F𼛬rS:\\:(Ӌ ر!euфHAf8Bt7:| #Bg>v߇_*0Y/NgNG{Zփ :i p36[1ܓAi>S g.0}nۯo]@7& Z9wQ**fŠʀhؙ$~'W[)̯5x߉Tx R3,G Iƚ&5mv1d}l3[?R%ڐ S2Q|$&LޕdCn ]b^jf!դw6;첊śT(g:k@Fկ H>Ɖ [tb=7g8۔˂{CP:jW{4-[2OفB;,`稺ΊtD_z@b, ͂V>C'T vsLG *d38& Ds/;=p/`iGٖBc͕Nخ}\|nݯJ4nt];vу(|e#e,L碕ˎz ʇ@ru(_jNmp a痝LdShl&CetӼ)6+S tVHƖr3%wka$4Dj΁t6cQ%*RURQ%qdh!% h3djM}\y '/#*KLK7/xv+|t|/PbeLHg?jVPhQ4 :JSYnYP=M煝VԳf7hT] Mw~zЈջ t Z50XEY,3FxCbTu s5-r8 Tݔb\Et2l|"߮Py$vߕpma"jpt94%L9:G$˴ S -MWE' ^fAߍw=܃ ҝ Rc WUD =C{[*κo ;=Ǎ/%ɟ@NfݽO6@6u(˼JrzB9WtIzgCn:p2wo_+,ژ`YP% ;Y0*lߺ-yդ>718g RuUcr p)0tJp}E D! 
8LRG$|6o_W.JHNX ÜrI4bYz fq|f0 '}`,=BdyYE~S,Ȑ;ax2}a\7[%?l}"+BnWJ6\諡#BAcΫia_XVQRMYOOKorDO1;j.nZD&&vhs+I5߉HE|ݓE_flI4\>[A$vA Ȓg&P/n\ܣ; %Ϗ Sǟ /+3>2<p=!f󢱃\yjt𢮖hݡTZt7kJKxjE }$|)m7+{ث7&π:Pdڹ/DD쒩VzY=45ЍvAqy}N+H$1 ]1ۨ!ZCO U-+1-!{4e "4>,*9 I'9C?4]BooAދocoJJS6 砹Lz7c ;ٖlij-˪ٞs/Jbxvu%?_C_ \t>Rc٤ g R `?WWyC$sMLU*bA3ORt≂%XH{a(C|Ѽ LJ4&0!; \Ҡ͹Γĵ澪%ٮd.F8}l7g\ h:uˇ\CΦw:3vL[Q;QA7(9Q):Qq Vr"C ֹh"'/Ei6 EtB@ԋaEqǹU<=z]i0vEБ.IJ]1 Y*֋rq EQb ̸qAgL%c }k707z# |Ia{UTw&j[275j7&psCD1}üEH!M !:,>-#¶{z%Cݿi{tM՘&aWUI |l`Co>KgU{DaOIg9f =i_d!>Lce7Qwa#Wx'p9"5&`80c k'b 聆0IJijr%Lؿ}A5T@u';Mug`Xl\a"La;9} ֲ;5Qt $[8Hi6>eKS;2Pֈ kz%!A> 2 t6\ʭ6qH?X2Hcn ni{cexLɻ+ Kc,Ȏ;8uT8#޹ iH/*qZݬF❰%8ozrFiԂ`@qZQXrE%(Om_lw/[a!P"M$,T±[Σ$Qʱ,XKGcn0sF0Sg/VdK,4iDej/?X ,PG-GLb~̢$j3T%g@/<{Mz!]OКCԈKˠUq<?39^&4 t>Dy9הVdJ_wC98܎7'z_84Q@*$OV\"xB5cĐUWL}L1B7i^fTb 7o%h'N kh(vZkej5btk{uo;'X5ܪ巍`!-(quߛIP {]L<#B·eǸLy)fC1-]$bI|Y1Ӧcw32Nw7+[+7_c.Ӻjp;‹̤Q*TxT njNwz]N˕"v[#2b\6@"h\r:+m vjDĮet13-JpnoJwH8kh`VgBylW'1E΃72F.?:OjAݚ-dl߈.F@2ҭtVCMdXYDРuf :rHwETopو)!XC(7$e ^**Nc%dá"۟PS#W ,NB^|2%o.T8ƻWXcm<;^%ă.l$brg]sj(,`2z1sH=PRά&ޭZa%M6.3&m]sCd3$FĕinGfNZNt㲜GAe[[u~W0AcĘہc drw`Bcj| 2ihk&WuFD/:(֚UIs&JJZ5(.~m<~@b(/*]ί(hDsbwRC6Kց0BQ$3<<:8KG"#ުg%/B6 r.#k܎-lhNv`nZS K` e C4|:=+ԯ? 1ϔ|gw’ \pHw?E0oƼ_[G}-&Ցczce絅p EmKٕ +ā[GtkO?'fFbpP;} ;̜\˛+Jm _i}P(j^Eڻ6ުA5 S.n:oOa_(AK&^،T8zjsGFD窎C9.;zSK/qG$eMgP֞`[s**7Xtՠs]cG6$)Kgm}*3[36ۺ̻洇4v.D$.dlNz)&:/'}Dfs *2A|Ѩ-6HA:9הܫCk#` L&SSGgVMXuqu+ R{ Η'lqG +@ȄkjgdbiPӵ R =% 6T_n,7l nGa|p_/^ {Β.'[YzrlėZf(mEG)[0eBt'(K)zF% m(IpYҪl:bemҾ<+Qw\,nEZkWCR@ȟvrmPps8SV$ߊ;.5@zA~:m`LwRaG+bRY@ v["K_I܎p|&d7vG )`ba:f.f-Pm&)@H 6~fJQDϵM[J eJ]24$p7yȑyfҭFRk3)nhV$x?@ݝ ~Wlany)|z+bc38(_n٭:#hD]!F`*Bb$^E}v"x,42٫=)óa:\߼JK-`j%B}Yox5O0LXqVLm%BF+)Lv7MP-$23F{,KƌbƒxbcumƁ&@54=CIY(ݏ>mjo|ހl;>. ;a:wDI1? 
uCyLJ"Xs!Fc%oU%S4L㌧=ψ~KR4䯵HD.wCm?U׾&ucy[ZTWͶ PK'0ND }RԑyDCuE(S>WzsbD' (qxuFb4L<'`+wԥ7A꫺i|wkY }t*x~8p&QρCMFyv06#ClfQ|zPYW[K=v:-{ z~$ (Lja NIfZ)CO1U'zP^;Ws|ke./T܂k_WG9cQ(Qnx6`6rǕ$mXzag$Pԇ)G_<*Un}c)N/cǗ q}#F!F'kWLMg ajh..;2 r3voGjMxЪ'FPd$N3(.8#b+dS\n7Uӝd޻BUQVvq"L~BKP:YG \|wpٔ.(3Ff; z b,l9z2y7IJ['_"Q<('aagZAo }pln_BMh$i 4h?Hr_3N FcŃe]vM!5NRW?&FgJoIr|u1,,O+ QX Z6I<03HzR"3V:G3gEH#V֋SYT\ k֡9դ(Rq#b4Hj< w͘ (NqO d8tc]ʝX@ŰD| []Reb@+M?':},(&/E.J=_M-$PyźI|Etj9dJ7Kd`zx8ݬY}#1KpҗIٲ8ҁ#i<+6k\@R/)g5>-ZS Fҗ~D-ps>f <ڿSzxiTWU[_nͤEVqS#j:yA^NeȂRkF*lBv~$b\jY$,u Ƞ91"P?w20̘l^$b(Զن> R`|=<-o1eX%bԖbʪ) UzvTPZdBǐKy? VXqWBȨDF{+ײje#nQKU+Ȟ͗)qE)b SP**Zj[љ-͘FN7lu*_I;М4@rrȇgb4Y=wI\V$d6GWNg6hx xEq S"1{wz= | ,7̎pflT 6#5_ݮsq 5S{fUwģ1~ LoD?iX` \x?vp!"׀zᑩ cF8 'FQ؛[la \|Z4[\#x)VtiD4]E3V\B=_6IբE6ʹӃZwE+7 $/5"Ci #{XKyV Z'k=赍dA2cU&19\I+UC<6m thkORt>DqqohFv73ć Cw[v6|#ثxV'CYuky)ռxfjGktLmM411 ߺN͖T 4 o@[/$T!:uGx2Z7zeImhiE"OU \_joNtCt_Щx\U횡WDa酩z1-'o,\zb3L~F+c,gPT:K7)Q8Q2- XjwyZʲb6JL^Tг@7D pj"G"= +55bbڠT_opW5|BoEgw0Q&[~,7Ƽi,vʐ3,0%oTQ04M-r`7!(aޮυijHãHűlB-^oúRJ>'p˞n3:DWURzψ+i,ZO#7ϯ[/ %t{E-Y%xR&HGbFU+Ct m_ܷٺ={t"*kbJ߭"]n6˦vqm!W< ]pA(P$GhP? I=:;)<{y/a $H_*'ɛH~] :M[9FSd<s?u=û}UQK_2pT/~'l:XwUT֧V̖y%G9w`i i@ic`f}b+?J07.)MpsA߄(S.Ȋe}#S#4&ğаԑ8@ufVI6nX2]5r7{4؞pxRy]2ujaNBw:, 4`:/UnbvW.%ӜUc96uts'9T}8BFW}?s ?c w0Bky )`cPF;~ ( ה}a*= xJѥ uK:xJ ?#mkͺ\0KKY6ўv'Rs48q́~M9+'3Svl7>f~l_'م3'u;M2zw-1jTrCzf+^Je iX,~6<8[[qF}*x}ϕ3\{(Qr^H\;M~]5nOYq2DKE Qt:.! U*"sw*&h=j_Kii\:0wCض|YĜ RE!eo͋qxi] 𔯞b9p0ef;)EʊZ ʋNMh*tb' \= :ӑnBIrMTOZA8rsH)z[1 571n$䈲OH@v1 iP 3m-*)1*1lt@(:rSCzHsEc'&\dZ Lw $ j薜ϖ#>ıʘr)JW%0tm*Fıt܇jaDsM*PzڥFvPiEڬZޱW}\6.mɅ}zkƍ2j![;y~ً+*Q*ymrMԠ<;VuOr vm)Wҩ=˷C\+q 2 :zWR]]T%PMRP+r D3ʮYIIEGU8LI~T. 
/9;+(FpPM g XKHUsk|yvԈh Zj]SEڸ 7f.5[)@Kwoi&:%tַ5\g+#ϢrD=چ]!o(D/]5z7&Ӏ^xfo\3gyz sQ) v,@RSUoF_IJ~0#*/+|f|-<-Ȯ>7"/rD/jF`浗ž^!IQ{ Z\p툧 5'jtJ&'T[UFQ_wcvϮ10* F ztJq>A:M|6c`Yz ܎;QqTEU.ŀ3ǹU+ezmSPv$8\Ժ~փi-,*)HҤb#}ԻZ޾62-~[vȄ->h9#Y e3(Q2f~]p#M)?)o 1k\,PKRnY>ZxMS 3qg}`@sZKKS ⾰uz.ͻalQ1^n֢=h}ʓ4'vT <՜诨mʯ2:"T)0{jQw{NEZg[Bf!Nr(cK:Z[-{( %^X@'ӶuD@E~?,u`9#s*5w h^ A#߃,v'j,t#&?R*/F9Sť*$!J݄6z KSdqc6JGm+MxvڼK dۺNq`TNh?DQy#S3O.y:ai4a +>w4!\+P `zso2}2tOPS݂T<؞8 qRl09gME/r20V&qGBUl,Rrֿת)t30?-D4qe|Ra,D.%3}vFR#a 'oMiufuo0K$V }VK@.'1SKEj]gbFg yU7> .$޾%sU"sLAz&11בZT[}{.rwX]}9L:5G,mzy !)Υ -y?9f4WJ;4(~κrck$֧b9v-O0Q]2N Qۍ}UPE\Hډ"aMy B5G), 1E'G_=Ļv'<9WKpwũkAfLwJVNë4{, &_Rɟ4QUwlQ&7): 3 R=W'ȅp,#inwt+w+?j_7Yh)FcbZANMR>3A$,'Y*I o,DOmŚ><L2 42K|ӳu8@(aeOfKvM[H3\v@qhT3d7l9NDg,rGS/E흂.)Pɱngvp PhXMz3Se/:O̊JS3SV/:_vSfRp姙2 A` ' lxj)+ 5K-544ZtKB4mGDu99r'!ɲŀϊ𡈖kV1f,JzC[s!|1p})I[6C&_W-$nA' xG&lEU3$x">RNQK#6 }#bĴ䈱urTmH"D 's,iח(`' f ]ougc-fS,`Cѱ§å)pKiɭxyr)r  A~WBt<1^%,U61'N+;flNd+"zE'~AA0†6EOP^c])$y4d(ˏ;OG5%+~>&NX.J =wɐw՘u' b[ f-I G77@JB 9$}iXu{GКg1|&[,P/}/կ/r:.W @-5ؽ5"]335t xB+[iK\ラ@d-)P-&}lCXraS ;僯]QӬ埈ZԽJw) ڶW5=ѢY2L gE3Εr,"`4|M>VO+<~\RQ774c죿="Kj o_O%!CNqb{&St{beW  NUwZ'1ɴ`fCJmmt銎F0µ@f8l=[}q!F `V(LFډ,$G mU"є;Bl&r`©}$bF_T6AwO"ծ,'7mWh='d)1cJS;Ke黌. VZÏ[ݼG;E،BT">eEO18h=t N}t{*Hi/H Fe1}{ѢHA A{DƖ&9 =KQW!||-߇]_EpWKߛ[Tcۃ\MjS0~ul׏8k@2Zճ*Q@#d## 7wʩaշ9aCM KSG3fzbcdeI͐İK<2`uDc%}hT$X6ľG'uS⥈X*mfgVoawĝ:yhOk5#txI-7'hV\qӯ'Ľ~?K ~fެ9Tʘo1V&IX%bMDI<{e,@X՝q<$/LR? ]kq?ܽ_lC9n#KT3L7-U?nW ̾*2*;mRLVCp f6t\`gI8ؓZ'3't),`'Y]2F{ay X( SwS۵mG-gi*.PSE" e%>Ns3a ~%4! 
P>%Ċe}<./j n*l;~  q7AB0.,&ϦP+{:PNDN{Hf8nO4ػSSU2.ڴ5mVw6&hT)UP-_8ЉPT-VV Auq6dHE)~ScFdxvgbUu)".T YWV.3 [qK#5يzL&}Bۏ+Γ!W,7b\Ymv> }Q F0`3=PICvIs0 UK뤴'1zrϱA ǥ<:U/,Yv͹9`e*Wڙo9+f0ER0z'N4GL \,-?W"w KYEN3UJM-S>1d 04bWPN-`xc:˜1*;}B> #?_jc6Yg+J@FJd1n_POU1k ),ڟ_xO)zg?Ⱦ$ln^$'B[Nn-u/Uq9')j\ ;\nm|>WwZ!ý1&'7?GV\Hѩ 61亮FFm.#ulGd* ÷q} ҾPk9-ߧB%X 8RL6׽5Ύ¸"})vДyBKH/]&> J_RpJjn`XOYrK>vqw1#- `ԪUx^Ƿ0:)'f}WA~XR:vBEF-6&Ly&s+F0M7!_ 45j>S ǣX aOy@A˭:xr HD<_cϙ_7(<;v0ȡbT;lw.x /L*X|.fYFڐtı>ҳ#awfưM.z^9aql6y\K?Ԧ6NuPfY^HEpІsT]>a(Zj[3I-1fP"H,XoB04џT~Dޡiw1&& <{鶃f2F丁\B'-iȄ"Z#oWQsw#Ijsh f_Ck"|#)lvf%Ip$.J͝]վ۞ k/ժ^m>?U<$Ǥf-(Ya(t뽺> N 2sR1:-ow/oW]I f`'!~x7)!]5𽂺ތ:15i2}H >G ղߜT| BBa=JoKE[}bUdxӘCߣ䊞7ӝ$M<kպUA!8u 2hW+8]x-& vgl-"M@*:v]g;%nF,ϡ ٺ`5u2\~?) bK(ХnFhZlk&Db:\c5<J'5ы=e*4ik+>ffd_@h* կfL^wlɐcpMG\'Sp"콳z/cAFԯ2&%*+~hRmrߍ&OUfzE% VZKz U= %PyD>Jt֙sΩ<Փ~B _aD56aMqxa.$]t9JJQ` qlHёA9Sh3葂 )KI>SX{\rDHnVt Xs(+vn#,>B/ݳ?R+`]!Gز*k \z4 @NN+L>p>-.Ţj}i"Y(ZwtU+Ts_>6*L"4W&IWVf愂@eg.PS /ֺl@+~{*q#|S=S ^D+ J/ _~hL"hs3`r+W)h恨Kleeȑ86AQouWm# !ӱd3pГ]1 ΛSkcMw>C )j%o*n0VtC kx *VMJ# JsO*[Dj,Ah XZnD>7E4&= l>R'\JT$CO궫}@Q ^+Sen q6z}}s>Sb.B?za8{h,Ɣb˰+..@`Xv Bt%ekb'^6JՠLswyOIN,٘!B\3[A;i~͇`f+c%{hyIXK dmeSk@L4_#AFh$s[w4lE Idqyܚc45Մ%7J[޼/JĿg4[aNn~h+RvcqX(xR@F1q;+Fw6IPCxvYZQϏo+9 M сϢ;Xz%fouwAҞk.\STx h$wnO6C&z~IabD]A!T܉W\fV!H qf,*]+PsӶn28%Jן!*[ډR"D$F|'cTe)䐐! 
)Og-;T~iŨ~צLR[D\24X9rL38:l(q;QqRF9) = z <>^O VrY%gHR.K~ߘ/iIVUْoƘbṵT930M)9İ+9kjk Anv+[e,#ZV Q԰ dcRN;d#"}Xd1,Q3bGy_IQ O0P _|PftHaV`ֿCG6ao`09A{$OH\ LK="']zF0׾6"=+WAmA}Zyid-$ T-m~먆 BY%xUZ]OЀtTsc BEh4 pݮ 23_eQV:!Qcrhz,dȎS8e;e'!fc/ PTGyG{ r-^5+G8v~*uu.ŕ=m N#@,xtNs "cGxߨ2 "Xz!:Yd}@UT̋Q:jGxeR}`aK[U܇hKZԱw\m*x\zUJ6 ]?sbw10 ?HhcOnEGSB>,ŧv:cG Cڿme2A<2ra qcpi 11%15nF4u!96)@Z;+RnzG[1lƶYABr`%+k53pbjI8o{Y| ͫ):ŔdvwxJ܏R,DVK#WӪđB]&!.ʮ.5tS6pQ3 QsH5 Yç`,Se[EM0aWo3WS)_L6Ű!#ǹE麅=Q#"S[ldJ(Mh)UDl"a36?+ܗy"bslDWgTpO:A_f&d;bhԈ4` ڿ)N]>vvjjcJ8λI p:Ͽo83܁TgʉֺH"*:hI@B.O u'UNGѽo3Մ5b:_RgdBF$}%qQ4ɏA9Bª~ kxCA{ya۽[m8͢+ptSu"O!y7:A 8M<C"6n \E_:C vpi σko6 EaޮT)7^%[ (^AJn>ݕ1UF%GGg}4З{cW2VrJҷ [΋t0rþg"Uk]W(E#SQ CgTH4Z4ѴN`2_sabŒ0w\EwMK\`}EҬ0wBqPB[{%i+K1 Q'.ZQˠt.6](/V3lAZGLHv,p")<p>߉li+EP`8' R 0蓉6cBfHBPBiN*Ǖyl1CW +Ѯ"vnV`HIh'%$Y)h֛ն0A˺Xn튚Jݢ244 A%h4u1 5Q]>m nDߜV[{ ;V _Vj:tEy)V^gَ41wQL$<,@9 ˪eHn-hp!U|-"!hjiV;9ąht!ͤc[7ЌeeVӋ%)z_0l*k}I Z97fd>:Kz~ӝڅՠye3"s!-o.vi8_Q!{-)A3H_lMlLr2 )0%&0Un$7(dEļ iӚ*^dOWʼSƵu0 WZVzM^pCk؞撉u-҇zcND{2FSfF7a<-Ǒ}^@> 0dH++<ԉ5&pzQ,1,?3AJ7OSlD+3gâ5w3$99hC.G*ThoҍV^"Ԛdς9PاD"FpUnYu8~JF-df Y因EL78?%`΃/A߼+3 sK@j3V![12v(={k(A-.YsKUGdZ^p>.'oS1aboҵ>,a]dnBe>]:[w=SrL~h0'eyf$>xk{#5t$ZΧf,puyAl^ %uYT =XuF~X2ÞNbmZDfCik`7Es6Wg) ʮv)TrDu $҄3H8:4~BǜF9ɸ`WPWg=;zKj:9Eî0$ S~W|-8"2Ċ9ۥ)aɳ"GC^[@ތС?<6ZO1S|\4$(z*pi/ .Ū~p F"[y^G(@@ARa@N0S9Oj")Y P-ᦖaBӖՒekV"Ƈ1H\|h}cPCD6n89#-PO7>ʺ/gB=v0aE,1^&Gy=DSٛ6G)ddpLo!m%?8!X+!sJR{TBݞ:-8 ᎄw a7a^KeOwrY穩'A;#9w"?HnyM?BkӃrڰ|*Tn2<.o-v[OGsv0%`R=e.8 "Ȑi[ 1ܸZ񎽤\6f: L8H*Pp+ :U}A+Cs4i>iѨNx44b\I=ll!—wtIOiP )긪;.^I|6j |8_sV@/XG)4'N:i*<]"1{,ect,DyKp7Keѝ(8]kZ΅fvHVL1l}d _ ?Z'f+BuDlel@T@d|pxdv%tMd>0&rBU[z pP:ǜH c9ΓzȘ-y8b-tjm2zɷY 6<|gH_v2V}KŸP\ZV {(徭M6,[_ hߠ/7/ aL:Tjuȣ2yy]JpmR{#w4Θԕ_OeItǚ~}bkr\ءєf,\ViKw߫P<>9ݑ&LZbM+RRj62p4l||^Q )iv4Ei4dIX $f?``#rnpC1I15_Es&Jg6%y6~-TZhw_U8ALD n$kldVtd8u2W|uyS"OϼEjSkD(h@2 $h ϕʡh#5ج\ȷD*G#vsؐnLnYeg e_#dzqmJ9qm0˱ObMpa 2x"THYtLcQ+*{/Q9~M3v$ʒ+uVѶ.(6?P2-'Ћd~R6';r6{;!>돬U ՌO|lx_&&\y'4_5~FM.-I!aӸ-GcBۮ-%PkS 5d3l%$! 
u<Q[ٵS^1 L&(8`DMT>թXvj 1u؜J{IƑjo-cx*˸Sڈ [ u |/N|)UGޯ\N<*XJ5s6eOfcZP,ҡ68ӁMe"7?:dSEpלG&3G̕V*wft;) JP m% n,Ovc͌eDPԘ5[MiK/rGzc] ;a&<PEk34-Una҈NW=C"~?"i'z1rBV_BOHB:S)uK QSvNB|dԁxbZ )b c[:KsHO` &gŅC5Rc"3^g:L޿;Yx[qsv;.ܹWucsG7TlZ̸ It=s&̳* v^;-~hmx'ܲ | mqŞl6M %s4ySY+^~MP,2ڟ{nyA]A\ inC6\v ث/B\ކ)OChkZP"^%hÞac֟v0F$zu]8T" пe@1u- IUVXNr H!+"5BWcM& 1fUqpdCPhk~$_sgJu[pdm(ԅenIhq&Qi~8#Ϻ&#v]M8*΍!1!BQVV%X"? I6[mfxL#p| DπA9SQ)yIgy#dcF~&o{KJĘ2En xϫ 0aw |4& 1![ЗLڕ'6rgM!겪Hݎ;j%rY/ r_d2烖8Ē:aflYiHd>: ޼0MƱ`ꜨNV>uƯ^\8A/Xj%Tbf@׻SZBHܡp xyZ`C5OnK/:jk/ /bym[{YQ{`@oAkEvu%uc$&#zz\h- 'EMy<,^RBbۂz]z7'h74 >dT 2 FvdՙL"3]x '7'.W^@A?b2HC0D.9eHfe?I o?Uѯ9Hs <.3󹲙EnT>vX@鯀EvpgMP9ˇOt;2ţѶQ5#/u.k׃2y9MَᜓVG 5oRch5/ES$-ѕϛ8{{Yv ,|5};އxjtO}scpl !6]HGKA#,_#LJ!LEE'3;x4k]~ó:xQrUOO<ȣ Ye*Syq ! F4"p9& JOYo{]I(1͘LDW<< b>0Y,ƾbϢkf_eQ!ir[З`=ej6k \l+"[,n$ȤdHr勐ĹW7 FGi0FK/ z~#0b|%t=hp yqMC*52B{3vG h8X<1wڸņ$/?e(/`s aǻߧW[2HN4Q2—_dT.2)@1eASu/Ԗ٦yVN2@t.\Ta5v@^pOz!RcͩxobS([Wfo7ʶE{rKt+ /Ȱ}|bVy;}>(qlu`w 7^,DNPVe0WAL`Bk}b!;-(|V5 apvd,qM/cczf;`iu[3+,aAaQ$3µ^vF\![Toô fULtkPHk;~z Ԫ ˮN*wݽlL~ċh_gIG|ώ%o/Vv`g ""‚Wy .tw6eEx()ec(2מ$`?m< U%cW?47SapzDL%$ \*5), vvO4 p ]ȖN+-tvhz4>\Jd+ުw ֕Ŕ@\-p?P=r˯5;YTr)ԍO/N:n ilè)Ƿ>iveIMH6>zVIڴtȲ&ao^:@CrraeFgB<7_ӆ NdТkZvmbjZ|.GiC.orZIdSD|$24m'0swQQp7kR#y^1b.beg.L,_LtZJ$DiWn}'/%k-.Dž :!^$ I̟@t`цFI5 76€Rom;@EdPw/pFfdr$$> GAkP޿^b>ܠ/bSI0ɵy/(kCH"O*^QsJdXǺ6`V! j"AȊ}cɠYqWK1\q *G[,peJY\b_rz՞P:ʉ=O("ߓRz4DWcU5jF3oD{ ' VY0Txbl;aNzdcc0l!S}>|BǸx1r\disbŲqCf6[1R0Si؈Ҕ+U1^%/ e?A\/\ǽ16*? [*ouzw% QGIS&dsgqܤR]^RȨrAx2QyhąHb29.c5r7CD?()'26NV݋~F! mv ޘ)ަ&9&OKwͥ]t up(Fł݉ D7rmW:h ?ήw-%kیO;Lڭ~ Ql1(T:} ˣz#3$Y'B9ۙZ Kh.hC WODhxI"00bw\3C|l'Fk*`#GA)R gV/މ}dt1򦴳ԍՄ׏ \ *q/<^򤚨Jr7W9stND6hx/p96Aa8p- }ED[մҖ1j!edBb">jm$Ĉ#_y j:%-[ K78 5\'xH&L`pz^7˰Pϵ6&LŽaM"*wECrYpۢnZQ(bek$#䕗M2V^-T<_m[Hl+j;? 
ٟP/,!',\>Nƪ7(&о)|J)C)@?x;oLrFt`lǝ{I@~hi&FME4Q^B8e٬M!f?EŲ&s.Cw٧MdŹ p xpU<#eEd&exu \Gc`L5>Xq?^MkPxGcv{[J:"7P՝lOgYb=^Awg""V\ T}r)2*@D|aZjōǠq+`+O6ۧQ):"EnSN6oR^*1eWqJG՟ջ L.TO V$e'vATP~c˘IlN3 ~~rUI':!& k f8*{|^:LDWŚ#J@p%r'b|aOVMGhLQBC!$lB+-BH(c̻l,/c{`m䠓;Uڜ(XI>n*YMy)fw*3iuXjqN?<A _U$fUiX= n.i9$}i[ 0DC[ks,B/ ) H<1..n3Ls⚖Z/Bc<4"b XnƞgU*u_N(=iŽ5FqrVhǍ']dC$Ctu}lb#ʠ6$Inzxlj'׈t/8CE-戆ItȾal{jO7JHTm+ іUh:)A2; ,]3ܗLB+IJC}4*yib-TP6sY3ׂgPYH[y(- l@`(}upp7Qrw_rHm=6DnDB?. /فVTn>\{#ݮ,CM|x7Dgsk-;v]g]5yXJRwڵ:8}垯Q }a;syCrRP3X5]\ekPNG.DS"Y4to!. Iw՗D:ɢ]^ 0-"CtFoZnlF<%kJ 4~J95_'Y{;3s/;(3wABZ58UZk o . fpxS'|FbMEx.+pl^~`b-3#f\x$\ gCD؄~9M@cD8@2qKʀB#ZL$r܌ nt<|=( LN#ti4s #ckǓ>{(=}̱S;9<. @6fx)Kl!^21vu\ qM@tN0K97w#PҠ!/@01MY)NNg\ߘc~h41 HE7 ] WrT/::qA?u8g7@I?cpg12U'GO2͢ftsIa͔%ɰ,xKGY,d~d ?]y~T}!9xإS`-ٲ:N>G]@ߛN "#s_O7R}hڐhZeSٯk+[9$kn!MV;)]k9Մwܪ4#Fj`Mk2_9&IsIJ[d=z\k_COۮG󚄿[Ǐ05_.q4~*{jqoX+FΥǗB;_IO/ߧRў)#PdX;\KE|s}#\d)@S4qĸj/_+lۮ'I j+ccG7oa5Ȓ`G\-\CH2MD{u 2O“#[֤K@^EH QJ#lȅ 2.QmFe=D/Lqq"AEK@8K}&I)@Z'YR-'iS03NT\? l 4@ѵ*:Rl QAL11Gi5c/&1.,gTAV$J?j֬j2z:1s="z-"H <]j 4B|]#_jޮJ`ቊM ~a~ea4$73UaO\;2N?$BM: ٮwB8.np:24iEMH'f<=g>ڈ [d[OK3(ke&d7م0$ɛ إyS,mr>ɃX,ίf\ "˔vVV\ Č:'t\18dtnX8TmWjáI#X -zTw3bNu~py u :GMml&+Izz#0 ẹt[_Ýe_C|Cd~hVYzpI9n/C#A-+^~R`sl+}f:t~pbQ*L[`zȸSZ6S|@3 UΟc@]Ehzc)oYGqwv/0s>W=>vB"PZi!-,[cznjVHu5 T(Q,M[jaZĦϒJj4w-%7n_=TGwy3WYQ3:ӻ)W9a\i0YG8_PťLNө;Ef"<m!apl`PNG =̹2kiJ)WE?)`2!HAH݄>AdW!Ts$JrvӦO_,բ"h3~ڑW(#neo+4Y:BYcIlg&+?,ߒ0.wvJL{t8u{8:T7YkE֩W? N fK@vv{n\}awx \pt+ЫTRlnŪ [_HEmfoJz,]^]coȒ%*GBbu}uWiʲ;tgƣ)Z.Ͼ~c-^<nRסStwưղ^8 hoI\r6\ iz W|-Nj%/S"C)勉`iU߫J;A_BE-E)ZuӢCż @m}~+V|5Kisk Q0N┗J_`83^TɅG*HTT7uGHL/cKϲ݉LlMw',)~:<|+T_}l}^ tC {UGEک}enwa|DgW"F0͹Y#%U:8(I8{|4(?%7vװWnFC YbR>RUt۪Jv焖;ʸGh(_OмZVWiE,'B Ł^ qLg?\֛DyR{IE5Ыc+r{ᄑXafh22 }WǷ @5b2ԙfLZVUH+?|(A +"OTIH;Җ# !My0a0XyIEG"7h@z|Zʨk^`^r[;8Emi*z rgfjarFwEeq u G|sYPS9u8NVq6SQF]~"/ R إoÒؘAtT"?Ytx^U.a||Tpo^P "CbJD:en "sůnGG}q8yXF6,0KN u> ψpt`k=1%S6BɬYm3`kUO 8cR.$.h7m'7XP |AooFQo6UڼH=MO*_h,+%jKsrM o[s9eXQ*%`h#?7znXjPzEx:qѬz=f{HCPqE+ fflה\4Rr CL =3P 1̻W -'(\W]+s ʊ z + 1atEa\!B7DW u 
PD}-/dEg׈-\|^)|IfeW?UGbX3EgMVqSmVa^jEſ1.&fRžk=qD ࡟ӿr"]Rwyp IYwO,CYD% [tӔ2ͧVuIT4+`SGgyX[(f;'jz~UT׿Aa!tUտ3Ql2:ztNWk@d &0n>LvVnEu)ir11F2v,wT汵*gW)r|]UH°yŽ7X\%muv Eƽ+Ys:V\cfodh~oaI:Vgtkn:UDsF;NԞڗ4Gh3ףd2H).bIruud!b'1x:%/o` i33(=oկATʦ7踑fzʍv*JPE8& J˜wøqnEuoOl_)ꉋ 6H_b$ΊIy8.NN<&wp{қcL{ï&]tsH^vФWslI7кW R n,.P#E{ o",>12 'oWCZ!&$#A>U,p:|}N3Hܐex";o[p(\Õ vkϳYB?T[E0[WѺ7qCJż3͹ԧmj5 je` -{oC S*󗟖a*_VD(OZg $̩~;T]h:*;-&IۋdWHL2:4P9Xo/OP;~%pt GI%ai]uJaCWUp6o% Ϻr#1 SL\p`вX2u񪗓a5dI6J+\ܳjbKSo[Տ-PXQ^hzXᩬ)eZ"2[bsoǦQ%vC> dSBNskDp_lT*M3@Ԏ8Aka{0Z!o(GIu>@Y Sd+?wOy?|*TOMAdi3/xlI.v/y< #; E@2j %v c"~4Hj "mw|̔1_8nXh@2\;u8!m5w +, _H+Hl: If~$#$8v%=2Ehmܺ{7R!ZǪ!nL7.ܵ*fAEѳWC03*#rrMکɑ-|٦"I̾ ZI 8c|Q.:6]˾+Gkl$,4/WeK3nJ_ )jgm11梩Gq +5,,"$:.u(P_w} PVK;3?98V6MV? abtÊ5G>DNJ7!Ő1Nb_q~S^UFY-M䡶'l\ K\O.i+ `̠v~>C5pg OvaZ1 l5'0_=ds9hUXF,IjJ?]֋](OS6 |gToqiU!3- KXd B|Isn,)T^4x+5K%|8z'M@^!&b$4 0SE4%)|G!%); 0aŨ^%h s9 o6dMΉRI`MJ4A/wvh>{ zOQs=dm*)`,-.ҝ$q&_IJP~ndW+$$Yjg0n DinRBG2+W=_i!_Ny#W/ .BbL$$RPKJ{tLc?UDm_h01+N7* fy{>YyFL*^ZG`0 YZspatstat/data/shapley.rda0000644000176000001440000011121012252324050015202 0ustar ripleyusers7zXZi"6!X2K])TW"nRʟ)'dz$&}T}i6ac$w;uCF@3GOM=2waѐ9Dhy!,~B 4FEc9 O&e0{04X j_nٌXƈX1)'IFwpE >SH@+4z2<21x] 3 {x`ﱮQpZ%%AƺӯD.O]3<c  \k<3)> A $HiŹ;)LM{6 fkŲ!?Q^'k,'/6'K-dg N"x78ޮ; R%@kE(U"b֚7QR\ Q2!)ˇfQ̳Ģ!>6"q+<0Dg g+ncQD=? 
zW׺70MŷoAu,ʑD󻌖3|͆4PŒꈞ!.^z{ 1H{ff'ar}u`k?9 m :R W!e!]/_A&܆= 7ߔ}P0fN =ViyLDϤ+6@l}g nBkM,075^F$_29mDf /0 f G_JfQEc@W=%݁ X%PuGYMPGlh3@fqх|V1X=͙Xp@iT9k٬XBf&j,ӹ <ΤDRYBewLŎ$_5ꐊ+RƤCTAQ6M 2?nڪ5QRM)jB*뺨 0h&~?fA^ىZѕ͹{"F՟.e(( ~YRL:ZdMQ*z2YЗŦxP!Ph⌯t},i| m5:lP-4Tn 7 9"ˋ} 5w!ۂz Gq BS ߂n7z⽳LQG41jqSܛa]v^IC42C1]12MY$uH<<;'d0EB~Bi,|lZKl!x3H5?9.H]'5h3vq+v$AMgv)/Qs`dHf > (t@M=Vg s:3i‘\ՠ["fDj=H[~2h|M{ln -;\ J#L0"xDm؊GH\/g P4r;AVAUC5͠ HX'5N6AO4PK*f2qiveH:P)L 'u|U1rHkB2c(ku9"A]w{-֦\y[q8tl^S1(S.h+ 7[AGD9AG7+[G!0͆3V0d\KF=Dw9|QYw&&g_QYrю?; ylqh5GFEcx+ xm (*\ J~=]"4po39l5NilL൞oڷ)̈|X|q#zX3v8VÄ "=kPzEzW <,2nzPfr]RH &-\S3J'E2u"0f-HWZŋ޼OKvS8ٌ)I'< չ5\WDkwnThl AB$PP \84[f00<&Wt03q|>pmɥED*wLBzeZ䅍{ɏ% /\*ؐ4THFT^Q4( Rgꪅ5X&ؔ=IM~Jj{C_R8ݩ`nmK39y^A!V8_bD3\Y,y%m˓ʩO3*h$2Cݔ=8j{Ck]*,M@Euh)=ST'IVA&}P%=6Ő>_9|2=?bJOk%;UүrÞi+6_[A}Twu%FCRfteVD2J8fc! hT,Y`"nR$ 7M]x֭g%6}Yb&XSFzXxX"z zDZS%Z.l.I ŀ99 T8;7zv:7~0͚(U\#x,Q1V DltsH-Hhx[NyN%(,Ӧ0j\j:fGK}Rv-%d{M=*1BS */iBS>UAź1*o hvp{|!<=k>z FȫC4i\YwڡļVkY$xhEde'kqoBz"T&A2%;`<tt4S'xa]} }]Xɔ9Z7ĤQBk؟RLpPf@VTgbOD%~xa#@Nn&@ ^DH`[zI )KK?YIP2/,|{"Q{Ւ"4Au2`bn?R5L@7^߂#@B`ڏ}#e˔|hCm,yIb1XRsoD{h,̱GvpP{;'Ó\ԵP}+Xz~@Alm^ ã+ԙ.5W$Zd }K("Pj༲seO:NI/8sv(cke xUoAwק-:Cl26]bdx0 AW T.]9QVPS?}w1_])L!0Й-LwX!H.$p=-Ld;Y)A%}nYV&ۚ뇫7ܻ4K>1#T!8~Axy!/$>̚TnԈWȕw~g "y[yC|N-AnGϝƺQqբ׻ H v]"ňw#ΎI5 ˦Ġ# Д)eh!h ܢ.6=Z>,8PFO~xDNTvI+Wd/OdQT$=Uפ>JC#8_$(3'˰[ڞ%2">*3j@؇VH0lq)f@L%pYY%B*RlblnZ>\+k'p>[_O u[Dޮ4!6f D~|E)hN))BD[6\NJG x5޻/~ynrHFfE mxĸp e]}K8al#O'&J-Ղ͐ï)3T8{xԗYs98vJˀϑ)"DڿhL5SwU6LSL9ZwՏ"K_C- ᄮ TRW~S:ۜBfBۮ. 
jK΢3{1gM"0uû1뫯hj"[R6> ?bGبA/ä./d'..tC,YЍ7F -؊lsDGH|m.dak}<(g~ܫ"&'1 Xt= DYz9?!QnG~ Yq@ДޘkM2k} ##u("y՗#h7pnR c@ݙ f3Nrmv$`h_{W^Ըgd㙾5Qk#;"03v.r{EG/ Y 5zc"%\3>n{Q*%K<:B6O=DJgU9&I`#_v a3 ݎ-A%&ǩdum-H_,^آ_(+qt+7pZ/iL\jܶ 9ԇȌdRnVDP]] :pޟ A!FyUA}L`)!_霄Wmҫu߄-#i n tTdhes:4  i/wUV.@lp`\/DK_GH[1ޫ$tFnc5fg}y߹娀Pj e˙;]U^׍||ai){.yV(Qr$)3ijrhV&.N<$mUQ=Y>Y>*+Cyv[rӄ|lųj{)h;v41\׈7V+Hpp2 iZ9g|JY@mAjPz&K{5D:0&Zx,зY/%9Wg;, ˑPY~9hX6my~vTTA֦D] HiަҴI2 )eO(AbSZZ3彧+4gDD- m,ԢM!ٶg$Q:M+{B ۜ+M%OegQ&x[OyH(t:8錄 Ly3ʳ/Ȥ?`V_k[mcuqO IG9ᱝ YD~ sp_x5C+8YU&(o}2#;EWf=CBA!5:o+v@R@[(<8Pqr)1~=\XAJTA)QS\co` 6RQgd*\F0Ně= ~ȶa3̾^1>k̽y=$.½;`!}uJv7ӕX&OvѹS<5} _$*.1w` 5P+j]‹UE"et" /@u~ו$ Uzl@Rn2+P(*WKǃvnFxt>VߟYnL(2• ?Qxj&ޭ"'֞_ls4k'q])tz,܏'?ǖ]ܣujtzrG >W=(t !9|^ ǣ,wƈoW! ]PN+#~-IMSѺzpȄ!WHVuZ̃G{s ~-ba <'&Z];iSWm,Gv Jar|4eX>Q .E#jg$uCd`;,aK =!bspV ۖu{N0E82Pa_  ңs՜צ;NnѰ~"Ɉʛ7VxZ*"|YESq=UzŶa_f]:Q26hߕEHgM&se/g}vijB*\"9'S!v1)^Rb@F>NVKW3&Zx/l\$GRIPNJ%(/+,yi>QRZXeu"*ڒ{;B|yAu[E)7^yF;ZyU[h2qBSR-6S?yjdTr'q}tD[M[%o35m<{sN-&i:# S&[ O> f{QHFvny~`ku|FDT< 3ЁF23+D4D:`-FmWtxڟ۲uY.:2 - dwoW/'/2KeTwzpeߊ${3whm~S ܈ hxΘ97WG|~n (/ tÏ^70kޢvB\#v\bv_!%j2}:[Q_JN&8%k^J921Eb~tԜjVݑ1u7xYE_i,qIQ^Cͱ#"(ar%q;.>  ^,D?LBH+[pPbJ%1 ժ6U:$Ȕ,m289c/{mȾ~<@lgA͒,~{upZjey#]ýĸP[6mhgMp}`9'_L1I:Q<}fl5k<.I%@My2HXX,۔ V (o4ٺ>D^ 5<9a_DŜm `bK l)*GּnFz4; \}HeT%|JG%_X$%JգihͧSaN #e ޗo/,{筱|U=9%MOdO x0~^^\">X:ݎ æ?^ċaB/[x, r |GjBdV%Fc`:u6T9럫%LDJQg0|"rW}"Jp1U!\auib2_E bx2A!ׂGa8P/鷻O-Һ `mBKg+AG749=%eͫ]ioҟc1!v#~u$0p6ƚ } [J #X:ȿj*;C eK?ȏϰa?6 v=0-%SLKc8s`rW[0 3Q$D"#PRǕt 9?Brٷݐ]O!NO,\mط¼ cBd)| !Իy{;+ʢe\Qtd[,"ٸs&=2EeW.49_FTVhk# !h#徳oۗھ]bc'5 80ƀTU> ¼SֿIKz.9颬}R(,ϦzS=ӿc2i'Bh(n:R-6Ɉ8,"QwM Ts^ˬkVh. 
&/wb*R&܁`lDZ^%.LAquЧܢ[QDبZgテg[kU= >G:ѱh$T cۇ-Hc׍f4g\.1߮>W -IVQrTQ=&*G>DZca ';YB>!hHEKqAq|[г(\>qd7y_H6gJH?ʥ5g?A c?ƴQy_&d"?{2 J< #嫼כ:{G ]̺`ٙr!*رy W'J-XUtlҤ_ob,[ɪUz ,uۨM'(3J ^ihhmaoN Ȝո"8|<ڭ;۪Xؤ&M^5W_tmTi#^3-ep?HN׵O;Af Z1hJweC:Fʥ 1]lh}&tteկA}9ߋ LE\Au˙4ev|*~/%y a6V.!,iWjMQA=F:lK&l~Q$OŎcv?ɰGW4oцq/SK6 SBpGQY`oT .ħhm7U>;ǝ\E [ {!QH՛ ^m]AyGPd yL4w=ёHU5ׇ\SڷI"kBU5F<]kgrĥT%5/}_Fug IJ23jft9R}֗3e,۫ s|1Q\p[}%w"msL8@35-*z<sYQi yC:3\ yËh?!g[.``<"Ș9cimu%DdXrH%<>sݼcLhvnm关]&'q~WA5{4Q~ aG$n݈},Pt%H1ڀLf*-biʆ8 MtRU4?ֺB l6dc%#:s~A.,=5aXP?;=6f1 Lh;ͭE2n ~Чb949wtm訓r+Γ̨XTKhXO66DO-"J MQH6e8Dm2zoSZs5GJe*:yu_E +P}e5JЧTFSHJ83:跡p9(T&Fm~Sn"Z6| (U[ҐE'V#PgL;ɐJ5{u$RAZI`oަ"3د"T>̱]idGjݕ{ EUCy;avSrՃ}w’XxW{xk&Tb3c bPZ0V\pJbN PCie7?Cs'-R>Gހ H96%~ du+.k#/&ЍWbqǕ I(D7 Y6ܥԺV"pc_ŰM5.sIj9!bZF +Ҿ: )fkU4$ІzˮkΕpc#^(&8=p!7bPrynj3X 3OaG0ՌH@IWXLÏON+~g β63/>"j\3KOP$}L0C5u1y8x4Vħnv@Zu5%:hc XM/0pe߱ɻ6)# q Im_r=%P;T[߀T/>[txu^V_q ` -*gBM*_ Q ֿP1Ar$./n˪\HPK\9;&9@`18h{--$[{<qB  ]4՛sNC:|,}HE#b1{~X2U $N(Y".[DC29j^ sHD~q ƽ|s\iiߏU/6ɳ|s4AEVcPtiQNF!K:SMe('!%u9 rՂr2msG`Z WXNɓ#!ϊjQSj3>ݞ /BoG_r~zz3!qRn9Wh=M7vyK*T![i@~ʰ)'O%o?=rw N6o  0V9Fۻ=P ~<+X;XWyݜ2Ňb%}aqx[ D%nqᇋ3@=,ujnCvacRRKJp~Ol?w ]o.F3ד%:.K'OW(4ܹc^\['izF 7jS1lZX\scWr]0X,JZ]* ֽU:Y<=~jq!pYU + //)3 r,x1pfnE732/a". +tp!g[KBҍ=_Z'(Y,kufWS۶Ѧ4)rS-Tǯ$\'L^x)O:A BEC،*9S (y!tHf7k 7xH#ɼ" )TYJ7gMl7״RhKil0*PRe Ijh5s^S sVIUǥ{S SDit*ƌlmi׎,GpwV=,Ŝu@~26vkd.ȫ§Gk( 3u[KF E ~п!cuy@8J VOm.$2y~~PW IM2e~|߿͂c=΄,bJh,3|Vo4$ll[\~k'cG.GΤI{滹 998ċv4̺b$3*vl֍ DPN. #s"hXcYаi⬐iDQC{΂+dL4mo!) ٧#%Wx(j)z+>X4,HnY:. I5 @C#/Ve>uh\fy@?L(o>ਖY k18I7&͑R)'ꟙU-Gc%EE[\`0O5,kV)"{4`d49g֙}O'eX wѿf:{S}4hx1K3kҒ0Vު\.+w8A@Fif|#7DX߇A? 
04L%uz=A°9o/awc `ȓP?)D FbЉDJtaI` tMou_p6 {x[BȺV Yj6_;/nSdr#: ؄W#= hN{S*^T_=5i0"flPϡQ)2FA1 zdHggѠ/\7jUs )=kCn: V~_]bJ9ӗB0dnnNد6s砈s5A{0 9\GICJlJwj~#Mb]k G0T>lav{#h\R"?;il$~q1 w} NRwVugeZ~#'7S\g뷤۵W\8^'mb&;Q՚s5-dZ L4l=h^1S$1eYWu $aMU߻YŃmڽ, xjˇҸ 3]Ҟ'$0fr?HE!cJc?2l5;dFKx0*-_]]8'=C/5T*ÉȔ́>Ex\R1Ϟ4nJP "4eDՏQ*^fv#1 ?$}ҧ"/Y0<s)Ȉ{4nϞJ85 hu ;6Q4ix*3<{ZOAL'#]ԺOY.bg-s*ǡ3#QUlXmivڃؠxv/@ 5BWoWrpnU´< \\A8նY00R-2*;~F+ke!ŮXqt]AP&!VH?kEu0Qi M6e <:D'3Pvr+o4g^bv onN;o1JXy%go宷jj!$ DZx9,cCG.!rZza<ѻ&C_u? dw9Y%Ej"Z5h}< @-`eSʜ 2-wovhЀ Z~$yp x98 ?MX \7 !I =n-ZkD:iPy8o=.AUaĘLP4*$֨2:L3OW?@avCIM&dA_cCrqX;v"64E53iҬMop0CS~l؎*Ϣ q(쾎fPf RUjxZ m((,&"Ę__,(%忽`og7`];V=iUYO.fb-4 {dOTR_6`̂E8ٶ{Ĵ_;#!#?ԛ*AͲwl_iL,صk9P/l*4Yb@хYGQ0qF]. Lm@u" O QҮBzd}> fx<ҍ,X&؎W ^̎OT+v@߫E xm^X<{,{n㲩!a" e/:+=#v8{υ/2 |f)oOg)$*(K?WjOZ"Zye1ɀLb&wٱKtv6<*-:3=Qq@rk[STֻF4EaSOCNSSeR妅$'m|W9 T$jm~Mb`{ ȹz-Yp^rOK=@<U#حk #m;Є! =0W.]gyw?~(c'q3:ZP>z @`?[.=#MߔNZ\q^El/g@ÐxI]߀4g{p%iHqjݵK1[CnxNOt+]60=0sIy=*AdEFyjk`I"+䇫éDTcCsj'*uEC6ⸯ"d}cKP aV (]ւ1~ =q+R  ; EeL5k'ƁI$m=?MqvfR8v< LU~ŧgVw#&61^dW i!Eէ˗I5GV yr )ҧ*V 89EE;Ipմ 6ޛr0wJ ˨ Q"' m!55K+\f.PVa/HM$YP!}@yз;l@|zsu @3]Q>\` @"Mv =,^r>)eQߝ\k,c92pݭ6,߱M^C)Zq:]fhUDZ8c~xKG]F|*jes/B~e //om<ƭЪ?Bc2Cc8OΪ(!$ށpkgcd->IoJˁ* S'|o--Iyo9Gca309y?U-=_2;\0ŔT= ouF}UG:WelʴϘ ʝ߹I)zsZa#Q!Ӯi&/a=+ )֠"Nm:ߩ,̓1YKM]:_)Կ I7j0 t為(S0@ۼ<`'<=(ӥ잏q!0uT* {h%CaHQ`3*EꏊΛɚwdg§\q+>r~r>^3{3)<[k,a)LhLgB,#hpڸOA0'Lҷh/k "QJxz/\el3h>v' :+% N`t#"S+ LR]k|~~ә?nCVu zkf{n.\94LnG% P=E-1$l[δVf\ch[r6= VӾ6S6PR S. 
bCWp\w,g6}#xfF; E-N-k22U4K(HX*݄̊*0vk-,%8ztϽOc;M)@q7Pzl&Lb.S$WzoEJw>?D K][̗12 sIS W$Bힾ$+B7sաZe](@KAqܲD2-T/6{:a!ËzDF F yug0Lմ"0] < Ɋak '_:K5@)(ƭQ7&m[BLF-AV%nfj_5GsoRm?q]餖k6#a&`^'p]Y@ݟ6El◭Y: *݆@&LJ*{;cp=_ C34s'YG~WBRiw‘fӅ#߫IKI_B_ 1F1K.pr{5+5 5$W5R Rކt@\ć Rs LT~C&楁% h:rв{xI?g)>T/2$chA be/TH5̲begɏq>evͥݠ%$p϶G$Qűo!MyG3OEKR6y[a/jks!*V˭*~a] EVeq06ysLO  ,_Ttg`L~fJo<BMa#`,J5]xuBBwz5L ordpKa & p~(ն蚱/BYFp]m +9U`eVb3dKaܑprya*67U?sGxJ8eq1A/co(3Uwn˿J>nH½ [)2x>'h R-@!rF,h5c7hVPj']Cs>J6P䦇K3{5sn]:(.}(MC'!f[b dZdCp3 8i}@>A[&:z}nRm~+s냺:y{|Ln1V?89n23dZ~G!s>VldT$YG(;S`]p+Q++ x4,\&܋XKj?m~Y>g[7QJ4mԽꦍ*]\[#J!*ڦ4zngm XDa>pH70g-1Cl j~J- KcUNcϦJU{sx9dacSC~%|˫w ݃04>w´ hxmxȈ`h<@(g@ ɃR:/Hp?*s|IeFc s Aqׅ4pfڒ80q $x=uS#>|Ecl$Hi{PBs"X#C>:.)F0b\F!E-ް*2$vNEkw=Ŷ?m==ɟ&U79hT2 ߸ PՕs0 `{a F;Tn=EJٻ:ѽ)bݶgLj. {Dh& u]3nLuZpŶX41MO8A$9(t@3BJl6Ɩrc9-߼ߔan!2"(vꁍǙ+"2;Ճ 1:ݢZ͏id[QnB3gP)4ps\7̹/d?JBp{!|a"ϯPXx5!h)xc>z/}}W æ4\>[ZE *v?\|C6iJ _c'vk[exLjɹnAόoỂ <ז*mjD [bMΐg w)8Po3DhRd:'\!}?s{L'͵Gy)…՛}zL#9Xh_)~nA>gNH.m_Wm"Z1 i4WwoR&AlND?D(HvIׇH:f>2TraI" E ' +}9-{ùêfrϼd \ bܑ9E"Y`Fޭswia6K"("f+pSi Znŧwl3HN ߲Nae0!L!!)1ċI0rU52tT2?HM)B}֧s}$7s2ʍm₄s3as clBM i`c]miV^ϥҚlNXnZF2,6͹#wAR,BdJo|'ehMΊ $y4^GsNf60$B>Bͻ+)zX\ 3t9B$pW2qv/ @ʲŶ{c+ IGNT7w ɥX2H߯6[*bV+"ΝWrf.zؽpiis.!2 FtWة1OБB7gm̒^%0 Uj͋R,dF4+V9c=2SO:WL~ ;\ԢDw7i:`lܴp:Q",n|z e}ӗ,, rAj{ˮ-EIh=ZaCP!tx,]ݝn{39즬keE6wa"P9$@3[ [9&ͨVl]q>km|[ 4~?!ϛuHC|~+$f( H]zP=AjI_>\M7'_1lDm$s8,#]L4 Cb@AxZs'X_l,W'+wgA>I"AQ*w( d>FHS. 
W"lg?:W"޺ MU"ԥȝ0H7w ^*X4log?6OX^Gs5vX<>eut 3mڊ.߳Dcoѡ|sHq٘5^#|k=fwᝦqaVAKl,<{hBɇz3j{6BAdvd7S!'˻8f-23@߀\" ; 9`~ǃ߸^l7q#@Y`}(a};Kǟr*4xhhaaae_': ߼ o.V&: #?2b#DSiK ݮp9rCj_ծEyٖC#S./X80`{óPJoM6)O^2ɳJG'$+@$9Zm2MN15z=DHaadf6Wn7,?&TC' HK [}zp*wG_ }eD0C-:A4%-8OP]K@9*ИrF-W9zK^2ǝW{/6_2dOn'Nu5X:u!υTwtDRʨykj )|{$<.6p銊wB a,˺zYJN b 6_rS$#"> po j.]Q,u!uNٳm' 1͍1նSOI2fP:cD-XGzA&2~l!k#b@]  jgGtBG$qb<A:ve@T$3:z 5 W xz='+֦vPڣUZ)KZ|cQ6a?ki*Bwȼs\>6]3쐑<ƋQa MЏ H41}W.i*UZjwt=LE|3tEuv`Fmms)G |團Y-IpKUdeK[Ul@LW'߬?͌wR>qmi+pH\wF1,0z=ե 5v@}Dnoi$^arCO'79R~vJ|nl͢΁oQH\1t 'b >YHK?fWe,ӰS0Q\p' |(ǒ|*ڻX+/BծF0WaC v Q/ug$P-l!Z"j}]}iա, tb,RS۷-$-;gΡYxAgZ!(|%ow8 w8@x15Ƅq{Q=fx m~in1 z& D,fzɣtߣ63Q[9v;#s>0t,CXL n3P<$ɰLtkEaygU:_ƿ02:cOdmңr/CY:xW&L5(7m]5?̋/m%쥓WA.i .L~PWXU3 7 + [XhPs[ nn`w ECq? 9]8KnChKwhD'F4A#3>AF1L}]HD*y1O+;HYK+ܴy x5y-6;oSs*"f}d.2y2\UxڽC)~f|/>f}Y*cM;pFNWiB3@& AЭaI㔞uNN[g/ћ5yO92F7O3yYXxf'=8q ŋQge~czsЮ#q*%U{>0gX>L![' 4~=oXs)!MMq~DS\aTHgKL]l O|imּ[3 sK:w`;[ObKkl h%XnƢq,hK~v/[r6k!D/!8m8龦>cpYIHt\5\g(.²NYщn2y Ύ  v+&`L;a&I:zbD0 $6inwᢥa+ \lZԻ}{Yy1X7Ml=_r02LU~V7PC=P7~] y^$>0X<ֿ+?#zH[,هZ49-Ns ;l-Zd/Y9 4ܖ;PNP9$`5e/5Xd,d>;,IU#Dp)΂4_,Pܕ#ljφN `a9cB_ogy4;v᱇d9jޅC%gs7Ϳ hXhUt(-<7 mw>>e(,E뚑(w*A1#ՐviC9p9$+Ǻ?.A[!3旮}^u{#覂c3΍E4 M>ja -ūu(/{M>Lvb52$U.{@T~Q#L (Wf+ӨUԙ)@qWOlW$&dKM]qnT÷q~K^5+7s/'x~1 sـR?Oc,- TF_KJ/o7ӓ|&=qp}lueKjKބ\uV}DB=ֿR:<^=i:#x(b㍔?uYU?ǘWjaNj qb%Z3)mWsVfvlMJgcA, =KDfJqv n#ʫ.zߝZ%.6"0LE|=cI[,i` ^JFc>PRTc/٪)~kyZY;ꅈ]*=W#oԄ̸Sx~ ,$h x1~WP \ 7|fvSN_67?Y3k6椔6XQui%E8i$wV+q'/C&I9E%7~#?S#Czjե&`_3e"b{3\1jؤD{0sO1Rʃ)J勖ms ¼e< WK)h 0Q,;C+ǣ39Zx͇#.-)Oq+ znٺZ&t ˋY}̠  6!D=d*KS:1K_h 4p( ?N4VZ5(B{](Kzq~"2^߫Qc47vN,(8&H+Ni^ת%Ӥ!VZ4IHza%"}.Al/zNJoE+Cu ,mPQEn_ݴHᤊo~df7{x{.si5eeF 9CF`$sVl6qͩNr'#vzT 5=~?gK9'#z%5c޳3ſZp@kץ}5ڟD%?UL˕?dpt# SmtK8 !^2ۙUEz5plw`X|yAEuFE|S> Vw<%[]7ŇZ=pq->RD؏b2vo?0~p|dž0;f_jHȭZ6\ BZ2+A9-ZhfgXΟ_[Ztv4|1۾}({!~\w zߓa:.[..b{6̔[@W uTٔgԇ2إ݃4}nppvDpFH۠M1t%,=$ et b6}b?ڎC$|]bv@9/Zqeo 8Р>Kb dOEݛ9hm$ɌKsVWFF 9Ec3 Y>8-3ê. 
Bp܉͟RēyUD8Aw]mUۅYdѼOvmvpx s=ZԔJW^Lx>3t6 ^GAF%}_ Z&UU'ꡠ-_>].ws;Wh=v gE#qpyTI/:x07ِ[v $e,3+PXe HG/.T)ZwnJ-!CVAD{2h5oZ\p5<C0 iR ֮{u:\z)jYٰՖGj hGOa3W-RaBe/u%GC߷br"SS]"QcF;nYh Iv&? OF\//(ۻNg=[֑J2g9r7ĝ1{x̪¤/4e8}؃% m e{ʻt[ PpHxl.eI8&>u bح(9W" Hix I~ {D́se8 9Z0x/ڟV뻓;,2v ؍4uFu']L(}Pe:Xj֩Ib@IWrmo.NcjWZ!>yCcs?[݇ R|5Zv0~D _]w)LI$v_z6L_j+#D=i_ , uaN)363.o1dJetk Acg `<3'̿ԯC @gx:ZGCl]eU3OĬY&<] A SʖZ>37_sSETGE.Ҫ/>ף$>Z׈7t9fCǙgS߰G?B#IU$ZK u?NaIBW[n !*}bG>bKhnAl`H\k W<g2Z2ƻEr89mt~ pDc=thUA^L'e*z9D;&tl9=)-5Gh$bsK @S5 '9A9/vn@,zc",Z}BUUk'L[];lULBZFnom-nܸ/o *+^yb[//J߽592"/,1qV"Ԍ]?#Dq ?S[$ó^v*6OK-xG4@L=TR$' + ,6Cݑ0'^#+U;k@q ix7B=<ҘW9;o?obtI0"/撦R\wR2*3G=M;T.C2hՠmSI@062 [5=#ght/[>j':Ҋ5ޡMljSes'uӲY4 isuŲ^&idvwU2A,ssǶf ʉt,Y5q)Bxsg]FE;.PZ~H8X2RB9sF]I=uJwMLMJf ;#&cNv0i,k(2lBMMR{5Ba2A^[8]DE-?];Hz{53 ~$T;*AA>{Dm0L7OMI}4s p4Ẁ# OgyzJa7էgNRp_|ap2#'t+[([ ڨx#[vcwp%LheY7>FMUV7&24 Dxe&I |p6e 7^T8ò\H-j&ȿ9FB8?ǟ)-B>0 YZspatstat/data/betacells.rda0000644000176000001440000000354412252324035015510 0ustar ripleyusersX lenkFBUTD ʡFBK(+vNggأv[1!C# &*U( HD.C h "Ĵ-鮭$3L[p8lϙmqZ9AV;mH.,G0K6s I)Vg(Ww=-_|,E#ք\ڳf3s.ͧdH@,>ia_HUnԽJ)KZʃ*$WeʆWn-%Ku5la7=TȡBV)O!J͕^w/dddx 8nH8ŌEgb#wEgEs@W b K|&&*|ycBa{eiwDV]-bfU.Q3!nΩdfE@?>c?½ӂWHiKZ5(k?.DisЯ݅a"9QCW߼1.W6(+?Hvr=E N({п}{w De.;cROYGun!;M"ٸ4a<# #{wIKțW[a*xj6FA?:/޲_S7s[}?pk#H;A/=f{|"u љOr{Lyb`>߸pP86 Q:87tQ 3y*8$4 [8ށb ZE~A^0Jo$F>1H?!OۺՄ]Osm q=]DmX1n@ wctO]G1eX3JɐȹeK{򖌓V2H>frY.vdy%4(^p3a\;}*<%3ƠƢwGy`(va OGho=O'ؒjqM3yCBd58Ko3/_%|F Ai[f4Ot>N:Y7ݐWo,攊ӜK#xud~?7nTq /J0צǭsɋßGj~2Nu'~yP9Oló<ȸ|&:x%kj_e}7F=SN +S.zkK;uGU`L/O}}$ IƫWIsKI4_p|ꋃ}9===~<3__rSd/KI~|JOG[GlUUdspatstat/data/amacrine.rda0000644000176000001440000000621412252324034015325 0ustar ripleyusersYgxB" XEiE@/"4 F MT)CUz'$^Id7ٚͦn"wfrA>ϗoofΜ39plZckT*g ]OWgI媪ɾkx{yT. 
ٽ}_ޫ0ŏ+*WGvK €n/Vj -=Ej^>!EYJo0?`e `/?`~^!!/R-,77>zJ/ Qf\#NY\gTM#UNmn?_|[脆wo>Y?{&Q}p~m8Ыp%?_G{EB.m S7]"Uy*I]*vϷz7"3ѝHk_"2OzRрdwo}[?:7s's!3ݳc#p7Qw,S~'8W^+Hig\;]uk^SiG6eEA wǾm8ԧZ7V_hqhZush-=6ې?)ó{zAVg#ti>Br7A2k)H|kFؚ1v[7'<͢߆ aI%GU%^h54+6lw@{#Y e(Bz@wc_zq:SDܴf XW:=Phnާ'|EC !XlCXiVfL8M:_Smi{ARRUoÙMZBG#wBwwC".=3SvU=lCX|W#7"?zg^reT'ۣ)>w=ܚ!@+$dT,,⻜P FI^R:e7bD yJ'Xp_Z =0F8 8<-V,{,&^ꭇqw|ɿEė&) 7G̭^MEy^,|s 0 b(SR>Yyy;t%TJ~2`M"oC)sQB8(,%<~?e~(P[#lT/m6-vkɔ2+@ajd ;c`?7t]+_ ,ߡ\OQL5ʝ_.k:sg&ƨ<\PvB|)g0w''z??xM =L¡3FZE='(7H 8e((6#zt.w a`tldoJ+(yKc䉴:;$0եb7. X'l8ê8Hd.RI)qu "T&Zx{2Q==u E{PWHQppO'0$y[꿂ÜzWl(jy"6\F1 ("a!Y=z# 8gA8,)=9a[SYv?r'pG"  u9f#Y_uK?tI'+U3}\aK?_%^.WSTx'Cws)$q؄fH$ UQ% .#Οd,<+;_dUO?俜o/dX0j`Z,F: ֫?؍AY /saZR-B͒ߢBe TL\x̫K0W^Љ(zNݨW-o5X*gEq D(q+I.1̋!Z'A; y/(QHOc=D IIǛQPgw]:7Jt]|`nwh`JRN_^/zy|f)8y2tkngn|&΢;lOBߎ]U=7}espatstat/R/0000755000176000001440000000000012247336365012361 5ustar ripleyusersspatstat/R/window.R0000755000176000001440000010005612240375345014012 0ustar ripleyusers# # window.S # # A class 'owin' to define the "observation window" # # $Revision: 4.135 $ $Date: 2013/11/12 09:30:10 $ # # # A window may be either # # - rectangular: # a rectangle in R^2 # (with sides parallel to the coordinate axes) # # - polygonal: # delineated by 0, 1 or more non-self-intersecting # polygons, possibly including polygonal holes. # # - digital mask: # defined by a binary image # whose pixel values are TRUE wherever the pixel # is inside the window # # Any window is an object of class 'owin', # containing at least the following entries: # # $type: a string ("rectangle", "polygonal" or "mask") # # $xrange # $yrange # vectors of length 2 giving the real dimensions # of the enclosing box # $units # name of the unit of length # # The 'rectangle' type has only these entries. # # The 'polygonal' type has an additional entry # # $bdry # a list of polygons. # Each entry bdry[[i]] determines a closed polygon. 
# # bdry[[i]] has components $x and $y which are # the cartesian coordinates of the vertices of # the i-th boundary polygon (without repetition of # the first vertex, i.e. same convention as in the # plotting function polygon().) # # # The 'mask' type has entries # # $m logical matrix # $dim its dimension array # $xstep,ystep x and y dimensions of a pixel # $xcol vector of x values for each column # $yrow vector of y values for each row # # (the row index corresponds to increasing y coordinate; # the column index " " " " " " x " " ".) # # #----------------------------------------------------------------------------- # .Spatstat.Image.Warning <- c("Row index corresponds to increasing y coordinate; column to increasing x", "Transpose matrices to get the standard presentation in R", "Example: image(result$xcol,result$yrow,t(result$d))") owin <- function(xrange=c(0,1), yrange=c(0,1), ..., poly=NULL, mask=NULL, unitname=NULL, xy=NULL) { unitname <- as.units(unitname) ## Exterminate ambiguities poly.given <- !is.null(poly) mask.given <- !is.null(mask) if(poly.given && mask.given) stop("Ambiguous -- both polygonal boundary and digital mask supplied") if(!is.null(xy) && !mask.given) warning("Argument xy ignored: it is only applicable when a mask is given") if(missing(xrange) != missing(yrange)) stop("If one of xrange, yrange is specified then both must be.") # convert data frames to vanilla lists if(poly.given) { if(is.data.frame(poly)) poly <- as.list(poly) else if(is.list(poly) && any(unlist(lapply(poly, is.data.frame)))) poly <- lapply(poly, as.list) } # avoid re-checking if already checked, or if checking suppressed checkdefault <- spatstat.options("checkpolygons") check <- resolve.defaults(list(...), list(check=checkdefault))$check # whether to calculate polygon areas calculate <- resolve.defaults(list(...), list(calculate=check))$calculate if(!poly.given && !mask.given) { ######### rectangle ################# if(check) { if(!is.vector(xrange) || length(xrange) != 2 
|| xrange[2] < xrange[1]) stop("xrange should be a vector of length 2 giving (xmin, xmax)") if(!is.vector(yrange) || length(yrange) != 2 || yrange[2] < yrange[1]) stop("yrange should be a vector of length 2 giving (ymin, ymax)") } w <- list(type="rectangle", xrange=xrange, yrange=yrange, units=unitname) class(w) <- "owin" return(w) } else if(poly.given) { ######### polygonal boundary ######## # if(length(poly) == 0) { # empty polygon if(check) { if(!is.vector(xrange) || length(xrange) != 2 || xrange[2] < xrange[1]) stop("xrange should be a vector of length 2 giving (xmin, xmax)") if(!is.vector(yrange) || length(yrange) != 2 || yrange[2] < yrange[1]) stop("yrange should be a vector of length 2 giving (ymin, ymax)") } w <- list(type="polygonal", xrange=xrange, yrange=yrange, bdry=list(), units=unitname) class(w) <- "owin" return(w) } # convert matrix or data frame to list(x,y) isxy <- function(x) { (is.matrix(x) || is.data.frame(x)) && ncol(x) == 2 } asxy <- function(xy) { list(x=xy[,1], y=xy[,2]) } if(isxy(poly)) { poly <- asxy(poly) } else if(is.list(poly) && all(unlist(lapply(poly, isxy)))) { poly <- lapply(poly, asxy) } # nonempty polygon # test whether it's a single polygon or multiple polygons if(verify.xypolygon(poly, fatal=FALSE)) psingle <- TRUE else if(all(unlist(lapply(poly, verify.xypolygon, fatal=FALSE)))) psingle <- FALSE else stop("poly must be either a list(x,y) or a list of list(x,y)") w.area <- NULL if(psingle) { # single boundary polygon bdry <- list(poly) if(check || calculate) { w.area <- area.xypolygon(poly) if(w.area < 0) stop(paste("Area of polygon is negative -", "maybe traversed in wrong direction?")) } } else { # multiple boundary polygons bdry <- poly if(check || calculate) { w.area <- unlist(lapply(poly, area.xypolygon)) if(sum(w.area) < 0) stop(paste("Area of window is negative;\n", "check that all polygons were traversed", "in the right direction")) } } actual.xrange <- range(unlist(lapply(bdry, function(a) a$x))) if(missing(xrange)) 
xrange <- actual.xrange else if(check) { if(!is.vector(xrange) || length(xrange) != 2 || xrange[2] <= xrange[1]) stop("xrange should be a vector of length 2 giving (xmin, xmax)") if(!all(xrange == range(c(xrange, actual.xrange)))) stop("polygon's x coordinates outside xrange") } actual.yrange <- range(unlist(lapply(bdry, function(a) a$y))) if(missing(yrange)) yrange <- actual.yrange else if(check) { if(!is.vector(yrange) || length(yrange) != 2 || yrange[2] <= yrange[1]) stop("yrange should be a vector of length 2 giving (ymin, ymax)") if(!all(yrange == range(c(yrange, actual.yrange)))) stop("polygon's y coordinates outside yrange") } if(!is.null(w.area)) { # tack on area and hole data holes <- (w.area < 0) for(i in seq_along(bdry)) bdry[[i]] <- append(bdry[[i]], list(area=w.area[i], hole=holes[i])) } w <- list(type="polygonal", xrange=xrange, yrange=yrange, bdry=bdry, units=unitname) class(w) <- "owin" # check for intersection or self-intersection if(check) { ok <- owinpolycheck(w) if(!ok) { errors <- attr(ok, "err") stop(paste("Polygon data contain", commasep(errors))) } } return(w) } else if(mask.given) { ######### digital mask ##################### if(!is.matrix(mask)) stop(paste(sQuote("mask"), "must be a matrix")) if(!is.logical(mask)) stop(paste("The entries of", sQuote("mask"), "must be logical")) nc <- ncol(mask) nr <- nrow(mask) if(!is.null(xy)) { # pixel coordinates given explicitly # validate dimensions if(!is.list(xy) || !checkfields(xy, c("x","y"))) stop("xy should be a list with entries x and y") xcol <- xy$x yrow <- xy$y if(length(xcol) != nc) stop(paste("length of xy$x =", length(xcol), "!=", nc, "= number of columns of mask")) if(length(yrow) != nr) stop(paste("length of xy$y =", length(yrow), "!=", nr, "= number of rows of mask")) # x and y should be evenly spaced if(!evenly.spaced(xcol)) stop("xy$x is not evenly spaced") if(!evenly.spaced(yrow)) stop("xy$y is not evenly spaced") # determine other parameters xstep <- diff(xcol)[1] ystep <- 
diff(yrow)[1] if(missing(xrange) && missing(yrange)) { xrange <- range(xcol) + c(-1,1) * xstep/2 yrange <- range(yrow) + c(-1,1) * ystep/2 } } else { # determine pixel coordinates from xrange, yrange if(missing(xrange) && missing(yrange)) { # take pixels to be 1 x 1 unit xrange <- c(0,nc) yrange <- c(0,nr) } else if(check) { if(!is.vector(xrange) || length(xrange) != 2 || xrange[2] <= xrange[1]) stop("xrange should be a vector of length 2 giving (xmin, xmax)") if(!is.vector(yrange) || length(yrange) != 2 || yrange[2] <= yrange[1]) stop("yrange should be a vector of length 2 giving (ymin, ymax)") } xstep <- diff(xrange)/nc ystep <- diff(yrange)/nr xcol <- seq(from=xrange[1]+xstep/2, to=xrange[2]-xstep/2, length.out=nc) yrow <- seq(from=yrange[1]+ystep/2, to=yrange[2]-ystep/2, length.out=nr) } out <- list(type = "mask", xrange = xrange, yrange = yrange, dim = c(nr, nc), xstep = xstep, ystep = ystep, warnings = .Spatstat.Image.Warning, xcol = xcol, yrow = yrow, m = mask, units = unitname) class(out) <- "owin" return(out) } # never reached NULL } # #----------------------------------------------------------------------------- # is.owin <- function(x) { inherits(x, "owin") } # #----------------------------------------------------------------------------- # as.owin <- function(W, ..., fatal=TRUE) { UseMethod("as.owin") } as.owin.owin <- function(W, ..., fatal=TRUE) { if(verifyclass(W, "owin", fatal=fatal)) return(owin(W$xrange, W$yrange, poly=W$bdry, mask=W$m, unitname=unitname(W), check=FALSE)) else return(NULL) } as.owin.ppp <- function(W, ..., fatal=TRUE) { if(verifyclass(W, "ppp", fatal=fatal)) return(W$window) else return(NULL) } as.owin.quad <- function(W, ..., fatal=TRUE) { if(verifyclass(W, "quad", fatal=fatal)) return(W$data$window) else return(NULL) } as.owin.im <- function(W, ..., fatal=TRUE) { if(!verifyclass(W, "im", fatal=fatal)) return(NULL) out <- list(type = "mask", xrange = W$xrange, yrange = W$yrange, dim = W$dim, xstep = W$xstep, ystep = W$ystep, 
warnings = .Spatstat.Image.Warning, xcol = W$xcol, yrow = W$yrow, m = !is.na(W$v), units = unitname(W)) class(out) <- "owin" return(out) } as.owin.psp <- function(W, ..., fatal=TRUE) { if(!verifyclass(W, "psp", fatal=fatal)) return(NULL) return(W$window) } as.owin.tess <- function(W, ..., fatal=TRUE) { if(!verifyclass(W, "tess", fatal=fatal)) return(NULL) return(W$window) } as.owin.data.frame <- function(W, ..., fatal=TRUE) { if(!verifyclass(W, "data.frame", fatal=fatal)) return(NULL) if(ncol(W) != 3) { whinge <- "need exactly 3 columns of data" if(fatal) stop(whinge) warning(whinge) return(NULL) } mch <- match(c("x", "y"), names(W)) if(!any(is.na(mch))) { ix <- mch[1] iy <- mch[2] iz <- (1:3)[-mch] } else { ix <- 1 iy <- 2 iz <- 3 } df <- data.frame(x=W[,ix], y=W[,iy], z=as.logical(W[,iz])) # convert data frame (x,y,z) to logical matrix m <- with(df, tapply(z, list(y, x), any)) # extract pixel coordinates xy <- with(df, list(x=sort(unique(x)), y=sort(unique(y)))) # make binary mask out <- owin(mask=m, xy=xy) return(out) } as.owin.default <- function(W, ..., fatal=TRUE) { # Tries to interpret data as an object of class 'window' # W may be # a structure with entries xrange, yrange # a four-element vector (interpreted xmin, xmax, ymin, ymax) # a structure with entries xl, xu, yl, yu if(checkfields(W, c("xrange", "yrange"))) { Z <- owin(W$xrange, W$yrange) return(Z) } else if(is.vector(W) && is.numeric(W) && length(W) == 4) { Z <- owin(W[1:2], W[3:4]) return(Z) } else if(checkfields(W, c("xl", "xu", "yl", "yu"))) { W <- as.list(W) Z <- owin(c(W$xl, W$xu),c(W$yl, W$yu)) return(Z) } else if(checkfields(W, c("x", "y", "area")) && checkfields(W$area, c("xl", "xu", "yl", "yu"))) { V <- as.list(W$area) Z <- owin(c(V$xl, V$xu),c(V$yl, V$yu)) return(Z) } else if(fatal) stop("Can't interpret W as a window") else return(NULL) } # #----------------------------------------------------------------------------- # # as.rectangle <- function(w, ...) 
{ if(inherits(w, "owin")) return(owin(w$xrange, w$yrange, unitname=unitname(w))) else if(inherits(w, "im")) return(owin(w$xrange, w$yrange, unitname=unitname(w))) else { w <- as.owin(w, ...) return(owin(w$xrange, w$yrange, unitname=unitname(w))) } } # #----------------------------------------------------------------------------- # as.mask <- function(w, eps=NULL, dimyx=NULL, xy=NULL) { # eps: grid mesh (pixel) size # dimyx: dimensions of pixel raster # xy: coordinates of pixel raster if(!missing(w) && !is.null(w)) { if(is.matrix(w)) return(owin(mask=w, xy=xy)) w <- as.owin(w) uname <- unitname(w) } else { uname <- as.units(NULL) if(is.null(xy)) stop("If w is missing, xy is required") } # If it's already a mask, and no other arguments specified, # just return it. if(!missing(w) && w$type == "mask" && is.null(eps) && is.null(dimyx) && is.null(xy)) return(w) ########################## # First determine pixel coordinates ########################## if(is.null(xy)) { # Pixel coordinates to be computed from other dimensions # First determine row & column dimensions if(!is.null(dimyx)) { dimyx <- ensure2vector(dimyx) nr <- dimyx[1] nc <- dimyx[2] } else { # use pixel size 'eps' if(!is.null(eps)) { eps <- ensure2vector(eps) nc <- diff(w$xrange)/eps[1] nr <- diff(w$yrange)/eps[2] if(nr < 1 || nc < 1) warning("pixel size parameter eps > size of window") nr <- ceiling(nr) nc <- ceiling(nc) } else { # use spatstat defaults np <- spatstat.options("npixel") if(length(np) == 1) nr <- nc <- np[1] else { nr <- np[2] nc <- np[1] } } } # Initialise mask with all entries TRUE rasta <- owin(w$xrange, w$yrange, mask=matrix(TRUE, nr, nc)) } else { # # Pixel coordinates given explicitly: # xy is an image, a mask, or a list(x,y) # if(is.im(xy)) { rasta <- as.owin(xy) rasta$m[] <- TRUE } else if(is.owin(xy)) { if(xy$type != "mask") stop("argument xy does not contain raster coordinates.") rasta <- xy rasta$m[] <- TRUE } else { if(!checkfields(xy, c("x", "y"))) stop(paste(sQuote("xy"), "should 
be a list containing two vectors x and y")) x <- sort(unique(xy$x)) y <- sort(unique(xy$y)) # derive other parameters nr <- length(y) nc <- length(x) # x and y pixel sizes dx <- diff(x) if(diff(range(dx)) > 0.01 * mean(dx)) stop("x coordinates must be evenly spaced") xstep <- mean(dx) dy <- diff(y) if(diff(range(dy)) > 0.01 * mean(dy)) stop("y coordinates must be evenly spaced") ystep <- mean(dy) xr <- range(x) yr <- range(y) xrange <- xr + xstep * c(-1,1)/2 yrange <- yr + ystep * c(-1,1)/2 # initialise mask with all entries TRUE rasta <- list(type = "mask", xrange = xrange, yrange = yrange, dim = c(nr, nc), xstep = xstep, ystep = ystep, warnings = .Spatstat.Image.Warning, xcol = seq(from=xr[1], to=xr[2], length.out=nc), yrow = seq(from=yr[1], to=yr[2], length.out=nr), m = matrix(TRUE, nr, nc), units = uname) class(rasta) <- "owin" } # window may be implicit in this case. if(missing(w)) w <- owin(xrange, yrange) } ################################ # Second, mask pixel raster with existing window ################################ switch(w$type, rectangle = { out <- rasta if(!all(w$xrange == rasta$xrange) || !all(w$yrange == rasta$yrange)) { xcol <- rasta$xcol yrow <- rasta$yrow wx <- w$xrange wy <- w$yrange badrow <- which(yrow > wy[2] | yrow < wy[1]) badcol <- which(xcol > wx[2] | xcol < wx[1]) out$m[badrow , ] <- FALSE out$m[ , badcol] <- FALSE } }, mask = { # resample existing mask on new raster out <- rastersample(w, rasta) }, polygonal = { # use C code out <- owinpoly2mask(w, rasta, FALSE) }) unitname(out) <- uname return(out) } as.matrix.owin <- function(x, ...) { m <- as.mask(x, ...) 
return(m$m) } # # #----------------------------------------------------------------------------- # as.polygonal <- function(W) { verifyclass(W, "owin") switch(W$type, rectangle = { xr <- W$xrange yr <- W$yrange return(owin(xr, yr, poly=list(x=xr[c(1,2,2,1)],y=yr[c(1,1,2,2)]), unitname=unitname(W))) }, polygonal = { return(W) }, mask = { # This could take a while M <- W$m nr <- nrow(M) notM <- !M out <- NULL xcol <- W$xcol yrow <- W$yrow xbracket <- 1.1 * c(-1,1) * W$xstep/2 ybracket <- 1.1 * c(-1,1) * W$ystep/2 # identify runs of TRUE entries in each column start <- M & rbind(TRUE, notM[-nr, ]) finish <- M & rbind(notM[-1, ], TRUE) for(j in 1:ncol(M)) { xj <- xcol[j] # identify start and end positions in column j starts <- which(start[,j]) finishes <- which(finish[,j]) ns <- length(starts) nf <- length(finishes) if(ns != nf) stop(paste("Internal error: length(starts)=", ns, ", length(finishes)=", nf)) if(ns > 0) for(k in 1:ns) { yfrom <- yrow[starts[k]] yto <- yrow[finishes[k]] yk <- sort(c(yfrom,yto)) # make rectangle recto <- owin(xj+xbracket,yk+ybracket) # add to result out <- union.owin(out, recto) } } return(out) } ) } # # ---------------------------------------------------------------------- is.polygonal <- function(w) { return(inherits(w, "owin") && (w$type == "polygonal")) } is.rectangle <- function(w) { return(inherits(w, "owin") && (w$type == "rectangle")) } is.mask <- function(w) { return(inherits(w, "owin") && (w$type == "mask")) } validate.mask <- function(w, fatal=TRUE) { verifyclass(w, "owin", fatal=fatal) if(w$type == "mask") return(TRUE) if(fatal) stop(paste(short.deparse(substitute(w)), "is not a binary mask")) else { warning(paste(short.deparse(substitute(w)), "is not a binary mask")) return(FALSE) } } raster.x <- function(w, drop=FALSE) { validate.mask(w) m <- w$m x <- w$xcol[col(m)] x <- if(drop) x[m, drop=TRUE] else array(x, dim=w$dim) return(x) } raster.y <- function(w, drop=FALSE) { validate.mask(w) m <- w$m y <- w$yrow[row(m)] y <- if(drop) 
y[m, drop=TRUE] else array(y, dim=w$dim) return(y) } raster.xy <- function(w, drop=FALSE) { list(x=as.numeric(raster.x(w, drop=drop)), y=as.numeric(raster.y(w, drop=drop))) } nearest.raster.point <- function(x,y,w, indices=TRUE) { validate.mask(w) nr <- w$dim[1] nc <- w$dim[2] if(length(x) == 0) cc <- rr <- integer(0) else { cc <- 1 + round((x - w$xcol[1])/w$xstep) rr <- 1 + round((y - w$yrow[1])/w$ystep) cc <- pmax.int(1,pmin.int(cc, nc)) rr <- pmax.int(1,pmin.int(rr, nr)) } if(indices) return(list(row=rr, col=cc)) else return(list(x=w$xcol[cc], y=w$yrow[rr])) } mask2df <- function(w) { stopifnot(is.owin(w) && w$type == "mask") xx <- raster.x(w) yy <- raster.y(w) ok <- w$m xx <- as.vector(xx[ok]) yy <- as.vector(yy[ok]) return(data.frame(x=xx, y=yy)) } #------------------------------------------------------------------ bounding.box <- function(...) { wins <- list(...) if(length(wins) == 0) stop("No arguments supplied") # trap a particular misuse of this function if(length(wins) == 2) { w1 <- wins[[1]] w2 <- wins[[2]] if(is.vector(w1) && is.numeric(w1) && is.vector(w2) && is.numeric(w2) && length(w1) == length(w2)) stop(paste("bounding.box() was applied to two numeric vectors;", "you probably wanted bounding.box.xy()")) } # remove null objects isnul <- unlist(lapply(wins, is.null)) if(all(isnul)) stop("All arguments are NULL") wins <- wins[!isnul] if(length(wins) > 1) { # multiple arguments -- compute bounding box for each argument. 
# First trap any point patterns and extract bounding boxes of points isppp <- unlist(lapply(wins, is.ppp)) if(any(isppp)) wins[isppp] <- lapply(wins[isppp], bounding.box.xy) # then convert all windows to owin wins <- lapply(wins, as.owin) # then take bounding box of each window boxes <- lapply(wins, bounding.box) # discard NULL values isnull <- unlist(lapply(boxes, is.null)) boxes <- boxes[!isnull] # take bounding box of these boxes xrange <- range(unlist(lapply(boxes, function(b){b$xrange}))) yrange <- range(unlist(lapply(boxes, function(b){b$yrange}))) return(owin(xrange, yrange)) } # single argument w <- wins[[1]] if(is.null(w)) return(NULL) # point pattern? if(is.ppp(w)) return(bounding.box.xy(w)) # convert to window w <- as.owin(w) # determine a tight bounding box for the window w switch(w$type, rectangle = { return(w) }, polygonal = { bdry <- w$bdry if(length(bdry) == 0) return(NULL) xr <- range(unlist(lapply(bdry, function(a) range(a$x)))) yr <- range(unlist(lapply(bdry, function(a) range(a$y)))) return(owin(xr, yr, unitname=unitname(w))) }, mask = { m <- w$m x <- raster.x(w) y <- raster.y(w) xr <- range(x[m]) + c(-1,1) * w$xstep/2 yr <- range(y[m]) + c(-1,1) * w$ystep/2 return(owin(xr, yr, unitname=unitname(w))) }, stop("unrecognised window type", w$type) ) } complement.owin <- function(w, frame=as.rectangle(w)) { wname <- short.deparse(substitute(w)) w <- as.owin(w) if(reframe <- !missing(frame)) { verifyclass(frame, "owin") w <- rebound.owin(w, frame) # if w was a rectangle, it's now polygonal } switch(w$type, mask = { w$m <- !(w$m) }, rectangle = { # return empty window return(emptywindow(w)) }, polygonal = { bdry <- w$bdry if(length(bdry) == 0) { # w is empty return(frame) } # bounding box, in anticlockwise order box <- list(x=w$xrange[c(1,2,2,1)], y=w$yrange[c(1,1,2,2)]) boxarea <- area.xypolygon(box) # first check whether one of the current boundary polygons # is the bounding box itself (with + sign) if(reframe) is.box <- rep.int(FALSE, length(bdry)) 
else { nvert <- unlist(lapply(bdry, function(a) { length(a$x) })) area <- unlist(lapply(bdry, area.xypolygon)) boxarea.mineps <- boxarea * (0.99999) is.box <- (nvert == 4 & area >= boxarea.mineps) if(sum(is.box) > 1) stop("Internal error: multiple copies of bounding box") if(all(is.box)) { return(emptywindow(box)) } } # if box is present (with + sign), remove it if(any(is.box)) bdry <- bdry[!is.box] # now reverse the direction of each polygon bdry <- lapply(bdry, reverse.xypolygon, adjust=TRUE) # if box was absent, add it if(!any(is.box)) bdry <- c(bdry, list(box)) # sic # put back into w w$bdry <- bdry }, stop("unrecognised window type", w$type) ) return(w) } #----------------------------------------------------------- inside.owin <- function(x, y, w) { # test whether (x,y) is inside window w # x, y may be vectors if(missing(y) && all(c("x", "y") %in% names(x))) return(inside.owin(x$x, x$y, w)) w <- as.owin(w) if(length(x)==0) return(logical(0)) # test whether inside bounding rectangle xr <- w$xrange yr <- w$yrange eps <- sqrt(.Machine$double.eps) frameok <- (x >= xr[1] - eps) & (x <= xr[2] + eps) & (y >= yr[1] - eps) & (y <= yr[2] + eps) if(all(!frameok)) # all points OUTSIDE window - no further work needed return(frameok) ok <- frameok switch(w$type, rectangle = { return(ok) }, polygonal = { xy <- list(x=x,y=y) bdry <- w$bdry total <- numeric(length(x)) on.bdry <- rep.int(FALSE, length(x)) for(i in seq_along(bdry)) { score <- inside.xypolygon(xy, bdry[[i]], test01=FALSE) total <- total + score on.bdry <- on.bdry | attr(score, "on.boundary") } # any points identified as belonging to the boundary get score 1 total[on.bdry] <- 1 # check for sanity now.. 
uhoh <- (total * (1-total) != 0) if(any(uhoh)) { nuh <- sum(uhoh) warning(paste("point-in-polygon test had difficulty with", nuh, ngettext(nuh, "point", "points"), "(total score not 0 or 1)"), call.=FALSE) total[uhoh] <- 0 } return(ok & (total != 0)) }, mask = { # consider only those points which are inside the frame xf <- x[frameok] yf <- y[frameok] # map locations to raster (row,col) coordinates loc <- nearest.raster.point(xf,yf,w) # look up mask values okf <- (w$m)[cbind(loc$row, loc$col)] # insert into 'ok' vector ok[frameok] <- okf return(ok) }, stop("unrecognised window type", w$type) ) } #------------------------------------------------------------------------- print.owin <- function(x, ...) { verifyclass(x, "owin") unitinfo <- summary(unitname(x)) cat("window: ") switch(x$type, rectangle={ cat("rectangle = ") }, polygonal={ cat("polygonal boundary\n") if(length(x$bdry) == 0) cat("window is empty\n") cat("enclosing rectangle: ") }, mask={ cat("binary image mask\n") di <- x$dim cat(paste(di[1], "x", di[2], "pixel array (ny, nx)\n")) cat("enclosing rectangle: ") } ) cat(paste(prange(zapsmall(x$xrange)), "x", prange(zapsmall(x$yrange)), unitinfo$plural, unitinfo$explain, "\n")) } summary.owin <- function(object, ...) 
{ verifyclass(object, "owin") result <- list(xrange=object$xrange, yrange=object$yrange, type=object$type, area=area.owin(object), units=unitname(object)) switch(object$type, rectangle={ }, polygonal={ poly <- object$bdry result$npoly <- npoly <- length(poly) if(npoly == 0) { result$areas <- result$nvertices <- numeric(0) } else if(npoly == 1) { result$areas <- area.xypolygon(poly[[1]]) result$nvertices <- length(poly[[1]]$x) } else { result$areas <- unlist(lapply(poly, area.xypolygon)) result$nvertices <- unlist(lapply(poly, function(a) {length(a$x)})) } result$nhole <- sum(result$areas < 0) }, mask={ result$npixels <- object$dim result$xstep <- object$xstep result$ystep <- object$ystep } ) class(result) <- "summary.owin" result } print.summary.owin <- function(x, ...) { verifyclass(x, "summary.owin") unitinfo <- summary(x$units) pluralunits <- unitinfo$plural singularunits <- unitinfo$singular cat("Window: ") switch(x$type, rectangle={ cat("rectangle = ") }, polygonal={ cat("polygonal boundary\n") if(x$npoly == 0) { cat("window is empty\n") } else if(x$npoly == 1) { cat(paste("single connected closed polygon with", x$nvertices, "vertices\n")) } else { cat(paste(x$npoly, "separate polygons (")) if(x$nhole == 0) cat("no holes)\n") else if(x$nhole == 1) cat("1 hole)\n") else cat(paste(x$nhole, "holes)\n")) if(x$npoly > 0) print(data.frame(vertices=x$nvertices, area=signif(x$areas, 6), relative.area=signif(x$areas/x$area,3), row.names=paste("polygon", 1:(x$npoly), ifelse(x$areas < 0, "(hole)", "") ))) } cat("enclosing rectangle: ") }, mask={ cat("binary image mask\n") di <- x$npixels cat(paste(di[1], "x", di[2], "pixel array (ny, nx)\n")) cat(paste("pixel size:", signif(x$xstep,3), "by", signif(x$ystep,3), pluralunits, "\n")) cat("enclosing rectangle: ") } ) cat(paste(prange(zapsmall(x$xrange)), "x", prange(zapsmall(x$yrange)), pluralunits, "\n")) Area <- signif(x$area, 6) cat(paste("Window area = ", Area, "square", if(Area == 1) singularunits else pluralunits, 
"\n")) if(!is.null(ledge <- unitinfo$legend)) cat(paste(ledge, "\n")) return(invisible(x)) } discretise <- function(X,eps=NULL,dimyx=NULL,xy=NULL) { verifyclass(X,"ppp") W <- X$window ok <- inside.owin(X$x,X$y,W) if(!all(ok)) stop("There are points of X outside the window of X") all.null <- is.null(eps) & is.null(dimyx) & is.null(xy) if(W$type=="mask" & all.null) return(X) WM <- as.mask(W,eps=eps,dimyx=dimyx,xy=xy) nok <- !inside.owin(X$x,X$y,WM) if(any(nok)) { ifix <- nearest.raster.point(X$x[nok],X$y[nok], WM) ifix <- cbind(ifix$row,ifix$col) WM$m[ifix] <- TRUE } X$window <- WM X } spatstat/R/classes.R0000755000176000001440000000232312237642727014145 0ustar ripleyusers# # # classes.S # # $Revision: 1.7 $ $Date: 2006/10/09 03:38:14 $ # # Generic utilities for classes # # #-------------------------------------------------------------------------- verifyclass <- function(X, C, N=deparse(substitute(X)), fatal=TRUE) { if(!inherits(X, C)) { if(fatal) { gripe <- paste("argument", sQuote(N), "is not of class", sQuote(C)) stop(gripe) } else return(FALSE) } return(TRUE) } #-------------------------------------------------------------------------- checkfields <- function(X, L) { # X is a list, L is a vector of strings # Checks for presence of field named L[i] for all i return(all(!is.na(match(L,names(X))))) } getfields <- function(X, L, fatal=TRUE) { # X is a list, L is a vector of strings # Extracts all fields with names L[i] from list X # Checks for presence of all desired fields # Returns the sublist of X with fields named L[i] absent <- is.na(match(L, names(X))) if(any(absent)) { gripe <- paste("Needed the following components:", paste(L, collapse=", "), "\nThese ones were missing: ", paste(L[absent], collapse=", ")) if(fatal) stop(gripe) else warning(gripe) } return(X[L[!absent]]) } spatstat/R/affine.R0000755000176000001440000002350712237642727013747 0ustar ripleyusers# # affine.R # # $Revision: 1.43 $ $Date: 2013/02/25 07:49:05 $ # affinexy <- function(X, 
mat=diag(c(1,1)), vec=c(0,0), invert=FALSE) { if(length(X$x) == 0 && length(X$y) == 0) return(list(x=numeric(0),y=numeric(0))) if(invert) { mat <- invmat <- solve(mat) vec <- - as.numeric(invmat %*% vec) } # Y = M X + V ans <- mat %*% rbind(X$x, X$y) + matrix(vec, nrow=2, ncol=length(X$x)) return(list(x = ans[1,], y = ans[2,])) } affinexypolygon <- function(p, mat=diag(c(1,1)), vec=c(0,0), detmat=det(mat)) { # transform (x,y) p[c("x","y")] <- affinexy(p, mat=mat, vec=vec) # transform area if(!is.null(p$area)) p$area <- p$area * detmat # if map has negative sign, cyclic order was reversed; correct it if(detmat < 0) p <- reverse.xypolygon(p, adjust=TRUE) return(p) } "affine" <- function(X, ...) { UseMethod("affine") } "affine.owin" <- function(X, mat=diag(c(1,1)), vec=c(0,0), ..., rescue=TRUE) { verifyclass(X, "owin") if(!is.vector(vec) || length(vec) != 2) stop(paste(sQuote("vec"), "should be a vector of length 2")) if(!is.matrix(mat) || any(dim(mat) != c(2,2))) stop(paste(sQuote("mat"), "should be a 2 x 2 matrix")) # Inspect the determinant detmat <- det(mat) if(abs(detmat) < .Machine$double.eps) stop("Matrix of linear transformation is singular") # diagonalmatrix <- all(mat == diag(diag(mat))) scaletransform <- diagonalmatrix && (length(unique(diag(mat))) == 1) newunits <- if(scaletransform) unitname(X) else as.units(NULL) # switch(X$type, rectangle={ if(diagonalmatrix) { # result is a rectangle Y <- owin(range(mat[1,1] * X$xrange + vec[1]), range(mat[2,2] * X$yrange + vec[2])) unitname(Y) <- newunits return(Y) } else { # convert rectangle to polygon P <- as.polygonal(X) # call polygonal case return(affine.owin(P, mat, vec, rescue=rescue)) } }, polygonal={ # Transform the polygonal boundaries bdry <- lapply(X$bdry, affinexypolygon, mat=mat, vec=vec, detmat=detmat) # Compile result W <- owin(poly=bdry, check=FALSE, unitname=newunits) # Result might be a rectangle: if so, convert to rectangle type if(rescue) W <- rescue.rectangle(W) return(W) }, mask={ # binary mask 
newframe <- bounding.box.xy(affinexy(corners(X), mat, vec)) W <- if(length(list(...)) > 0) as.mask(newframe, ...) else as.mask(newframe, eps=with(X, min(xstep, ystep))) pixelxy <- raster.xy(W) xybefore <- affinexy(pixelxy, mat, vec, invert=TRUE) W$m[] <- with(xybefore, inside.owin(x, y, X)) W <- intersect.owin(W, bounding.box(W)) if(rescue) W <- rescue.rectangle(W) return(W) }, stop("Unrecognised window type") ) } "affine.ppp" <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "ppp") r <- affinexy(X, mat, vec) w <- affine.owin(X$window, mat, vec, ...) return(ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE)) } "affine.im" <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "im") if(!is.vector(vec) || length(vec) != 2) stop(paste(sQuote("vec"), "should be a vector of length 2")) if(!is.matrix(mat) || any(dim(mat) != c(2,2))) stop(paste(sQuote("mat"), "should be a 2 x 2 matrix")) # Inspect the determinant detmat <- det(mat) if(abs(detmat) < .Machine$double.eps) stop("Matrix of linear transformation is singular") # diagonalmatrix <- all(mat == diag(diag(mat))) scaletransform <- diagonalmatrix && (length(unique(diag(mat))) == 1) newunits <- if(scaletransform) unitname(X) else as.units(NULL) newpixels <- (length(list(...)) > 0) # if(diagonalmatrix && !newpixels) { # diagonal matrix: apply map to row and column locations v <- X$v d <- X$dim newbox <- affine(as.rectangle(X), mat=mat, vec=vec) xscale <- diag(mat)[1] yscale <- diag(mat)[2] xcol <- xscale * X$xcol + vec[1] yrow <- yscale * X$yrow + vec[2] if(xscale < 0) { # x scale is negative xcol <- rev(xcol) v <- v[, (d[2]:1)] } if(yscale < 0) { # y scale is negative yrow <- rev(yrow) v <- v[(d[1]:1), ] } Y <- im(v, xcol=xcol, yrow=yrow, xrange=newbox$xrange, yrange=newbox$yrange, unitname=newunits) } else { # general case # create box containing transformed image newframe <- bounding.box.xy(affinexy(corners(X), mat, vec)) W <- if(length(list(...)) > 0) as.mask(newframe, ...) 
else as.mask(newframe, eps=with(X, min(xstep, ystep))) unitname(W) <- newunits # raster for transformed image naval <- switch(X$type, factor={ factor(NA, levels=levels(X)) }, integer = NA_integer_, logical = as.logical(NA_integer_), real = NA_real_, complex = NA_complex_, character = NA_character_, NA) Y <- as.im(W, value=naval) # preimages of pixels of transformed image xx <- as.vector(rasterx.im(Y)) yy <- as.vector(rastery.im(Y)) pre <- affinexy(list(x=xx, y=yy), mat, vec, invert=TRUE) # sample original image Y$v[] <- lookup.im(X, pre$x, pre$y, naok=TRUE) } return(Y) } ### ---------------------- reflect ---------------------------------- reflect <- function(X) { UseMethod("reflect") } reflect.default <- function(X) { affine(X, mat=diag(c(-1,-1))) } reflect.im <- function(X) { stopifnot(is.im(X)) out <- with(X, list(v = v[dim[1]:1, dim[2]:1], dim = dim, xrange = rev(-xrange), yrange = rev(-yrange), xstep = xstep, ystep = ystep, xcol = rev(-xcol), yrow = rev(-yrow), type = type, units = units)) class(out) <- "im" return(out) } ### ---------------------- shift ---------------------------------- "shift" <- function(X, ...) 
{ UseMethod("shift") } shiftxy <- function(X, vec=c(0,0)) { list(x = X$x + vec[1], y = X$y + vec[2]) } shiftxypolygon <- function(p, vec=c(0,0)) { # transform (x,y), retaining other data p[c("x","y")] <- shiftxy(p, vec=vec) return(p) } "shift.owin" <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "owin") if(!is.null(origin)) { if(!missing(vec)) warning("argument vec ignored; overruled by argument origin") if(is.numeric(origin)) { locn <- origin } else if(is.character(origin)) { origin <- pickoption("origin", origin, c(centroid="centroid", midpoint="midpoint", bottomleft="bottomleft")) locn <- switch(origin, centroid={ unlist(centroid.owin(X)) }, midpoint={ c(mean(X$xrange), mean(X$yrange)) }, bottomleft={ c(X$xrange[1], X$yrange[1]) }) } else stop("origin must be a character string or a numeric vector") return(shift(X, -locn)) } # Shift the bounding box X$xrange <- X$xrange + vec[1] X$yrange <- X$yrange + vec[2] switch(X$type, rectangle={ }, polygonal={ # Shift the polygonal boundaries X$bdry <- lapply(X$bdry, shiftxypolygon, vec=vec) }, mask={ # Shift the pixel coordinates X$xcol <- X$xcol + vec[1] X$yrow <- X$yrow + vec[2] # That's all --- the mask entries are unchanged }, stop("Unrecognised window type") ) # tack on shift vector attr(X, "lastshift") <- vec # units are unchanged return(X) } "shift.ppp" <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "ppp") if(!is.null(origin)) { if(!missing(vec)) warning("argument vec ignored; overruled by argument origin") if(is.numeric(origin)) { locn <- origin } else if(is.character(origin)) { origin <- pickoption("origin", origin, c(centroid="centroid", midpoint="midpoint", bottomleft="bottomleft")) W <- X$window locn <- switch(origin, centroid={ unlist(centroid.owin(W)) }, midpoint={ c(mean(W$xrange), mean(W$yrange)) }, bottomleft={ c(W$xrange[1], W$yrange[1]) }) } else stop("origin must be a character string or a numeric vector") vec <- -locn } # perform shift r <- shiftxy(X, vec) w <- 
shift.owin(X$window, vec) Y <- ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE) # tack on shift vector attr(Y, "lastshift") <- vec return(Y) } ### ---------------------- scalar dilation --------------------------------- scalardilate <- function(X, f, ...) { UseMethod("scalardilate") } scalardilate.default <- function(X, f, ...) { trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) Y <- affine(X, mat=diag(c(f,f))) return(Y) } scalardilate.im <- scalardilate.owin <- scalardilate.psp <- scalardilate.ppp <- function(X, f, ..., origin=NULL) { trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) if(!is.null(origin)) { X <- shift(X, origin=origin) negorig <- attr(X, "lastshift") } else negorig <- c(0,0) Y <- affine(X, mat=diag(c(f, f)), vec = -negorig) return(Y) } spatstat/R/nndistlpp.R0000755000176000001440000001676312237642727014540 0ustar ripleyusers# # nndistlpp.R # # $Revision: 1.3 $ $Date: 2013/10/21 02:35:05 $ # # Methods for nndist, nnwhich, nncross for linear networks # # nndist.lpp # Calculates the nearest neighbour distances in the shortest-path metric # for a point pattern on a linear network. 
nndist.lpp <- function(X, ..., method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) # L <- X$domain Y <- as.ppp(X) n <- npoints(Y) # Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath if(n == 0) return(numeric(0)) if(n == 1) return(NA) # find nearest segment for each point # This is given by local coordinates, if available (spatstat >= 1.28-0) loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE) pro <- if(!is.null(seg <- loco$seg)) seg else nearestsegment(X, Lseg) if(method == "interpreted") { D <- pairdist(X, method="interpreted") diag(D) <- Inf return(apply(D, 1, min)) } else { # C code # convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L segmap <- pro - 1L # upper bound on interpoint distance huge <- max(dpath) + 2 * max(lengths.psp(Lseg)) # space for result ans <- double(n) # call C zz <- .C("linnndist", np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.double(L$n), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), huge = as.double(huge), answer = as.double(ans)) ans <- zz$answer } return(ans) } # nnwhich.lpp # Identifies the nearest neighbours in the shortest-path metric # for a point pattern on a linear network. 
# nnwhich.lpp <- function(X, ..., method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) # L <- X$domain Y <- as.ppp(X) n <- npoints(Y) # Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath if(n == 0) return(integer(0)) if(n == 1) return(as.integer(NA)) # find nearest segment for each point # This is given by local coordinates, if available (spatstat >= 1.28-0) loco <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE) pro <- if(!is.null(seg <- loco$seg)) seg else nearestsegment(X, Lseg) if(method == "interpreted") { D <- pairdist(X, method="interpreted") diag(D) <- Inf return(apply(D, 1, which.min)) } else { # C code # convert indices to start at 0 from0 <- from - 1 to0 <- to - 1 segmap <- pro - 1 # upper bound on interpoint distance huge <- max(dpath) + 2 * max(lengths.psp(Lseg)) # space for result nnd <- double(n) nnw <- integer(n) # call C zz <- .C("linnnwhich", np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.double(L$n), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw)) # convert C indexing to R indexing nnw <- zz$nnwhich + 1L # any zeroes occur if points have no neighbours. nnw[nnw == 0] <- NA } return(nnw) } # nncross.lpp # Identifies the nearest neighbours in the shortest-path metric # from one point pattern on a linear network to ANOTHER pattern # on the SAME network. 
# nncross.lpp <- function(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), ..., method="C") { stopifnot(inherits(X, "lpp")) stopifnot(inherits(Y, "lpp")) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) stopifnot(method %in% c("C", "interpreted")) check <- resolve.defaults(list(...), list(check=TRUE))$check # L <- as.linnet(X) if(check && !identical(L, as.linnet(Y))) stop("X and Y are on different linear networks") # nX <- npoints(X) nY <- npoints(Y) P <- as.ppp(X) Q <- as.ppp(Y) # Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath # deal with null cases if(nX == 0) return(data.frame(dist=numeric(0), which=integer(0))[, what]) if(nY == 0) return(data.frame(dist=rep(Inf, nX), which=rep(NA_integer_, nX))[, what]) # find nearest segment for each point Xpro <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)$seg Ypro <- coords(Y, local=TRUE, spatial=FALSE, temporal=FALSE)$seg # handle serial numbers if(is.null(iX) != is.null(iY)) stop("If one of iX, iY is given, then both must be given") exclude <- (!is.null(iX) || !is.null(iY)) if(exclude) { stopifnot(is.integer(iX) && is.integer(iY)) if(length(iX) != nX) stop("length of iX does not match the number of points in X") if(length(iY) != nY) stop("length of iY does not match the number of points in Y") } if(method == "interpreted") { D <- crossdist(X, method="interpreted") if(exclude) D[outer(iX, iY, "==")] <- Inf nnd <- if("dist" %in% what) apply(D, 1, min) else NA nnw <- if("which" %in% what) apply(D, 1, which.min) else NA } else { # C code # convert indices to start at 0 from0 <- from - 1 to0 <- to - 1 Xsegmap <- Xpro - 1 Ysegmap <- Ypro - 1 # upper bound on interpoint distance huge <- max(dpath) + 2 * diameter(as.rectangle(as.owin(L))) # space for result nnd <- double(nX) nnw <- integer(nX) # call C if(!exclude) { zz <- .C("linndcross", np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = 
as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.double(L$n), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw)) } else { zz <- .C("linndxcross", np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.double(L$n), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), idP = as.integer(iX), idQ = as.integer(iY), huge = as.double(huge), nndist = as.double(nnd), nnwhich = as.integer(nnw)) } nnd <- zz$nndist # convert C indexing to R indexing nnw <- zz$nnwhich + 1L # any zeroes occur if points have no neighbours. nnw[nnw == 0] <- NA } result <- data.frame(dist=nnd, which=nnw)[, what] return(result) } spatstat/R/vcov.mppm.R0000644000176000001440000000310112237642730014417 0ustar ripleyusers# Variance-covariance matrix for mppm objects # # $Revision: 1.9 $ $Date: 2013/11/09 16:55:46 $ # # vcov.mppm <- function(object, ..., what="vcov", err="fatal") { errhandler <- function(whinge, err) { switch(err, fatal=stop(whinge), warn={ warning(whinge) return(NA) }, null= return(NULL), stop(paste("Unrecognised option: err=", dQuote(err)))) } whinge <- NULL if(object$Fit$fitter != "glm") whinge <- "vcov.mppm only implemented for glm fits" else if(!is.poisson.mppm(object)) whinge <- "vcov.mppm only implemented for Poisson processes" if(!is.null(whinge)) return(errhandler(whinge, err)) gf <- object$Fit$FIT gd <- object$Fit$moadf wt <- gd$.mpl.W fi <- fitted(gf) fo <- object$trend if(is.null(fo)) fo <- (~1) mof <- model.frame(fo, gd) mom <- model.matrix(fo, mof) momnames <- dimnames(mom)[[2]] fisher <- sumouter(mom, fi * wt) dimnames(fisher) <- list(momnames, 
momnames) switch(what, fisher = { return(fisher) }, vcov = { vc <- try(solve(fisher), silent=(err == "null")) if(inherits(vc, "try-error")) return(errhandler("Fisher information is singular", err)) else return(vc) }, corr={ co <- try(solve(fisher), silent=(err == "null")) if(inherits(co, "try-error")) return(errhandler("Fisher information is singular", err)) sd <- sqrt(diag(co)) return(co / outer(sd, sd, "*")) }) } spatstat/R/ord.family.R0000755000176000001440000001020512237642727014552 0ustar ripleyusers# # # ord.family.S # # $Revision: 1.16 $ $Date: 2013/04/25 06:37:43 $ # # The Ord model (family of point process models) # # ord.family: object of class 'isf' defining Ord model structure # # # ------------------------------------------------------------------- # ord.family <- list( name = "ord", print = function(self) { cat("Ord model family\n") }, eval = function(X, U, EqualPairs, pot, pars, ...) { # # This auxiliary function is not meant to be called by the user. # It computes the distances between points, # evaluates the pair potential and applies edge corrections. # # Arguments: # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function function(d, p) # pars auxiliary parameters for pot list(......) # ... IGNORED # # Value: # matrix of values of the potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # # Note: # The potential function 'pot' will be called as # pot(M, pars) where M is a vector of tile areas. 
# It must return a vector of the same length as M # or a matrix with number of rows equal to the length of M ########################################################################## nX <- npoints(X) nU <- length(U$x) # number of data + dummy points seqX <- seq_len(nX) seqU <- seq_len(nU) # determine which points in the combined list are data points if(length(EqualPairs) > 0) is.data <- seqU %in% EqualPairs[,2] else is.data <- rep.int(FALSE, nU) ############################################################################# # First compute Dirichlet tessellation of data # and its total potential (which could be vector-valued) ############################################################################# marks(X) <- NULL Wdata <- dirichlet.weights(X) # sic - these are the tile areas. Pdata <- pot(Wdata, pars) summa <- function(P) { if(is.matrix(P)) matrowsum(P) else if(is.vector(P) || length(dim(P))==1 ) sum(P) else stop("Don't know how to take row sums of this object") } total.data.potential <- summa(Pdata) # Initialise V dimpot <- dim(Pdata)[-1] # dimension of each value of the potential function # (= numeric(0) if potential is a scalar) dimV <- c(nU, dimpot) if(length(dimV) == 1) dimV <- c(dimV, 1) V <- array(0, dim=dimV) rowV <- array(seqU, dim=dimV) #################### Next, evaluate V for the data points. ############### # For each data point, compute Dirichlet tessellation # of the data with this point removed. # Compute difference of total potential. ############################################################################# for(j in seq_len(nX)) { # Dirichlet tessellation of data without point j Wminus <- dirichlet.weights(X[-j]) # regressor is the difference in total potential V[rowV == j] <- total.data.potential - summa(pot(Wminus, pars)) } #################### Next, evaluate V for the dummy points ################ # For each dummy point, compute Dirichlet tessellation # of (data points together with this dummy point) only. 
# Take difference of total potential. ############################################################################# for(j in seqU[!is.data]) { Xplus <- superimpose(X, list(x=U$x[j], y=U$y[j]), W=X$window) # compute Dirichlet tessellation (of these points only!) Wplus <- dirichlet.weights(Xplus) # regressor is difference in total potential V[rowV == j] <- summa(pot(Wplus, pars)) - total.data.potential } cat("dim(V) = \n") print(dim(V)) return(V) } ######### end of function $eval ) ######### end of list class(ord.family) <- "isf" spatstat/R/exactPdt.R0000755000176000001440000000352412237642727014270 0ustar ripleyusers# # exactPdt.R # R function exactPdt() for exact distance transform of pixel image # # $Revision: 4.13 $ $Date: 2012/04/06 09:49:56 $ # "exactPdt"<- function(w) { verifyclass(w, "owin") if(w$type != "mask") stop(paste("Input must be a window of type", sQuote("mask"))) # nr <- w$dim[1] nc <- w$dim[2] # input image will be padded out with a margin of width 2 on all sides mr <- mc <- 2 # full dimensions of padded image Nnr <- nr + 2 * mr Nnc <- nc + 2 * mc N <- Nnr * Nnc # output image (subset): rows & columns (R indexing) rmin <- mr + 1 rmax <- Nnr - mr cmin <- mc + 1 cmax <- Nnc - mc # do padding x <- matrix(FALSE, nrow=Nnr, ncol=Nnc) x[rmin:rmax, cmin:cmax] <- w$m # DUP <- spatstat.options("dupC") res <- .C("ps_exact_dt_R", as.double(w$xrange[1]), as.double(w$yrange[1]), as.double(w$xrange[2]), as.double(w$yrange[2]), nr = as.integer(nr), nc = as.integer(nc), mr = as.integer(mr), mc = as.integer(mc), inp = as.integer(t(x)), distances = as.double (double(N)), rows = as.integer(integer(N)), cols = as.integer(integer(N)), boundary = as.double (double(N)), DUP=DUP) # PACKAGE="spatstat") dist <- matrix(res$distances, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] rows <- matrix(res$rows, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] cols <- matrix(res$cols, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] bdist<- matrix(res$boundary, 
ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] # convert from C to R indexing rows <- rows + 1L cols <- cols + 1L return(list(d=dist,row=rows,col=cols,b=bdist, w=w)) } spatstat/R/colourtools.R0000755000176000001440000000214612237642727015077 0ustar ripleyusers# # colourtools.R # # $Revision: 1.3 $ $Date: 2011/10/13 10:40:48 $ # rgb2hex <- function(v) { stopifnot(is.numeric(v)) if(is.matrix(v)) { stopifnot(ncol(v) == 3) } else { if(length(v) != 3) stop("v should be a vector of length 3 or a matrix with 3 columns") v <- matrix(v, ncol=3) } out <- rgb(v[,1], v[,2], v[,3], maxColorValue=255) return(out) } col2hex <- function(x) { apply(col2rgb(x), 2, rgb2hex) } paletteindex <- function(x) { x <- col2hex(x) p <- col2hex(palette()) m <- match(x, p) return(m) } samecolour <- function(x, y) { col2hex(x) == col2hex(y) } # versions of rgb() and hsv() that work with NA values rgbNA <- function(red, green, blue, ...) { ok <- !(is.na(red) | is.na(green) | is.na(blue)) values <- rgb(red[ok], green[ok], blue[ok], ...) result <- character(length(red)) result[ok] <- values result[!ok] <- NA return(result) } hsvNA <- function(h, s, v, ...) { ok <- !(is.na(h) | is.na(s) | is.na(v)) values <- hsv(h[ok], s[ok], v[ok], ...) 
#
#	dummy.R  (grid helper functions)
#
#	corners()	   corners of window
#	gridcenters()	   points of a rectangular grid
#	stratrand()	   random points in each tile of a rectangular grid
#	tilecentroids()	   centroids of tiles of a rectangular grid
#

# Return the four corner points of the bounding frame of `window`
# as list(x, y), in the order (x1,y1), (x2,y1), (x1,y2), (x2,y2).
corners <- function(window) {
  window <- as.owin(window)
  x <- window$xrange[c(1,2,1,2)]
  y <- window$yrange[c(1,1,2,2)]
  return(list(x=x, y=y))
}

# Centres of an nx * ny rectangular grid of tiles covering the
# bounding frame of `window`. Points are returned row by row
# (x varies fastest), as list(x, y).
gridcenters <- gridcentres <- function(window, nx, ny) {
  window <- as.owin(window)
  xr <- window$xrange
  yr <- window$yrange
  # odd-indexed subdivision points are tile boundaries; even ones are centres
  x <- seq(from=xr[1], to=xr[2], length.out = 2 * nx + 1)[2 * (1:nx)]
  y <- seq(from=yr[1], to=yr[2], length.out = 2 * ny + 1)[2 * (1:ny)]
  x <- rep.int(x, ny)
  y <- rep.int(y, rep.int(nx, ny))
  return(list(x=x, y=y))
}

# Stratified random sample: divide the bounding frame of `window`
# into an nx * ny grid of tiles and place k points uniformly at
# random in each tile. Returns list(x, y) of length nx * ny * k.
stratrand <- function(window,nx,ny, k=1) {
  # divide window into an nx * ny grid of tiles
  # and place k points at random in each tile
  window <- as.owin(window)
  wide <- diff(window$xrange)/nx
  high <- diff(window$yrange)/ny
  cent <- gridcentres(window, nx, ny)
  cx <- rep.int(cent$x, k)
  cy <- rep.int(cent$y, k)
  n <- nx * ny * k
  x <- cx + runif(n, min = -wide/2, max = wide/2)
  y <- cy + runif(n, min = -high/2, max = high/2)
  return(list(x=x,y=y))
}

# Centroids of the intersection of each grid tile with the window W.
# For a rectangle this is just the grid of tile centres; otherwise the
# window is discretised and pixel centres are averaged within each tile.
tilecentroids <- function (W, nx, ny) {
  W <- as.owin(W)
  if(W$type == "rectangle")
    return(gridcentres(W, nx, ny))
  else {
    # approximate
    W <- as.mask(W)
    xx <- as.vector(raster.x(W)[W$m])
    yy <- as.vector(raster.y(W)[W$m])
    # BUG FIX: the last argument was `nx` (i.e. gridindex(..., nx, nx)),
    # giving wrong tile indices whenever nx != ny. The parallel call in
    # cellmiddles() passes (nx, ny); do the same here.
    pid <- gridindex(xx,yy,W$xrange,W$yrange,nx,ny)$index
    x <- tapply(xx,pid,mean)
    y <- tapply(yy,pid,mean)
    return(list(x=x,y=y))
  }
}
nx, ny, npix=NULL, gi=FALSE) { if(W$type == "rectangle") return(gridcentres(W, nx, ny)) # pixel approximation to window # This matches the pixel approximation used to compute tile areas # and ensures that dummy points are generated only inside those tiles # that have nonzero digital area M <- as.mask(W, dimyx=rev(npix)) Mm <- M$m if(gi) { xx <- as.vector(raster.x(M)[Mm]) yy <- as.vector(raster.y(M)[Mm]) pid <- gridindex(xx,yy,W$xrange,W$yrange,nx,ny)$index } else { # identify all pixels that are inside the window # by their row and column index Mrow <- as.vector(row(Mm)[Mm]) Mcol <- as.vector(col(Mm)[Mm]) # this code matches 'gridindex' imap <- grid1index(M$yrow, M$yrange, ny) jmap <- grid1index(M$xcol, M$xrange, nx) # apply this mapping to all pixels inside the window ii <- imap[Mrow] jj <- jmap[Mcol] # construct a tile index pid <- (ii-1) * nx + jj } ## For each tile, find middle point in list of pixels in each tile # (always inside tile, by construction) midpix <- tapply(seq_along(pid), pid, middle) if(gi) { x <- xx[midpix] y <- yy[midpix] } else { midcol <- Mcol[midpix] midrow <- Mrow[midpix] x <- M$xcol[midcol] y <- M$yrow[midrow] } return(list(x=x,y=y)) } cellmiddles }) spokes <- function(x, y, nrad = 3, nper = 3, fctr = 1.5, Mdefault=1) { # # Rolf Turner's "spokes" arrangement # # Places dummy points on radii of circles # emanating from each data point x[i], y[i] # # nrad: number of radii from each data point # nper: number of dummy points per radius # fctr: length of largest radius = fctr * M # where M is mean nearest neighbour distance in data # pat <- inherits(x,"ppp") if(pat) w <- x$w if(checkfields(x,c("x","y"))) { y <- x$y x <- x$x } M <- if(length(x) > 1) mean(nndist(x,y)) else Mdefault lrad <- fctr * M / nper theta <- 2 * pi * (1:nrad)/nrad cs <- cos(theta) sn <- sin(theta) xt <- lrad * as.vector((1:nper) %o% cs) yt <- lrad * as.vector((1:nper) %o% sn) xd <- as.vector(outer(x, xt, "+")) yd <- as.vector(outer(y, yt, "+")) tmp <- list(x = xd, y = yd) 
# concatenate any number of list(x,y) into a list(x,y)
# Each argument is expected to have components $x and $y of equal length;
# the x's and y's are concatenated in argument order.
concatxy <- function(...) {
  x <- unlist(lapply(list(...), function(w) {w$x}))
  y <- unlist(lapply(list(...), function(w) {w$y}))
  if(length(x) != length(y))
    stop("Internal error: lengths of x and y unequal")
  return(list(x=x,y=y))
}

#------------------------------------------------------------

# Construct the default pattern of dummy points for the quadrature
# scheme of a data point pattern X: a regular (or stratified random)
# grid of nd[1] * nd[2] points, plus the corner points of the window
# frame, all clipped to the window of X. Grid dimensions are resolved
# by default.n.tiling(). The returned ppp object carries the attributes
# "dummy.parameters" and "weight.parameters" used later when computing
# quadrature weights.
default.dummy <- function(X, nd=NULL, random=FALSE, ntile=NULL, npix = NULL,
                          ..., eps=NULL, verbose=FALSE) {
  # default action to create dummy points.
  # regular grid of nd[1] * nd[2] points
  # plus corner points of window frame,
  # all clipped to window.
  #
  X <- as.ppp(X)
  win <- X$window
  #
  # default dimensions
  a <- default.n.tiling(X, nd=nd, ntile=ntile, npix=npix,
                        eps=eps, verbose=verbose)
  nd <- a$nd
  ntile <- a$ntile
  npix <- a$npix
  # TRUE when the regular grid is commensurate with the mask grid,
  # so clipping to the window is unnecessary (periodic sampling).
  # NOTE(review): nd is (nx, ny) while win$dim is (rows, cols) --
  # presumably commensurability holds either way; confirm ordering.
  periodsample <- !random && (win$type == "mask") && all(nd %% win$dim == 0)
  # make dummy points
  dummy <- if(random) stratrand(win, nd[1], nd[2], 1) else
                      cellmiddles(win, nd[1], nd[2], npix)
  dummy <- as.ppp(dummy, win, check=FALSE)
  # restrict to window
  if(!(win$type == "rectangle" || periodsample))
    dummy <- dummy[win]
  # corner points
  corn <- as.ppp(corners(win), win, check=FALSE)
  corn <- corn[win]
  dummy <- superimpose(dummy, corn, W=win)
  if(dummy$n == 0)
    stop("None of the dummy points lies inside the window")
  # pass parameters for computing weights
  attr(dummy, "dummy.parameters") <- list(nd=nd, random=random,
                                          verbose=verbose)
  attr(dummy, "weight.parameters") <-
    append(list(...), list(ntile=ntile, verbose=verbose, npix=npix))
  return(dummy)
}
if the window is a mask, npix should ideally be # a multiple of the mask array dimensions, for speed. # default.n.tiling <- local({ # auxiliary ensure2print <- function(x, verbose=TRUE, blah="user specified") { xname <- short.deparse(substitute(x)) x <- ensure2vector(x) if(verbose) cat(paste(blah, xname, "=", x[1], "*", x[2], "\n")) x } minmultiple <- function(n, lo, hi) { if(lo > hi) { temp <- hi hi <- lo lo <- temp } if(n > hi) return(hi) m <- n * (floor(lo/n):ceiling(hi/n)) m <- m[m >= n & m >= lo & m <= hi] if(length(m) > 0) min(m) else hi } mindivisor <- function(N, lo, Nbig) { d <- divisors(N) ok <- (d >= lo) if(any(ok)) return(min(d[ok])) m <- floor(Nbig/N) d <- unlist(lapply(as.list(seq_len(m) * N), divisors)) d <- sort(unique(d)) ok <- (d >= lo) if(any(ok)) return(min(d[ok])) return(Nbig) } min2mul <- function(n, lo, hi) c(minmultiple(n[1], lo[1], hi[1]), minmultiple(n[2], lo[2], hi[2])) min2div <- function(N, lo, Nbig) c(mindivisor(N[1], lo[1], Nbig[1]), mindivisor(N[2], lo[2], Nbig[2])) # main default.n.tiling <- function(X, nd=NULL, ntile=NULL, npix=NULL, eps=NULL, verbose=TRUE) { # computes dimensions of rectangular grids of # - dummy points (nd) (eps) # - tiles for grid weights (ntile) # - pixels for approximating area (npix) # for data pattern X. 
# verifyclass(X, "ppp") win <- X$window pixels <- (win$type != "rectangle") if(nd.given <- !is.null(nd)) nd <- ensure2print(nd, verbose) if(ntile.given <- !is.null(ntile)) ntile <- ensure2print(ntile, verbose) if(npix.given <- !is.null(npix)) npix <- ensure2print(npix, verbose) if(pixels) sonpixel <- rev(ensure2print(spatstat.options("npixel"), verbose, "")) ndummy.min <- ensure2print(spatstat.options("ndummy.min"), verbose, "") ndminX <- pmax(ndummy.min, 10 * ceiling(2 * sqrt(X$n)/10)) ndminX <- ensure2vector(ndminX) if(eps.given <- !is.null(eps)) { eps <- ensure2print(eps, verbose) Xbox <- as.rectangle(as.owin(X)) sides <- with(Xbox, c(diff(xrange), diff(yrange))) ndminX <- pmax(ndminX, ceiling(sides/eps)) } # range of acceptable values for npix if(npix.given) Nmin <- Nmax <- npix else switch(win$type, rectangle = { Nmin <- ensure2vector(X$n) Nmax <- Inf }, polygonal = { Nmin <- sonpixel Nmax <- 4 * sonpixel }, mask={ nmask <- rev(win$dim) Nmin <- nmask Nmax <- pmax(2 * nmask, 4 * sonpixel) }) # determine values of nd and ntile if(nd.given && !ntile.given) { # ntile must be a divisor of nd if(any(nd > Nmax)) warning("number of dummy points nd exceeds maximum pixel dimensions") ntile <- min2div(nd, ndminX, nd) } else if(!nd.given && ntile.given) { # nd must be a multiple of ntile nd <- min2mul(ntile, ndminX, Nmin) if(any(nd >= Nmin)) nd <- ntile } else if(!nd.given && !ntile.given) { if(!pixels) { nd <- ntile <- ensure2vector(ndminX) if(verbose) cat(paste("nd and ntile default to", nd[1], "*", nd[2], "\n")) } else { # find suitable divisors of the number of pixels nd <- ntile <- min2div(Nmin, ndminX, Nmax) if(any(nd >= Nmin)) { # none suitable if(verbose) cat("No suitable divisor of pixel dimensions\n") nd <- ntile <- ndminX } } } else { # both nd, ntile were given if(any(ntile > nd)) warning("the number of tiles (ntile) exceeds the number of dummy points (nd)") } if(!npix.given && pixels) npix <- min2mul(nd, Nmin, Nmax) if(verbose) { cat(paste("dummy point grid", 
# Establish names and rules for each type of simulation-window
# expansion used by rmh: "area" and "length" are multiplicative
# factors (>= 1), "distance" is an additive buffer (>= 0).
# RmhExpandRule(nama) returns the rule entry for the single name
# `nama` (defaulting to "area" when the name is empty), and raises
# an internal error for unknown or multiple names.
RmhExpandRule <- local({
  # build one table entry: description, minimum legal value,
  # and a predicate reporting whether the value actually expands
  entry <- function(descrip, minval, expands)
    list(descrip=descrip, minval=minval, expands=expands)
  .RmhExpandTable <-
    list(area     = entry("Area expansion factor",     1,
                          function(x) { unname(x) > 1 }),
         length   = entry("Length expansion factor",   1,
                          function(x) { unname(x) > 1 }),
         distance = entry("Expansion buffer distance", 0,
                          function(x) { unname(x) > 0 }))
  RmhExpandRule <- function(nama) {
    if(length(nama) == 0) nama <- "area"
    if(length(nama) > 1)
      stop("Internal error: too many names in RmhExpandRule", call.=FALSE)
    if(!(nama %in% names(.RmhExpandTable)))
      stop(paste("Internal error: unrecognised expansion type",
                 sQuote(nama)), call.=FALSE)
    return(.RmhExpandTable[[nama]])
  }
  RmhExpandRule
})
# Canonical "no expansion" rule: area factor 1, expansion forcibly off.
.no.expansion <- list(expand=c(area=1),
                      force.exp=FALSE,
                      force.noexp=TRUE)
class(.no.expansion) <- "rmhexpand"

# Print method for "rmhexpand" objects.
# Reports whether the simulation window will be expanded; when the rule
# is not yet decided, the spatstat default rule is printed recursively.
print.rmhexpand <- function(x, ..., prefix=TRUE) {
  if(prefix) cat("Expand the simulation window? ")
  if(x$force.noexp) {
    cat("No.\n")
  } else {
    if(x$force.exp)
      cat("Yes:\n")
    else
      cat("Not determined. Default is:\n")
    y <- x$expand
    if(is.null(y)) {
      # fall back on the session default expansion rule
      print(rmhexpand(spatstat.options("expand")), prefix=FALSE)
    } else if(is.numeric(y)) {
      descrip <- RmhExpandRule(names(y))$descrip
      cat(paste("\t", descrip, unname(y), "\n"))
    } else {
      # presumably a window object
      print(y)
    }
  }
  return(invisible(NULL))
}

# Summary method for "rmhexpand" objects.
# Resolves the default rule if necessary and returns a
# "summary.rmhexpand" list stating whether the rule/window is decided,
# a description of the rule, and whether expansion will actually occur.
summary.rmhexpand <- function(object, ...) {
  decided <- with(object, force.exp || force.noexp)
  ex <- object$expand
  if(is.null(ex))
    ex <- rmhexpand(spatstat.options("expand"))$expand
  if(is.owin(ex)) {
    willexpand <- TRUE
    descrip <- "Window"
  } else if(is.numeric(ex)) {
    rule <- RmhExpandRule(names(ex))
    descrip <- rule$descrip
    willexpand <- if(object$force.exp) TRUE else
                  if(object$force.noexp) FALSE else
                  (unname(ex) > rule$minval)
  } else
    stop("Internal error: unrecognised format in summary.rmhexpand",
         call.=FALSE)
  out <- list(rule.decided=decided,
              window.decided=decided && is.owin(ex),
              expand=ex,
              descrip=descrip,
              willexpand=willexpand)
  class(out) <- "summary.rmhexpand"
  return(out)
}

# Print method for the summary objects produced above.
print.summary.rmhexpand <- function(x, ...) {
  cat("Expansion rule\n")
  ex <- x$expand
  if(x$window.decided) {
    cat("Window is decided.\n")
    print(ex)
  } else {
    if(x$rule.decided) {
      cat("Rule is decided.\n")
    } else {
      cat("Rule is not decided.\nDefault is:\n")
    }
    if(!x$willexpand) {
      cat("No expansion\n")
    } else {
      if(is.numeric(ex))
        cat(paste(x$descrip, ex, "\n"))
      else
        print(ex)
    }
  }
  return(invisible(NULL))
}
# Will this expansion rule actually enlarge the window?
# Honour forced decisions first, otherwise defer to summary().
will.expand <- function(x) {
  stopifnot(inherits(x, "rmhexpand"))
  if(x$force.exp) return(TRUE)
  if(x$force.noexp) return(FALSE)
  return(summary(x)$willexpand)
}

# Generic: can this object's simulation be expanded?
is.expandable <- function(x) { UseMethod("is.expandable") }

# Replace the default expansion rule of x by `newdefault`,
# but only if no rule has been forcibly decided yet.
change.default.expand <- function(x, newdefault) {
  stopifnot(inherits(x, "rmhexpand"))
  decided <- with(x, force.exp || force.noexp)
  if(!decided)
    x$expand <- rmhexpand(newdefault)$expand
  return(x)
}

#
# resolve.defaults.R
#
# Resolve conflicts between several sets of defaults
# Usage:
#     resolve.defaults(list1, list2, list3, .......)
# where the earlier lists have priority
#
# Merge several argument lists, earlier lists taking priority:
# for duplicated argument names only the first occurrence is kept.
# .MatchNull=FALSE drops NULL entries before matching;
# .StripNull=TRUE drops NULL entries from the final result
# (so they behave like missing arguments).
resolve.defaults <- function(..., .MatchNull=TRUE, .StripNull=FALSE) {
  # Each argument is a list. Append them.
  argue <- c(...)
  # is NULL a possible value?
  if(!.MatchNull) {
    isnul <- unlist(lapply(argue, is.null))
    argue <- argue[!isnul]
  }
  if(!is.null(nam <- names(argue))) {
    named <- nzchar(nam)
    arg.unnamed <- argue[!named]
    arg.named <- argue[named]
    # keep only the first occurrence of each named argument
    if(any(discard <- duplicated(names(arg.named))))
      arg.named <- arg.named[!discard]
    argue <- append(arg.unnamed, arg.named)
  }
  # should NULL become a missing argument?
  if(.StripNull) {
    isnull <- sapply(argue, is.null)
    argue <- argue[!isnull]
  }
  return(argue)
}

# Call `fun` with only those entries of `arglist` whose names match
# the formal arguments of `fun` (optionally supplied as `funargs`,
# optionally extended by `extrargs`). With sieve=TRUE, also return
# the unmatched arguments as $otherargs.
do.call.matched <- function(fun, arglist, funargs, extrargs=NULL,
                            sieve=FALSE) {
  if(!is.function(fun) && !is.character(fun))
    stop("Internal error: wrong argument type in do.call.matched")
  if(is.character(fun)) {
    fname <- fun
    fun <- get(fname, mode="function")
    if(!is.function(fun))
      stop(paste("internal error: function", sQuote(fname),
                 "not found", sep=""))
  }
  if(missing(funargs))
    funargs <- names(formals(fun))
  funargs <- c(funargs, extrargs)
  givenargs <- names(arglist)
  matched <- givenargs %in% funargs
  # apply 'fun' to matched arguments
  out <- do.call(fun, arglist[matched])
  # retain un-matched arguments?
  if(sieve)
    out <- list(result=out, otherargs=arglist[!matched])
  return(out)
}

# Resolve a single named default `.A` from the given lists,
# returning its first (highest-priority) value, or NULL if absent.
resolve.1.default <- function(.A, ...) {
  res <- resolve.defaults(...)
  hit <- (names(res) == .A)
  if(!any(hit)) return(NULL)
  return(res[[min(which(hit))]])
}
# extract all the arguments that match '...' rather than a named argument
# Matches a hypothetical call to .Fun and returns, as a named list, only
# those supplied arguments that would be absorbed by '...'.
passthrough <- function(.Fun, ..., .Fname=NULL) {
  if(is.null(.Fname))
    .Fname <- deparse(substitute(.Fun))
  # make a fake call to the named function using the arguments provided
  cl <- eval(substitute(call(.Fname, ...)))
  # match the call to the function
  mc <- match.call(.Fun, cl)
  # extract the arguments
  mcargs <- as.list(mc)[-1]
  # figure out which ones are actually formal arguments of the function
  nam <- names(formals(.Fun))
  nam <- setdiff(nam, "...")
  known <- names(mcargs) %in% nam
  # return the *other* arguments
  return(mcargs[!known])
}

#
# round.R
#
# discretisation of coordinates
#

# Round the coordinates of a point pattern (ppp/pp3/ppx) to the given
# number of decimal places, returning the modified pattern.
round.ppp <- round.pp3 <- round.ppx <- function(x, digits=0) {
  coords(x) <- round(as.matrix(coords(x)), digits=digits)
  return(x)
}

# Generic: detect the number of decimal places to which the
# coordinates of x appear to have been rounded.
rounding <- function(x) {
  UseMethod("rounding")
}

rounding.ppp <- rounding.pp3 <- rounding.ppx <- function(x) {
  rounding(as.matrix(coords(x)))
}

# Detect rounding of numeric data: returns the smallest k such that
# round(x, k) reproduces x (negative k for multiples of powers of 10),
# or NULL when all values are zero (rounding level indeterminate).
rounding.default <- function(x) {
  # works for numeric, complex, matrix etc
  if(all(x == 0))
    return(NULL)
  if(identical(all.equal(x, round(x)), TRUE)) {
    # integers: go up
    k <- 0
    smallk <- -log10(.Machine$double.xmax)
    repeat {
      if(k < smallk || !identical(all.equal(x, round(x, k-1)), TRUE))
        return(k)
      k <- k-1
    }
  } else {
    # not integers: go down
    k <- 1
    bigk <- -log10(.Machine$double.eps)
    repeat {
      if(k > bigk || identical(all.equal(x, round(x, k)), TRUE))
        return(k)
      k <- k+1
    }
  }
}
# Chi-squared / Monte Carlo quadrat-count test of CSR for a point pattern.
# Divides the window into quadrats (nx * ny grid, explicit breaks, or an
# arbitrary tessellation `tess`) and delegates to quadrat.testEngine.
quadrat.test.ppp <- function(X, nx=5, ny=nx,
                             alternative = c("two.sided",
                                             "regular", "clustered"),
                             method = c("Chisq", "MonteCarlo"),
                             conditional=TRUE,
                             ...,
                             xbreaks=NULL, ybreaks=NULL, tess=NULL,
                             nsim=1999) {
  Xname <- short.deparse(substitute(X))
  method <- match.arg(method)
  alternative <- match.arg(alternative)
  do.call("quadrat.testEngine",
          resolve.defaults(list(X, nx=nx, ny=ny,
                                alternative=alternative,
                                method=method,
                                conditional=conditional,
                                xbreaks=xbreaks, ybreaks=ybreaks, tess=tess,
                                nsim=nsim),
                           list(...),
                           list(Xname=Xname, fitname="CSR")))
}

# Quadrat test for a split point pattern: test each component pattern
# separately, then pool the results with pool.quadrattest.
quadrat.test.splitppp <- function(X, ..., df=NULL, df.est=NULL, Xname=NULL) {
  if(is.null(Xname))
    Xname <- short.deparse(substitute(X))
  pool.quadrattest(lapply(X, quadrat.test.ppp, ...),
                   df=df, df.est=df.est, Xname=Xname)
}

# Quadrat-count goodness-of-fit test for a fitted Poisson point process
# model: expected counts come from the fitted intensity rather than CSR.
# Only unmarked Poisson models are supported.
quadrat.test.ppm <- function(X, nx=5, ny=nx,
                             alternative = c("two.sided",
                                             "regular", "clustered"),
                             method=c("Chisq", "MonteCarlo"),
                             conditional=TRUE,
                             ...,
                             xbreaks=NULL, ybreaks=NULL, tess=NULL,
                             nsim=1999) {
  fitname <- short.deparse(substitute(X))
  dataname <- paste("data from", fitname)
  method <- match.arg(method)
  alternative <- match.arg(alternative)
  if(!is.poisson.ppm(X))
    stop("Test is only defined for Poisson point process models")
  if(is.marked(X))
    stop("Sorry, not yet implemented for marked point process models")
  do.call("quadrat.testEngine",
          resolve.defaults(list(data.ppm(X), nx=nx, ny=ny,
                                alternative=alternative,
                                method=method,
                                conditional=conditional,
                                xbreaks=xbreaks, ybreaks=ybreaks, tess=tess,
                                nsim=nsim,
                                fit=X),
                           list(...),
                           list(Xname=dataname, fitname=fitname)))
}
# Workhorse for all quadrat.test methods.
# Computes quadrat counts (unless supplied via Xcount), the expected
# counts under either CSR or a fitted Poisson model `fit`, and hands
# observed/expected to X2testEngine. Returns an object of class
# c("quadrattest", "htest") carrying the quadratcount as an attribute.
quadrat.testEngine <- function(X, nx, ny,
                               alternative = c("two.sided",
                                               "regular", "clustered"),
                               method=c("Chisq", "MonteCarlo"),
                               conditional=TRUE, ...,
                               nsim=1999,
                               Xcount=NULL,
                               xbreaks=NULL, ybreaks=NULL, tess=NULL,
                               fit=NULL, Xname=NULL, fitname=NULL) {
  trap.extra.arguments(...)
  method <- match.arg(method)
  alternative <- match.arg(alternative)
  if(method == "MonteCarlo") {
    check.1.real(nsim)
    explain.ifnot(nsim > 0)
  }
  if(is.null(Xcount))
    Xcount <- quadratcount(X, nx=nx, ny=ny,
                           xbreaks=xbreaks, ybreaks=ybreaks, tess=tess)
  tess <- attr(Xcount, "tess")
  testname <- switch(method,
                     Chisq = "Chi-squared test",
                     MonteCarlo = paste(
                       if(conditional) "Conditional" else "Unconditional",
                       "Monte Carlo test")
                     )
  # determine expected values under model
  if(is.null(fit)) {
    # null model is CSR: expected counts proportional to tile areas
    nullname <- "CSR"
    if(tess$type == "rect")
      areas <- outer(diff(tess$xgrid), diff(tess$ygrid), "*")
    else
      areas <- unlist(lapply(tiles(tess), area.owin))
    fitmeans <- sum(Xcount) * areas/sum(areas)
    df <- switch(method,
                 Chisq = length(fitmeans) - 1,
                 MonteCarlo = NULL)
  } else {
    if(!is.ppm(fit))
      stop("fit should be a ppm object")
    if(!is.poisson.ppm(fit))
      stop("Quadrat test only supported for Poisson point process models")
    if(is.marked(fit))
      stop("Sorry, not yet implemented for marked point process models")
    nullname <- paste("fitted Poisson model", sQuote(fitname))
    # expected counts: integrate the fitted intensity over each tile,
    # approximated by summing quadrature masses lambda * weight
    Q <- quad.ppm(fit, drop=TRUE)
    ww <- w.quad(Q)
    lambda <- fitted(fit, drop=TRUE)
    masses <- lambda * ww
    # sum weights of quadrature points in each tile
    if(tess$type == "rect") {
      xx <- x.quad(Q)
      yy <- y.quad(Q)
      xbreaks <- tess$xgrid
      ybreaks <- tess$ygrid
      fitmeans <- rectquadrat.countEngine(xx, yy, xbreaks, ybreaks,
                                          weights=masses)
      fitmeans <- as.vector(t(fitmeans))
    } else {
      U <- as.ppp(Q)
      V <- marks(cut(U, tess), dfok=FALSE)
      fitmeans <- tapply(masses, list(tile=V), sum)
      fitmeans[is.na(fitmeans)] <- 0
    }
    switch(method,
           Chisq = {
             df <- length(fitmeans) - length(coef(fit))
             if(df < 1)
               stop(paste("Not enough quadrats: degrees of freedom df =",
                          df))
           },
           MonteCarlo = {
             df <- NA
           })
  }
  OBS <- as.vector(t(as.table(Xcount)))
  EXP <- as.vector(fitmeans)
  testname <- paste(testname, "of", nullname, "using quadrat counts")
  result <- X2testEngine(OBS, EXP,
                         method=method, df=df, nsim=nsim,
                         conditional=conditional, alternative=alternative,
                         testname=testname, dataname=Xname)
  class(result) <- c("quadrattest", class(result))
  attr(result, "quadratcount") <- Xcount
  return(result)
}

# Generic Pearson X^2 machinery shared by the quadrat tests.
# OBS, EXP: observed and expected counts per cell.
# method "Chisq" uses the chi-squared reference distribution with `df`
# degrees of freedom; "MonteCarlo" simulates `nsim` count vectors
# (multinomial if conditional on the total, independent Poisson otherwise).
# Returns an "htest" object.
X2testEngine <- function(OBS, EXP, ...,
                         method=c("Chisq", "MonteCarlo"),
                         df=NULL, nsim=NULL,
                         conditional, alternative, testname, dataname) {
  method <- match.arg(method)
  # scalar conditions: use `&&` (was the vectorised `&`)
  if(method == "Chisq" && any(EXP < 5))
    warning(paste("Some expected counts are small;",
                  "chi^2 approximation may be inaccurate"),
            call.=FALSE)
  X2 <- sum((OBS - EXP)^2/EXP)
  names(X2) <- "X-squared"
  # conduct test
  switch(method,
         Chisq = {
           if(!is.null(df))
             names(df) <- "df"
           pup <- pchisq(X2, df, lower.tail=FALSE)
           plo <- pchisq(X2, df, lower.tail=TRUE)
           PVAL <- switch(alternative,
                          regular = plo,
                          clustered = pup,
                          two.sided = 2 * min(pup, plo))
         },
         MonteCarlo = {
           nsim <- as.integer(nsim)
           if(conditional) {
             npts <- sum(OBS)
             p <- EXP/sum(EXP)
             SIM <- rmultinom(n=nsim,size=npts,prob=p)
           } else {
             ne <- length(EXP)
             SIM <- matrix(rpois(nsim*ne,EXP),nrow=ne)
           }
           simstats <- apply((SIM-EXP)^2/EXP,2,sum)
           # break ties so the rank-based p-value is well defined
           if(any(duplicated(simstats)))
             simstats <- jitter(simstats)
           phi <- (1 + sum(simstats >= X2))/(1+nsim)
           plo <- (1 + sum(simstats <= X2))/(1+nsim)
           PVAL <- switch(alternative,
                          clustered = phi,
                          regular = plo,
                          two.sided = 2 * min(phi,plo))
         })
  result <- structure(list(statistic = X2,
                           parameter = df,
                           p.value = PVAL,
                           method = testname,
                           data.name = dataname,
                           alternative = alternative,
                           observed = OBS,
                           expected = EXP,
                           residuals = (OBS - EXP)/sqrt(EXP),
                           method.key = method),
                      class = "htest")
  # explicit return (previously the function ended on the assignment,
  # returning the value invisibly)
  return(result)
}
# Plot method for "quadrattest" objects.
# For an atomic test: plot the tessellation and, in each tile, the
# observed count (upper left), expected count (upper right) and
# Pearson residual (bottom). For a pooled test: plot the component tests.
plot.quadrattest <- local({

  plot.quadrattest <- function(x, ...) {
    xname <- short.deparse(substitute(x))
    if(!is.atomicQtest(x)) {
      # pooled test - plot the original tests
      tests <- extractAtomicQtests(x)
      do.call("plot", resolve.defaults(list(x=tests),
                                       list(...),
                                       list(main=xname)))
      return(invisible(NULL))
    }
    Xcount <- attr(x, "quadratcount")
    # plot tessellation
    tess <- as.tess(Xcount)
    do.call("plot.tess", resolve.defaults(list(tess),
                                          list(...),
                                          list(main=xname)))
    # compute locations for text: the incircle of each nonempty tile
    til <- tiles(tess)
    ok <- unlist(lapply(til, function(x) {
      !is.null(x) && area.owin(x) > 0 }))
    incircles <- lapply(til[ok], incircle)
    x0 <- unlist(lapply(incircles, function(z) { z$x }))
    y0 <- unlist(lapply(incircles, function(z) { z$y }))
    ra <- unlist(lapply(incircles, function(z) { z$r }))
    # NOTE(review): the name says 30 degrees but sqrt(2)/2 is cos(45);
    # affects label placement only -- confirm intent before changing
    cos30 <- sqrt(2)/2
    sin30 <- 1/2
    f <- 0.4
    # plot observed counts
    dotext(-f * cos30, f * sin30, as.vector(t(as.table(Xcount)))[ok],
           x0, y0, ra, adj=c(1,0), ...)
    # plot expected counts
    dotext(f * cos30, f * sin30, round(x$expected,1)[ok],
           x0, y0, ra, adj=c(0,0), ...)
    # plot Pearson residuals
    dotext(0, -f, signif(x$residuals,2)[ok],x0, y0, ra, ...)
    return(invisible(NULL))
  }

  # write `values` at an offset (dx, dy) * incircle radius from the
  # incircle centres (x0, y0)
  dotext <- function(dx, dy, values, x0, y0, ra, ...) {
    do.call.matched("text.default",
                    resolve.defaults(list(x=x0 + dx * ra, y = y0 + dy * ra),
                                     list(labels=paste(as.vector(values))),
                                     list(...)))
  }

  plot.quadrattest
})

######## pooling multiple quadrat tests into a quadrat test

# Pool several quadrat tests (given as separate arguments, or as a
# single list) into one combined chi-squared / Monte Carlo test.
# Degrees of freedom may be supplied directly (`df`) or via the number
# of estimated parameters (`df.est`); at most one of the two.
pool.quadrattest <- function(..., df=NULL, df.est=NULL, nsim=1999,
                             Xname=NULL) {
  argh <- list(...)
  # BUG FIX: this previously read `if(!is.null(df) + !is.null(df.est))`.
  # Because unary `!` binds more loosely than `+` in R, that parsed as
  # !(is.null(df) + !is.null(df.est)) and signalled the error exactly
  # when `df` alone was supplied -- and never when both were given.
  # The intended check is: both arguments supplied -> error.
  if(!is.null(df) && !is.null(df.est))
    stop("Arguments df and df.est are incompatible")
  if(all(unlist(lapply(argh, inherits, what="quadrattest")))) {
    # Each argument is a quadrattest object
    tests <- argh
  } else if(length(argh) == 1 &&
            is.list(arg1 <- argh[[1]]) &&
            all(unlist(lapply(arg1, inherits, "quadrattest")))) {
    # There is just one argument, which is a list of quadrattests
    tests <- arg1
  } else stop("Each entry in the list must be a quadrat test")
  # data from all cells in all tests
  OBS <- unlist(lapply(tests, getElement, name="observed"))
  EXP <- unlist(lapply(tests, getElement, name="expected"))
  RES <- unlist(lapply(tests, getElement, name="residuals"))
  STA <- unlist(lapply(tests, getElement, name="statistic"))
  # information about each test
  Mkey <- unlist(lapply(tests, getElement, name="method.key"))
  Testname <- unlist(lapply(tests, getElement, name="method"))
  Alternative <- unlist(lapply(tests, getElement, name="alternative"))
  # NOTE(review): "htest" objects built by X2testEngine carry no
  # "conditional" component, so this is typically NULL -- verify
  Conditional <- unlist(lapply(tests, getElement, name="conditional"))
  # name of data
  if(is.null(Xname)) {
    Nam <- unlist(lapply(tests, getElement, name="data.name"))
    Xname <- commasep(sQuote(Nam))
  }
  # name of test
  testname <- unique(Testname)
  method.key <- unique(Mkey)
  if(length(testname) > 1)
    stop(paste("Cannot combine different types of tests:",
               commasep(sQuote(testname))))
  # alternative hypothesis
  alternative <- unique(Alternative)
  if(length(alternative) > 1)
    stop(paste("Cannot combine tests with different alternatives:",
               commasep(sQuote(alternative))))
  # conditional tests
  conditional <- any(Conditional)
  if(conditional)
    stop("Sorry, not implemented for conditional tests")
  if(method.key == "Chisq") {
    # determine degrees of freedom
    if(is.null(df)) {
      if(!is.null(df.est)) {
        # total number of observations minus number of fitted parameters
        df <- length(OBS) - df.est
      } else {
        # total degrees of freedom of tests
        # implicitly assumes independence of tests
        PAR <- unlist(lapply(tests, getElement, name="parameter"))
        df <- sum(PAR)
      }
    }
    # validate df
    if(df < 1)
      stop(paste("Degrees of freedom = ", df))
    names(df) <- "df"
  }
  # perform test
  result <- X2testEngine(OBS, EXP,
                         method=method.key, df=df,
                         nsim=nsim, conditional=conditional,
                         alternative=alternative,
                         testname=testname, dataname=Xname)
  # add info
  class(result) <- c("quadrattest", class(result))
  attr(result, "tests") <- as.listof(tests)
  # there is no quadratcount attribute
  return(result)
}

# Is x a single (non-pooled) quadrat test?
is.atomicQtest <- function(x) {
  inherits(x, "quadrattest") && is.null(attr(x, "tests"))
}

# Recursively flatten a (possibly pooled) quadrat test into the
# list of its atomic component tests.
extractAtomicQtests <- function(x) {
  if(is.atomicQtest(x)) return(list(x))
  stopifnot(inherits(x, "quadrattest"))
  tests <- attr(x, "tests")
  y <- lapply(tests, extractAtomicQtests)
  z <- do.call("c", y)
  return(as.listof(z))
}

# Tessellation(s) underlying a quadrat test: a single tessellation for
# an atomic test, a list of tessellations for a pooled test.
as.tess.quadrattest <- function(X) {
  if(is.atomicQtest(X)) {
    Y <- attr(X, "quadratcount")
    return(as.tess(Y))
  }
  tests <- extractAtomicQtests(X)
  return(as.listof(lapply(tests, as.tess.quadrattest)))
}
#
#  anova.mppm.R
#
# Analysis of deviance for a sequence of fitted mppm models.
# All models must be mppm objects fitted by the same method; unless
# override=TRUE, all must be Poisson (otherwise the p-values have no
# theoretical support). Delegates to anova() on the underlying fits
# with dispersion fixed at 1.
anova.mppm <- function(object, ..., test=NULL, override=FALSE) {
  # list of models
  objex <- append(list(object), list(...))
  # Check each model is an mppm object
  if(!all(unlist(lapply(objex, function(x) {inherits(x, "mppm")}))))
    stop(paste("Arguments must all be", sQuote("mppm"), "objects"))
  # Any non-Poisson models?
  if(!all(unlist(lapply(objex, is.poisson.mppm)))) {
    whinge <- paste("Some of the fitted models are not Poisson processes:",
                    "p-values are not supported by any theory")
    if(override)
      warning(whinge)
    else
      stop(whinge)
  }
  # All models fitted using same method?
  fitter <- unique(unlist(lapply(objex, function(x) { x$Fit$fitter })))
  if(length(fitter) > 1)
    stop(paste("Models are incompatible;",
               "they were fitted by different methods (",
               paste(fitter, collapse=", "),
               ")" ))
  # Extract fit objects
  fitz <- lapply(objex, function(x) { x$Fit$FIT })
  # Finally do the appropriate ANOVA
  result <- do.call("anova", append(fitz, list(test=test, dispersion=1)))
  return(result)
}
to be called if(is.function(fun)) { estimator <- fun } else if(is.character(fun)) { # First try matching one of the standard abbreviations K, G etc estimator <- getSumFun(fun, classname, (nmarks > 0), fatal=FALSE) if(is.null(estimator)) estimator <- get(fun, mode="function") } else stop(paste(sQuote("fun"), "should be a function or a character string")) # ------------------------------------------------------------------ # determine how the function shall be called. # indices.expected <- sum(c("i", "j") %in% names(formals(estimator))) apply.to.split <- (indices.expected == 0 && nmarks > 1) if(apply.to.split) ppsplit <- split(X) # -------------------------------------------------------------------- # determine array dimensions and margin labels witch <- if(nmarks == 0) matrix(1, nrow=1, ncol=1, dimnames=list("","")) else if (nmarks == 1) matrix(1, nrow=1, ncol=1, dimnames=list(marklabels, marklabels)) else if(indices.expected != 2) matrix(1:nmarks, nrow=nmarks, ncol=1, dimnames=list(marklabels, "")) else matrix(1:(nmarks^2),ncol=nmarks,nrow=nmarks, byrow=TRUE, dimnames <- list(marklabels, marklabels)) # ------------ start computing ------------------------------- # if computing envelopes, first generate simulated patterns # using undocumented feature of envelope() if(envelope) { L <- do.call("envelope", resolve.defaults( list(X, fun=estimator), list(internal=list(eject="patterns")), list(...), list(verbose=verb))) intern <- attr(L, "internal") } # compute function array and build up 'fasp' object fns <- list() k <- 0 for(i in 1:nrow(witch)) { Y <- if(apply.to.split) ppsplit[[i]] else X for(j in 1:ncol(witch)) { if(verb) cat("i =",i,"j =",j,"\n") currentfv <- if(!envelope) switch(1+indices.expected, estimator(Y, ...), estimator(Y, i=ma[i], ...), estimator(Y, i=ma[i], j=ma[j], ...)) else do.call("envelope", resolve.defaults( list(Y, estimator), list(simulate=L, internal=intern), list(verbose=FALSE), list(...), list(Yname=dataname), switch(1+indices.expected, NULL, 
list(i=ma[i]), list(i=ma[i], j=ma[j]), NULL))) k <- k+1 fns[[k]] <- as.fv(currentfv) } } # wrap up into 'fasp' object title <- paste(if(nmarks > 1) "array of " else NULL, if(envelope) "envelopes of " else NULL, fname, if(nmarks <= 1) " function " else " functions ", "for ", dataname, ".", sep="") rslt <- fasp(fns, which=witch, formulae=NULL, dataname=dataname, title=title, checkfv=FALSE) return(rslt) } # Lookup table for standard abbreviations of functions getSumFun <- local({ ftable <- rbind( data.frame(class="ppp", marked=FALSE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full=c("Fest", "Gest", "Jest", "Kest", "Lest", "pcf"), stringsAsFactors=FALSE), data.frame(class="ppp", marked=TRUE, abbrev=c("F", "G", "J", "K", "L", "pcf"), full= c("Fest", "Gcross", "Jcross", "Kcross", "Lcross", "pcfcross"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=FALSE, abbrev=c("K", "pcf"), full=c("linearK", "linearpcf"), stringsAsFactors=FALSE), data.frame(class="lpp", marked=TRUE, abbrev=c("K", "pcf"), full=c("linearKcross", "linearpcfcross"), stringsAsFactors=FALSE) ) getfun <- function(abbreviation, classname, ismarked, fatal=TRUE) { matches <- with(ftable, which(abbrev == abbreviation & class == classname & marked == ismarked)) if(length(matches) == 0) { if(!fatal) return(NULL) stop(paste("No match to function abbreviation", sQuote(abbreviation), "for class", sQuote(classname))) } if(length(matches) > 1) stop("Ambiguous function name") fullname <- ftable$full[matches] get(fullname, mode="function") } getfun }) spatstat/R/Gcom.R0000755000176000001440000001452212237642727013401 0ustar ripleyusers# # Gcom.R # # Model compensator of G # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # ################################################################################ # Gcom <- function(object, r=NULL, breaks=NULL, ..., correction=c("border", "Hanisch"), conditional=!is.poisson(object), restrict=FALSE, trend=~1, interaction=Poisson(), rbord=reach(interaction), 
ppmcorrection="border", truecoef=NULL, hi.res=NULL) { if(inherits(object, "ppm")) fit <- object else if(inherits(object, "ppp") || inherits(object, "quad")) fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord, correction=ppmcorrection, ...) else stop("object should be a fitted point process model or a point pattern") if(missing(conditional) || is.null(conditional)) conditional <- !is.poisson(fit) rfixed <- !is.null(r) || !is.null(breaks) # selection of edge corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", Hanisch="Hanisch", hanisch="Hanisch", best="Hanisch"), multi=TRUE) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) Win <- X$window # edge correction algorithm algo <- if(!conditional) "classical" else if(restrict) "restricted" else "reweighted" # conditioning on border region? if(!conditional) { Wfree <- Win } else { rbord <- fit$rbord Wfree <- erosion(Win, rbord) if(restrict) { retain <- inside.owin(union.quad(Q), , Wfree) Q <- Q[Wfree] X <- X[Wfree] Win <- Wfree } } # Extract quadrature info U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # basic statistics npoints <- X$n area <- area.owin(Win) lambda <- npoints/area # quadrature points used USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n) # adjustments to account for restricted domain if(conditional) { npoints.used <- sum(Z & USED) area.used <- sum(WQ[USED]) lambda.used <- npoints.used/area.used } else { npoints.used <- npoints area.used <- area lambda.used <- lambda } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", if(restrict) Wfree else Win, lambda) breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals resid <- residuals(fit, type="raw",drop=FALSE, 
coefs=truecoef, quad=hi.res) rescts <- with(resid, "continuous") if(restrict) { # keep only data inside Wfree rescts <- rescts[retain] } # absolute weight for continuous integrals wc <- -rescts # nearest neighbours (quadrature point to data point) nn <- nncross(U, X, seq(U$n), seq(X$n)) dIJ <- nn$dist I <- seq(U$n) J <- nn$which DD <- Z <- (I <= X$n) # TRUE for data points wcIJ <- -rescts # determine whether a quadrature point will be used in integral okI <- USED[I] # initialise fv object r <- breaks$r df <- data.frame(r=r, pois=1 - exp(-pi * lambda * r^2)) G <- fv(df, "r", substitute(G(r), NULL), "pois", . ~ r, alim=c(0, rmax), labl=c("r","%s[pois](r)"), desc=c("distance argument r", "theoretical Poisson %s"), fname="G") # distance to boundary b <- bI <- bdist.points(U) dotnames <- character(0) # Border method if("border" %in% correction) { # reduced sample for G(r) of data only RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[Z & USED], breaks) Gb <- RSX$numerator/RSX$denom.count G <- bind.fv(G, data.frame(border=Gb), "hat(%s)[bord](r)", "border-corrected nonparametric estimate of %s", "border") # reduced sample for adjustment integral RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[Z & USED], rep.int(1, npoints.used), breaks) Gbcom <- RSD$numerator/(1 + RSD$denominator) G <- bind.fv(G, data.frame(bcom=Gbcom), "bold(C)~hat(%s)[bord](r)", "model compensator of border-corrected %s", "bcom") dotnames <- c("border", "bcom", "pois") } # Hanisch correction for data if("Hanisch" %in% correction) { nnd <- dIJ[DD & okI] bdry <- bI[DD & okI] # weights ea <- eroded.areas(Win, rvals) if(algo == "reweighted") { # replace weight(r) by weight(max(rbord,r)) ea[rvals < rbord] <- eroded.areas(Win, rbord) } # compute x <- nnd[nnd <= bdry] h <- whist(x[x <= rmax], breaks=breaks$val) H <- (1/lambda) * cumsum(h/ea) # glue on G <- bind.fv(G, data.frame(han=H), "hat(%s)[han](r)", "Hanisch correction estimate of %s", "han") # Hanisch correction for adjustment integral nnd <- dIJ[okI] bdry <- 
bI[okI] wt <- wcIJ[okI] x <- nnd[nnd <= bdry] wt <- wt[nnd <= bdry] h <- whist(x[x <= rmax], breaks=breaks$val, weights=wt[x <= rmax]) lambdaplus <- (npoints + 1)/area Hint <- (1/lambdaplus) * cumsum(h/ea) # glue on G <- bind.fv(G, data.frame(hcom=Hint), "bold(C)~hat(%s)[han](r)", "model compensator of Hanisch-corrected %s", "hcom") # pseudovariance for Hanisch residual Hvar <- (1/lambdaplus^2) * cumsum(h/ea^2) G <- bind.fv(G, data.frame(hvar=Hvar), "bold(C)^2~hat(%s)[han](r)", "Poincare variance for Hanisch corrected %s", "hcom") # default plot does not show all components dotnames <- c("han", "hcom", dotnames) } # compute sensible 'alim' endpoint <- function(y, r, f) { min(r[y >= f * max(y)]) } amax <- endpoint(G$pois, G$r, 0.99) if(length(dotnames) > 0) amax <- max(amax, unlist(lapply(as.data.frame(G)[,dotnames,drop=FALSE], endpoint, r=r, f=0.9))) attr(G, "alim") <- c(0, amax) # fvnames(G, ".") <- dotnames unitname(G) <- unitname(X) # secret tag used by 'Gres' attr(G, "maker") <- "Gcom" return(G) } spatstat/R/pairpiece.R0000755000176000001440000001034412237642727014453 0ustar ripleyusers# # # pairpiece.S # # $Revision: 1.21 $ $Date: 2013/04/25 06:37:43 $ # # A pairwise interaction process with piecewise constant potential # # PairPiece() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # PairPiece <- local({ # .... auxiliary functions ........ delP <- function(i, r) { r <- r[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Strauss(r)) return(PairPiece(r)) } # ..... template .......... 
BlankPairPiece <- list( name = "Piecewise constant pairwise interaction process", creator = "PairPiece", family = "pairwise.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL), # filled in later parnames = "interaction thresholds", init = function(self) { r <- self$par$r if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=round(gn,4))) }, valid = function(coeffs, self) { # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas if(!all(is.finite(gamma))) return(FALSE) return(all(gamma <= 1) || gamma[1] == 0) }, project = function(coeffs, self){ # interaction parameters gamma gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction thresholds r[i] r <- self$par$r # check for NA or Inf bad <- !is.finite(gamma) # gamma > 1 forbidden unless hard core ishard <- is.finite(gamma[1]) && (gamma[1] == 0) if(!ishard) bad <- bad | (gamma > 1) if(!any(bad)) return(NULL) if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delP(firstbad, r)) } else { # consider all candidate submodels subs <- lapply(which(bad), delP, r=r) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r if(all(is.na(coeffs))) return(max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) if(!any(active)) return(0) else return(max(r[active])) }, Mayer=function(coeffs, self) { # second Mayer cluster integral r <- self$par$r gamma <- (self$interpret)(coeffs, self)$param$gammas # areas of annuli between r[i-1], r[i] areas <- pi * diff(c(0,r)^2) return(sum(areas * (1-gamma))) }, version=NULL # filled in later ) class(BlankPairPiece) <- "interact" PairPiece <- function(r) { instantiate.interact(BlankPairPiece, list(r=r)) } PairPiece }) spatstat/R/varblock.R0000755000176000001440000000732512237642727014322 0ustar ripleyusers# # varblock.R # # Variance estimation using block subdivision # # $Revision: 1.8 $ $Date: 2010/11/21 07:05:12 $ # varblock <- function(X, fun=Kest, blocks=quadrats(X, nx=nx, ny=ny), ..., nx=3, ny=nx) { stopifnot(is.ppp(X)) stopifnot(is.tess(blocks)) if(is.character(fun)) fun <- get(fun, mode="function") stopifnot(is.function(fun)) rvalues <- function(z) { with(z, .x) } canrestrict <- "domain" %in% names(formals(fun)) if(!canrestrict) { # divide data into disjoint blocks Y <- split(X, blocks) n <- length(Y) if(n <= 1) stop("Need at least 2 blocks") # apply 'fun' to each block if(any(c("r", "breaks") %in% names(list(...)))) { # r vector specified fX <- fun(X, ...) z <- lapply(Y, fun, ...) } else { # need to ensure compatible fv objects z <- lapply(Y, fun, ...) rmaxes <- unlist(lapply(z, function(x){ max(rvalues(x)) })) smallest <- which.min(rmaxes) r <- rvalues(z[[smallest]]) z <- lapply(Y, fun, ..., r=r) fX <- fun(X, ..., r=r) } } else { # use 'domain' argument of 'fun' to compute contributions from each tile B <- tiles(blocks) n <- length(B) dofun <- function(domain, fun, Xpp, ...) { fun(Xpp, ..., domain=domain) } if(any(c("r", "breaks") %in% names(list(...)))) { # r vector specified fX <- fun(X, ...) 
z <- lapply(B, dofun, ..., fun=fun, Xpp=X) } else { # need to ensure compatible fv objects z <- lapply(B, dofun, ..., fun=fun, Xpp=X) rmaxes <- unlist(lapply(z, function(x){ max(rvalues(x)) })) smallest <- which.min(rmaxes) r <- rvalues(z[[smallest]]) z <- lapply(B, dofun, ..., fun=fun, Xpp=X, r=r) fX <- fun(X, ..., r=r) } } # find columns that are common to all estimates zzz <- reconcile.fv(append(list(fX), z)) fX <- zzz[[1]] z <- zzz[-1] # get info ylab <- attr(z[[1]], "ylab") yexp <- attr(z[[1]], "yexp") fname <- attr(z[[1]], "fname") # sample mean m <- meanlistfv(z) # sample variance sqdev <- lapply(z, function(x,m){ eval.fv((x-m)^2) }, m=m) v <- meanlistfv(sqdev) v <- eval.fv(v * n/(n-1)) v <- rebadge.fv(v, new.ylab=ylab, new.fname=fname, new.yexp=yexp) # sample standard deviation sd <- eval.fv(sqrt(v)) sd <- rebadge.fv(sd, new.ylab=ylab, new.fname=fname, new.yexp=yexp) # upper and lower limits sem <- eval.fv(sd/sqrt(n)) upper <- eval.fv(fX + 2 * sem) upper <- rebadge.fv(upper, new.ylab=ylab, new.fname=fname, new.yexp=yexp) lower <- eval.fv(fX - 2 * sem) lower <- rebadge.fv(lower, new.ylab=ylab, new.fname=fname, new.yexp=yexp) # tack together m <- prefixfv(m, "mean", "sample mean of") v <- prefixfv(v, "var", "estimated variance of") sd <- prefixfv(sd, "sd", "estimated standard deviation of") upper <- prefixfv(upper, "hi", "upper CI limit for") lower <- prefixfv(lower, "lo", "lower CI limit for") out <- cbind(fX,m,v,sd,upper,lower) # restrict r domain ok <- apply(is.finite(as.matrix(as.data.frame(out))), 1, all) rmax <- max(rvalues(out)[ok]) alim <- attr(out, "alim") attr(out, "alim") <- c(0, min(rmax, alim[2])) return(out) } meanlistfv <- function(z) { # compute sample mean of a list of fv objects if(!is.list(z) || !all(unlist(lapply(z, is.fv)))) stop("z should be a list of fv objects") n <- length(z) if(n == 0) return(NULL) a <- a1 <- z[[1]] if(n > 1) { for(i in 2:n) { b <- z[[i]] a <- eval.fv(a+b) } a <- eval.fv(a/n) a <- rebadge.fv(a, new.ylab=attr(a1, 
"ylab"), new.fname=attr(a1, "fname")) } return(a) } spatstat/R/nnmap.R0000644000176000001440000001447412247336365013627 0ustar ripleyusers# # nnmap.R # # nearest or k-th nearest neighbour of each pixel # # $Revision: 1.6 $ $Date: 2013/12/03 10:15:30 $ # nnmap <- function(X, k=1, what = c("dist", "which"), ..., W=as.owin(X), is.sorted.X=FALSE, sortby=c("range", "var", "x", "y")) { stopifnot(is.ppp(X)) sortby <- match.arg(sortby) outputarray <- resolve.1.default("outputarray", ..., outputarray=FALSE) W <- as.owin(W) huge <- 1.1 * diameter(bounding.box(as.rectangle(X), as.rectangle(W))) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) want.dist <- "dist" %in% what want.which <- "which" %in% what want.both <- want.dist && want.which if(!missing(k)) { # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } } k <- as.integer(k) kmax <- max(k) nk <- length(k) # note whether W is `really' a rectangle isrect <- is.rectangle(rescue.rectangle(W)) # set up pixel array M <- do.call.matched("as.mask", resolve.defaults(list(...), list(w=W))) Mdim <- M$dim nxcol <- Mdim[2] nyrow <- Mdim[1] npixel <- nxcol * nyrow nX <- npoints(X) if(nX == 0) { # trivial - avoid potential problems in C code NND <- if(want.dist) array(Inf, dim=c(nk, Mdim)) else 0 NNW <- if(want.which) array(NA_integer_, dim=c(nk, Mdim)) else 0 } else { # usual case if(is.sorted.X && !(sortby %in% c("x", "y"))) stop(paste("If data are already sorted,", "the sorting coordinate must be specified explicitly", "using sortby = \"x\" or \"y\"")) # decide whether to sort on x or y coordinate switch(sortby, range = { s <- sidelengths(as.rectangle(X)) sortby.y <- (s[1] < s[2]) }, var = { sortby.y <- (var(X$x) < var(X$y)) }, x={ sortby.y <- FALSE}, 
y={ sortby.y <- TRUE} ) # The C code expects points to be sorted by x coordinate. if(sortby.y) { oldM <- M X <- flipxy(X) W <- flipxy(W) M <- flipxy(M) Mdim <- M$dim } xx <- X$x yy <- X$y # sort only if needed if(!is.sorted.X){ oX <- fave.order(xx) xx <- xx[oX] yy <- yy[oX] } # number of neighbours that are well-defined kmaxcalc <- min(nX, kmax) # prepare to call C code nndv <- if(want.dist) numeric(npixel * kmaxcalc) else numeric(1) nnwh <- if(want.which) integer(npixel * kmaxcalc) else integer(1) DUP <- spatstat.options("dupC") # ............. call C code ............................ if(kmaxcalc == 1) { zz <- .C("nnGinterface", nx = as.integer(nxcol), x0 = as.double(M$xcol[1]), xstep = as.double(M$xstep), ny = as.integer(nyrow), y0 = as.double(M$yrow[1]), ystep = as.double(M$ystep), np = as.integer(nX), xp = as.double(xx), yp = as.double(yy), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd = as.double(nndv), nnwhich = as.integer(nnwh), huge = as.double(huge), DUP = DUP) } else { zz <- .C("knnGinterface", nx = as.integer(nxcol), x0 = as.double(M$xcol[1]), xstep = as.double(M$xstep), ny = as.integer(nyrow), y0 = as.double(M$yrow[1]), ystep = as.double(M$ystep), np = as.integer(nX), xp = as.double(xx), yp = as.double(yy), kmax = as.integer(kmaxcalc), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd = as.double(nndv), nnwhich = as.integer(nnwh), huge = as.double(huge), DUP = DUP) } # extract results nnW <- zz$nnwhich nnD <- zz$nnd # map index 0 to NA if(want.which && any(uhoh <- (nnW == 0))) { nnW[uhoh] <- NA if(want.dist) nnD[uhoh] <- Inf } # reinterpret indices in original ordering if(!is.sorted.X) nnW <- oX[nnW] # reform as arrays NND <- if(want.dist) array(nnD, dim=c(kmaxcalc, Mdim)) else 0 NNW <- if(want.which) array(nnW, dim=c(kmaxcalc, Mdim)) else 0 if(sortby.y) { # flip x and y back again if(want.dist) NND <- aperm(NND, c(1, 3, 2)) if(want.which) NNW <- aperm(NNW, c(1, 3, 2)) M <- oldM Mdim <- dim(M) } # the 
return value should correspond to the original vector k if(kmax > kmaxcalc) { # pad with NA / Inf if(want.dist) { NNDcalc <- NND NND <- array(Inf, dim=c(kmax, Mdim)) NND[1:kmaxcalc, , ] <- NNDcalc } if(want.which) { NNWcalc <- NNW NNW <- array(NA_integer_, dim=c(kmax, Mdim)) NNW[1:kmaxcalc, , ] <- NNWcalc } } if(length(k) < kmax) { # select only the specified planes if(want.dist) NND <- NND[k, , , drop=FALSE] if(want.which) NNW <- NNW[k, , , drop=FALSE] } } # secret backdoor if(outputarray) { # return result as an array or pair of arrays result <- if(want.both) { list(dist=NND, which=NNW) } else if(want.dist) NND else NNW attr(result, "pixarea") <- with(M, xstep * ystep) return(result) } # format result as a list of images result <- list() if(want.dist) { dlist <- list() for(i in 1:nk) { DI <- as.im(NND[i,,], M) if(!isrect) DI <- DI[M, drop=FALSE] dlist[[i]] <- DI } names(dlist) <- k result[["dist"]] <- if(nk > 1) dlist else dlist[[1]] } if(want.which) { wlist <- list() for(i in 1:nk) { WI <- as.im(NNW[i,,], M) if(!isrect) WI <- WI[M, drop=FALSE] wlist[[i]] <- WI } names(wlist) <- k result[["which"]] <- if(nk > 1) wlist else wlist[[1]] } if(!want.both) result <- result[[1]] return(result) } spatstat/R/hybrid.family.R0000755000176000001440000001314712237642727015257 0ustar ripleyusers# # hybrid.family.R # # $Revision: 1.6 $ $Date: 2013/04/25 06:37:43 $ # # Hybrid interactions # # hybrid.family: object of class 'isf' defining pairwise interaction # # ------------------------------------------------------------------- # hybrid.family <- list( name = "hybrid", print = function(self) { cat("Hybrid interaction family\n") }, plot = function(fint, ..., d=NULL, plotit=TRUE, separate=FALSE) { # plot hybrid interaction if possible verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "hybrid") stop("Tried to plot the wrong kind of interaction") if(is.null(d)) { # compute reach and determine max distance for plots 
dmax <- 1.25 * reach(inter) if(!is.finite(dmax)) { # interaction has infinite reach # Are plot limits specified? xlim <- resolve.defaults(list(...), list(xlim=c(0, Inf))) if(all(is.finite(xlim))) dmax <- max(xlim) else stop("Interaction has infinite reach; need to specify xlim or d") } d <- seq(0, dmax, length=256) } # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # extract the component interactions interlist <- inter$par # check that they are all pairwise interactions families <- unlist(lapply(interlist, function(x) { x$family$name })) if(!separate && !all(families == "pairwise")) { warning(paste("Cannot compute the resultant function;", "not all components are pairwise interactions;", "plotting each component separately")) separate <- TRUE } # deal with each interaction ninter <- length(interlist) results <- list() for(i in 1:ninter) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. prefixlength <- nchar(nameI.) Vprefix <- substr(Vnames, 1, prefixlength) relevant <- (Vprefix == nameI.) 
# construct fii object for this component fitinI <- fii(interI, coeff[relevant], Vnames[relevant], IsOffset[relevant]) # convert to fv object a <- plot(fitinI, ..., d=d, plotit=FALSE) aa <- list(a) names(aa) <- nameI results <- append(results, aa) } # computation of resultant is only implemented for fv objects if(!separate && !all(unlist(lapply(results, is.fv)))) { warning(paste("Cannot compute the resultant function;", "not all interaction components yielded an fv object;", "plotting separate results for each component")) separate <- TRUE } # return separate 'fv' or 'fasp' objects if required results <- as.listof(results) if(separate) { if(plotit) { main0 <- "Pairwise interaction components" do.call("plot", resolve.defaults(list(results), list(...), list(main=main0))) } return(invisible(results)) } # multiply together to obtain resultant pairwise interaction ans <- results[[1]] if(ninter >= 2) { for(i in 2:ninter) { Fi <- results[[i]] ans <- eval.fv(ans * Fi) } copyover <- c("ylab", "yexp", "labl", "desc", "fname") attributes(ans)[copyover] <- attributes(results[[1]])[copyover] } main0 <- "Resultant pairwise interaction" if(plotit) do.call("plot", resolve.defaults(list(ans), list(...), list(main=main0))) return(invisible(ans)) }, eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # `pot' is ignored; `pars' is a list of interactions nU <- length(U$x) V <- matrix(, nU, 0) IsOffset <- logical(0) for(i in 1:length(pars)) { # extract i-th component interaction interI <- pars[[i]] nameI <- names(pars)[[i]] # compute potential for i-th component VI <- evalInteraction(X, U, EqualPairs, interI, correction, ...) 
if(ncol(VI) > 0) { if(ncol(VI) > 1 && is.null(colnames(VI))) # make up names colnames(VI) <- paste("Interaction", seq(ncol(VI)), sep=".") # prefix label with name of i-th component colnames(VI) <- paste(nameI, dimnames(VI)[[2]], sep=".") # handle IsOffset offI <- attr(VI, "IsOffset") if(is.null(offI)) offI <- rep.int(FALSE, ncol(VI)) # tack on IsOffset <- c(IsOffset, offI) # append to matrix V V <- cbind(V, VI) } } if(any(IsOffset)) attr(V, "IsOffset") <- IsOffset return(V) }, suffstat = NULL ) class(hybrid.family) <- "isf" spatstat/R/plot.ppm.R0000755000176000001440000000522312237642727014263 0ustar ripleyusers# # plot.ppm.S # # $Revision: 2.8 $ $Date: 2008/08/12 08:26:45 $ # # plot.ppm() # Plot a point process model fitted by ppm(). # # # plot.ppm <- function(x, ngrid = c(40,40), superimpose = TRUE, trend=TRUE, cif=TRUE, se=TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it=TRUE, locations=NULL, covariates=NULL, ...) { model <- x # Plot a point process model fitted by ppm(). 
# verifyclass(model, "ppm") # # find out what kind of model it is # mod <- summary(model) stationary <- mod$stationary poisson <- mod$poisson marked <- mod$marked multitype <- mod$multitype data <- mod$entries$data if(marked) { if(!multitype) stop("Not implemented for general marked point processes") else mrkvals <- levels(marks(data)) } else mrkvals <- 1 ntypes <- length(mrkvals) # # Interpret options # ----------------- # # Whether to plot trend, cif, se if(!trend && !cif && !se) { cat(paste("Nothing plotted;", sQuote("trend"), ",", sQuote("cif"), "and", sQuote("se"), "are all FALSE\n")) return(invisible(NULL)) } # Suppress uninteresting plots # unless explicitly instructed otherwise if(missing(trend)) trend <- !stationary if(missing(cif)) cif <- !poisson if(missing(se)) se <- poisson && !stationary else if(se && !poisson) { warning(paste("standard error calculation", "is only implemented for Poisson models")) se <- FALSE } if(!trend && !cif && !se) { cat("Nothing plotted -- all plots selected are flat surfaces.\n") return(invisible(NULL)) } # # style of plot: suppress pseudo-default # if(missing(how)) how <- "image" # # # Do the prediction # ------------------ out <- list() surftypes <- c("trend","cif","se")[c(trend,cif,se)] ng <- if(missing(ngrid) && !missing(locations)) NULL else ngrid for (ttt in surftypes) { p <- predict(model, ngrid=ng, locations=locations, covariates=covariates, type = ttt) if(is.im(p)) p <- list(p) out[[ttt]] <- p } # Make it a plotppm object # ------------------------ class(out) <- "plotppm" attr(out, "mrkvals") <- mrkvals # Actually plot it if required # ---------------------------- if(plot.it) { if(!superimpose) data <- NULL plot(out,data=data,trend=trend,cif=cif,se=se,how=how,pause=pause, ...) 
} return(invisible(out)) } spatstat/R/wingeom.R0000755000176000001440000004676612237642727014200 0ustar ripleyusers# # wingeom.S Various geometrical computations in windows # # # $Revision: 4.82 $ $Date: 2013/11/01 06:49:45 $ # # # # #------------------------------------- volume.owin <- function(x) { area.owin(x) } area.owin <- function(w) { w <- as.owin(w) switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) area <- width * height }, polygonal = { area <- sum(unlist(lapply(w$bdry, area.xypolygon))) }, mask = { pixelarea <- abs(w$xstep * w$ystep) npixels <- sum(w$m) area <- pixelarea * npixels }, stop("Unrecognised window type") ) return(area) } perimeter <- function(w) { w <- as.owin(w) switch(w$type, rectangle = { return(2*(diff(w$xrange)+diff(w$yrange))) }, polygonal={ return(sum(lengths.psp(as.psp(w)))) }, mask={ p <- as.polygonal(w) if(is.null(p)) return(NA) delta <- sqrt(w$xstep^2 + w$ystep^2) p <- simplify.owin(p, delta * 1.15) return(sum(lengths.psp(as.psp(p)))) }) return(NA) } sidelengths.owin <- function(x) { if(x$type != "rectangle") warning("Computing the side lengths of a non-rectangular window") with(x, c(diff(xrange), diff(yrange))) } shortside.owin <- function(x) { min(sidelengths(x)) } eroded.areas <- function(w, r) { w <- as.owin(w) switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) areas <- pmax(width - 2 * r, 0) * pmax(height - 2 * r, 0) }, polygonal = { # warning("Approximating polygonal window by digital image") w <- as.mask(w) areas <- eroded.areas(w, r) }, mask = { # distances from each pixel to window boundary b <- bdist.pixels(w, style="matrix") # histogram breaks to satisfy hist() Bmax <- max(b, r) breaks <- c(-1,r,Bmax+1) # histogram of boundary distances h <- hist(b, breaks=breaks, plot=FALSE)$counts # reverse cumulative histogram H <- revcumsum(h) # drop first entry corresponding to r=-1 H <- H[-1] # convert count to area pixarea <- w$xstep * w$ystep areas <- 
pixarea * H }, stop("unrecognised window type") ) areas } even.breaks.owin <- function(w) { verifyclass(w, "owin") Rmax <- diameter(w) make.even.breaks(Rmax, Rmax/(100 * sqrt(2))) } unit.square <- function() { owin(c(0,1),c(0,1)) } square <- function(r=1) { stopifnot(is.numeric(r)) if(any(is.na(r) | !is.finite(r))) stop("argument r is NA or infinite") if(length(r) == 1) { stopifnot(r > 0) r <- c(0,r) } else if(length(r) == 2) { stopifnot(r[1] < r[2]) } else stop("argument r must be a single number, or a vector of length 2") owin(r,r) } overlap.owin <- function(A, B) { # compute the area of overlap between two windows # check units if(!compatible.units(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") At <- A$type Bt <- B$type if(At=="rectangle" && Bt=="rectangle") { xmin <- max(A$xrange[1],B$xrange[1]) xmax <- min(A$xrange[2],B$xrange[2]) if(xmax <= xmin) return(0) ymin <- max(A$yrange[1],B$yrange[1]) ymax <- min(A$yrange[2],B$yrange[2]) if(ymax <= ymin) return(0) return((xmax-xmin) * (ymax-ymin)) } if((At=="rectangle" && Bt=="polygonal") || (At=="polygonal" && Bt=="rectangle") || (At=="polygonal" && Bt=="polygonal")) { AA <- as.polygonal(A)$bdry BB <- as.polygonal(B)$bdry area <- 0 for(i in seq_along(AA)) for(j in seq_along(BB)) area <- area + overlap.xypolygon(AA[[i]], BB[[j]]) return(area) } if(At=="mask") { # count pixels in A that belong to B pixelarea <- abs(A$xstep * A$ystep) x <- as.vector(raster.x(A)[A$m]) y <- as.vector(raster.y(A)[A$m]) ok <- inside.owin(x, y, B) return(pixelarea * sum(ok)) } if(Bt== "mask") { # count pixels in B that belong to A pixelarea <- abs(B$xstep * B$ystep) x <- as.vector(raster.x(B)[B$m]) y <- as.vector(raster.y(B)[B$m]) ok <- inside.owin(x, y, A) return(pixelarea * sum(ok)) } stop("Internal error") } # # subset operator for window # "[.owin" <- function(x, i, ...) 
{ if(!missing(i) && !is.null(i)) { if(is.im(i) && i$type == "logical") { # convert to window i <- as.owin(eval.im(ifelse1NA(i))) } else stopifnot(is.owin(i)) x <- intersect.owin(x, i, fatal=FALSE) } return(x) } # # # Intersection and union of windows # # intersect.owin <- function(A, B, ..., fatal=TRUE) { liszt <- list(...) rasterinfo <- list() if(length(liszt) > 0) { # explicit arguments controlling raster info israster <- names(liszt) %in% names(formals(as.mask)) rasterinfo <- liszt[israster] # handle intersection of more than two windows isowin <- unlist(lapply(liszt, is.owin)) nextra <- sum(isowin) if(nextra > 0) { windows <- liszt[isowin] for(i in 1:nextra) B <- do.call("intersect.owin", append(list(B, windows[[i]]), rasterinfo)) } } if(missing(A) || is.null(A)) return(B) if(missing(B) || is.null(B)) return(A) verifyclass(A, "owin") verifyclass(B, "owin") # if(identical(A, B)) return(A) # check units if(!compatible.units(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") # determine intersection of x and y ranges xr <- intersect.ranges(A$xrange, B$xrange, fatal=fatal) yr <- intersect.ranges(A$yrange, B$yrange, fatal=fatal) if(!fatal && (is.null(xr) || is.null(yr))) return(NULL) C <- owin(xr, yr, unitname=unitname(A)) if(is.empty(A) || is.empty(B)) return(emptywindow(C)) # Determine type of intersection Arect <- is.rectangle(A) Brect <- is.rectangle(B) Apoly <- is.polygonal(A) Bpoly <- is.polygonal(B) Amask <- is.mask(A) Bmask <- is.mask(B) # Rectangular case if(Arect && Brect) return(C) if(!Amask && !Bmask) { ####### Result is polygonal ############ a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon) b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon) ab <- polyclip(a, b, "intersection", fillA="nonzero", fillB="nonzero") if(length(ab)==0) return(emptywindow(C)) # ensure correct polarity totarea <- sum(unlist(lapply(ab, area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(poly=ab, check=FALSE) 
AB <- rescue.rectangle(AB) return(AB) } ######### Result is a mask ############## # Restrict domain where possible if(Arect) A <- C if(Brect) B <- C if(Amask) A <- trim.mask(A, C) if(Bmask) B <- trim.mask(B, C) # Did the user specify the pixel raster? if(length(rasterinfo) > 0) { # convert to masks with specified parameters, and intersect if(Amask) { A <- do.call("as.mask", append(list(A), rasterinfo)) return(restrict.mask(A, B)) } else { B <- do.call("as.mask", append(list(B), rasterinfo)) return(restrict.mask(B, A)) } } # One mask and one rectangle? if(Arect && Bmask) return(B) if(Amask && Brect) return(A) # One mask and one polygon? if(Amask && !Bmask) return(restrict.mask(A, B)) if(!Amask && Bmask) return(restrict.mask(B, A)) # Two existing masks? if(Amask && Bmask) { # choose the finer one if(A$xstep <= B$xstep) return(restrict.mask(A, B)) else return(restrict.mask(B, A)) } # No existing masks. No clipping applied so far. # Convert one window to a mask with default pixel raster, and intersect. if(Arect) { A <- as.mask(A) return(restrict.mask(A, B)) } else { B <- as.mask(B) return(restrict.mask(B, A)) } } union.owin <- function(A, B, ...) { liszt <- list(...) 
rasterinfo <- list() if(length(liszt) > 0) { # explicit arguments controlling raster info israster <- names(liszt) %in% names(formals(as.mask)) rasterinfo <- liszt[israster] # handle intersection of more than two windows isowin <- unlist(lapply(liszt, is.owin)) nextra <- sum(isowin) if(nextra > 0) { windows <- liszt[isowin] for(i in 1:nextra) B <- do.call("union.owin", append(list(B, windows[[i]]), rasterinfo)) } } # if(missing(A) || is.null(A) || is.empty(A)) return(B) if(missing(B) || is.null(B) || is.empty(B)) return(A) verifyclass(A, "owin") verifyclass(B, "owin") if(identical(A, B)) return(A) # check units if(!compatible.units(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") # Determine type of intersection Arect <- is.rectangle(A) Brect <- is.rectangle(B) Apoly <- is.polygonal(A) Bpoly <- is.polygonal(B) Amask <- is.mask(A) Bmask <- is.mask(B) # Result is not rectangular. # Create a rectangle to contain it. C <- owin(range(A$xrange, B$xrange), range(A$yrange, B$yrange), unitname=unitname(A)) if(!Amask && !Bmask) { ####### Result is polygonal ############ a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon) b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon) ab <- polyclip(a, b, "union", fillA="nonzero", fillB="nonzero") if(length(ab) == 0) return(emptywindow(C)) # ensure correct polarity totarea <- sum(unlist(lapply(ab, area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(poly=ab, check=FALSE) AB <- rescue.rectangle(AB) return(AB) } ####### Result is a mask ############ # Determine pixel raster parameters if(length(rasterinfo) == 0) { rasterinfo <- if(Amask) list(xy=list(x=prolongseq(A$xcol, C$xrange), y=prolongseq(A$yrow, C$yrange))) else if(Bmask) list(xy=list(x=prolongseq(B$xcol, C$xrange), y=prolongseq(B$yrow, C$yrange))) else list() } # Convert C to mask C <- do.call("as.mask", append(list(w=C), rasterinfo)) x <- as.vector(raster.x(C)) y <- as.vector(raster.y(C)) ok <- inside.owin(x, y, 
A) | inside.owin(x, y, B) if(all(ok)) { # result is a rectangle C <- as.rectangle(C) } else { # result is a mask C$m[] <- ok } return(C) } setminus.owin <- function(A, B, ...) { if(is.null(A) || is.empty(A)) return(B) if(is.null(B) || is.empty(B)) return(A) verifyclass(A, "owin") verifyclass(B, "owin") if(identical(A, B)) return(emptywindow(as.rectangle(A))) # check units if(!compatible.units(unitname(A), unitname(B))) warning("The two windows have incompatible units of length") # Determine type of arguments Arect <- is.rectangle(A) Brect <- is.rectangle(B) Apoly <- is.polygonal(A) Bpoly <- is.polygonal(B) Amask <- is.mask(A) Bmask <- is.mask(B) # Case where A and B are both rectangular if(Arect && Brect) { C <- intersect.owin(A, B, fatal=FALSE) if(is.null(C)) return(A) return(complement.owin(C, A)) } # Polygonal case if(!Amask && !Bmask) { ####### Result is polygonal ############ a <- lapply(as.polygonal(A)$bdry, reverse.xypolygon) b <- lapply(as.polygonal(B)$bdry, reverse.xypolygon) ab <- polyclip(a, b, "minus", fillA="nonzero", fillB="nonzero") if(length(ab) == 0) return(emptywindow(C)) # ensure correct polarity totarea <- sum(unlist(lapply(ab, area.xypolygon))) if(totarea < 0) ab <- lapply(ab, reverse.xypolygon) AB <- owin(poly=ab, check=FALSE) AB <- rescue.rectangle(AB) return(AB) } ####### Result is a mask ############ # Determine pixel raster parameters rasterinfo <- if((length(list(...)) > 0)) list(...) 
else if(Amask) list(xy=list(x=A$xcol, y=A$yrow))
else if(Bmask) list(xy=list(x=B$xcol, y=B$yrow))
else list()
# Convert A to mask
AB <- do.call("as.mask", append(list(w=A), rasterinfo))
x <- as.vector(raster.x(AB))
y <- as.vector(raster.y(AB))
ok <- inside.owin(x, y, A) & !inside.owin(x, y, B)
if(!all(ok))
AB$m[] <- ok
else
AB <- rescue.rectangle(AB)
return(AB)
}

# auxiliary functions

trim.mask <- function(M, R, tolerant=TRUE) {
  # M is a mask,
  # R is a rectangle
  # Returns the part of mask M lying inside rectangle R, as a mask.
  # Ensure R is a subset of bounding rectangle of M
  R <- owin(intersect.ranges(M$xrange, R$xrange),
            intersect.ranges(M$yrange, R$yrange))
  # Deal with very thin rectangles
  if(tolerant) {
    R$xrange <- adjustthinrange(R$xrange, M$xstep, M$xrange)
    R$yrange <- adjustthinrange(R$yrange, M$ystep, M$yrange)
  }
  # Extract subset of image grid
  within.range <- function(u, v) { (u >= v[1]) & (u <= v[2]) }
  yrowok <- within.range(M$yrow, R$yrange)
  xcolok <- within.range(M$xcol, R$xrange)
  if((ny <- sum(yrowok)) == 0 || (nx <- sum(xcolok)) == 0)
    return(emptywindow(R))
  Z <- M
  Z$xrange <- R$xrange
  Z$yrange <- R$yrange
  Z$yrow <- M$yrow[yrowok]
  Z$xcol <- M$xcol[xcolok]
  Z$m <- M$m[yrowok, xcolok]
  # subscripting with a single row/column drops the matrix to a vector;
  # restore matrix form so Z$m always has two dimensions
  if(ny < 2 || nx < 2)
    Z$m <- matrix(Z$m, nrow=ny, ncol=nx)
  Z$dim <- dim(Z$m)
  return(Z)
}

restrict.mask <- function(M, W) {
  # M is a mask, W is any window
  # Returns the mask M restricted to the window W.
  stopifnot(is.mask(M))
  stopifnot(inherits(W, "owin"))
  # BUG FIX: this test was 'if(is.rectangle(W) == "rectangle")', which
  # compares a logical with a character string and is therefore always
  # FALSE, so the rectangle shortcut was never taken (results were still
  # correct via the general path below, but slower).
  if(is.rectangle(W))
    return(trim.mask(M, W))
  M <- trim.mask(M, as.rectangle(W))
  # Determine which pixels of M are inside W
  Mm <- M$m
  x <- as.vector(raster.x(M)[Mm])
  y <- as.vector(raster.y(M)[Mm])
  ok <- inside.owin(x, y, W)
  Mm[Mm] <- ok
  M$m <- Mm
  return(M)
}

# SUBSUMED IN rmhexpand.R
# expand.owin <- function(W, f=1) {
#   #
#   # expand bounding box of 'win'
#   # by factor 'f' in **area**
#   if(f <= 0)
#     stop("f must be > 0")
#   if(f == 1)
#     return(W)
#   bb <- bounding.box(W)
#   xr <- bb$xrange
#   yr <- bb$yrange
#   fff <- (sqrt(f) - 1)/2
#   Wexp <- owin(xr + fff * c(-1,1) * diff(xr),
#                yr + fff * c(-1,1) * diff(yr),
#
unitname=unitname(W)) # return(Wexp) #} trim.rectangle <- function(W, xmargin=0, ymargin=xmargin) { if(!is.rectangle(W)) stop("Internal error: tried to trim margin off non-rectangular window") xmargin <- ensure2vector(xmargin) ymargin <- ensure2vector(ymargin) if(any(xmargin < 0) || any(ymargin < 0)) stop("values of xmargin, ymargin must be nonnegative") if(sum(xmargin) > diff(W$xrange)) stop("window is too small to cut off margins of the width specified") if(sum(ymargin) > diff(W$yrange)) stop("window is too small to cut off margins of the height specified") owin(W$xrange + c(1,-1) * xmargin, W$yrange + c(1,-1) * ymargin, unitname=unitname(W)) } grow.rectangle <- function(W, xmargin=0, ymargin=xmargin) { xmargin <- ensure2vector(xmargin) ymargin <- ensure2vector(ymargin) if(any(xmargin < 0) || any(ymargin < 0)) stop("values of xmargin, ymargin must be nonnegative") owin(W$xrange + c(-1,1) * xmargin, W$yrange + c(-1,1) * ymargin, unitname=unitname(W)) } bdry.mask <- function(W) { verifyclass(W, "owin") W <- as.mask(W) m <- W$m nr <- nrow(m) nc <- ncol(m) b <- (m != rbind(FALSE, m[-nr, ])) b <- b | (m != rbind(m[-1, ], FALSE)) b <- b | (m != cbind(FALSE, m[, -nc])) b <- b | (m != cbind(m[, -1], FALSE)) W$m <- b return(W) } vertices <- function(w) { verifyclass(w, "owin") if(is.empty(w)) return(NULL) switch(w$type, rectangle={ xr <- w$xrange yr <- w$yrange vert <- list(x=xr[c(1,2,2,1)], y=yr[c(1,1,2,2)]) }, polygonal={ vert <- do.call("concatxy",w$bdry) }, mask={ b <- bdry.mask(w) xx <- raster.x(w) yy <- raster.y(w) vert <- list(x=as.vector(xx[b$m]), y=as.vector(yy[b$m])) }) return(vert) } diameter <- function(x) { UseMethod("diameter") } diameter.owin <- function(x) { w <- as.owin(x) if(is.empty(w)) return(NULL) vert <- vertices(w) if(length(vert$x) > 3) { # extract convex hull h <- with(vert, chull(x, y)) vert <- with(vert, list(x=x[h], y=y[h])) } d <- pairdist(vert, squared=TRUE) return(sqrt(max(d))) } incircle <- function(W) { # computes the largest circle 
contained in W verifyclass(W, "owin") if(is.empty(W)) return(NULL) if(is.rectangle(W)) { xr <- W$xrange yr <- W$yrange x0 <- mean(xr) y0 <- mean(yr) radius <- min(diff(xr), diff(yr))/2 return(list(x=x0, y=y0, r=radius)) } # compute distance to boundary D <- distmap(W, invert=TRUE) D <- D[W, drop=FALSE] # find maximum distance v <- D$v ok <- !is.na(v) Dvalues <- as.vector(v[ok]) Dmax <- max(Dvalues) # find location of maximum locn <- which.max(Dvalues) locrow <- as.vector(row(v)[ok])[locn] loccol <- as.vector(col(v)[ok])[locn] x0 <- D$xcol[loccol] y0 <- D$yrow[locrow] if(is.mask(W)) { # radius could be one pixel diameter shorter than Dmax Dpixel <- sqrt(D$xstep^2 + D$ystep^2) radius <- max(0, Dmax - Dpixel) } else radius <- Dmax return(list(x=x0, y=y0, r=radius)) } inpoint <- function(W) { # selects a point that is always inside the window. verifyclass(W, "owin") if(is.empty(W)) return(NULL) if(is.rectangle(W)) return(c(mean(W$xrange), mean(W$yrange))) if(is.polygonal(W)) { xy <- centroid.owin(W) if(inside.owin(xy$x, xy$y, W)) return(xy) } W <- as.mask(W) Mm <- W$m Mrow <- as.vector(row(Mm)[Mm]) Mcol <- as.vector(col(Mm)[Mm]) selectmiddle <- function(x) { x[ceiling(length(x)/2)] } midcol <- selectmiddle(Mcol) midrow <- selectmiddle(Mrow[Mcol==midcol]) x <- W$xcol[midcol] y <- W$yrow[midrow] return(c(x,y)) } simplify.owin <- function(W, dmin) { verifyclass(W, "owin") if(is.empty(W)) return(W) W <- as.polygonal(W) W$bdry <- lapply(W$bdry, simplify.xypolygon, dmin=dmin) return(W) } is.convex <- function(x) { verifyclass(x, "owin") if(is.empty(x)) return(TRUE) switch(x$type, rectangle={return(TRUE)}, polygonal={ b <- x$bdry if(length(b) > 1) return(FALSE) b <- b[[1]] xx <- b$x yy <- b$y ch <- chull(xx,yy) return(length(ch) == length(xx)) }, mask={ v <- vertices(x) v <- as.ppp(v, W=as.rectangle(x)) ch <- convexhull.xy(v) edg <- as.psp(ch) edgedist <- nncross(v, edg, what="dist") pixdiam <- sqrt(x$xstep^2 + x$ystep^2) return(all(edgedist <= pixdiam)) }) 
# (tail of is.convex: fall-through for unrecognised window types)
return(as.logical(NA)) }

convexhull <- function(x) {
  # Compute the convex hull of a window, line segment pattern or point
  # pattern, returned as a polygonal window rebounded to x's bounding box.
  if(inherits(x, "owin"))
    v <- vertices(x)
  else if(inherits(x, "psp"))
    # BUG FIX: was 'v <- endpoints.psp', which assigns the function object
    # itself rather than the segment endpoints, so convexhull() failed for
    # psp arguments.  The function must be applied to x.
    v <- endpoints.psp(x)
  else if(inherits(x, "ppp"))
    v <- x
  else {
    x <- as.owin(x)
    v <- vertices(x)
  }
  b <- as.rectangle(x)
  if(is.empty(x))
    return(emptywindow(b))
  ch <- convexhull.xy(v)
  out <- rebound.owin(ch, b)
  return(out)
}

is.empty <- function(x) { UseMethod("is.empty") }

is.empty.default <- function(x) { length(x) == 0 }

is.empty.owin <- function(x) {
  # A rectangle is never empty; a polygonal window is empty when it has
  # no boundary polygons; a mask is empty when no pixel is TRUE.
  switch(x$type,
         rectangle=return(FALSE),
         polygonal=return(length(x$bdry) == 0),
         mask=return(!any(x$m)))
  return(NA)
}

emptywindow <- function(w) {
  # Create an empty polygonal window with the same bounding box and
  # unit of length as w.
  w <- as.owin(w)
  out <- owin(w$xrange, w$yrange, poly=list(), unitname=unitname(w))
  return(out)
}
spatstat/R/funxy.R0000644000176000001440000000310312237642727013653 0ustar ripleyusers#
# funxy.R
#
# Class of functions of x,y location with a spatial domain
#
# $Revision: 1.2 $ $Date: 2012/10/13 09:35:28 $
#

funxy <- function(f, W=NULL) {
  # Create an object of class 'funxy': a function of (x,y) with spatial
  # domain W.
  # NOTE(review): despite the default W=NULL, stopifnot(is.owin(W)) will
  # presumably reject NULL, so a window must in fact be supplied — confirm.
  stopifnot(is.function(f))
  stopifnot(is.owin(W))
  class(f) <- c("funxy", class(f))
  attr(f, "W") <- W
  return(f)
}

print.funxy <- function(x, ...) {
  cat(paste("function(x,y) of class", sQuote("funxy"), "\n"))
  print(as.owin(x))
}

as.owin.funxy <- function(W, ..., fatal=TRUE) {
  # Extract the spatial domain of a funxy object
  W <- attr(W, "W")
  as.owin(W, ..., fatal=fatal)
}

# Note that 'distfun' (and other classes inheriting from funxy)
# has a method for as.owin that takes precedence over as.owin.funxy
# and this will affect the behaviour of the following plot methods
# because 'distfun' does not have its own plot method.

plot.funxy <- function(x, ...) {
  xname <- short.deparse(substitute(x))
  W <- as.owin(x)
  do.call("do.as.im",
          resolve.defaults(list(x, action="plot"),
                           list(...),
                           list(main=xname, W=W)))
  invisible(NULL)
}

contour.funxy <- function(x, ...) {
  xname <- deparse(substitute(x))
  W <- as.owin(x)
  do.call("do.as.im",
          resolve.defaults(list(x, action="contour"),
                           list(...),
                           list(main=xname, W=W)))
  invisible(NULL)
}

persp.funxy <- function(x, ...)
{ xname <- deparse(substitute(x)) W <- as.rectangle(as.owin(x)) do.call("do.as.im", resolve.defaults(list(x, action="persp"), list(...), list(main=xname, W=W))) invisible(NULL) } spatstat/R/subfits.R0000644000176000001440000003233112240201006014135 0ustar ripleyusers# # # $Revision: 1.28 $ $Date: 2013/11/11 16:12:44 $ # # subfits.new <- function(object, what="models", verbose=FALSE) { stopifnot(inherits(object, "mppm")) if(!(what %in% c("models","interactions"))) stop(paste("Unrecognised option: what=", dQuote(what))) if(what == "interactions") return(subfits.old(object, what, verbose)) # extract stuff announce <- if(verbose) function(x) { cat(x) } else function(x) {} announce("Extracting stuff...") fitter <- object$Fit$fitter FIT <- object$Fit$FIT coef.FIT <- coef(FIT) trend <- object$trend iformula <- object$iformula use.gam <- object$Fit$use.gam info <- object$Info npat <- object$npat Inter <- object$Inter interaction <- Inter$interaction itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist announce("done.\n") # determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") # exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) # implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- impliedcoefficients(object, tag) announce(", ") } announce("done.\n") # Fisher information, if possible if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null") varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } # Extract data frame announce("Extracting data...") datadf <- object$datadf has.design <- info$has.design rownames <- 
object$Info$rownames announce("done.\n") # set up lists for results models <- rep(list(NULL), npat) interactions <- rep(list(NULL), npat) # interactions announce("Determining interactions...") for(i in 1:npat) { if(verbose) progressreport(i, npat) # Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] # Find relevant coefficients coef.avail <- coef.FIT if(nactive == 1) { ic <- implcoef[[tagi]] coef.implied <- ic[i, ,drop=TRUE] names(coef.implied) <- colnames(ic) } # overwrite any existing values of coefficients; add new ones. coef.avail[names(coef.implied)] <- coef.implied # create fitted interaction with these coefficients interactions[[i]] <- fii(interi, coef.avail, Vnamelist[[tagi]]) } announce("Done!\n") names(interactions) <- rownames # if(what=="interactions") return(interactions) # Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname moadf <- object$Fit$moadf fmla <- object$Fit$fmla # deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] # used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") # Construct template for fake ppm object spv <- package_version(versionstring.spatstat()) fake.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2013/11/11 16:12:44 $") fake.call <- call("cannot.update", Q=NULL, trend=object$trend, interaction=NULL, covariates=NULL, correction=object$Info$correction, rbord = object$Info$rbord) fakemodel <- list( method = "mpl", fitter = fitter, coef = coef(object), trend = object$trend, interaction = NULL, fitin = NULL, Q = NULL, maxlogpl = NA, internal = list(glmfit = FIT, glmdata = 
NULL, Vnames = NULL, fmla = fmla, computed = list()), covariates = NULL, correction = object$Info$correction, rbord = object$Info$rbord, version = fake.version, problems = list(), fisher = fisher, varcov = varcov, call = fake.call, callstring = "cannot.update()", fake = TRUE) class(fakemodel) <- "ppm" ## Loop through point patterns announce("Generating models for each row...") for(i in 1:npat) { if(verbose) progressreport(i, npat) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window # assemble relevant covariate images covariates <- if(has.covar) as.list(covariates.hf[i, , drop=FALSE]) else NULL if(has.covar && has.design) ## Convert each data frame covariate value to an image covariates[dfvar] <- lapply(covariates[dfvar], as.im, W=Wi) # Extract relevant interaction finte <- interactions[[i]] inte <- finte$interaction if(is.poisson.interact(inte)) inte <- NULL Vnames <- finte$Vnames if(length(Vnames) == 0) Vnames <- NULL # Construct fake ppm object fakemodel$interaction <- inte fakemodel$fitin <- finte fakemodel$Q <- Yi fakemodel$covariates <- covariates fakemodel$internal$glmdata <- moadf[moadf$id == i, ] fakemodel$internal$Vnames <- Vnames fake.call$Q <- Yi fake.call$covariates <- covariates fakemodel$call <- fake.call fakemodel$callstring <- short.deparse(fake.call) # store in list models[[i]] <- fakemodel } announce("done.\n") names(models) <- rownames class(models) <- c("listof", class(models)) return(models) } subfits <- subfits.old <- function(object, what="models", verbose=FALSE) { stopifnot(inherits(object, "mppm")) if(!(what %in% c("models","interactions"))) stop(paste("Unrecognised option: what=", dQuote(what))) # extract stuff announce <- if(verbose) function(x) { cat(x) } else function(x) {} announce("Extracting stuff...") FIT <- object$Fit$FIT coef.FIT <- coef(FIT) trend <- object$trend iformula <- object$iformula use.gam <- object$Fit$use.gam info <- object$Info npat <- object$npat Inter <- object$Inter interaction <- Inter$interaction 
itags <- Inter$itags Vnamelist <- object$Fit$Vnamelist announce("done.\n") # determine which interaction(s) are active on each row announce("Determining active interactions...") active <- active.interactions(object) announce("done.\n") # exceptions if(any(rowSums(active) > 1)) stop(paste("subfits() is not implemented for models", "in which several interpoint interactions", "are active on the same point pattern")) # implied coefficients for each active interaction announce("Computing implied coefficients...") implcoef <- list() for(tag in itags) { announce(tag) implcoef[[tag]] <- impliedcoefficients(object, tag) announce(", ") } announce("done.\n") # Fisher information, if possible if(what == "models") { announce("Fisher information...") fisher <- vcov(object, what="fisher", err="null") varcov <- try(solve(fisher), silent=TRUE) if(inherits(varcov, "try-error")) varcov <- NULL announce("done.\n") } # Extract data frame announce("Extracting data...") datadf <- object$datadf has.design <- info$has.design rownames <- object$Info$rownames announce("done.\n") # set up list for results results <- rep(list(NULL), npat) if(what == "interactions") { announce("Determining interactions...") for(i in 1:npat) { if(verbose) progressreport(i, npat) # Find relevant interaction acti <- active[i,] nactive <- sum(acti) interi <- if(nactive == 0) Poisson else interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] # Find relevant coefficients coef.avail <- coef.FIT if(nactive == 1) { ic <- implcoef[[tagi]] coef.implied <- ic[i, ,drop=TRUE] names(coef.implied) <- colnames(ic) } # overwrite any existing values of coefficients; add new ones. 
coef.avail[names(coef.implied)] <- coef.implied # create fitted interaction with these coefficients results[[i]] <- fii(interi, coef.avail, Vnamelist[[tagi]]) } announce("Done!\n") names(results) <- rownames return(results) } # Extract data required to reconstruct complete model fits announce("Extracting more data...") data <- object$data Y <- object$Y Yname <- info$Yname # deal with older formats of mppm if(is.null(Yname)) Yname <- info$Xname if(is.null(Y)) Y <- data[ , Yname, drop=TRUE] # used.cov.names <- info$used.cov.names has.covar <- info$has.covar if(has.covar) { covariates.hf <- data[, used.cov.names, drop=FALSE] dfvar <- used.cov.names %in% names(datadf) } announce("done.\n") ## Loop through point patterns announce("Looping through rows...") for(i in 1:npat) { if(verbose) progressreport(i, npat) Yi <- Y[[i]] Wi <- if(is.ppp(Yi)) Yi$window else Yi$data$window # assemble relevant covariate images covariates <- if(has.covar) as.list(covariates.hf[i, , drop=FALSE]) else NULL if(has.covar && has.design) { ## Convert each data frame covariate value to an image imrowi <- lapply(covariates[dfvar], as.im, W=Wi) # Problem: constant covariate leads to singular fit # --------------- Hack: --------------------------- # Construct fake data by resampling from possible values possible <- function(z) { if(is.factor(z)) factor(levels(z), levels=levels(z)) else unique(z) } covar.vals <- lapply(as.list(covariates[dfvar, drop=FALSE]), possible) scramble <- function(vals, W, Y) { W <- as.mask(W) npixels <- prod(W$dim) nvalues <- length(vals) npoints <- Y$n # sample the possible values randomly at the non-data pixels sampled <- sample(vals, npixels, replace=TRUE) Z <- im(sampled, xcol=W$xcol, yrow=W$yrow) # repeat the possible values cyclically at the data points if(npoints >= 1) Z[Y] <- vals[1 + ((1:npoints) %% nvalues)] return(Z) } fake.imrowi <- lapply(covar.vals, scramble, W=Wi, Y=Yi$data) # insert fake data into covariates covariates[dfvar] <- fake.imrowi # 
------------------ end hack ---------------------------- } # Fit ppm to data for case i only # using relevant interaction acti <- active[i,] nactive <- sum(acti) if(nactive == 1){ interi <- interaction[i, acti, drop=TRUE] tagi <- names(interaction)[acti] fit <- ppm(Yi, trend, interi, covariates=covariates, use.gam=use.gam, forcefit=TRUE, vnamebase=tagi, vnameprefix=tagi) } else { fit <- ppm(Yi, trend, Poisson(), covariates=covariates, use.gam=use.gam, forcefit=TRUE) } # now reset the coefficients to those obtained from the full fit coefnames.wanted <- names(fit$coef) coef.avail <- coef.FIT if(nactive == 1) { ic <- implcoef[[tagi]] coef.implied <- ic[i, ,drop=TRUE] names(coef.implied) <- colnames(ic) # overwrite any existing values of coefficients; add new ones. coef.avail[names(coef.implied)] <- coef.implied } if(!all(coefnames.wanted %in% names(coef.avail))) stop("Internal error: some fitted coefficients not accessible") fit$theta <- fit$coef <- coef.avail[coefnames.wanted] # ... make sure these coefficients will be used in getglmfit, etc ... fit$method <- "mppm" # ... and replace fake data by true data if(has.design) { for(nam in names(imrowi)) { fit$covariates[[nam]] <- imrowi[[nam]] fit$internal$glmdata[[nam]] <- data[i, nam, drop=TRUE] } # ... and tell glm fit object that it has full rank fit$internal$glmfit$rank <- FIT$rank } # Fisher information and variance-covariance if known # Extract submatrices for relevant parameters if(!is.null(fisher)) fit$fisher <- fisher[coefnames.wanted, coefnames.wanted, drop=FALSE] if(!is.null(varcov)) fit$varcov <- varcov[coefnames.wanted, coefnames.wanted, drop=FALSE] # store in list results[[i]] <- fit } announce("done.\n") names(results) <- rownames class(results) <- c("listof", class(results)) return(results) } cannot.update <- function(...) 
{ stop("This model cannot be updated") } spatstat/R/clarkevans.R0000755000176000001440000001465712243051473014643 0ustar ripleyusersclarkevans <- function(X, correction=c("none", "Donnelly", "cdf"), clipregion=NULL) { verifyclass(X, "ppp") W <- X$window # validate correction argument gavecorrection <- !missing(correction) correction <- pickoption("correction", correction, c(none="none", Donnelly="Donnelly", donnelly="Donnelly", guard="guard", cdf="cdf"), multi=TRUE) if(("Donnelly" %in% correction) && (W$type != "rectangle")) { if(gavecorrection) warning("Donnelly correction only available for rectangular windows") correction <- correction[correction != "Donnelly"] } # guard correction applied iff `clipregion' is present isguard <- "guard" %in% correction askguard <- any(isguard) gaveguard <- !is.null(clipregion) if(gaveguard) clipregion <- as.owin(clipregion) if(askguard && !gaveguard) { warning("guard correction not performed; clipregion not specified") correction <- correction[!isguard] } else if(gaveguard && !askguard) correction <- c(correction, "guard") return(clarkevansCalc(X, correction, clipregion)) } clarkevans.test <- function(X, ..., correction="none", clipregion=NULL, alternative=c("two.sided", "less", "greater"), nsim=1000 ) { Xname <- short.deparse(substitute(X)) verifyclass(X, "ppp") W <- X$window # validate SINGLE correction correction <- pickoption("correction", correction, c(none="none", Donnelly="Donnelly", donnelly="Donnelly", guard="guard", cdf="cdf")) switch(correction, none={ corrblurb <- "No edge correction" }, Donnelly={ if(W$type != "rectangle") stop("Donnelly correction only available for rectangular windows") corrblurb <- "Donnelly correction" }, guard={ if(is.null(clipregion)) stop("clipregion not specified") clipregion <- as.owin(clipregion) corrblurb <- "Guard correction" }, cdf={ corrblurb <- "CDF correction" }) # alternative hypothesis if(missing(alternative) || is.null(alternative)) alternative <- "two.sided" alternative <- 
pickoption("alternative", alternative, c(two.sided="two.sided", less="less", clustered="less", greater="greater", regular="greater")) altblurb <- switch(alternative, two.sided="two-sided", less="mean nn distance less than expected under CSR (clustered)", greater="mean nn distance greater than expected under CSR (regular)") # compute observed value statistic <- clarkevansCalc(X, correction=correction, clipregion=clipregion, working=TRUE) working <- attr(statistic, "working") # if(correction == "none") { # standard Normal p-value SE <- with(working, sqrt(((4-pi)*area)/(4 * pi))/npts) Z <- with(working, (Dobs - Dpois)/SE) p.value <- switch(alternative, less=pnorm(Z), greater=1 - pnorm(Z), two.sided= 2*(1-pnorm(abs(Z)))) pvblurb <- "Z-test" } else { # Monte Carlo p-value sims <- numeric(nsim) intensity <- working$intensity for(i in 1:nsim) { Xsim <- rpoispp(intensity, win=W) sims[i] <- clarkevansCalc(Xsim, correction=correction, clipregion=clipregion) } prob <- mean(sims <= statistic) p.value <- switch(alternative, less=prob, greater=1 - prob, two.sided= 2*min(prob, 1-prob)) pvblurb <- paste("Monte Carlo test based on", nsim, "simulations of CSR") } statistic <- as.numeric(statistic) names(statistic) <- "R" out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=c("Clark-Evans test", corrblurb, pvblurb), data.name=Xname) class(out) <- "htest" return(out) } clarkevansCalc <- function(X, correction="none", clipregion=NULL, working=FALSE) { # calculations for Clark-Evans index or test W <- X$window area <- area.owin(W) npts <- npoints(X) intensity <- npts/area # R undefined for empty point pattern if(npts == 0) return(NA) # Dobs = observed mean nearest neighbour distance nndistX <- nndist(X) Dobs <- mean(nndistX) # Dpois = Expected mean nearest neighbour distance for Poisson process Dpois <- 1/(2*sqrt(intensity)) statistic <- NULL if(working) work <- list(area=area, npts=npts, intensity=intensity, Dobs=Dobs, Dpois=Dpois) # Naive uncorrected value 
if("none" %in% correction) { Rnaive <- Dobs/Dpois statistic <- c(statistic, naive=Rnaive) } # Donnelly edge correction if("Donnelly" %in% correction) { # Dedge = Edge corrected mean nearest neighbour distance, Donnelly 1978 if(W$type == "rectangle") { perim <- perimeter(W) Dkevin <- Dpois + (0.0514+0.0412/sqrt(npts))*perim/npts Rkevin <- Dobs/Dkevin if(working) work <- append(work, list(perim=perim, Dkevin=Dkevin)) } else Rkevin <- NA statistic <- c(statistic, Donnelly=Rkevin) } # guard area method if("guard" %in% correction && !is.null(clipregion)) { # use nn distances from points inside `clipregion' ok <- inside.owin(X, , clipregion) Dguard <- mean(nndistX[ok]) Rguard <- Dguard/Dpois if(working) work <- append(work, list(Dguard=Dguard)) statistic <- c(statistic, guard=Rguard) } if("cdf" %in% correction) { # compute mean of estimated nearest-neighbour distance distribution G G <- Gest(X) numer <- stieltjes(function(x){x}, G)$km denom <- stieltjes(function(x){rep.int(1, length(x))}, G)$km Dcdf <- numer/denom Rcdf <- Dcdf/Dpois if(working) work <- append(work, list(Dcdf=Dcdf)) statistic <- c(statistic, cdf=Rcdf) } if(working) attr(statistic, "working") <- work return(statistic) } spatstat/R/hypersub.R0000755000176000001440000000651012237642727014353 0ustar ripleyusers# # hypersub.R # # # subset operations for hyperframes # # $Revision: 1.8 $ $Date: 2013/04/25 06:37:43 $ # # "[.hyperframe" <- function(x, i, j, drop=FALSE, ...) 
{ x <- unclass(x) if(!missing(i)) { y <- x y$df <- x$df[i, , drop=FALSE] y$ncases <- nrow(y$df) y$hypercolumns <- lapply(x$hypercolumns, function(z,k) { z[k] }, k=i) x <- y } if(!missing(j)) { y <- x patsy <- seq_len(y$nvars) names(patsy) <- y$vname jj <- patsy[j] names(jj) <- NULL y$nvars <- length(jj) y$vname <- vname <- x$vname[jj] y$vtype <- vtype <- x$vtype[jj] y$vclass <- x$vclass[jj] if(ncol(x$df) != 0) y$df <- x$df[ , vname[vtype == "dfcolumn"], drop=FALSE] y$hyperatoms <- x$hyperatoms[ vname[ vtype == "hyperatom" ]] y$hypercolumns <- x$hypercolumns[ vname [ vtype == "hypercolumn" ] ] x <- y } if(drop && x$nvars == 1) { switch(x$vtype, dfcolumn = { return(x$df[, , drop=TRUE]) }, hypercolumn = { hc <- x$hypercolumns[[1]] if(x$ncases > 1) { hc <- as.listof(hc) names(hc) <- row.names(x$df) return(hc) } else { ha <- hc[[1]] return(ha) } }, hyperatom = { if(x$ncases == 1) { # extract the hyperatom itself ha <- x$hyperatoms[[1]] return(ha) } else { # replicate it to make a hypercolumn ha <- x$hyperatoms[1] names(ha) <- NULL hc <- rep.int(ha, x$ncases) hc <- as.listof(hc) names(hc) <- row.names(x$df) return(hc) } }) } class(x) <- c("hyperframe", class(x)) return(x) } "$.hyperframe" <- function(x,name) { m <- match(name, unclass(x)$vname) if(is.na(m)) return(NULL) return(x[, name, drop=TRUE]) } "$<-.hyperframe" <- function(x, i, value) { rown <- row.names(x) x <- as.list(x) dfcol <- is.atomic(value) && (is.vector(value) || is.factor(value)) if(!dfcol && !is.null(value)) value <- as.list(value) x[[i]] <- value y <- do.call("hyperframe", append(x, list(row.names=rown))) return(y) } "[<-.hyperframe" <- function (x, i, j, value) { sumry <- summary(x) colnam <- sumry$col.names dimx <- sumry$dim die <- function(situation) { stop(paste("Sorry,", dQuote("[<-.hyperframe"), "is not yet implemented for", situation), call.=FALSE) } if(!missing(i)) die("row indices") if(missing(j)) { # x[ ] <- value die("null indices") } if(!missing(j)) { # x[, j] <- value rown <- row.names(x) 
xlist <- as.list(x) singlecolumn <- ( (is.integer(j) && length(j) == 1 && j > 0) || (is.character(j) && length(j) == 1) || (is.logical(j) && sum(j) == 1)) if(singlecolumn) { # expecting single hypercolumn if(is.logical(j)) j <- names(x)[j] y <- get("$<-.hyperframe")(x, j, value) } else { # expecting hyperframe xlist[j] <- as.list(as.hyperframe(value)) # the above construction accepts all indices including extra entries y <- do.call("hyperframe", append(xlist, list(row.names=rown))) } return(y) } return(NULL) } spatstat/R/compileK.R0000755000176000001440000000700712237642727014257 0ustar ripleyusers# compileK # # Function to take a matrix of pairwise distances # and compile a 'K' function in the format required by spatstat. # # $Revision: 1.5 $ $Date: 2013/04/25 06:37:43 $ # ------------------------------------------------------------------- compileK <- function(D, r, weights=NULL, denom=1, check=TRUE, ratio=FALSE) { # process r values breaks <- breakpts.from.r(r) rmax <- breaks$max r <- breaks$r # check that D is a symmetric matrix with nonnegative entries if(check) stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0)) # ignore the diagonal; throw away any D values greater than rmax ok <- (D <= rmax & D > 0) Dvalues <- D[ok] # # weights? if(!is.null(weights)) { stopifnot(is.matrix(weights) && all(dim(weights)==dim(D))) wvalues <- weights[ok] } else wvalues <- NULL # count the number of D values in each interval (r[k], r[k+1]] counts <- whist(Dvalues, breaks=breaks$val, weights=wvalues) # cumulative counts: number of D values in [0, r[k]) Kcount <- cumsum(counts) # divide by appropriate denominator Kratio <- Kcount/denom # wrap it up as an 'fv' object for use in spatstat df <- data.frame(r=r, est=Kratio) if(!ratio) { K <- fv(df, "r", substitute(compileK(r), NULL), "est", . 
~ r , c(0,rmax), c("r", "%s(r)"), c("distance argument r", "estimated %s"), fname="compileK") } else { num <- data.frame(r=r, est=Kcount) den <- data.frame(r=r, est=denom) K <- ratfv(num, den, "r", substitute(compileK(r), NULL), "est", . ~ r , c(0,rmax), c("r", "%s(r)"), c("distance argument r", "estimated %s"), fname="compileK") } return(K) } compilepcf <- function(D, r, weights=NULL, denom=1, check=TRUE, endcorrect=TRUE, ...) { # process r values breaks <- breakpts.from.r(r) if(!breaks$even) stop("compilepcf: r values must be evenly spaced", call.=FALSE) r <- breaks$r rmax <- breaks$max # check that D is a symmetric matrix with nonnegative entries if(check) stopifnot(is.matrix(D) && isSymmetric(D) && all(D >= 0)) # ignore the diagonal; throw away any D values greater than rmax ok <- (D <= rmax & D > 0) Dvalues <- D[ok] # # weights? if(!is.null(weights)) { stopifnot(is.matrix(weights) && all(dim(weights)==dim(D))) wvalues <- weights[ok] totwt <- sum(wvalues) normwvalues <- wvalues/totwt } else { nv <- length(Dvalues) normwvalues <- rep.int(1/nv, nv) totwt <- nv } # form kernel estimate rmin <- min(r) rmax <- max(r) nr <- length(r) den <- density(Dvalues, weights=normwvalues, from=rmin, to=rmax, n=nr, ...) gval <- den$y * totwt # normalise gval <- gval/denom # edge effect correction at r = 0 if(endcorrect) { one <- do.call("density", resolve.defaults( list(seq(rmin,rmax,length=512)), list(bw=den$bw, adjust=1), list(from=rmin, to=rmax, n=nr), list(...))) onefun <- approxfun(one$x, one$y, rule=2) gval <- gval /((rmax-rmin) * onefun(den$x)) } # wrap it up as an 'fv' object for use in spatstat df <- data.frame(r=r, est=gval) g <- fv(df, "r", substitute(compilepcf(r), NULL), "est", . 
~ r , c(0,rmax), c("r", "%s(r)"), c("distance argument r", "estimated %s"), fname="compilepcf") attr(g, "bw") <- den$bw return(g) } spatstat/R/vcov.kppm.R0000755000176000001440000000342312237642727014435 0ustar ripleyusers# # vcov.kppm # # vcov method for kppm objects # # Original code: Abdollah Jalilian # # $Revision: 1.1 $ $Date: 2012/02/04 01:42:35 $ # vcov.kppm <- function(object, ..., what=c("vcov", "corr", "fisher", "internals")) { what <- match.arg(what) verifyclass(object, "kppm") # extract composite likelihood results po <- object$po # ensure it was fitted with quadscheme if(is.null(getglmfit(po))) { warning("Re-fitting model with forcefit=TRUE") po <- update(po, forcefit=TRUE) } # extract quadrature scheme information Q <- quad.ppm(po) U <- union.quad(Q) nU <- npoints(U) wt <- w.quad(Q) # compute fitted intensity values lambda <- fitted(po, type="lambda") # extract covariate values Z <- model.matrix(po) # compute pair correlation function minus 1 g <- pcfmodel(object) r <- pairdist(U) gr <- g(r) - 1 G <- matrix(gr, nU, nU) # evaluate integral ff <- Z * lambda * wt J <- t(Z) %*% ff E <- t(ff) %*% G %*% ff # asymptotic covariance matrix in the Poisson case J.inv <- try(solve(J)) # could be singular if(inherits(J.inv, "try-error")) { if(what == "internals") { return(list(ff=ff, J=J, E=E, J.inv=NULL)) } else { return(NULL) } } # asymptotic covariance matrix in the clustered case vc <- J.inv + J.inv %*% E %*% J.inv # switch(what, vcov={ return(vc) }, corr={ sd <- sqrt(diag(vc)) co <- vc/outer(sd, sd, "*") return(co) }, fisher={ fish <- try(solve(vc)) if(inherits(fish, "try-error")) fish <- NULL return(fish) }, internals={ return(list(ff=ff, J=J, E=E, J.inv=J.inv, vc=vc)) }) stop(paste("Unrecognised option: what=", what)) } spatstat/R/residuals.mppm.R0000644000176000001440000000566312240173061015442 0ustar ripleyusers# # residuals.mppm.R # # computes residuals for fitted multiple point process model # # # $Revision: 1.4 $ $Date: 2013/11/11 14:47:19 $ # 
residuals.mppm <- function(object, type="raw", ..., fittedvalues = fitted.mppm(object)) { verifyclass(object, "mppm") userfitted <- !missing(fittedvalues) type <- pickoption("type", type, c(inverse="inverse", raw="raw", pearson="pearson", Pearson="pearson")) typenames <- c(inverse="inverse-lambda residuals", raw="raw residuals", pearson="Pearson residuals") typename <- typenames[[type]] # Extract quadrature points and weights Q <- quad.mppm(object) U <- lapply(Q, union.quad) # quadrature point patterns Z <- unlist(lapply(Q, is.data)) # indicator data/dummy W <- unlist(lapply(Q, w.quad)) # quadrature weights # total number of quadrature points nquadrature <- length(W) # number of quadrature points in each pattern nU <- unlist(lapply(Q, n.quad)) # number of rows of hyperframe npat <- object$npat # attribution of each quadrature point id <- factor(rep(1:npat, nU), levels=1:npat) # Compute fitted conditional intensity at quadrature points if(!is.list(fittedvalues) || length(fittedvalues) != npat) stop(paste(sQuote("fittedvalues"), "should be a list of length", npat, "containing vectors of fitted values")) lambda <- unlist(fittedvalues) # consistency check if(length(lambda) != nquadrature) stop(paste(if(!userfitted) "internal error:" else NULL, "number of fitted values", paren(length(lambda)), "does not match number of quadrature points", paren(nquadrature))) # indicator is 1 if lambda > 0 # (adjusted for numerical behaviour of predict.glm) indicator <- (lambda > .Machine$double.eps) # Evaluate residual measure components discrete <- ifelse(Z, switch(type, raw = 1, inverse = 1/lambda, pearson = 1/sqrt(lambda) ), 0) density <- switch(type, raw = -lambda, inverse = -indicator, pearson = -indicator * sqrt(lambda)) atoms <- as.logical(Z) # All components resdf <- data.frame(discrete=discrete, density=density, atoms=atoms) # Split residual data according to point pattern affiliation splitres <- split(resdf, id) # Associate with quadrature scheme reshf <- 
hyperframe(R=splitres, Q=Q) # Convert to signed measures answer <- with(reshf, msr(Q, R$discrete[R$atoms], R$density)) # tag answer <- lapply(answer, "attr<-", which="type", value=type) answer <- lapply(answer, "attr<-", which="typename", value=typename) return(as.listof(answer)) } spatstat/R/ewcdf.R0000755000176000001440000000162312237642727013602 0ustar ripleyusersewcdf <- function(x, weights=rep(1/length(x), length(x))) { stopifnot(length(x) == length(weights)) # remove NA's together nbg <- is.na(x) x <- x[!nbg] weights <- weights[!nbg] n <- length(x) if (n < 1) stop("'x' must have 1 or more non-missing values") stopifnot(all(weights >= 0)) # sort in increasing order of x value ox <- fave.order(x) x <- x[ox] w <- weights[ox] # find jump locations and match vals <- sort(unique(x)) xmatch <- factor(match(x, vals), levels=seq_along(vals)) # sum weight in each interval wmatch <- tapply(w, xmatch, sum) wmatch[is.na(wmatch)] <- 0 cumwt <- cumsum(wmatch) # make function rval <- approxfun(vals, cumwt, method = "constant", yleft = 0, yright = sum(wmatch), f = 0, ties = "ordered") class(rval) <- c("ecdf", "stepfun", class(rval)) attr(rval, "call") <- sys.call() return(rval) } spatstat/R/pairwise.R0000755000176000001440000000403712240721046014321 0ustar ripleyusers# # # pairwise.S # # $Revision: 1.8 $ $Date: 2009/11/02 19:07:46 $ # # Pairwise() create a user-defined pairwise interaction process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Pairwise <- function(pot, name = "user-defined pairwise interaction process", par = NULL, parnames=NULL, printfun) { fop <- names(formals(pot)) if(!identical(all.equal(fop, c("d", "par")), TRUE) && !identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) stop(paste("Formal arguments of pair potential function", sQuote("pot"), "must be either (d, par) or (d, tx, tu, par)")) if(!is.null(parnames)) { stopifnot(is.character(parnames)) if(is.null(par) || length(par) != 
length(parnames)) stop("par does not match parnames") } if(missing(printfun)) printfun <- function(self) { cat("Potential function:\n") print(self$pot) if(!is.null(parnames <- self$parnames)) { for(i in 1:length(parnames)) { cat(paste(parnames[i], ":\t")) pari <- self$par[[i]] if(is.numeric(pari) && length(pari) == 1) cat(pari, "\n") else print(pari) } } } out <- list( name = name, creator = "Pairwise", family = pairwise.family, pot = pot, par = par, parnames = parnames, init = NULL, update = function(self, ...){ do.call(Pairwise, resolve.defaults(list(...), list(pot=self$pot, name=self$name, par=self$par, parnames=self$parnames, printfun=self$print))) } , print = printfun, version = versionstring.spatstat() ) class(out) <- "interact" return(out) } spatstat/R/kmrs.R0000755000176000001440000001721612237642727013473 0ustar ripleyusers# # kmrs.S # # S code for Kaplan-Meier, Reduced Sample and Hanisch # estimates of a distribution function # from _histograms_ of censored data. # # kaplan.meier() # reduced.sample() # km.rs() # # $Revision: 3.26 $ $Date: 2013/06/27 08:59:16 $ # # The functions in this file produce vectors `km' and `rs' # where km[k] and rs[k] are estimates of F(breaks[k+1]), # i.e. an estimate of the c.d.f. at the RIGHT endpoint of the interval. 
# "kaplan.meier" <- function(obs, nco, breaks, upperobs=0) { # obs: histogram of all observations : min(T_i,C_i) # nco: histogram of noncensored observations : T_i such that T_i <= C_i # breaks: breakpoints (vector or 'breakpts' object, see breaks.S) # upperobs: number of observations beyond rightmost breakpoint # breaks <- as.breakpts(breaks) n <- length(obs) if(n != length(nco)) stop("lengths of histograms do not match") check.hist.lengths(nco, breaks) # # # reverse cumulative histogram of observations d <- revcumsum(obs) + upperobs # # product integrand s <- ifelseXB(d > 0, 1 - nco/d, 1) # km <- 1 - cumprod(s) # km has length n; km[i] is an estimate of F(r) for r=breaks[i+1] # widths <- diff(breaks$val) lambda <- numeric(n) pos <- (s > 0) lambda[pos] <- -log(s[pos])/widths[pos] # lambda has length n; lambda[i] is an estimate of # the average of \lambda(r) over the interval (breaks[i],breaks[i+1]). # return(list(km=km, lambda=lambda)) } "reduced.sample" <- function(nco, cen, ncc, show=FALSE, uppercen=0) # nco: histogram of noncensored observations: T_i such that T_i <= C_i # cen: histogram of all censoring times: C_i # ncc: histogram of censoring times for noncensored obs: # C_i such that T_i <= C_i # # Then nco[k] = #{i: T_i <= C_i, T_i \in I_k} # cen[k] = #{i: C_i \in I_k} # ncc[k] = #{i: T_i <= C_i, C_i \in I_k}. # # The intervals I_k must span an interval [0,R] beginning at 0. # If this interval did not include all censoring times, # then `uppercen' must be the number of censoring times # that were not counted in 'cen'. { n <- length(nco) if(n != length(cen) || n != length(ncc)) stop("histogram lengths do not match") # # denominator: reverse cumulative histogram of censoring times # denom(r) = #{i : C_i >= r} # We compute # cc[k] = #{i: C_i > breaks[k]} # except that > becomes >= for k=0. 
# cc <- revcumsum(cen) + uppercen # # # numerator # #{i: T_i <= r <= C_i } # = #{i: T_i <= r, T_i <= C_i} - #{i: C_i < r, T_i <= C_i} # We compute # u[k] = #{i: T_i <= C_i, T_i <= breaks[k+1]} # - #{i: T_i <= C_i, C_i <= breaks[k]} # = #{i: T_i <= C_i, C_i > breaks[k], T_i <= breaks[k+1]} # this ensures that numerator and denominator are # comparable, u[k] <= cc[k] always. # u <- cumsum(nco) - c(0,cumsum(ncc)[1:(n-1)]) rs <- u/cc # # Hence rs[k] = u[k]/cc[k] is an estimator of F(r) # for r = breaks[k+1], i.e. for the right hand end of the interval. # if(!show) return(rs) else return(list(rs=rs, numerator=u, denominator=cc)) } "km.rs" <- function(o, cc, d, breaks) { # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # breaks <- as.breakpts(breaks) bval <- breaks$val # compile histograms (breakpoints may not span data) obs <- whist( o, breaks=bval) nco <- whist( o[d], breaks=bval) cen <- whist( cc, breaks=bval) ncc <- whist( cc[d], breaks=bval) # number of observations exceeding largest breakpoint upperobs <- attr(obs, "high") uppercen <- attr(cen, "high") # go km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # return(list(rs=rs, km=km$km, hazard=km$lambda, r=breaks$r, breaks=bval)) } "km.rs.opt" <- function(o, cc, d, breaks, KM=TRUE, RS=TRUE) { # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # breaks <- as.breakpts(breaks) bval <- breaks$val out <- list(r=breaks$r, breaks=bval) if(KM || RS) nco <- whist( o[d], breaks=bval) if(KM) { obs <- whist( o, breaks=bval) upperobs <- attr(obs, "high") km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) out <- append(list(km=km$km, hazard=km$lambda), out) } if(RS) { cen <- whist( cc, breaks=bval) ncc <- whist( cc[d], breaks=bval) 
uppercen <- attr(cen, "high") rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) out <- append(list(rs=rs), out) } return(out) } censtimeCDFest <- function(o, cc, d, breaks, ..., KM=TRUE, RS=TRUE, HAN=TRUE, RAW=TRUE, han.denom=NULL, tt=NULL, pmax=0.9) { # Histogram-based estimation of cumulative distribution function # of lifetimes subject to censoring. # o: censored lifetimes min(T_i,C_i) # cc: censoring times C_i # d: censoring indicators 1(T_i <= C_i) # breaks: histogram breakpoints (vector or 'breakpts' object) # han.denom: denominator (eroded area) for each value of r # tt: uncensored lifetimes T_i, if known breaks <- as.breakpts(breaks) bval <- breaks$val rval <- breaks$r rmax <- breaks$max # Kaplan-Meier and/or Reduced Sample out <- km.rs.opt(o, cc, d, breaks, KM=KM, RS=RS) # convert to data frame out$breaks <- NULL df <- as.data.frame(out) # Raw ecdf of observed lifetimes if available if(RAW && !is.null(tt)) { h <- whist(tt[tt <= rmax], breaks=bval) df <- cbind(df, data.frame(raw=cumsum(h)/length(tt))) } # Hanisch if(HAN) { if(is.null(han.denom)) stop("Internal error: missing denominator for Hanisch estimator") if(length(han.denom) != length(rval)) stop(paste("Internal error:", "length(han.denom) =", length(han.denom), "!=", length(rval), "= length(rvals)")) # uncensored distances x <- o[d] # calculate Hanisch estimator h <- whist(x[x <= rmax], breaks=bval) H <- cumsum(h/han.denom) df <- cbind(df, data.frame(han=H/max(H[is.finite(H)]))) } # determine appropriate plotting range bestest <- if(KM) "km" else if(HAN) "han" else if(RS) "rs" else "raw" alim <- range(df$r[df[[bestest]] <= pmax]) # convert to fv object nama <- c("r", "km", "hazard", "han", "rs", "raw") avail <- c(TRUE, KM, KM, HAN, RS, RAW) iscdf <- c(FALSE, TRUE, FALSE, TRUE, TRUE, TRUE) labl <- c("r", "hat(%s)[km](r)", "lambda(r)", "hat(%s)[han](r)", "hat(%s)[bord](r)", "hat(%s)[raw](r)")[avail] desc <- c("distance argument r", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard 
function lambda(r)", "Hanisch estimate of %s", "border corrected estimate of %s", "uncorrected estimate of %s")[avail] df <- df[, nama[avail]] Z <- fv(df, "r", substitute(CDF(r), NULL), bestest, . ~ r, alim, labl, desc, fname="CDF") fvnames(Z, ".") <- nama[iscdf & avail] return(Z) } # simple interface for students and code development compileCDF <- function(D, B, r, ..., han.denom=NULL, check=TRUE) { han <- !is.null(han.denom) breaks <- breakpts.from.r(r) if(check) { stopifnot(length(D) == length(B) && all(D >= 0) && all(B >= 0)) if(han) stopifnot(length(han.denom) == length(r)) } D <- as.vector(D) B <- as.vector(B) # observed (censored) lifetimes o <- pmin.int(D, B) # censoring indicators d <- (D <= B) # go result <- censtimeCDFest(o, B, d, breaks, HAN=han, han.denom=han.denom, RAW=TRUE, tt=D) result <- rebadge.fv(result, new.fname="compileCDF") } spatstat/R/xypolygon.R0000755000176000001440000004757712237642727014604 0ustar ripleyusers# # xypolygon.S # # $Revision: 1.60 $ $Date: 2013/09/21 08:24:55 $ # # low-level functions defined for polygons in list(x,y) format # verify.xypolygon <- function(p, fatal=TRUE) { whinge <- NULL if(!is.list(p) || !all(c("x","y") %in% names(p))) whinge <- "polygon must be a list with components x and y" else if(is.null(p$x) || is.null(p$y) || !is.numeric(p$x) || !is.numeric(p$y)) whinge <- "components x and y must be numeric vectors" else if(any(is.na(p$x)) || any(is.na(p$y))) whinge <- "x and y coordinates must not contain NA values" else if(length(p$x) != length(p$y)) whinge <- "lengths of x and y vectors unequal" else if(length(p$x) < 3) whinge <- "need at least 3 vertices for a polygon" ok <- is.null(whinge) if(!ok && fatal) stop(whinge) return(ok) } inside.xypolygon <- function(pts, polly, test01=TRUE, method="C") { # pts: list(x,y) points to be tested # polly: list(x,y) vertices of a single polygon (n joins to 1) # test01: logical - if TRUE, test whether all values in output are 0 or 1 pts <- xy.coords(pts, NULL) 
verify.xypolygon(polly) x <- pts$x y <- pts$y xp <- polly$x yp <- polly$y full.npts <- npts <- length(x) nedges <- length(xp) # sic # Check for points (x,y) that coincide with vertices (xp, yp) # Handle them separately DUP <- spatstat.options("dupC") z <- .C("Cmatchxy", na=as.integer(npts), xa=as.double(x), ya=as.double(y), nb=as.integer(nedges), xb=as.double(xp), yb=as.double(yp), match=as.integer(integer(npts)), DUP=DUP) # PACKAGE="spatstat") is.vertex <- (z$match != 0) retain <- !is.vertex # Remove vertices from subsequent consideration; replace them later if(vertices.present <- !all(retain)) { x <- x[retain] y <- y[retain] npts <- sum(retain) } #------------- MAIN ALGORITHM ------------------------------- score <- numeric(npts) on.boundary <- rep.int(FALSE, npts) if(anyretain<- any(retain)) { switch(method, C={ #------------------ call C routine ------------------ temp <- .C("inxyp", x=as.double(x), y=as.double(y), xp=as.double(xp), yp=as.double(yp), npts=as.integer(npts), nedges=as.integer(nedges), score=as.integer(score), onbndry=as.integer(on.boundary)) # PACKAGE="spatstat") score <- temp$score/2 on.boundary <- as.logical(temp$onbndry) }, Fortran={ #------------------ call Fortran routine ------------------ temp <- .Fortran("inxyp", x=as.double(x), y=as.double(y), xp=as.double(xp), yp=as.double(yp), npts=as.integer(npts), nedges=as.integer(nedges), score=as.double(score), onbndry=as.logical(on.boundary)) # PACKAGE="spatstat") score <- temp$score on.boundary <- temp$onbndry }, interpreted={ #----------------- original interpreted code -------------- for(i in 1:nedges) { x0 <- xp[i] y0 <- yp[i] x1 <- if(i == nedges) xp[1] else xp[i+1] y1 <- if(i == nedges) yp[1] else yp[i+1] dx <- x1 - x0 dy <- y1 - y0 if(dx < 0) { # upper edge xcriterion <- (x - x0) * (x - x1) consider <- (xcriterion <= 0) if(any(consider)) { ycriterion <- y[consider] * dx - x[consider] * dy + x0 * dy - y0 * dx # closed inequality contrib <- (ycriterion >= 0) * ifelseAB(xcriterion[consider] 
== 0, 1/2, 1) # positive edge sign score[consider] <- score[consider] + contrib # detect whether any point lies on this segment on.boundary[consider] <- on.boundary[consider] | (ycriterion == 0) } } else if(dx > 0) { # lower edge xcriterion <- (x - x0) * (x - x1) consider <- (xcriterion <= 0) if(any(consider)) { ycriterion <- y[consider] * dx - x[consider] * dy + x0 * dy - y0 * dx # open inequality contrib <- (ycriterion < 0) * ifelseAB(xcriterion[consider] == 0, 1/2, 1) # negative edge sign score[consider] <- score[consider] - contrib # detect whether any point lies on this segment on.boundary[consider] <- on.boundary[consider] | (ycriterion == 0) } } else { # vertical edge consider <- (x == x0) if(any(consider)) { # zero score # detect whether any point lies on this segment yconsider <- y[consider] ycriterion <- (yconsider - y0) * (yconsider - y1) on.boundary[consider] <- on.boundary[consider] | (ycriterion <= 0) } } } }, stop(paste("Unrecognised choice for", sQuote("method"))) ) } #------------------- END SWITCH ------------------------------ # replace any polygon vertices that were temporarily removed if(vertices.present) { full.score <- numeric(full.npts) full.on.boundary <- rep.int(FALSE, full.npts) if(anyretain) { full.score[retain] <- score full.on.boundary[retain] <- on.boundary } full.score[is.vertex] <- 1 full.on.boundary[is.vertex] <- TRUE score <- full.score on.boundary <- full.on.boundary npts <- full.npts } #------------------------------------------------- # any point recognised as lying on the boundary gets score 1. 
score[on.boundary] <- 1 if(test01) { # check sanity if(!all((score == 0) | (score == 1))) warning("internal error: some scores are not equal to 0 or 1") } attr(score, "on.boundary") <- on.boundary return(score) } owinpoly2mask <- function(w, rasta, check=TRUE) { if(check) { verifyclass(w, "owin") stopifnot(w$type == "polygonal") verifyclass(rasta, "owin") stopifnot(rasta$type == "mask") } bdry <- w$bdry x0 <- rasta$xcol[1] y0 <- rasta$yrow[1] xstep <- rasta$xstep ystep <- rasta$ystep dimyx <- rasta$dim nx <- dimyx[2] ny <- dimyx[1] epsilon <- with(.Machine, double.base^floor(double.ulp.digits/2)) score <- numeric(nx*ny) for(i in seq_along(bdry)) { p <- bdry[[i]] xp <- p$x yp <- p$y np <- length(p$x) # repeat last vertex xp <- c(xp, xp[1]) yp <- c(yp, yp[1]) np <- np + 1 # rescale coordinates so that pixels are at integer locations xp <- (xp - x0)/xstep yp <- (yp - y0)/ystep # avoid exact integer locations for vertices whole <- (ceiling(xp) == floor(xp)) xp[whole] <- xp[whole] + epsilon whole <- (ceiling(yp) == floor(yp)) yp[whole] <- yp[whole] + epsilon # call C z <- .C("poly2imI", xp=as.double(xp), yp=as.double(yp), np=as.integer(np), nx=as.integer(nx), ny=as.integer(ny), out=as.integer(integer(nx * ny))) # PACKAGE="spatstat") if(i == 1) score <- z$out else score <- score + z$out } status <- (score != 0) out <- owin(rasta$xrange, rasta$yrange, mask=matrix(status, ny, nx)) return(out) } is.hole.xypolygon <- function(polly) { h <- polly$hole if(is.null(h)) h <- (area.xypolygon(polly) < 0) return(h) } area.xypolygon <- function(polly) { # # polly: list(x,y) vertices of a single polygon (n joins to 1) # # area could be pre-calculated if(!is.null(pa <- polly$area) && is.numeric(pa) && length(pa)==1) return(pa) # else calculate verify.xypolygon(polly) xp <- polly$x yp <- polly$y nedges <- length(xp) # sic # place x axis below polygon yp <- yp - min(yp) # join vertex n to vertex 1 nxt <- c(2:nedges, 1) # x step, WITH sign dx <- xp[nxt] - xp # average height ym <- (yp + 
yp[nxt])/2 -sum(dx * ym) } bdrylength.xypolygon <- function(polly) { verify.xypolygon(polly) xp <- polly$x yp <- polly$y nedges <- length(xp) nxt <- c(2:nedges, 1) dx <- xp[nxt] - xp dy <- yp[nxt] - yp sum(sqrt(dx^2 + dy^2)) } reverse.xypolygon <- function(p, adjust=FALSE) { # reverse the order of vertices # (=> change sign of polygon) verify.xypolygon(p) p$x <- rev(p$x) p$y <- rev(p$y) if(adjust) { if(!is.null(p$hole)) p$hole <- !p$hole if(!is.null(p$area)) p$area <- -p$area } return(p) } overlap.xypolygon <- function(P, Q) { # compute area of overlap of two simple closed polygons verify.xypolygon(P) verify.xypolygon(Q) xp <- P$x yp <- P$y np <- length(xp) nextp <- c(2:np, 1) xq <- Q$x yq <- Q$y nq <- length(xq) nextq <- c(2:nq, 1) # adjust y coordinates so all are nonnegative ylow <- min(c(yp,yq)) yp <- yp - ylow yq <- yq - ylow area <- 0 for(i in 1:np) { ii <- c(i, nextp[i]) xpii <- xp[ii] ypii <- yp[ii] for(j in 1:nq) { jj <- c(j, nextq[j]) area <- area + overlap.trapezium(xpii, ypii, xq[jj], yq[jj]) } } return(area) } overlap.trapezium <- function(xa, ya, xb, yb, verb=FALSE) { # compute area of overlap of two trapezia # which have same baseline y = 0 # # first trapezium has vertices # (xa[1], 0), (xa[1], ya[1]), (xa[2], ya[2]), (xa[2], 0). 
# Similarly for second trapezium # Test for vertical edges dxa <- diff(xa) dxb <- diff(xb) if(dxa == 0 || dxb == 0) return(0) # Order x coordinates, x0 < x1 if(dxa > 0) { signa <- 1 lefta <- 1 righta <- 2 if(verb) cat("A is positive\n") } else { signa <- -1 lefta <- 2 righta <- 1 if(verb) cat("A is negative\n") } if(dxb > 0) { signb <- 1 leftb <- 1 rightb <- 2 if(verb) cat("B is positive\n") } else { signb <- -1 leftb <- 2 rightb <- 1 if(verb) cat("B is negative\n") } signfactor <- signa * signb # actually (-signa) * (-signb) if(verb) cat(paste("sign factor =", signfactor, "\n")) # Intersect x ranges x0 <- max(xa[lefta], xb[leftb]) x1 <- min(xa[righta], xb[rightb]) if(x0 >= x1) return(0) if(verb) { cat(paste("Intersection of x ranges: [", x0, ",", x1, "]\n")) abline(v=x0, lty=3) abline(v=x1, lty=3) } # Compute associated y coordinates slopea <- diff(ya)/diff(xa) y0a <- ya[lefta] + slopea * (x0-xa[lefta]) y1a <- ya[lefta] + slopea * (x1-xa[lefta]) slopeb <- diff(yb)/diff(xb) y0b <- yb[leftb] + slopeb * (x0-xb[leftb]) y1b <- yb[leftb] + slopeb * (x1-xb[leftb]) # Determine whether upper edges intersect # if not, intersection is a single trapezium # if so, intersection is a union of two trapezia yd0 <- y0b - y0a yd1 <- y1b - y1a if(yd0 * yd1 >= 0) { # edges do not intersect areaT <- (x1 - x0) * (min(y1a,y1b) + min(y0a,y0b))/2 if(verb) cat(paste("Edges do not intersect\n")) } else { # edges do intersect # find intersection xint <- x0 + (x1-x0) * abs(yd0/(yd1 - yd0)) yint <- y0a + slopea * (xint - x0) if(verb) { cat(paste("Edges intersect at (", xint, ",", yint, ")\n")) points(xint, yint, cex=2, pch="O") } # evaluate left trapezium left <- (xint - x0) * (min(y0a, y0b) + yint)/2 # evaluate right trapezium right <- (x1 - xint) * (min(y1a, y1b) + yint)/2 areaT <- left + right if(verb) cat(paste("Left area = ", left, ", right=", right, "\n")) } # return area of intersection multiplied by signs return(signfactor * areaT) } xypolygon2psp <- function(p, w, 
check=spatstat.options("checksegments")) { verify.xypolygon(p) n <- length(p$x) nxt <- c(2:n, 1) return(psp(p$x, p$y, p$x[nxt], p$y[nxt], window=w, check=check)) } xypolyselfint <- function(p, eps=.Machine$double.eps, proper=FALSE, yesorno=FALSE, checkinternal=FALSE) { verify.xypolygon(p) n <- length(p$x) verbose <- (n > 1000) if(verbose) cat(paste("[Checking polygon with", n, "edges...")) x0 <- p$x y0 <- p$y dx <- diff(x0[c(1:n,1)]) dy <- diff(y0[c(1:n,1)]) DUP <- spatstat.options("dupC") if(yesorno) { # get a yes-or-no answer answer <- .C("xypsi", n=as.integer(n), x0=as.double(x0), y0=as.double(y0), dx=as.double(dx), dy=as.double(dy), xsep=as.double(2 * max(abs(dx))), ysep=as.double(2 * max(abs(dy))), eps=as.double(eps), proper=as.integer(proper), answer=as.integer(integer(1)), DUP=DUP)$answer # PACKAGE="spatstat") if(verbose) cat("]\n") return(answer != 0) } out <- .C("Cxypolyselfint", n=as.integer(n), x0=as.double(x0), y0=as.double(y0), dx=as.double(dx), dy=as.double(dy), eps=as.double(eps), xx=as.double(numeric(n^2)), yy=as.double(numeric(n^2)), ti=as.double(numeric(n^2)), tj=as.double(numeric(n^2)), ok=as.integer(integer(n^2)), DUP=DUP) # PACKAGE="spatstat") uhoh <- (matrix(out$ok, n, n) != 0) if(proper) { # ignore cases where two vertices coincide ti <- matrix(out$ti, n, n)[uhoh] tj <- matrix(out$tj, n, n)[uhoh] i.is.vertex <- (abs(ti) < eps) | (abs(ti - 1) < eps) j.is.vertex <- (abs(tj) < eps) | (abs(tj - 1) < eps) dup <- i.is.vertex & j.is.vertex uhoh[uhoh] <- !dup } if(checkinternal && any(uhoh != t(uhoh))) warning("Internal error: incidence matrix is not symmetric") xx <- matrix(out$xx, n, n) yy <- matrix(out$yy, n, n) uptri <- (row(uhoh) < col(uhoh)) xx <- as.vector(xx[uhoh & uptri]) yy <- as.vector(yy[uhoh & uptri]) result <- list(x=xx, y=yy) if(verbose) cat("]\n") return(result) } owinpolycheck <- function(W, verbose=TRUE) { verifyclass(W, "owin") stopifnot(W$type == "polygonal") # extract stuff B <- W$bdry npoly <- length(B) outerframe <- 
owin(W$xrange, W$yrange) # can't use as.rectangle here; we're still checking validity boxarea.mineps <- area.owin(outerframe) * (1 - 0.00001) # detect very large datasets BS <- object.size(B) blowbyblow <- verbose & (BS > 1e4 || npoly > 20) # answer <- TRUE notes <- character(0) err <- character(0) # check for duplicated points, self-intersection, outer frame if(blowbyblow) cat(paste("Checking", npoly, ngettext(npoly, "polygon...", "polygons..."))) dup <- self <- is.box <- logical(npoly) for(i in 1:npoly) { if(blowbyblow && npoly > 1) progressreport(i, npoly) Bi <- B[[i]] # check for duplicated vertices dup[i] <- any(duplicated(ppp(Bi$x, Bi$y, window=outerframe, check=FALSE))) if(dup[i] && blowbyblow) message(paste("Polygon", i, "contains duplicated vertices")) # check for self-intersection self[i] <- xypolyselfint(B[[i]], proper=TRUE, yesorno=TRUE) if(self[i] && blowbyblow) message(paste("Polygon", i, "is self-intersecting")) # check whether one of the current boundary polygons # is the bounding box itself (with + sign) is.box[i] <- (length(Bi$x) == 4) && (area.xypolygon(Bi) >= boxarea.mineps) } if(blowbyblow) cat("done.\n") if((ndup <- sum(dup)) > 0) { whinge <- paste(ngettext(ndup, "Polygon", "Polygons"), if(npoly == 1) NULL else commasep(which(dup)), ngettext(ndup, "contains", "contain"), "duplicated vertices") notes <- c(notes, whinge) err <- c(err, "duplicated vertices") if(verbose) message(whinge) answer <- FALSE } if((nself <- sum(self)) > 0) { whinge <- paste(ngettext(nself, "Polygon", "Polygons"), if(npoly == 1) NULL else commasep(which(self)), ngettext(nself, "is", "are"), "self-intersecting") notes <- c(notes, whinge) if(verbose) message(whinge) err <- c(err, "self-intersection") answer <- FALSE } if((nbox <- sum(is.box)) > 1) { answer <- FALSE whinge <- paste("Polygons", commasep(which(is.box)), "coincide with the outer frame") notes <- c(notes, whinge) err <- c(err, "polygons duplicating the outer frame") } # check for crossings between different 
polygons cross <- matrix(FALSE, npoly, npoly) if(npoly > 1) { if(blowbyblow) cat(paste("Checking for cross-intersection between", npoly, "polygons...")) P <- lapply(B, xypolygon2psp, w=outerframe, check=FALSE) for(i in seq_len(npoly-1)) { if(blowbyblow) progressreport(i, npoly-1) Pi <- P[[i]] for(j in (i+1):npoly) { crosses <- if(is.box[i] || is.box[j]) FALSE else { anycrossing.psp(Pi, P[[j]]) } cross[i,j] <- cross[j,i] <- crosses if(crosses) { answer <- FALSE whinge <- paste("Polygons", i, "and", j, "cross over") notes <- c(notes, whinge) if(verbose) message(whinge) err <- c(err, "overlaps between polygons") } } } if(blowbyblow) cat("done.\n") } err <- unique(err) attr(answer, "notes") <- notes attr(answer, "err") <- err return(answer) } simplify.xypolygon <- function(p, dmin) { verify.xypolygon(p) x <- p$x y <- p$y n <- length(x) if(n <= 3) return(p) dmin2 <- dmin^2 # edge lengths: len2[i] is distance from i to i+1 len2 <- (x - c(x[-1], x[1]))^2 + (y - c(y[-1],y[1]))^2 # while(n > 3 && any(len2 < dmin2)) { # delete the shortest edge kill <- which.min(len2) # edge from 'kill' to 'kill+1' will be removed # Replacement vertex is midpoint of segment left <- if(kill == 1) n else (kill - 1) killplus1 <- if(kill == n) 1 else (kill + 1) right <- if(killplus1 == n) 1 else (killplus1 + 1) xmid <- (x[kill]+x[killplus1])/2 ymid <- (y[kill]+y[killplus1])/2 d2leftmid <- (xmid-x[left])^2+(ymid-y[left])^2 d2midright <- (xmid-x[right])^2+(ymid-y[right])^2 # adjust vectors: first replace segment endpoints without deleting x[kill] <- xmid y[kill] <- ymid x[killplus1] <- xmid y[killplus1] <- ymid len2[left] <- d2leftmid len2[kill] <- 0 len2[killplus1] <- d2midright # now delete x <- x[-kill] y <- y[-kill] len2 <- len2[-kill] n <- n-1 } # p$x <- x p$y <- y p$area <- area.xypolygon(p[c("x","y")]) return(p) } inside.triangle <- function(x, y, xx, yy) { # test whether points x[i], y[i] lie in triangle xx[1:3], yy[1:3] # using barycentric coordinates # vector 0 is edge from A to C v0x <- 
xx[3] - xx[1] v0y <- yy[3] - yy[1] # vector 1 is edge from A to B v1x <- xx[2] - xx[1] v1y <- yy[2] - yy[1] # vector 2 is from vertex A to point P v2x <- x - xx[1] v2y <- y - yy[1] # inner products dot00 <- v0x^2 + v0y^2 dot01 <- v0x * v1x + v0y * v1y dot02 <- v0x * v2x + v0y * v2y dot11 <- v1x^2 + v1y^2 dot12 <- v1x * v2x + v1y * v2y # unnormalised barycentric coordinates Denom <- dot00 * dot11 - dot01 * dot01 u <- dot11 * dot02 - dot01 * dot12 v <- dot00 * dot12 - dot01 * dot02 # test return((u > 0) & (v > 0) & (u + v < Denom)) } spatstat/R/edgeRipley.R0000755000176000001440000001350412237642727014604 0ustar ripleyusers# # edgeRipley.R # # $Revision: 1.10 $ $Date: 2012/08/17 10:27:05 $ # # Ripley isotropic edge correction weights # # edge.Ripley(X, r, W) compute isotropic correction weights # for centres X[i], radii r[i,j], window W # # To estimate the K-function see the idiom in "Kest.S" # ####################################################################### edge.Ripley <- local({ small <- function(x) { abs(x) < .Machine$double.eps } hang <- function(d, r) { nr <- nrow(r) nc <- ncol(r) answer <- matrix(0, nrow=nr, ncol=nc) # replicate d[i] over j index d <- matrix(d, nrow=nr, ncol=nc) hit <- (d < r) answer[hit] <- acos(d[hit]/r[hit]) answer } edge.Ripley <- function(X, r, W=X$window, method="C", maxweight=100) { # X is a point pattern, or equivalent X <- as.ppp(X, W) W <- X$window switch(W$type, rectangle={}, polygonal={ if(method != "C") stop(paste("Ripley isotropic correction for polygonal windows", "requires method = ", dQuote("C"))) }, mask={ stop(paste("sorry, Ripley isotropic correction", "is not implemented for binary masks")) } ) n <- npoints(X) if(is.matrix(r) && nrow(r) != n) stop("the number of rows of r should match the number of points in X") if(!is.matrix(r)) { if(length(r) != n) stop("length of r is incompatible with the number of points in X") r <- matrix(r, nrow=n) } # Nr <- nrow(r) Nc <- ncol(r) if(Nr * Nc == 0) return(r) ########## x <- X$x 
y <- X$y stopifnot(method %in% c("interpreted", "C")) switch(method, interpreted = { ######## interpreted R code for rectangular case ######### # perpendicular distance from point to each edge of rectangle # L = left, R = right, D = down, U = up dL <- x - W$xrange[1] dR <- W$xrange[2] - x dD <- y - W$yrange[1] dU <- W$yrange[2] - y # detect whether any points are corners of the rectangle corner <- (small(dL) + small(dR) + small(dD) + small(dU) >= 2) # angle between (a) perpendicular to edge of rectangle # and (b) line from point to corner of rectangle bLU <- atan2(dU, dL) bLD <- atan2(dD, dL) bRU <- atan2(dU, dR) bRD <- atan2(dD, dR) bUL <- atan2(dL, dU) bUR <- atan2(dR, dU) bDL <- atan2(dL, dD) bDR <- atan2(dR, dD) # The above are all vectors [i] # Now we compute matrices [i,j] # half the angle subtended by the intersection between # the circle of radius r[i,j] centred on point i # and each edge of the rectangle (prolonged to an infinite line) aL <- hang(dL, r) aR <- hang(dR, r) aD <- hang(dD, r) aU <- hang(dU, r) # apply maxima # note: a* are matrices; b** are vectors; # b** are implicitly replicated over j index cL <- pmin.int(aL, bLU) + pmin.int(aL, bLD) cR <- pmin.int(aR, bRU) + pmin.int(aR, bRD) cU <- pmin.int(aU, bUL) + pmin.int(aU, bUR) cD <- pmin.int(aD, bDL) + pmin.int(aD, bDR) # total exterior angle ext <- cL + cR + cU + cD # add pi/2 for corners if(any(corner)) ext[corner,] <- ext[corner,] + pi/2 # OK, now compute weight weight <- 1 / (1 - ext/(2 * pi)) }, C = { ############ C code ############################# DUP <- spatstat.options("dupC") switch(W$type, rectangle={ z <- .C("ripleybox", nx=as.integer(n), x=as.double(x), y=as.double(y), rmat=as.double(r), nr=as.integer(Nc), #sic xmin=as.double(W$xrange[1]), ymin=as.double(W$yrange[1]), xmax=as.double(W$xrange[2]), ymax=as.double(W$yrange[2]), epsilon=as.double(.Machine$double.eps), out=as.double(numeric(Nr * Nc)), DUP=DUP) # PACKAGE="spatstat") weight <- matrix(z$out, nrow=Nr, ncol=Nc) }, polygonal={ 
Y <- as.psp(W) z <- .C("ripleypoly", nc=as.integer(n), xc=as.double(x), yc=as.double(y), nr=as.integer(Nc), rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), out=as.double(numeric(Nr * Nc)), DUP=DUP) # PACKAGE="spatstat") angles <- matrix(z$out, nrow = Nr, ncol = Nc) weight <- 2 * pi/angles } ) } ) # eliminate wild values weight <- matrix(pmax.int(0, pmin.int(maxweight, weight)), nrow=Nr, ncol=Nc) return(weight) } edge.Ripley }) spatstat/R/bermantest.R0000755000176000001440000002340212237642727014655 0ustar ripleyusers# # bermantest.R # # Test statistics from Berman (1986) # # $Revision: 1.12 $ $Date: 2013/02/15 11:20:20 $ # # # --------------------------- bermantest <- function(...) { UseMethod("bermantest") } bermantest.ppp <- function(X, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { Xname <- short.deparse(substitute(X)) covname <- short.deparse(substitute(covariate)) which <- match.arg(which) alternative <- match.arg(alternative) do.call("bermantestEngine", resolve.defaults(list(ppm(X), covariate, which, alternative), list(...), list(modelname="CSR", covname=covname, dataname=Xname))) } bermantest.ppm <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) verifyclass(model, "ppm") which <- match.arg(which) alternative <- match.arg(alternative) if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call("bermantestEngine", resolve.defaults(list(model, covariate, which, alternative), list(...), list(modelname=modelname, covname=covname, dataname=model$Qname))) } bermantest.lpp <- function(X, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) 
{ Xname <- short.deparse(substitute(X)) covname <- short.deparse(substitute(covariate)) which <- match.arg(which) alternative <- match.arg(alternative) do.call("bermantestEngine", resolve.defaults(list(lppm(X), covariate, which, alternative), list(...), list(modelname="CSR", covname=covname, dataname=Xname))) } bermantest.lppm <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ...) { modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) verifyclass(model, "lppm") which <- match.arg(which) alternative <- match.arg(alternative) if(is.poisson(model) && is.stationary(model)) modelname <- "CSR" do.call("bermantestEngine", resolve.defaults(list(model, covariate, which, alternative), list(...), list(modelname=modelname, covname=covname, dataname=model$Xname))) } bermantestEngine <- function(model, covariate, which=c("Z1", "Z2"), alternative=c("two.sided", "less", "greater"), ..., modelname, covname, dataname="") { csr <- is.poisson(model) && is.stationary(model) if(missing(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(missing(covname)) covname <- short.deparse(substitute(covariate)) which <- match.arg(which) alternative <- match.arg(alternative) if(!is.poisson(model)) stop("Only implemented for Poisson point process models") # ........... first assemble data ............... fram <- spatialCDFframe(model, covariate, ..., modelname=modelname, covname=covname, dataname=dataname) fvalues <- fram$values info <- fram$info # values of covariate at data points ZX <- fvalues$ZX # transformed to Unif[0,1] under H0 U <- fvalues$U # values of covariate at pixels Zvalues <- fvalues$Zvalues # corresponding pixel areas/weights weights <- fvalues$weights # intensity of model lambda <- fvalues$lambda switch(which, Z1={ #......... Berman Z1 statistic ..................... 
method <- paste("Berman Z1 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) # sum of covariate values at data points Sn <- sum(ZX) # predicted mean and variance lamwt <- lambda * weights En <- sum(lamwt) ESn <- sum(lamwt * Zvalues) varSn <- sum(lamwt * Zvalues^2) # working, for plot method working <- list(meanZX=mean(ZX), meanZ=ESn/En) # standardise statistic <- (Sn - ESn)/sqrt(varSn) names(statistic) <- "Z1" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="mean value of covariate at random points is less than predicted under model", greater="mean value of covariate at random points is greater than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname)) }, Z2={ #......... Berman Z2 statistic ..................... method <- paste("Berman Z2 test of", if(info$csr) "CSR" else "inhomogeneous Poisson process", "in", info$spacename) npts <- length(ZX) statistic <- sqrt(12/npts) * (sum(U) - npts/2) working <- list(meanU=mean(U)) names(statistic) <- "Z2" p.value <- switch(alternative, two.sided=2 * pnorm(-abs(statistic)), less=pnorm(statistic), greater=pnorm(statistic, lower.tail=FALSE)) altblurb <- switch(alternative, two.sided="two-sided", less="covariate values at random points have lower quantiles than predicted under model", greater="covariate values at random points have higher quantiles than predicted under model") valuename <- paste("covariate", sQuote(paste(covname, collapse="")), "evaluated at points of", sQuote(dataname), "\n\t", "and transformed to uniform distribution under", if(info$csr) modelname else sQuote(modelname)) }) out <- list(statistic=statistic, p.value=p.value, alternative=altblurb, method=method, which=which, working=working, data.name=valuename, fram=fram) class(out) <- c("htest", 
"bermantest") return(out) } plot.bermantest <- function(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=col, lty0=lty) { fram <- x$fram if(!is.null(fram)) { values <- fram$values info <- fram$info } else { # old style ks <- x$ks values <- attr(ks, "prep") info <- attr(ks, "info") } work <- x$working op <- options(useFancyQuotes=FALSE) switch(x$which, Z1={ # plot cdf's of Z FZ <- values$FZ xxx <- get("x", environment(FZ)) yyy <- get("y", environment(FZ)) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z1 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) do.call("plot.default", resolve.defaults( list(x=xxx, y=yyy, type="l"), list(...), list(lwd=lwd0, col=col0, lty=lty0), list(xlab=info$covname, ylab="probability", main=main))) FZX <- values$FZX if(is.null(FZX)) FZX <- ecdf(values$ZX) plot(FZX, add=TRUE, do.points=FALSE, lwd=lwd, col=col, lty=lty) abline(v=work$meanZ, lwd=lwd0,col=col0, lty=lty0) abline(v=work$meanZX, lwd=lwd,col=col, lty=lty) }, Z2={ # plot cdf of U U <- values$U cdfU <- ecdf(U) main <- c(x$method, paste("based on distribution of covariate", sQuote(info$covname)), paste("Z2 statistic =", signif(x$statistic, 4)), paste("p-value=", signif(x$p.value, 4))) do.call("plot.ecdf", resolve.defaults( list(cdfU), list(...), list(do.points=FALSE, asp=1), list(lwd=lwd, col=col, lty=lty), list(xlab="U", ylab="relative frequency"), list(main=main))) abline(0,1,lwd=lwd0,col=col0,lty=lty0) abline(v=0.5, lwd=lwd0,col=col0,lty=lty0) abline(v=work$meanU, lwd=lwd,col=col,lty=lty) }) options(op) return(invisible(NULL)) } spatstat/R/nnclean.R0000755000176000001440000001266012237642727014133 0ustar ripleyusers# # nnclean.R # # Nearest-neighbour clutter removal # # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # # $Revision: 1.7 $ $Date: 2013/05/01 07:27:35 $ # nnclean <- function(X, k, ...) 
{ UseMethod("nnclean") } nnclean.pp3 <- function(X, k, ..., convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley stopifnot(inherits(X, "pp3")) validposint(k, "nnclean.pp3") kthNND <- nndist(X, k=k) # apply classification algorithm em <- nncleanEngine(kthNND, k=k, d=3, ..., tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit) # tack results onto point pattern as marks pp <- em$probs zz <- factor(em$z, levels=c(0,1)) levels(zz) <- c("noise", "feature") mm <- hyperframe(prob=pp, label=zz) marks(X) <- cbind(marks(X), mm) return(X) } nnclean.ppp <- function(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley n <- X$n validposint(k, "nnclean.ppp") if(!edge.correct) { # compute vector of k-th nearest neighbour distances kthNND <- nndist(X, k=k) } else { # replicate data periodically # (ensuring original points are listed first) Xbox <- X[as.rectangle(X)] Xpand <- periodify(Xbox, ix=c(0,-1,1), iy=c(0,-1,1), check=FALSE) # trim to margin W <- expand.owin(X$window, (1+2*wrap)^2) Xpand <- Xpand[W] kthNND <- nndist(Xpand, k=k) } # apply classification algorithm em <- nncleanEngine(kthNND, k=k, d=2, ..., tol=convergence, plothist=plothist, verbose=verbose, maxit=maxit) # extract results pp <- em$probs zz <- em$z zz <- factor(zz, levels=c(0,1)) levels(zz) <- c("noise", "feature") df <- data.frame(class=zz,prob=pp) if(edge.correct) { # trim back to original point pattern df <- df[seq_len(X$n), ] } # tack on marx <- marks(X, dfok=TRUE) if(is.null(marx)) marks(X, dfok=TRUE) <- df else marks(X, dfok=TRUE) <- cbind(df, marx) return(X) } nncleanEngine <- function(kthNND, k, d, ..., tol = 0.001, plothist = FALSE, verbose=TRUE, maxit=50) { # Adapted from statlib 
file NNclean.q # Authors: Simon Byers and Adrian Raftery # Adapted for spatstat by Adrian Baddeley n <- length(kthNND) alpha.d <- (2. * pi^(d/2.))/(d * gamma(d/2.)) # raise to power d for efficiency kNNDpowd <- kthNND^d # # Now use kthNND in E-M algorithm. # First set up starting guesses. # # probs <- numeric(n) thresh <- (min(kthNND) + diff(range(kthNND))/3.) high <- (kthNND > thresh) delta <- as.integer(high) p <- 0.5 lambda1 <- k/(alpha.d * mean(kNNDpowd[!high])) lambda2 <- k/(alpha.d * mean(kNNDpowd[ high])) loglik.old <- 0. loglik.new <- 1. # # Iterator starts here, # Z <- !kthNND niter <- 0 while(abs(loglik.new - loglik.old)/(1 + abs(loglik.new)) > tol) { if(niter >= maxit) { warning(paste("E-M algorithm failed to converge in", maxit, "iterations")) break } niter <- niter + 1 # E - step f1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d) f2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d) delta[!Z] <- deltaNZ <- (p * f1)/(p * f1 + (1 - p) * f2) delta[Z] <- 0 # M - step p <- sum(delta)/n lambda1 <- (k * sum(delta))/(alpha.d * sum(kNNDpowd * delta)) lambda2 <- (k * sum((1. - delta)))/(alpha.d * sum(kNNDpowd * (1. - delta))) # evaluate loglikelihood loglik.old <- loglik.new loglik.new <- sum( - p * lambda1 * alpha.d * (kNNDpowd * delta) - (1. - p) * lambda2 * alpha.d * (kNNDpowd * (1 - delta)) + delta * k * log(lambda1 * alpha.d) + (1. - delta) * k * log(lambda2 * alpha.d)) if(verbose) cat(paste("Iteration", niter, "\tlogLik =", loglik.new, "\tp =", signif(p,4), "\n")) } if(plothist) { xlim <- c(0, max(kthNND)) barheights <- hist(kthNND, nclass=40, plot=FALSE)$density support <- seq(from=xlim[1], to=xlim[2], length.out = 200.) fittedy <- p * dknn(support, lambda=lambda1, k = k, d = d) + (1. 
- p) * dknn(support, lambda=lambda2, k = k, d = d) ylim <- range(c(0, barheights, fittedy)) xlab <- paste("Distance to", ordinal(k), "nearest neighbour") hist(kthNND, nclass=40, probability = TRUE, xlim = xlim, ylim=ylim, axes = TRUE, xlab = xlab, ylab = "Probability density") box() lines(support, fittedy, col="green") } # delta1 <- dknn(kthNND[!Z], lambda=lambda1, k = k, d = d) delta2 <- dknn(kthNND[!Z], lambda=lambda2, k = k, d = d) probs[!Z] <- delta1/(delta1 + delta2) probs[Z] <- 1 # if(verbose) { cat("Estimated parameters:\n") cat(paste("p [cluster] =", signif(p, 5), "\n")) cat(paste("lambda [cluster] =", signif(lambda1, 5), "\n")) cat(paste("lambda [noise] =", signif(lambda2, 5), "\n")) } # # z will be the classifications. 1= in cluster. 0= in noise. # return(list(z = round(probs), probs = probs, lambda1 = lambda1, lambda2 = lambda2, p = p, kthNND = kthNND, d=d, n=n, k=k)) } spatstat/R/clickppp.R0000755000176000001440000000445712237642727014327 0ustar ripleyusers# Dominic Schuhmacher's idea clickppp <- function(n=NULL, win=square(1), types=NULL, ..., add=FALSE, main=NULL, hook=NULL) { win <- as.owin(win) instructions <- if(!is.null(n)) paste("click", n, "times in window") else paste("add points: click left mouse button in window\n", "exit: click middle mouse button") if(is.null(main)) main <- instructions #### single type ######################### if(is.null(types)) { plot(win, add=add, main=main) if(!is.null(hook)) plot(hook, add=TRUE) if(!is.null(n)) xy <- do.call("locator", resolve.defaults(list(...), list(n=n, type="p"))) else xy <- do.call("locator", resolve.defaults(list(...), list(type="p"))) # check whether all points lie inside window if((nout <- sum(!inside.owin(xy$x, xy$y, win))) > 0) { warning(paste(nout, ngettext(nout, "point", "points"), "lying outside specified window; window was expanded")) win <- bounding.box(win, bounding.box.xy(xy)) } X <- ppp(xy$x, xy$y, window=win) return(X) } ##### multitype ####################### ftypes <- 
factor(types, levels=types) getem <- function(i, instr, ...) { main <- paste("Points of type", sQuote(i), "\n", instr) do.call("clickppp", resolve.defaults(list(...), list(main=main))) } # input points of type 1 X <- getem(ftypes[1], instructions, n=n, win=win, add=add, ..., pch=1) X <- X %mark% ftypes[1] # input points of types 2, 3, ... in turn naughty <- FALSE for(i in 2:length(types)) { Xi <- getem(ftypes[i], instructions, n=n, win=win, add=add, ..., hook=X, pch=i) Xi <- Xi %mark% ftypes[i] if(!naughty && identical(Xi$window, win)) { # normal case X <- superimpose(X, Xi, W=win) } else { # User has clicked outside original window. naughty <- TRUE # Use bounding box for simplicity bb <- bounding.box(Xi$window, X$window) X <- superimpose(X, Xi, W=bb) } } if(!add) { if(!naughty) plot(X, main="Final pattern") else { plot(X$window, main="Final pattern (in expanded window)") plot(win, add=TRUE) plot(X, add=TRUE) } } return(X) } spatstat/R/relrisk.R0000755000176000001440000002213512237642727014166 0ustar ripleyusers# # relrisk.R # # Estimation of relative risk # # $Revision: 1.20 $ $Date: 2013/08/29 03:57:16 $ # relrisk <- function(X, sigma=NULL, ..., varcov=NULL, at="pixels", casecontrol=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) Y <- split(X) ntypes <- length(Y) if(ntypes == 1) stop("Data contains only one type of points") marx <- marks(X) imarks <- as.integer(marx) lev <- levels(marx) # trap arguments dotargs <- list(...) 
isbwarg <- names(dotargs) %in% c("method", "nh", "hmin", "hmax", "warn") bwargs <- dotargs[isbwarg] dargs <- dotargs[!isbwarg] # bandwidth if(is.null(sigma) && is.null(varcov)) { sigma <- do.call(bw.relrisk, append(list(X), bwargs)) } # compute probabilities if(ntypes == 2 && casecontrol) { # 1 = control, 2 = case # compute densities Deach <- do.call(density.splitppp, append(list(Y, sigma=sigma, varcov=varcov, at=at), dargs)) Dall <- do.call(density.ppp, append(list(X, sigma=sigma, varcov=varcov, at=at), dargs)) # compute probability of case switch(at, pixels= { Dcase <- Deach[[2]] result <- eval.im(Dcase/Dall) # trap NaN values nbg <- as.matrix(eval.im(badprobability(result, FALSE))) if(any(nbg)) { # apply l'Hopital's rule: # p(case) = 1{nearest neighbour is case} dist1 <- distmap(Y[[1]], xy=result) dist2 <- distmap(Y[[2]], xy=result) close2 <- eval.im(as.integer(dist2 < dist1)) result[nbg] <- close2[nbg] } }, points={ result <- numeric(npoints(X)) iscase <- (imarks == 2) result[iscase] <- Deach[[2]]/Dall[iscase] result[!iscase] <- 1 - Deach[[1]]/Dall[!iscase] # trap NaN values if(any(nbg <- badprobability(result, TRUE))) { # apply l'Hopital's rule nntype <- imarks[nnwhich(X)] result[nbg] <- as.integer(nntype[nbg] == 2) } }) } else { # several types switch(at, pixels={ Deach <- do.call(density.splitppp, append(list(Y, sigma=sigma, varcov=varcov, at=at), dargs)) Dall <- do.call(density.ppp, append(list(X, sigma=sigma, varcov=varcov, at=at), dargs)) result <- as.listof(lapply(Deach, function(d, dall) { eval.im(d/dall) }, dall = Dall)) # trap NaN values nbg <- lapply(result, function(z) { as.matrix(eval.im(badprobability(z, FALSE))) }) nbg <- Reduce("|", nbg) if(any(nbg)) { # apply l'Hopital's rule distX <- distmap(X, xy=Dall) whichnn <- attr(distX, "index") typenn <- eval.im(imarks[whichnn]) typennsub <- as.matrix(typenn)[nbg] for(k in seq_along(result)) result[[k]][nbg] <- (typennsub == k) } }, points = { npts <- npoints(X) # dummy variable matrix dumm <- matrix(0, 
npts, ntypes) dumm[cbind(seq_len(npts), imarks)] <- 1 colnames(dumm) <- lev # compute probability of each type Z <- X %mark% dumm result <- do.call(Smooth, append(list(Z, sigma=sigma, varcov=varcov, at="points"), dargs)) # trap NaN values bad <- badprobability(as.matrix(result), TRUE) badrow <- apply(bad, 1, any) if(any(badrow)) { # apply l'Hopital's rule typenn <- imarks[nnwhich(X)] result[badrow, ] <- (typenn == col(result))[badrow, ] } }) } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } bw.stoyan <- function(X, co=0.15) { # Stoyan's rule of thumb stopifnot(is.ppp(X)) n <- npoints(X) W <- as.owin(X) a <- area.owin(W) stoyan <- co/sqrt(5 * n/a) return(stoyan) } bw.relrisk <- function(X, method="likelihood", nh=spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.multitype(X)) # rearrange in ascending order of x-coordinate (for C code) X <- X[fave.order(X$x)] # Y <- split(X) ntypes <- length(Y) if(ntypes == 1) stop("Data contains only one type of points") marx <- marks(X) method <- pickoption("method", method, c(likelihood="likelihood", leastsquares="leastsquares", ls="leastsquares", LS="leastsquares", weightedleastsquares="weightedleastsquares", wls="weightedleastsquares", WLS="weightedleastsquares")) # if(method != "likelihood") { # dummy variables for each type imarks <- as.integer(marx) if(ntypes == 2) { # 1 = control, 2 = case indic <- (imarks == 2) y01 <- as.integer(indic) } else { indic <- matrix(FALSE, n, ntypes) indic[cbind(seq_len(n), imarks)] <- TRUE y01 <- indic * 1 } X01 <- X %mark% y01 } # cross-validated bandwidth selection # determine a range of bandwidth values n <- npoints(X) if(is.null(hmin) || is.null(hmax)) { W <- as.owin(X) a <- area.owin(W) d <- diameter(as.rectangle(W)) # Stoyan's rule of thumb applied to the least and most common types mcount <- table(marx) nmin <- max(1, min(mcount)) nmax <- max(1, max(mcount)) stoyan.low <- 0.15/sqrt(nmax/a) stoyan.high <- 
0.15/sqrt(nmin/a) if(is.null(hmin)) hmin <- max(min(nndist(unique(X))), stoyan.low/5) if(is.null(hmax)) { hmax <- min(d/4, stoyan.high * 20) hmax <- max(hmax, hmin * 2) } } else stopifnot(hmin < hmax) # h <- exp(seq(from=log(hmin), to=log(hmax), length.out=nh)) cv <- numeric(nh) # # compute cross-validation criterion switch(method, likelihood={ methodname <- "Likelihood" # for efficiency, only compute the estimate of p_j(x_i) # when j = m_i = mark of x_i. Dthis <- numeric(n) for(i in seq_len(nh)) { Dall <- density.ppp(X, sigma=h[i], at="points", edge=FALSE, sorted=TRUE) Deach <- density.splitppp(Y, sigma=h[i], at="points", edge=FALSE, sorted=TRUE) split(Dthis, marx) <- Deach pthis <- Dthis/Dall cv[i] <- -mean(log(pthis)) } }, leastsquares={ methodname <- "Least Squares" for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) cv[i] <- mean((y01 - phat)^2) } }, weightedleastsquares={ methodname <- "Weighted Least Squares" # need initial value of h from least squares h0 <- bw.relrisk(X, "leastsquares", nh=ceiling(nh/4)) phat0 <- Smooth(X01, sigma=h0, at="points", leaveoneout=TRUE, sorted=TRUE) var0 <- phat0 * (1-phat0) var0 <- pmax.int(var0, 1e-6) for(i in seq_len(nh)) { phat <- Smooth(X01, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) cv[i] <- mean((y01 - phat)^2/var0) } }) # optimize iopt <- which.min(cv) # if(warn && (iopt == nh || iopt == 1)) warning(paste("Cross-validation criterion was minimised at", if(iopt == 1) "left-hand" else "right-hand", "end of interval", "[", signif(hmin, 3), ",", signif(hmax, 3), "];", "use arguments hmin, hmax to specify a wider interval")) # result <- bw.optim(cv, h, iopt, hname="sigma", creator="bw.relrisk", criterion=paste(methodname, "Cross-Validation")) return(result) } which.max.im <- function(x) { stopifnot(is.list(x)) n <- length(x) if(n == 0) return(list()) if(!all(unlist(lapply(x, is.im)))) stop("x should be a list of images") nama <- names(x) xmax <- x[[1]] wmax <- 
as.im(1L, xmax) if(n > 1) { for(i in 2:n) { xi <- x[[i]] xmaxnew <- eval.im(pmax.int(xi, xmax)) wmaxnew <- eval.im(ifelseAX(xi > xmax, i, wmax)) xmax <- xmaxnew wmax <- wmaxnew } } wmax <- eval.im(factor(wmax, levels=1:n)) if(!is.null(nama)) levels(wmax) <- nama return(wmax) } spatstat/R/defaultwin.R0000755000176000001440000000252212237642727014653 0ustar ripleyusers# # # defaultwin.R # # $Revision: 1.9 $ $Date: 2012/05/11 11:20:09 $ # default.expand <- function(object, m=2, epsilon=1e-6) { stopifnot(is.ppm(object) || inherits(object, "rmhmodel")) # no expansion necessary if model is Poisson if(is.poisson(object)) return(.no.expansion) # default is no expansion if model is nonstationary if(!is.stationary(object)) return(.no.expansion) # Redundant since a non-expandable model is non-stationary # if(!is.expandable(object)) # return(.no.expansion) # rule is to expand data window by distance d = m * reach rr <- reach(object, epsilon=epsilon) if(!is.finite(rr)) return(rmhexpand()) if(!is.numeric(m) || length(m) != 1 || m < 1) stop("m should be a single number >= 1") mr <- m * rr rule <- rmhexpand(distance = mr) # w <- as.owin(object) if(!is.null(w)) { # apply rule to window wplus <- expand.owin(w, rule) # save as new expansion rule rule <- rmhexpand(wplus) } return(rule) } default.clipwindow <- function(object, epsilon=1e-6) { stopifnot(is.ppm(object) || inherits(object, "rmhmodel")) # data window w <- as.owin(object) if(is.null(w)) return(NULL) # interaction range of model rr <- reach(object, epsilon=epsilon) if(!is.finite(rr)) return(NULL) if(rr == 0) return(w) else return(erosion(w, rr)) } spatstat/R/pcfmulti.inhom.R0000755000176000001440000002164112250312722015430 0ustar ripleyusers# # pcfmulti.inhom.R # # $Revision: 1.10 $ $Date: 2013/12/06 09:15:30 $ # # inhomogeneous multitype pair correlation functions # # pcfcross.inhom <- function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = 
c("isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) result <- pcfmulti.inhom(X, I, J, lambdaI, lambdaJ, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, sigma=sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(g[inhom,i,j](r), list(i=iname,j=jname)), sprintf("g[list(inhom,%s,%s)]", iname, jname), new.yexp=substitute(g[list(inhom,i,j)](r), list(i=iname,j=jname))) return(result) } pcfdot.inhom <- function(X, i, lambdaI=NULL, lambdadot=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") stopifnot(is.multitype(X)) if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points Iname <- paste("points with mark i =", i) Jname <- paste("points") result <- pcfmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks, kernel=kernel, bw=bw, stoyan=stoyan, correction=correction, sigma=sigma, varcov=varcov, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(g[inhom, i ~ dot](r), list(i=iname)), paste("g[list(inhom,", iname, "~symbol(\"\\267\"))]"), new.yexp=substitute(g[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) return(result) } pcfmulti.inhom <- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), sigma=NULL, varcov=NULL, Iname="points satisfying condition I", Jname="points satisfying condition J") { verifyclass(X, "ppp") r.override <- !is.null(r) win <- X$window area <- area.owin(win) npts <- npoints(X) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("translate", "Ripley") correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) # bandwidth if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(npts/area) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(npts/area) } ########## indices I and J ######################## if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != npts || length(J) != npts) stop(paste("The length of I and J must equal", "the number of points in the pattern")) nI <- sum(I) nJ <- 
sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) XI <- X[I] XJ <- X[J] ########## intensity values ######################### if(is.null(lambdaI)) { # Estimate density by leave-one-out kernel smoothing lambdaI <- density(XI, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else { # lambda values provided if(is.vector(lambdaI)) check.nvector(lambdaI, nI) else if(is.im(lambdaI)) lambdaI <- safelookup(lambdaI, XI) else if(is.function(lambdaI)) lambdaI <- lambdaI(XI$x, XI$y) else stop(paste(sQuote("lambdaI"), "should be a vector, a pixel image, or a function")) } if(is.null(lambdaJ)) { # Estimate density by leave-one-out kernel smoothing lambdaJ <- density(XJ, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else { # lambda values provided if(is.vector(lambdaJ)) check.nvector(lambdaJ, nJ) else if(is.im(lambdaJ)) lambdaJ <- safelookup(lambdaJ, XJ) else if(is.function(lambdaJ)) lambdaJ <- lambdaJ(XJ$x, XJ$y) else stop(paste(sQuote("lambdaJ"), "should be a vector, a pixel image, or a function")) } ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, npts/area) breaks <- handle.r.b.args(r, breaks, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- fv(df, "r", substitute(g[inhom,multi](r), NULL), "theo", , alim, c("r","{%s^{Pois}}(r)"), c("distance argument r", "theoretical Poisson %s"), fname="g[list(inhom, multi)]") ########## smoothing parameters for pcf ############################ # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise 
distances # identify close pairs of points close <- crosspairs(XI, XJ, rmax+hmax) # map (i,j) to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$xi <- close$xi[ok] close$yi <- close$yi[ok] close$xj <- close$xj[ok] close$yj <- close$yj[ok] close$dx <- close$dx[ok] close$dy <- close$dy[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) ###### compute ####### if(any(correction=="translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) gT <- sewpcf(dclose, edgewt * weight, denargs, area)$g out <- bind.fv(out, data.frame(trans=gT), "hat(%s^{Trans})(r)", "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) gR <- sewpcf(dclose, edgewt * weight, denargs, area)$g out <- bind.fv(out, data.frame(iso=gR), "hat(%s^{Ripley})(r)", "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? 
nama2 <- names(out) corrxns <- rev(nama2[nama2 != "r"]) # default is to display them all formula(out) <- deparse(as.formula(paste( "cbind(", paste(corrxns, collapse=","), ") ~ r"))) unitname(out) <- unitname(X) return(out) } spatstat/R/randomseg.R0000755000176000001440000000401512237642727014467 0ustar ripleyusers# # randomseg.R # # $Revision: 1.8 $ $Date: 2011/05/18 08:59:23 $ # rpoisline <- function(lambda, win=owin()) { win <- as.owin(win) if(win$type == "mask") stop("Not implemented for masks") # determine circumcircle xr <- win$xrange yr <- win$yrange xmid <- mean(xr) ymid <- mean(yr) width <- diff(xr) height <- diff(yr) rmax <- sqrt(width^2 + height^2)/2 boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax) # generate poisson lines through circumcircle n <- rpois(1, lambda * 2 * pi * rmax) if(n == 0) return(psp(numeric(0), numeric(0), numeric(0), numeric(0), window=win)) theta <- runif(n, max= 2 * pi) p <- runif(n, max=rmax) # compute intersection points with circle q <- sqrt(rmax^2 - p^2) co <- cos(theta) si <- sin(theta) X <- psp(x0= xmid + p * co + q * si, y0= ymid + p * si - q * co, x1= xmid + p * co - q * si, y1= ymid + p * si + q * co, window=boundbox, check=FALSE) # clip to window X <- X[win] return(X) } rlinegrid <- function(angle=45, spacing=0.1, win=owin()) { win <- as.owin(win) if(win$type == "mask") stop("Not implemented for masks") # determine circumcircle width <- diff(win$xrange) height <- diff(win$yrange) rmax <- sqrt(width^2 + height^2)/2 xmid <- mean(win$xrange) ymid <- mean(win$yrange) # generate randomly-displaced grid of lines through circumcircle u <- runif(1, min=0, max=spacing) - rmax if(u >= rmax) return(psp(numeric(0), numeric(0), numeric(0), numeric(0), window=win, check=FALSE)) p <- seq(from=u, to=rmax, by=spacing) # compute intersection points with circle q <- sqrt(rmax^2 - p^2) theta <- pi * ((angle - 90)/180) co <- cos(theta) si <- sin(theta) X <- psp(x0= xmid + p * co + q * si, y0= ymid + p * si - q * co, x1= xmid + p 
* co - q * si, y1= ymid + p * si + q * co, window=owin(xmid+c(-1,1)*rmax, ymid+c(-1,1)*rmax), check=FALSE) # clip to window X <- X[win] return(X) } spatstat/R/kstest.mppm.R0000644000176000001440000001300712237642727014773 0ustar ripleyusers# # kstest.mppm.R # # $Revision: 1.11 $ $Date: 2007/05/18 17:59:41 $ # kstest.mppm <- function(model, covariate, ..., verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE) { modelname <- short.deparse(substitute(model)) covname <- short.deparse(substitute(covariate)) stopifnot(is.mppm(model)) if(!is.poisson.mppm(model)) stop("Only implemented for Poisson models") pixelvalues <- function(z) { as.vector(as.matrix(z)) } # extract things from model data <- model$data npat <- model$npat Y <- data.mppm(model) if(fast) { # extract original quadrature schemes and convert to point patterns QQ <- quad.mppm(model) PP <- lapply(QQ, union.quad) Zweights <- lapply(QQ, w.quad) } else Zweights <- list() # `evaluate' covariate if(verbose) cat("Extracting covariate...") if(is.character(covariate)) { # extract covariate with this name from data used to fit model if(!(covariate %in% names(data))) stop(paste("Model does not contain a covariate called", dQuote(covariate))) covname <- covariate covariate <- data[, covname, drop=TRUE] } else if(inherits(covariate, "listof")) { if(length(covariate) != npat) stop(paste("Length of list of covariate values does not match", "number of point patterns in data of original model")) } else if(is.hyperframe(covariate)) { # extract first column covariate <- covariate[,1, drop=TRUE] if(length(covariate) != npat) stop(paste("Number of rows of covariate hyperframe does not match", "number of point patterns in data of original model")) } else if(is.function(covariate) || is.im(covariate)) { # replicate to make a list covariate <- rep(list(covariate), npat) class(covariate) <- c("listof", class(covariate)) } else stop(paste("Format of argument", sQuote("covariates"), "not understood")) if(verbose) 
cat("done.\nComputing statistics for each pattern...") # compile information for KS test from each row Zvalues <- ZX <- Win <- list() for(i in 1:npat) { if(verbose) progressreport(i, npat) XI <- Y[[i]] if(fast) PI <- PP[[i]] else WI <- XI$window covariateI <- covariate[[i]] if(is.im(covariateI)) { type <- "im" # evaluate at data points ZXI <- if(interpolate) interp.im(covariateI, XI$x, XI$y) else covariateI[XI] if(fast) { # covariate values for quadrature points ZI <- covariateI[PI] } else { # covariate image inside window ZI <- covariateI[WI, drop=FALSE] # corresponding mask WI <- as.owin(ZI) # pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else if(is.function(covariateI)) { type <- "function" # evaluate exactly at data points ZXI <- covariateI(XI$x, XI$y) if(fast) { # covariate values for quadrature points ZI <- covariateI(PI$x, PI$y) } else { # window WI <- as.mask(WI) # covariate image inside window ZI <- as.im(covariateI, W=WI) # pixel areas Zweights[[i]] <- rep(WI$xstep * WI$ystep, prod(WI$dim)) } } else stop("covariate should be an image or a function(x,y)") ZX[[i]] <- ZXI if(fast) Zvalues[[i]] <- ZI else { Win[[i]] <- WI # values of covariate in window Zvalues[[i]] <- pixelvalues(ZI) } } if(verbose) cat("done.\nComputing predicted intensity...") # compute predicted intensities trend <- if(fast) fitted(model, type="trend") else predict(model, type="trend", locations=Win, verbose=verbose)$trend if(verbose) cat("done.\nExtracting...") # extract relevant values lambda <- if(fast) trend else lapply(trend, pixelvalues) if(verbose) cat("done.\nPerforming test...") # flatten to vectors lambda <- unlist(lambda) Zweights <- unlist(Zweights) Zvalues <- unlist(Zvalues) ZX <- unlist(ZX) if(length(lambda) != length(Zvalues)) stop("Internal error: mismatch between predicted values and Z values") if(length(Zvalues) != length(Zweights)) stop("Internal error: mismatch between Z values and Z weights") lambda <- lambda * Zweights # form weighted cdf of 
Z values in window FZ <- ewcdf(Zvalues, lambda/sum(lambda)) # Ensure support of cdf includes the range of the data xxx <- knots(FZ) yyy <- FZ(xxx) if(min(xxx) > min(ZX)) { xxx <- c(min(ZX), xxx) yyy <- c(0, yyy) } if(max(xxx) < max(ZX)) { xxx <- c(xxx, max(ZX)) yyy <- c(yyy, 1) } # make piecewise linear approximation of cdf FZ <- approxfun(xxx, yyy, rule=2) # evaluate at data points if(!jitter) U <- FZ(ZX) else { # jitter observed values to avoid ties grain <- min(diff(sort(unique(ZX))))/8 jit <- runif(length(ZX), min=0, max=grain) sgn <- sample(c(-1,1), length(ZX), replace=TRUE) sgn[ZX==min(xxx)] <- 1 sgn[ZX==max(xxx)] <- -1 U <- FZ(ZX + sgn*jit) } # Test uniformity result <- ks.test(U, "punif", ...) result$data.name <- paste("predicted cdf of covariate", sQuote(paste(covname, collapse="")), "evaluated at data points of", sQuote(modelname)) if(verbose) cat("done.\n") class(result) <- c("kstest", class(result)) attr(result, "prep") <- list(Zvalues = Zvalues, lambda = lambda, ZX = ZX, FZ = FZ, U = U, type = type) attr(result, "info") <- list(modelname = modelname, covname = covname) return(result) } spatstat/R/pairs.im.R0000755000176000001440000000416012237642727014233 0ustar ripleyusers# # pairs.im.R # # $Revision: 1.5 $ $Date: 2011/05/18 08:16:11 $ # pairs.listof <- pairs.im <- function(..., plot=TRUE) { argh <- list(...) 
cl <- match.call() # unpack single argument which is a list of images if(length(argh) == 1) { arg1 <- argh[[1]] if(is.list(arg1) && all(unlist(lapply(arg1, is.im)))) argh <- arg1 } # identify which arguments are images isim <- unlist(lapply(argh, is.im)) nim <- sum(isim) if(nim == 0) stop("No images provided") if(nim == 1) { # one image: plot histogram h <- hist(..., plot=plot) return(invisible(h)) } # separate image arguments from others imlist <- argh[isim] rest <- argh[!isim] # determine image names for plotting imnames <- names(imlist) backupnames <- paste(cl)[c(FALSE, isim, FALSE)] if(length(backupnames) != nim) backupnames <- paste("V", seq_len(nim), sep="") if(length(imnames) != nim) imnames <- backupnames else if(any(needname <- !nzchar(imnames))) imnames[needname] <- backupnames[needname] # extract pixel rasters and reconcile them imwins <- lapply(imlist, as.owin) names(imwins) <- NULL rasta <- do.call("intersect.owin", imwins) # extract image pixel values on common raster pixvals <- lapply(imlist, "[.im", i=rasta, raster=rasta, drop=TRUE) # combine into data frame pixdf <- do.call("data.frame", pixvals) # plot if(plot) do.call("pairs", resolve.defaults(list(x=pixdf), rest, list(labels=imnames, pch="."))) labels <- resolve.defaults(rest, list(labels=imnames))$labels colnames(pixdf) <- labels class(pixdf) <- c("plotpairsim", class(pixdf)) return(invisible(pixdf)) } plot.plotpairsim <- function(x, ...) { do.call("pairs.default", resolve.defaults(list(x=as.data.frame(x)), list(...), list(pch="."))) return(invisible(NULL)) } print.plotpairsim <- function(x, ...) 
{ cat("Object of class plotpairsim\n") cat(paste("contains pixel data for", commasep(sQuote(colnames(x))), "\n")) return(invisible(NULL)) } spatstat/R/model.depends.R0000755000176000001440000000441312237642727015233 0ustar ripleyusers# # Determine which 'canonical variables' depend on a supplied covariate # # $Revision: 1.8 $ $Date: 2013/04/25 06:37:43 $ # model.depends <- function(object) { # supplied covariates fo <- formula(object) if(length(as.list(fo)) == 3) { # formula has a response: strip it fo <- fo[-2] } covars <- variablesinformula(fo) # canonical covariates mm <- model.matrix(object) ass <- attr(mm, "assign") # model terms tt <- terms(object) lab <- attr(tt, "term.labels") # 'ass' maps canonical covariates to 'lab' # determine which canonical covariate depends on which supplied covariate depends <- matrix(FALSE, length(ass), length(covars)) for(i in seq(along=ass)) { if(ass[i] == 0) # 0 is the intercept term depends[i,] <- FALSE else { turm <- lab[ass[i]] depends[i, ] <- covars %in% all.vars(parse(text=turm)) } } rownames(depends) <- colnames(mm) colnames(depends) <- covars # detect offsets if(!is.null(oo <- attr(tt, "offset")) && ((noo <- length(oo)) > 0)) { # entries of 'oo' index the list of variables in terms object vv <- attr(tt, "variables") offdep <- matrix(FALSE, noo, length(covars)) offnms <- character(noo) for(i in seq_len(noo)) { offseti <- languageEl(vv, oo[i] + 1) offdep[i, ] <- covars %in% all.vars(offseti) offnms[i] <- deparse(offseti) } rownames(offdep) <- offnms colnames(offdep) <- covars attr(depends, "offset") <- offdep } return(depends) } model.is.additive <- function(object) { dep <- model.depends(object) hit <- t(dep) %*% dep diag(hit) <- 0 ok <- all(hit == 0) return(ok) } model.covariates <- function(object, fitted=TRUE, offset=TRUE) { md <- model.depends(object) nm <- colnames(md) keep <- rep.int(FALSE, length(nm)) # variables used in formula with coefficients if(fitted) keep <- apply(md, 2, any) # variables used in offset 
if(offset) { oo <- attr(md, "offset") if(!is.null(oo)) keep <- keep | apply(oo, 2, any) } return(nm[keep]) } has.offset.term <- function(object) { # model terms tt <- terms(object) oo <- attr(tt, "offset") return(!is.null(oo) && (length(oo) > 0)) } has.offset <- function(object) { has.offset.term(object) || !is.null(model.offset(model.frame(object))) } spatstat/R/pairdistlpp.R0000755000176000001440000000503412237642727015045 0ustar ripleyusers# # pairdistlpp.R # # $Revision: 1.9 $ $Date: 2012/10/13 02:25:43 $ # # # pairdist.lpp # Calculates the shortest-path distance between each pair of points # in a point pattern on a linear network. # pairdist.lpp <- function(X, ..., method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) # L <- as.linnet(X) Y <- as.ppp(X) n <- npoints(Y) # Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath # nearest segment for each point pro <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)$seg pairdistmat <- matrix(0,n,n) if(method == "interpreted") { # loop through all pairs of data points for (i in 1:(n-1)) { proi <- pro[i] Xi <- Y[i] nbi1 <- from[proi] nbi2 <- to[proi] vi1 <- Lvert[nbi1] vi2 <- Lvert[nbi2] dXi1 <- crossdist(Xi, vi1) dXi2 <- crossdist(Xi, vi2) for (j in (i+1):n) { Xj <- Y[j] proj <- pro[j] if(proi == proj) { # points i and j lie on the same segment # use Euclidean distance d <- crossdist(Xi, Xj) } else { # shortest path from i to j passes through ends of segments nbj1 <- from[proj] nbj2 <- to[proj] vj1 <- Lvert[nbj1] vj2 <- Lvert[nbj2] # Calculate shortest of 4 possible paths from i to j d1Xj <- crossdist(vj1,Xj) d2Xj <- crossdist(vj2,Xj) d11 <- dXi1 + dpath[nbi1,nbj1] + d1Xj d12 <- dXi1 + dpath[nbi1,nbj2] + d2Xj d21 <- dXi2 + dpath[nbi2,nbj1] + d1Xj d22 <- dXi2 + dpath[nbi2,nbj2] + d2Xj d <- min(d11,d12,d21,d22) } # store result pairdistmat[i,j] <- pairdistmat[j,i] <- d } } } else { # C code # convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L 
segmap <- pro - 1L DUP <- spatstat.options("dupC") zz <- .C("linpairdist", np = as.integer(n), xp = as.double(Y$x), yp = as.double(Y$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.double(L$n), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), segmap = as.integer(segmap), answer = as.double(pairdistmat), DUP=DUP) # PACKAGE="spatstat") pairdistmat <- matrix(zz$answer, n, n) } return(pairdistmat) } spatstat/R/hardcore.R0000755000176000001440000000670012237642727014302 0ustar ripleyusers# # # hardcore.S # # $Revision: 1.8 $ $Date: 2013/07/19 02:52:54 $ # # The Hard core process # # Hardcore() create an instance of the Hard Core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hardcore <- local({ BlankHardcore <- list( name = "Hard core process", creator = "Hardcore", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- 0 * d v[ d <= par$hc ] <- (-Inf) attr(v, "IsOffset") <- TRUE v }, par = list(hc = NULL), # filled in later parnames = "hard core distance", selfstart = function(X, self) { # self starter for Hardcore nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } md <- min(nndist(X)) if(md == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Hardcore model")) return(self) } if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) Hardcore(hc = hcX) }, init = function(self) { hc <- self$par$hc if(length(hc) != 1) stop("hard core distance must be a single value") if(!is.na(hc) && !(is.numeric(hc) && hc > 0)) stop("hard core distance hc must be a positive number, or NA") }, update = NULL, # default OK print = NULL, # default OK 
interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { hc <- self$par$hc return(hc) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) { # fast evaluator for Hardcore interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Hardcore") hc <- potpars$hc # call evaluator for Strauss process counts <- strausscounts(U, X, hc, EqualPairs) # all counts should be zero v <- matrix(ifelseAB(counts > 0, -Inf, 0), ncol=1) attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral hc <- self$par$hc return(pi * hc^2) } ) class(BlankHardcore) <- "interact" Hardcore <- function(hc=NA) { instantiate.interact(BlankHardcore, list(hc=hc)) } Hardcore }) spatstat/R/random.R0000755000176000001440000004664312237642727014005 0ustar ripleyusers# # random.R # # Functions for generating random point patterns # # $Revision: 4.59 $ $Date: 2013/04/25 06:37:43 $ # # # runifpoint() n i.i.d. 
uniform random points ("binomial process") # # runifpoispp() uniform Poisson point process # # rpoispp() general Poisson point process (thinning method) # # rpoint() n independent random points (rejection/pixel list) # # rMaternI() Mat'ern model I # rMaternII() Mat'ern model II # rSSI() Simple Sequential Inhibition process # # rNeymanScott() Neyman-Scott process (generic) # rMatClust() Mat'ern cluster process # rThomas() Thomas process # # rthin() independent random thinning # rjitter() random perturbation # # Examples: # u01 <- owin(0:1,0:1) # plot(runifpoispp(100, u01)) # X <- rpoispp(function(x,y) {100 * (1-x/2)}, 100, u01) # X <- rpoispp(function(x,y) {ifelse(x < 0.5, 100, 20)}, 100) # plot(X) # plot(rMaternI(100, 0.02)) # plot(rMaternII(100, 0.05)) # "runifrect" <- function(n, win=owin(c(0,1),c(0,1))) { # no checking x <- runif(n, min=win$xrange[1], max=win$xrange[2]) y <- runif(n, min=win$yrange[1], max=win$yrange[2]) return(ppp(x, y, window=win, check=FALSE)) } "runifdisc" <- function(n, radius=1, centre=c(0,0), ...) { # i.i.d. uniform points in the disc of radius r and centre (x,y) disque <- disc(centre=centre, radius=radius, ...) 
theta <- runif(n, min=0, max= 2 * pi) s <- sqrt(runif(n, min=0, max=radius^2)) return(ppp(centre[1] + s * cos(theta), centre[2] + s * sin(theta), window=disque, check=FALSE)) } "runifpoint" <- function(n, win=owin(c(0,1),c(0,1)), giveup=1000, warn=TRUE) { win <- as.owin(win) check.1.integer(n) stopifnot(n >= 0) if(n == 0) return(ppp(numeric(0), numeric(0), window=win)) if(warn) { nhuge <- spatstat.options("huge.npoints") if(n > nhuge) warning(paste("Attempting to generate", n, "random points")) } switch(win$type, rectangle = { return(runifrect(n, win)) }, mask = { dx <- win$xstep dy <- win$ystep # extract pixel coordinates and probabilities xpix <- as.vector(raster.x(win)[win$m]) ypix <- as.vector(raster.y(win)[win$m]) # select pixels with equal probability id <- sample(seq_along(xpix), n, replace=TRUE) # extract pixel centres and randomise within pixels x <- xpix[id] + runif(n, min= -dx/2, max=dx/2) y <- ypix[id] + runif(n, min= -dy/2, max=dy/2) return(ppp(x, y, window=win, check=FALSE)) }, polygonal={ # rejection method # initialise empty pattern x <- numeric(0) y <- numeric(0) X <- ppp(x, y, window=win) # # rectangle in which trial points will be generated box <- bounding.box(win) # ntries <- 0 repeat { ntries <- ntries + 1 # generate trial points in batches of n qq <- runifrect(n, box) # retain those which are inside 'win' qq <- qq[win] # add them to result X <- superimpose(X, qq, W=win) # if we have enough points, exit if(X$n > n) return(X[1:n]) else if(X$n == n) return(X) # otherwise get bored eventually else if(ntries >= giveup) stop(paste("Gave up after", giveup * n, "trials,", X$n, "points accepted")) } }) stop("Unrecognised window type") } "runifpoispp" <- function(lambda, win = owin(c(0,1),c(0,1))) { win <- as.owin(win) if(!is.numeric(lambda) || length(lambda) > 1 || !is.finite(lambda) || lambda < 0) stop("Intensity lambda must be a single finite number >= 0") if(lambda == 0) # return empty pattern return(ppp(numeric(0), numeric(0), window=win)) # 
generate Poisson process in enclosing rectangle box <- bounding.box(win) mean <- lambda * area.owin(box) n <- rpois(1, mean) X <- runifpoint(n, box) # trim to window if(win$type != "rectangle") X <- X[win] return(X) } rpoint <- function(n, f, fmax=NULL, win=unit.square(), ..., giveup=1000,verbose=FALSE) { if(missing(f) || (is.numeric(f) && length(f) == 1)) # uniform distribution return(runifpoint(n, win, giveup)) # non-uniform distribution.... if(!is.function(f) && !is.im(f)) stop(paste(sQuote("f"), "must be either a function or an", sQuote("im"), "object")) if(is.im(f)) { # ------------ PIXEL IMAGE --------------------- wf <- as.owin(f) if(n == 0) return(ppp(numeric(0), numeric(0), window=wf)) w <- as.mask(wf) M <- w$m dx <- w$xstep dy <- w$ystep # extract pixel coordinates and probabilities xpix <- as.vector(raster.x(w)[M]) ypix <- as.vector(raster.y(w)[M]) ppix <- as.vector(f$v[M]) # not normalised - OK # select pixels id <- sample(length(xpix), n, replace=TRUE, prob=ppix) # extract pixel centres and randomise within pixels x <- xpix[id] + runif(n, min= -dx/2, max=dx/2) y <- ypix[id] + runif(n, min= -dy/2, max=dy/2) return(ppp(x, y, window=wf, check=FALSE)) } # ------------ FUNCTION --------------------- # Establish parameters for rejection method verifyclass(win, "owin") if(n == 0) return(ppp(numeric(0), numeric(0), window=win)) if(is.null(fmax)) { # compute approx maximum value of f imag <- as.im(f, win, ...) summ <- summary(imag) fmax <- summ$max + 0.05 * diff(summ$range) } irregular <- (win$type != "rectangle") box <- bounding.box(win) X <- ppp(numeric(0), numeric(0), window=win) ntries <- 0 # generate uniform random points in batches # and apply the rejection method. # Collect any points that are retained in X repeat{ ntries <- ntries + 1 # proposal points prop <- runifrect(n, box) if(irregular) prop <- prop[win] if(prop$n > 0) { fvalues <- f(prop$x, prop$y, ...) 
paccept <- fvalues/fmax u <- runif(prop$n) # accepted points Y <- prop[u < paccept] if(Y$n > 0) { # add to X X <- superimpose(X, Y, W=win) if(X$n >= n) { # we have enough! if(verbose) cat(paste("acceptance rate = ", round(100 * X$n/(ntries * n), 2), "%\n")) return(X[1:n]) } } } if(ntries > giveup) stop(paste("Gave up after",giveup * n,"trials with", X$n, "points accepted")) } invisible(NULL) } "rpoispp" <- function(lambda, lmax=NULL, win = owin(c(0,1),c(0,1)), ...) { # arguments: # lambda intensity: constant, function(x,y,...) or image # lmax maximum possible value of lambda(x,y,...) # win default observation window (of class 'owin') # ... arguments passed to lambda(x, y, ...) if(!(is.numeric(lambda) || is.function(lambda) || is.im(lambda))) stop(paste(sQuote("lambda"), "must be a constant, a function or an image")) if(is.numeric(lambda) && !(length(lambda) == 1 && lambda >= 0)) stop(paste(sQuote("lambda"), "must be a single, nonnegative number")) if(!is.null(lmax)) { if(!is.numeric(lmax)) stop("lmax should be a number") if(length(lmax) > 1) stop("lmax should be a single number") } win <- if(is.im(lambda)) rescue.rectangle(as.owin(lambda)) else as.owin(win) if(is.numeric(lambda)) # uniform Poisson return(runifpoispp(lambda, win)) # inhomogeneous Poisson # perform thinning of uniform Poisson if(is.null(lmax)) { imag <- as.im(lambda, win, ...) 
summ <- summary(imag) lmax <- summ$max + 0.05 * diff(summ$range) } if(is.function(lambda)) { X <- runifpoispp(lmax, win) # includes sanity checks on `lmax' if(X$n == 0) return(X) prob <- lambda(X$x, X$y, ...)/lmax u <- runif(X$n) retain <- (u <= prob) X <- X[retain, ] return(X) } if(is.im(lambda)) { X <- runifpoispp(lmax, win) if(X$n == 0) return(X) prob <- lambda[X]/lmax u <- runif(X$n) retain <- (u <= prob) X <- X[retain, ] return(X) } stop(paste(sQuote("lambda"), "must be a constant, a function or an image")) } "rMaternI" <- function(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE) { win <- as.owin(win) stopifnot(is.numeric(r) && length(r) == 1) if(stationary) { # generate in a larger window bigbox <- grow.rectangle(as.rectangle(win), r) X <- rpoispp(kappa, win=bigbox) } else { X <- rpoispp(kappa, win=win) } if(npoints(X) > 1) { d <- nndist(X) X <- X[d > r] } if(stationary) X <- X[win] return(X) } "rMaternII" <- function(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE) { win <- as.owin(win) stopifnot(is.numeric(r) && length(r) == 1) if(stationary) { bigbox <- grow.rectangle(as.rectangle(win), r) X <- rpoispp(kappa, win=bigbox) } else { X <- rpoispp(kappa, win=win) } nX <- npoints(X) if(nX > 1) { # matrix of squared pairwise distances d2 <- pairdist(X, squared=TRUE) close <- (d2 <= r^2) # random order 1:n age <- sample(seq_len(nX), nX, replace=FALSE) earlier <- outer(age, age, ">") conflict <- close & earlier # delete <- apply(conflict, 1, any) delete <- matrowany(conflict) X <- X[!delete] } if(stationary) X <- X[win] return(X) } "rSSI" <- function(r, n=Inf, win = square(1), giveup = 1000, x.init=NULL) { win.given <- !missing(win) && !is.null(win) stopifnot(is.numeric(r) && length(r) == 1 && r >= 0) stopifnot(is.numeric(n) && length(n) == 1 && n >= 0) # Simple Sequential Inhibition process # fixed number of points # Naive implementation, proposals are uniform if(is.null(x.init)) { # start with empty pattern in specified window win <- as.owin(win) x.init 
<- ppp(numeric(0),numeric(0), window=win) } else { # start with specified pattern stopifnot(is.ppp(x.init)) if(!win.given) { win <- as.owin(x.init) } else { # check compatibility of windows if(!identical(win, as.owin(x.init))) warning(paste("Argument", sQuote("win"), "is not the same as the window of", sQuote("x.init"))) x.init.new <- x.init[win] if(npoints(x.init.new) == 0) stop(paste("No points of x.init lie inside the specified window", sQuote("win"))) nlost <- npoints(x.init) - npoints(x.init.new) if(nlost > 0) warning(paste(nlost, "out of", npoints(x.init), "points of the pattern x.init", "lay outside the specified window", sQuote("win"))) x.init <- x.init.new } if(n < npoints(x.init)) stop(paste("x.init contains", npoints(x.init), "points", "but a pattern containing only n =", n, "points", "is required")) if(n == npoints(x.init)) { warning(paste("Initial state x.init already contains", n, "points;", "no further points were added")) return(x.init) } } X <- x.init r2 <- r^2 if(!is.infinite(n) && (n * pi * r2/4 > area.owin(win))) warning(paste("Window is too small to fit", n, "points", "at minimum separation", r)) ntries <- 0 while(ntries < giveup) { ntries <- ntries + 1 qq <- runifpoint(1, win) x <- qq$x[1] y <- qq$y[1] if(X$n == 0 || all(((x - X$x)^2 + (y - X$y)^2) > r2)) X <- superimpose(X, qq, W=win) if(X$n >= n) return(X) } if(!is.infinite(n)) warning(paste("Gave up after", giveup, "attempts with only", X$n, "points placed out of", n)) return(X) } "rPoissonCluster" <- function(kappa, rmax, rcluster, win = owin(c(0,1),c(0,1)), ..., lmax=NULL) { # Generic Poisson cluster process # Implementation for bounded cluster radius # # 'rcluster' is a function(x,y) that takes the coordinates # (x,y) of the parent point and generates a list(x,y) of offspring # # "..." 
are arguments to be passed to 'rcluster()' # win <- as.owin(win) # Generate parents in dilated window frame <- bounding.box(win) dilated <- owin(frame$xrange + c(-rmax, rmax), frame$yrange + c(-rmax, rmax)) if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa))) stop(paste("The window in which the image", sQuote("kappa"), "is defined\n", "is not large enough to contain the dilation of the window", sQuote("win"))) parents <- rpoispp(kappa, lmax=lmax, win=dilated) # result <- NULL # generate clusters np <- parents$n if(np > 0) { xparent <- parents$x yparent <- parents$y for(i in seq_len(np)) { # generate random offspring of i-th parent point cluster <- rcluster(xparent[i], yparent[i], ...) if(!inherits(cluster, "ppp")) cluster <- ppp(cluster$x, cluster$y, window=frame, check=FALSE) # skip if cluster is empty if(cluster$n > 0) { # trim to window cluster <- cluster[win] if(is.null(result)) { # initialise offspring pattern and offspring-to-parent map result <- cluster parentid <- rep.int(1, cluster$n) } else { # add to pattern result <- superimpose(result, cluster, W=win) # update offspring-to-parent map parentid <- c(parentid, rep.int(i, cluster$n)) } } } } else { # no parents - empty pattern result <- ppp(numeric(0), numeric(0), window=win) parentid <- integer(0) } attr(result, "parents") <- parents attr(result, "parentid") <- parentid return(result) } rGaussPoisson <- function(kappa, r, p2, win=owin(c(0,1), c(0,1))) { # Gauss-Poisson process oneortwo <- function(x0, y0, radius, p2) { if(runif(1) > p2) # one point return(list(x=x0, y=y0)) # two points theta <- runif(1, min=0, max=2*pi) return(list(x=x0+c(-1,1)*radius*cos(theta), y=y0+c(-1,1)*radius*sin(theta))) } result <- rPoissonCluster(kappa, 1.05 * r, oneortwo, win, radius=r/2, p2=p2) return(result) } rstrat <- function(win=square(1), nx, ny=nx, k=1) { win <- as.owin(win) stopifnot(nx >= 1 && ny >= 1) stopifnot(k >= 1) xy <- stratrand(win, nx, ny, k) Xbox <- ppp(xy$x, xy$y, win$xrange, win$yrange, 
check=FALSE) X <- Xbox[win] return(X) } xy.grid <- function(xr, yr, nx, ny, dx, dy) { nx.given <- !is.null(nx) ny.given <- !is.null(ny) dx.given <- !is.null(dx) dy.given <- !is.null(dy) if(nx.given && dx.given) stop("Do not give both nx and dx") if(nx.given) { stopifnot(nx >= 1) x0 <- seq(from=xr[1], to=xr[2], length.out=nx+1) dx <- diff(xr)/nx } else if(dx.given) { stopifnot(dx > 0) x0 <- seq(from=xr[1], to=xr[2], by=dx) nx <- length(x0) } else stop("Need either nx or dx") # determine y grid if(ny.given && dy.given) stop("Do not give both ny and dy") if(ny.given) { stopifnot(ny >= 1) y0 <- seq(from=yr[1], to=yr[2], length.out=ny+1) dy <- diff(yr)/ny } else { if(is.null(dy)) dy <- dx stopifnot(dy > 0) y0 <- seq(from=yr[1], to=yr[2], by=dy) ny <- length(y0) } return(list(x0=x0, y0=y0, nx=nx, ny=ny, dx=dx, dy=dy)) } rsyst <- function(win=square(1), nx=NULL, ny=nx, ..., dx=NULL, dy=dx) { win <- as.owin(win) xr <- win$xrange yr <- win$yrange # determine grid coordinates if(missing(ny)) ny <- NULL if(missing(dy)) dy <- NULL g <- xy.grid(xr, yr, nx, ny, dx, dy) x0 <- g$x0 y0 <- g$y0 dx <- g$dx dy <- g$dy # assemble grid and randomise location xy0 <- expand.grid(x=x0, y=y0) x <- xy0$x + runif(1, min = 0, max = dx) y <- xy0$y + runif(1, min = 0, max = dy) Xbox <- ppp(x, y, xr, yr, check=FALSE) # trim to window X <- Xbox[win] return(X) } rcellnumber <- function(n, N=10) { if(!missing(N)) { if(round(N) != N) stop("N must be an integer") stopifnot(is.finite(N)) stopifnot(N > 1) } u <- runif(n, min=0, max=1) p0 <- 1/N pN <- 1/(N * (N-1)) k <- ifelse(u < 1/N, 0, ifelse(u < (1 - pN), 1, N)) return(k) } rcell <- function(win=square(1), nx=NULL, ny=nx, ..., dx=NULL, dy=dx, N=10) { win <- as.owin(win) xr <- win$xrange yr <- win$yrange # determine grid coordinates if(missing(ny)) ny <- NULL if(missing(dy)) dy <- NULL g <- xy.grid(xr, yr, nx, ny, dx, dy) nx <- g$nx ny <- g$ny x0 <- g$x0 y0 <- g$y0 dx <- g$dx dy <- g$dy # generate pattern x <- numeric(0) y <- numeric(0) for(ix in 
seq_len(nx)) for(iy in seq_len(ny)) { nij <- rcellnumber(1, N) x <- c(x, x0[ix] + runif(nij, min=0, max=dx)) y <- c(y, y0[iy] + runif(nij, min=0, max=dy)) } Xbox <- ppp(x, y, xr, yr, check=FALSE) X <- Xbox[win] return(X) } rthin <- function(X, P, ...) { verifyclass(X, "ppp") nX <- npoints(X) if(nX == 0) return(X) if(is.numeric(P)) { # vector of retention probabilities pX <- P if(length(pX) != nX) { if(length(pX) == 1) pX <- rep.int(pX, nX) else stop("Length of vector P does not match number of points of X") } if(any(is.na(pX))) stop("P contains NA's") } else if(is.function(P)) { # function - evaluate it at points of X pX <- P(X$x, X$y, ...) if(length(pX) != nX) stop("Function P returned a vector of incorrect length") if(!is.numeric(pX)) stop("Function P returned non-numeric values") if(any(is.na(pX))) stop("Function P returned some NA values") prange <- range(pX) } else if(is.im(P)) { # image - look it up if(!(P$type %in% c("integer", "real"))) stop("Values of image P should be numeric") pX <- P[X, drop=FALSE] if(any(is.na(pX))) stop("some points of X lie outside the domain of image P") } else stop("Unrecognised format for P") if(min(pX) < 0) stop("some probabilities are negative") if(max(pX) > 1) stop("some probabilities are greater than 1") retain <- (runif(length(pX)) < pX) Y <- X[retain] # also handle offspring-to-parent map if present if(!is.null(parentid <- attr(X, "parentid"))) attr(Y, "parentid") <- parentid[retain] return(Y) } # rjitter rjitter <- function(X, radius, retry=TRUE, giveup=10000) { verifyclass(X, "ppp") nX <- npoints(X) if(nX == 0) return(X) W <- X$window if(!retry) { # points outside window are lost D <- runifdisc(nX, radius=radius) xnew <- X$x + D$x ynew <- X$y + D$y ok <- inside.owin(xnew, ynew, W) return(ppp(xnew[ok], ynew[ok], window=W)) } # retry = TRUE: condition on points being inside window undone <- rep.int(TRUE, nX) while(any(undone)) { giveup <- giveup - 1 if(giveup <= 0) return(X) Y <- X[undone] D <- runifdisc(Y$n, radius=radius) 
xnew <- Y$x + D$x ynew <- Y$y + D$y ok <- inside.owin(xnew, ynew, W) if(any(ok)) { changed <- seq_len(nX)[undone][ok] X$x[changed] <- xnew[ok] X$y[changed] <- ynew[ok] undone[changed] <- FALSE } } return(X) } spatstat/R/ippm.R0000755000176000001440000001126612237642727013463 0ustar ripleyusers# # ippm.R # # $Revision: 2.7 $ $Date: 2011/12/03 11:37:03 $ # # Fisher scoring algorithm for irregular parameters in ppm trend # ippm <- function(..., iScore=NULL, start=list(), covfunargs=start, maxiter=20, tol=1e-4, progress=TRUE, stepfactor=1, dbug=FALSE) { # validate if(!is.list(start) || length(start) == 0) stop("start should be a list of initial values for irregular parameters") if(!is.list(iScore) || length(iScore) != length(start)) stop("iScore should be a list of the same length as start") stopifnot(identical(names(iScore), names(start))) if(!all(unlist(lapply(iScore, is.function)))) stop("iScore should be a list of functions") # smap <- match(names(start), names(covfunargs)) if(any(is.na(smap))) stop("variables in start should be a subset of variables in covfunargs") covfunargs[smap] <- start # # fit the initial model and extract information fit0 <- ppm(..., covfunargs=covfunargs) lpl0 <- fit0$maxlogpl p <- length(coef(fit0)) # examine covariates and trend covariates <- fit0$covariates isfun <- unlist(lapply(covariates, is.function)) covfuns <- covariates[isfun] # determine which covariates depend on which irregular parameters pnames <- names(start) hasarg <- function(f,a) { a %in% names(formals(f)) } depmat <- matrix(FALSE, nrow=length(covfuns), ncol=length(pnames)) rownames(depmat) <- names(covfuns) colnames(depmat) <- pnames for(j in 1:length(pnames)) depmat[,j] <- unlist(lapply(covfuns, hasarg, pnames[j])) # find covariates that depend on ANY irregular parameter depvar <- rownames(depmat)[apply(depmat, 1, any)] # check that these covariates appear only in offset terms covnames.fitted <- model.covariates(fit0, fitted=TRUE, offset=FALSE) if(any(uhoh <- depvar %in% 
covnames.fitted)) stop(paste(ngettext(sum(uhoh), "The covariate", "The covariates"), commasep(sQuote(depvar[uhoh])), "should appear only in offset terms")) # check that every irregular parameter to be updated appears somewhere covnames.offset <- model.covariates(fit0, fitted=FALSE, offset=TRUE) usearg <- apply(depmat[covnames.offset, , drop=FALSE], 2, any) if(!all(usearg)) { nbad <- sum(!usearg) warning(paste("Cannot maximise over the irregular", ngettext(nbad, "parameter", "parameters"), commasep(sQuote(names(usearg)[!usearg])), ngettext(nbad, "because it is", "because they are"), "not used in any term of the model")) # restrict start <- start[usearg] iScore <- iScore[usearg] pnames <- names(start) } # ready iter <- 0 param <- start pvec <- as.numeric(param) ndigits <- max(0, -ceiling(log10(tol))) + 1 # go for(phase in 1:2) { if(progress) cat(paste("Phase", phase, "\n")) maxit <- if(phase == 1) 2 else maxiter for(iter in 0:maxit) { # fit model with current irregular parameters covfunargs[smap] <- param fit <- ppm(..., covfunargs=covfunargs) lpl <- logLik(fit, warn=FALSE) if(progress) { co <- coef(fit) cat(paste(paren(iter, "["), paste(paste(pnames, "=", round(pvec, digits=ndigits)), collapse=", "), "->", paste(paste(names(co), "=", round(co, digits=ndigits)), collapse=", "), "; logPL=", signif(lpl, 8), "\n")) } # compute model matrix and inverse fisher information stuff <- ppm.influence(fit, what="derivatives", iScore=iScore, iArgs=param) score <- stuff$deriv$score vc <- stuff$deriv$vc fush <- stuff$deriv$fush if(dbug) { cat("\nscore=\n") print(score) cat("\nvc=\n") print(vc) cat("\nfush=\n") print(fush) } if(phase == 1) { # Fisher scoring on partial matrix subscore <- score[, -(1:p), drop=FALSE] subfush <- fush[-(1:p), -(1:p)] stepvec <- as.numeric(solve(subfush) %*% t(subscore)) } else { # Fisher scoring step on full parameter vector stepvec <- as.numeric(vc %*% t(score))[ -(1:p)] } if(dbug) { cat("\nstep=\n") print(stepvec) } # update parameters or exit if(iter 
> 0 && all(abs(stepvec) < tol)) break pvec <- pvec + stepfactor * stepvec param <- as.list(pvec) names(param) <- pnames } } if(iter == maxiter) warning(paste("Maximum number of iterations", paren(maxiter), "reached without convergence\n")) return(fit) } spatstat/R/versions.R0000755000176000001440000000236112237642727014362 0ustar ripleyusers# # versions.R # # version numbers # # $Revision: 1.9 $ $Date: 2013/04/30 04:21:50 $ # ##################### # Extract version string from ppm object versionstring.ppm <- function(object) { verifyclass(object, "ppm") v <- object$version if(is.null(v) || !is.list(v)) v <- list(major=1, minor=3, release=4) vs <- paste(v$major, ".", v$minor, "-", v$release, sep="") return(vs) } # Extract version string from interact object versionstring.interact <- function(object) { verifyclass(object, "interact") v <- object$version return(v) # NULL before 1.11-0 } # Get version number of current spatstat installation # This is now saved in the spatstat cache environment rather than read from file every time versionstring.spatstat <- function() { get("SpatstatVersion", envir = .spEnv) } store.versionstring.spatstat <- function() { vs <- read.dcf(file=system.file("DESCRIPTION", package="spatstat"), fields="Version") vs <- as.character(vs) assign("SpatstatVersion", vs, envir=.spEnv) } # Extract major and minor versions only. 
majorminorversion <- function(v) { vp <- package_version(v) return(package_version(paste(vp$major, vp$minor, sep="."))) } # legacy function RandomFieldsSafe <- function() { TRUE } spatstat/R/envelopelpp.R0000755000176000001440000001046412237642727015046 0ustar ripleyusers# # envelopelpp.R # # $Revision: 1.15 $ $Date: 2013/04/25 06:37:43 $ # # Envelopes for 'lpp' objects # # envelope.lpp <- function(Y, fun=linearK, nsim=99, nrank=1, ..., simulate=NULL, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- linearK if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.lpp")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(is.null(simulate)) { # ................................................... # Realisations of complete spatial randomness # will be generated by rpoisppOnLines # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y nY <- if(!is.marked(Y)) npoints(Y) else table(marks(Y)) NETWORK <- Y$domain totlen <- sum(lengths.psp(NETWORK$lines)) Yintens <- nY/totlen # expression that will be evaluated simexpr <- expression(rpoislpp(Yintens, NETWORK)) # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } else { # ................................................... 
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.lppm <- function(Y, fun=linearK, nsim=99, nrank=1, ..., simulate=NULL, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- linearK if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.pp3")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(is.null(simulate)) { # ................................................... # Simulated realisations of the fitted model Y # will be generated using rpoisppOnLines if(!is.poisson.ppm(Y$fit)) stop("Simulation of non-Poisson models is not yet implemented") X <- Y$X MODEL <- Y NETWORK <- X$domain type <- "lppm" lambdaFit <- predict(MODEL) LMAX <- if(is.im(lambdaFit)) max(lambdaFit) else unlist(lapply(lambdaFit, max)) simexpr <- expression(rpoislpp(lambdaFit, NETWORK, lmax=LMAX)) # evaluate in THIS environment simrecipe <- simulrecipe(type = "lppm", expr = simexpr, envir = envir.here, csr = FALSE) } else { # ................................................... 
# Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } spatstat/R/pcfcross.R0000755000176000001440000000740612237642727014341 0ustar ripleyusers# # # pcfcross.R # # kernel estimation of cross-type pair correlation function # # Currently computed by differencing pcf # pcfcross <- function(X, i, j, ...) { stopifnot(is.multitype(X)) if(missing(i)) i <- levels(marks(X))[1] if(missing(j)) j <- levels(marks(X))[2] # extract points of types i and j Xsplit <- split(X) Xi <- Xsplit[[i]] Xj <- Xsplit[[j]] iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) if(i == j) { p.ii <- pcf(Xi, ...) p.ii <- rebadge.fv(p.ii, new.ylab=substitute(g[i,i](r), list(i=iname)), new.fname=sprintf("g[list(%s,%s)]", iname, iname), new.yexp=substitute(g[list(i,i)](r), list(i=iname))) p.ii <- tweak.fv.entry(p.ii, "theo", new.labl="{%s^{pois}}(r)") p.ii <- tweak.fv.entry(p.ii, "trans", new.labl="hat(%s^{trans})(r)") p.ii <- tweak.fv.entry(p.ii, "iso", new.labl="hat(%s^{iso})(r)") return(p.ii) } Xall <- superimpose(Xi, Xj, W=X$window) # estimate intensities lambda.i <- summary(Xi)$intensity lambda.j <- summary(Xj)$intensity lambda.all <- lambda.i + lambda.j # kernel estimates of unmarked pcf's p.all <- pcf(Xall, ...) 
rr <- p.all$r p.ii <- do.call("pcf", resolve.defaults(list(Xi), list(...), list(r=rr))) p.jj <- do.call("pcf", resolve.defaults(list(Xj), list(...), list(r=rr))) # differencing p.ij <- eval.fv(pmax.int(0, (p.all * lambda.all^2 - p.ii * lambda.i^2 - p.jj * lambda.j^2)/(2 * lambda.i * lambda.j))) # rebadge p.ij <- rebadge.fv(p.ij, new.ylab=substitute(g[i,j](r), list(i=iname, j=jname)), new.fname=sprintf("g[list(%s,%s)]", iname, jname), new.yexp=substitute(g[list(i,j)](r), list(i=iname, j=jname))) p.ij <- tweak.fv.entry(p.ij, "theo", new.labl="{%s^{pois}}(r)") p.ij <- tweak.fv.entry(p.ij, "trans", new.labl="hat(%s^{trans})(r)") p.ij <- tweak.fv.entry(p.ij, "iso", new.labl="hat(%s^{iso})(r)") return(p.ij) } pcfdot <- function(X, i, ...) { stopifnot(is.multitype(X)) marx <- marks(X) if(missing(i)) i <- levels(marx)[1] # map i and not-i to two types marks(X) <- factor(ifelse(marx == i, "i", "n"), levels=c("i", "n")) # extract points of type i and not-i splitX <- split(X) Xi <- splitX[["i"]] Xn <- splitX[["n"]] Xall <- unmark(X) # estimate intensities lambda.i <- summary(Xi)$intensity lambda.n <- summary(Xn)$intensity lambda.all <- lambda.i + lambda.n # compute cross type pcf from i to not-i p.in <- pcfcross(X, "i", "n", ...) 
rr <- p.in$r # compute pcf of type i points using same parameters p.ii <- do.call("pcf", resolve.defaults(list(Xi), list(...), list(r=rr))) # add p.idot <- eval.fv((p.in * lambda.n + p.ii * lambda.i)/lambda.all) # # rebadge iname <- make.parseable(paste(i)) p.idot <- rebadge.fv(p.idot, substitute(g[i ~ dot](r), list(i=iname)), new.fname=paste("g[", iname, "~ symbol(\"\\267\")]"), new.yexp=substitute(g[i ~ symbol("\267")](r), list(i=iname))) p.idot <- tweak.fv.entry(p.idot, "theo", new.labl="{%s^{pois}}(r)") p.idot <- tweak.fv.entry(p.idot, "trans", new.labl="hat(%s^{trans})(r)") p.idot <- tweak.fv.entry(p.idot, "iso", new.labl="hat(%s^{iso})(r)") return(p.idot) } spatstat/R/linim.R0000755000176000001440000001374712237642727013634 0ustar ripleyusers# # linim.R # # $Revision: 1.10 $ $Date: 2013/10/20 01:05:35 $ # # Image/function on a linear network # linim <- function(L, Z, ..., df=NULL) { L <- as.linnet(L) stopifnot(is.im(Z)) if(is.null(df)) { # compute the data frame of mapping information xx <- rasterx.im(Z) yy <- rastery.im(Z) mm <- !is.na(Z$v) xx <- as.vector(xx[mm]) yy <- as.vector(yy[mm]) pixelcentres <- ppp(xx, yy, window=as.rectangle(Z), check=FALSE) pixdf <- data.frame(xc=xx, yc=yy) # project pixel centres onto lines p2s <- project2segment(pixelcentres, as.psp(L)) projloc <- as.data.frame(p2s$Xproj) projmap <- as.data.frame(p2s[c("mapXY", "tp")]) # extract values values <- Z[pixelcentres] # bundle df <- cbind(pixdf, projloc, projmap, data.frame(values=values)) } else { stopifnot(is.data.frame(df)) neednames <- c("xc", "yc", "x", "y", "mapXY", "tp", "values") ok <- neednames %in% names(df) if(any(!ok)) { nn <- sum(!ok) stop(paste(ngettext(nn, "A column", "Columns"), "named", commasep(sQuote(neednames[!ok])), ngettext(nn, "is", "are"), "missing from argument", sQuote("df"))) } } out <- Z attr(out, "L") <- L attr(out, "df") <- df class(out) <- c("linim", class(out)) return(out) } print.linim <- function(x, ...) 
{ cat("Image on linear network\n") print(attr(x, "L")) NextMethod("print") } plot.linim <- function(x, ..., style=c("colour", "width"), scale, adjust=1) { xname <- short.deparse(substitute(x)) style <- match.arg(style) # colour style: plot as pixel image if(style == "colour") return(do.call("plot.im", resolve.defaults(list(x), list(...), list(main=xname)))) # width style L <- attr(x, "L") df <- attr(x, "df") Llines <- as.psp(L) # initialise plot W <- as.owin(L) do.call.matched("plot.owin", resolve.defaults(list(x=W, type="n"), list(...), list(main=xname)), extrargs="type") # rescale values to a plottable range vr <- range(df$values) vr[1] <- min(0, vr[1]) if(missing(scale)) { maxsize <- mean(distmap(Llines))/2 scale <- maxsize/diff(vr) } df$values <- adjust * scale * (df$values - vr[1]) # split data by segment mapXY <- factor(df$mapXY, levels=seq_len(Llines$n)) dfmap <- split(df, mapXY, drop=TRUE) # sort each segment's data by position along segment dfmap <- lapply(dfmap, function(z) { z[fave.order(z$tp), ] }) # plot each segment's data Lends <- Llines$ends Lperp <- angles.psp(Llines) + pi/2 Lfrom <- L$from Lto <- L$to Lvert <- L$vertices for(i in seq(length(dfmap))) { z <- dfmap[[i]] segid <- unique(z$mapXY)[1] xx <- z$x yy <- z$y vv <- z$values # add endpoints of segment leftend <- Lvert[Lfrom[segid]] rightend <- Lvert[Lto[segid]] xx <- c(leftend$x, xx, rightend$x) yy <- c(leftend$y, yy, rightend$y) vv <- c(vv[1], vv, vv[length(vv)]) # create polygon xx <- c(xx, rev(xx)) yy <- c(yy, rev(yy)) vv <- c(vv, -rev(vv))/2 ang <- Lperp[segid] xx <- xx + cos(ang) * vv yy <- yy + sin(ang) * vv do.call.matched("polygon", resolve.defaults(list(x=xx, y=yy), list(...), list(border=NA, col=1))) } return(invisible(NULL)) } as.im.linim <- function(X, ...) { as.im(X$Z, ...) } as.linim <- function(X, ...) { UseMethod("as.linim") } as.linim.default <- function(X, L, ...) { stopifnot(inherits(L, "linnet")) Y <- as.im(X, W=as.rectangle(as.owin(L)), ...) 
Z <- as.im(as.mask.psp(as.psp(L), as.owin(Y))) Y <- eval.im(Z * Y) out <- linim(L, Y) return(out) } as.linim.linim <- function(X, ...) { if(length(list(...)) == 0) return(X) Y <- as.linim.default(X, as.linnet(X), ...) return(Y) } # analogue of eval.im eval.linim <- function(expr, envir, harmonize=TRUE) { sc <- sys.call() # Get names of all variables in the expression e <- as.expression(substitute(expr)) varnames <- all.vars(e) allnames <- all.names(e, unique=TRUE) funnames <- allnames[!(allnames %in% varnames)] if(length(varnames) == 0) stop("No variables in this expression") # get the values of the variables if(missing(envir)) envir <- sys.parent() vars <- lapply(as.list(varnames), function(x, e) get(x, envir=e), e=envir) names(vars) <- varnames funs <- lapply(as.list(funnames), function(x, e) get(x, envir=e), e=envir) names(funs) <- funnames # Find out which variables are (linear) images islinim <- unlist(lapply(vars, inherits, what="linim")) if(!any(islinim)) stop("There are no linear images (class linim) in this expression") # .................................... # Evaluate the pixel values using eval.im # .................................... sc[[1]] <- as.name('eval.im') Y <- eval(sc) # ......................................... # Then evaluate data frame entries if feasible # ......................................... 
dfY <- NULL linims <- vars[islinim] nlinims <- length(linims) dframes <- lapply(linims, attr, which="df") nets <- lapply(linims, attr, which="L") isim <- unlist(lapply(vars, is.im)) if(!any(isim & !islinim)) { # all images are 'linim' objects # Check that the images refer to the same linear network if(nlinims > 1) { agree <- unlist(lapply(nets[-1], identical, y=nets[[1]])) if(!all(agree)) stop(paste("Images do not refer to the same linear network")) } dfempty <- unlist(lapply(dframes, is.null)) if(!any(dfempty)) { # replace each image variable by its data frame column of values vars[islinim] <- lapply(dframes, getElement, "values") # now evaluate expression Yvalues <- eval(e, append(vars, funs)) # pack up dfY <- dframes[[1]] dfY$values <- Yvalues } } result <- linim(nets[[1]], Y, df=dfY) return(result) } spatstat/R/randommk.R0000755000176000001440000003245012237642727014324 0ustar ripleyusers# # # randommk.R # # Random generators for MULTITYPE point processes # # $Revision: 1.31 $ $Date: 2013/04/25 06:37:43 $ # # rmpoispp() random marked Poisson pp # rmpoint() n independent random marked points # rmpoint.I.allim() ... internal # rpoint.multi() temporary wrapper # "rmpoispp" <- function(lambda, lmax=NULL, win = owin(c(0,1),c(0,1)), types, ...) { # arguments: # lambda intensity: # constant, function(x,y,m,...), image, # vector, list of function(x,y,...) or list of images # # lmax maximum possible value of lambda # constant, vector, or list # # win default observation window (of class 'owin') # # types possible types for multitype pattern # # ... 
extra arguments passed to lambda() # # Validate arguments is.numvector <- function(x) {is.numeric(x) && is.vector(x)} is.constant <- function(x) {is.numvector(x) && length(x) == 1} checkone <- function(x) { if(is.constant(x)) { if(x >= 0) return(TRUE) else stop("Intensity is negative!") } return(is.function(x) || is.im(x)) } single.arg <- checkone(lambda) vector.arg <- !single.arg && is.numvector(lambda) list.arg <- !single.arg && is.list(lambda) if(! (single.arg || vector.arg || list.arg)) stop(paste("argument", sQuote("lambda"), "not understood")) if(list.arg && !all(unlist(lapply(lambda, checkone)))) stop(paste("Each entry in the list", sQuote("lambda"), "must be either a constant, a function or an image")) if(vector.arg && any(lambda < 0)) stop(paste("Some entries in the vector", sQuote("lambda"), "are negative")) # Determine & validate the set of possible types if(missing(types)) { if(single.arg) stop(paste(sQuote("types"), "must be given explicitly when", sQuote("lambda"), "is a constant, a function or an image")) else types <- seq_along(lambda) } ntypes <- length(types) if(!single.arg && (length(lambda) != ntypes)) stop(paste("The lengths of", sQuote("lambda"), "and", sQuote("types"), "do not match")) factortype <- factor(types, levels=types) # Validate `lmax' if(! 
(is.null(lmax) || is.numvector(lmax) || is.list(lmax) )) stop(paste(sQuote("lmax"), "should be a constant, a vector, a list or NULL")) # coerce lmax to a vector, to save confusion if(is.null(lmax)) maxes <- rep(NULL, ntypes) else if(is.numvector(lmax) && length(lmax) == 1) maxes <- rep.int(lmax, ntypes) else if(length(lmax) != ntypes) stop(paste("The length of", sQuote("lmax"), "does not match the number of possible types")) else if(is.list(lmax)) maxes <- unlist(lmax) else maxes <- lmax # coerce lambda to a list, to save confusion lam <- if(single.arg) lapply(1:ntypes, function(x, y){y}, y=lambda) else if(vector.arg) as.list(lambda) else lambda # Ensure that m can be passed as a single value to function(x,y,m,...) slice.fun <- function(x,y,fun,mvalue, ...) { m <- if(length(mvalue) == 1) rep.int(mvalue, length(x)) else mvalue result <- fun(x,y,m, ...) return(result) } # Simulate for(i in 1:ntypes) { if(single.arg && is.function(lambda)) # call f(x,y,m, ...) Y <- rpoispp(slice.fun, lmax=maxes[i], win=win, fun=lambda, mvalue=types[i], ...) else # call f(x,y, ...) or use other formats Y <- rpoispp(lam[[i]], lmax=maxes[i], win=win, ...) 
Y <- Y %mark% factortype[i] X <- if(i == 1) Y else superimpose(X, Y, W=X$window, check=FALSE) } # Randomly permute, just in case the order is important permu <- sample(X$n) return(X[permu]) } # ------------------------------------------------------------------------ "rmpoint" <- function(n, f=1, fmax=NULL, win = unit.square(), types, ptypes, ..., giveup = 1000, verbose = FALSE) { if(!is.numeric(n)) stop("n must be a scalar or vector") if(any(ceiling(n) != floor(n))) stop("n must be an integer or integers") if(any(n < 0)) stop("n must be non-negative") if(sum(n) == 0) { nopoints <- ppp(x=numeric(0), y=numeric(0), window=win, check=FALSE) nomarks <- factor(types[numeric(0)], levels=types) empty <- nopoints %mark% nomarks return(empty) } ############# Model <- if(length(n) == 1) { if(missing(ptypes)) "I" else "II" } else "III" ############## Validate f argument is.numvector <- function(x) {is.numeric(x) && is.vector(x)} is.constant <- function(x) {is.numvector(x) && length(x) == 1} checkone <- function(x) { if(is.constant(x)) { if(x >= 0) return(TRUE) else stop("Intensity is negative!") } return(is.function(x) || is.im(x)) } single.arg <- checkone(f) vector.arg <- !single.arg && is.numvector(f) list.arg <- !single.arg && is.list(f) if(! 
(single.arg || vector.arg || list.arg)) stop(paste("argument", sQuote("f"), "not understood")) if(list.arg && !all(unlist(lapply(f, checkone)))) stop(paste("Each entry in the list", sQuote("f"), "must be either a constant, a function or an image")) if(vector.arg && any(f < 0)) stop(paste("Some entries in the vector", sQuote("f"), "are negative")) # cases where it's known that all types of points # have the same conditional density of location (x,y) const.density <- vector.arg || (list.arg && all(unlist(lapply(f, is.constant)))) same.density <- const.density || (single.arg && !is.function(f)) ################ Determine & validate the set of possible types if(missing(types)) { if(single.arg && length(n) == 1) stop(paste(sQuote("types"), "must be given explicitly when", sQuote("f"), "is a single number, a function or an image and", sQuote("n"), "is a single number")) else if(single.arg) types <- seq_len(n) else types <- seq_along(f) } ntypes <- length(types) if(!single.arg && (length(f) != ntypes)) stop(paste("The lengths of", sQuote("f"), "and", sQuote("types"), "do not match")) if(length(n) > 1 && ntypes != length(n)) stop(paste("The lengths of", sQuote("n"), "and", sQuote("types"), "do not match")) factortype <- factor(types, levels=types) ####################### Validate `fmax' if(! 
(is.null(fmax) || is.numvector(fmax) || is.list(fmax) )) stop(paste(sQuote("fmax"), "should be a constant, a vector, a list or NULL")) # coerce fmax to a vector, to save confusion if(is.null(fmax)) maxes <- rep(NULL, ntypes) else if(is.constant(fmax)) maxes <- rep.int(fmax, ntypes) else if(length(fmax) != ntypes) stop(paste("The length of", sQuote("fmax"), "does not match the number of possible types")) else if(is.list(fmax)) maxes <- unlist(fmax) else maxes <- fmax # coerce f to a list, to save confusion flist <- if(single.arg) lapply(1:ntypes, function(i, f){f}, f=f) else if(vector.arg) as.list(f) else f #################### START ################################## ## special algorithm for Model I when all f[[i]] are images if(Model == "I" && !same.density && all(unlist(lapply(flist, is.im)))) return(rmpoint.I.allim(n, flist, types)) ## otherwise, first select types, then locations given types if(Model == "I") { # Compute approximate marginal distribution of type if(vector.arg) ptypes <- f/sum(f) else if(list.arg) { integratexy <- function(f, win, ...) { imag <- as.im(f, win, ...) summ <- summary(imag) summ$integral } fintegrals <- unlist(lapply(flist, integratexy, win=win, ...)) ptypes <- fintegrals/sum(fintegrals) } else { #single argument if(is.constant(f)) ptypes <- rep.int(1/ntypes, ntypes) else { # f is a function (x,y,m) # create a counterpart of f that works when m is a single value g <- function(xx, yy, ..., m, f) { mm <- rep.int(m, length(xx)) f(xx, yy, mm, ...) 
} # then convert to images and integrate fintegrals <- unlist(lapply(types, function(typ, ..., win, g) { fim <- as.im(g, W=win, ..., m=typ) summary(fim)$integral }, win=win, g=g, f=f)) # normalise ptypes <- fintegrals/sum(fintegrals) } } } # Generate marks if(Model == "I" || Model == "II") { # i.i.d.: n marks with distribution 'ptypes' marques <- sample(factortype, n, prob=ptypes, replace=TRUE) nn <- table(marques) } else { # multinomial: fixed number n[i] of types[i] repmarks <- factor(rep.int(types, n), levels=types) marques <- sample(repmarks) nn <- n } ntot <- sum(nn) ############## SIMULATE !!! ######################### # If all types have the same conditional density of location, # generate the locations using rpoint, and return. if(same.density) { X <- rpoint(ntot, flist[[1]], maxes[[1]], win=win, ..., giveup=giveup, verbose=verbose) X <- X %mark% marques return(X) } # Otherwise invoke rpoint() for each type separately X <- ppp(numeric(ntot), numeric(ntot), window=win, marks=marques, check=FALSE) for(i in 1:ntypes) { if(verbose) cat(paste("Type", i, "\n")) if(single.arg && is.function(f)) { # want to call f(x,y,m, ...) # create a counterpart of f that works when m is a single value gg <- function(xx, yy, ..., m, fun) { mm <- rep.int(m, length(xx)) fun(xx, yy, mm, ...) } Y <- rpoint(nn[i], gg, fmax=maxes[i], win=win, ..., m=factortype[i], fun=f, giveup=giveup, verbose=verbose) } else # call f(x,y, ...) or use other formats Y <- rpoint(nn[i], flist[[i]], fmax=maxes[i], win=win, ..., giveup=giveup, verbose=verbose) Y <- Y %mark% factortype[i] X[marques == factortype[i]] <- Y } return(X) } rmpoint.I.allim <- function(n, f, types) { # Internal use only! # Generates random marked points (Model I *only*) # when all f[[i]] are pixel images. 
# # Extract pixel coordinates and probabilities get.stuff <- function(imag) { w <- as.mask(as.owin(imag)) dx <- w$xstep dy <- w$ystep xpix <- as.vector(raster.x(w)[w$m]) ypix <- as.vector(raster.y(w)[w$m]) ppix <- as.vector(imag$v[w$m]) # not normalised - OK npix <- length(xpix) return(list(xpix=xpix, ypix=ypix, ppix=ppix, dx=rep.int(dx,npix), dy=rep.int(dy, npix), npix=npix)) } stuff <- lapply(f, get.stuff) # Concatenate into loooong vectors xpix <- unlist(lapply(stuff, function(z) { z$xpix })) ypix <- unlist(lapply(stuff, function(z) { z$ypix })) ppix <- unlist(lapply(stuff, function(z) { z$ppix })) dx <- unlist(lapply(stuff, function(z) { z$dx })) dy <- unlist(lapply(stuff, function(z) { z$dy })) # replicate types numpix <- unlist(lapply(stuff, function(z) { z$npix })) tpix <- rep.int(seq_along(types), numpix) # # sample pixels from union of all images # npix <- sum(numpix) id <- sample(npix, n, replace=TRUE, prob=ppix) # get pixel centre coordinates and randomise within pixel x <- xpix[id] + (runif(n) - 1/2) * dx[id] y <- ypix[id] + (runif(n) - 1/2) * dy[id] # compute types marx <- factor(types[tpix[id]],levels=types) # et voila! 
return(ppp(x, y, window=as.owin(f[[1]]), marks=marx, check=FALSE)) } # # wrapper for Rolf's function # rpoint.multi <- function (n, f, fmax=NULL, marks = NULL, win = unit.square(), giveup = 1000, verbose = FALSE, warn=TRUE) { no.marks <- is.null(marks) || (is.factor(marks) && length(levels(marks)) == 1) if(warn) { nhuge <- spatstat.options("huge.npoints") if(n > nhuge) warning(paste("Attempting to generate", n, "random points")) } # unmarked case if (no.marks) { if(is.function(f)) return(rpoint(n, f, fmax, win, giveup=giveup, verbose=verbose)) else return(rpoint(n, f, fmax, giveup=giveup, verbose=verbose)) } # multitype case if(length(marks) != n) stop("length of marks vector != n") if(!is.factor(marks)) stop("marks should be a factor") types <- levels(marks) types <- factor(types, levels=types) # generate required number of points of each type nums <- table(marks) X <- rmpoint(nums, f, fmax, win=win, types=types, giveup=giveup, verbose=verbose) if(any(table(marks(X)) != nums)) stop("Internal error: output of rmpoint illegal") # reorder them to correspond to the desired 'marks' vector Y <- X Xmarks <- marks(X) for(ty in types) { to <- (marks == ty) from <- (Xmarks == ty) if(sum(to) != sum(from)) stop(paste("Internal error: mismatch for mark =", ty)) if(any(to)) { Y$x[to] <- X$x[from] Y$y[to] <- X$y[from] Y$marks[to] <- ty } } return(Y) } spatstat/R/triplet.family.R0000644000176000001440000000640112237642727015451 0ustar ripleyusers# # # triplet.family.R # # $Revision: 1.1 $ $Date: 2011/11/05 07:18:51 $ # # Family of `third-order' point process models # # triplet.family: object of class 'isf' # # # ------------------------------------------------------------------- # triplet.family <- list( name = "triplet", print = function(self) { cat("Family of third-order interactions\n") }, plot = NULL, # ---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `triplet' family. 
# # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # This function is currently modelled on 'inforder.family'. # It simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) 
if(is.matrix(POT)) { if(nrow(POT) != U$n) stop("Internal error: the potential returned a matrix with the wrong number of rows") } else if(is.array(POT) && length(dim(POT)) > 2) stop("Internal error: the potential returned an array with more than 2 dimensions") else if(is.vector(POT)) { if(length(POT) != U$n) stop("Internal error: the potential returned a vector with the wrong length") POT <- matrix(POT, ncol=1) } else stop("Internal error: the return value from the potential is not understood") return(POT) }, ######### end of function $eval suffstat = NULL ######### end of function $suffstat ) ######### end of list class(triplet.family) <- "isf" spatstat/R/Tstat.R0000644000176000001440000002101012237642727013576 0ustar ripleyusers# # tstat.R Estimation of T function # # $Revision: 1.7 $ $Date: 2013/04/25 06:37:43 $ # Tstat <- local({ # helper functions edgetri.Trans <- function(X, triid, trim=spatstat.options("maxedgewt")) { triid <- as.matrix(triid) ntri <- nrow(triid) if(ntri == 0) return(numeric(0)) W <- rescue.rectangle(as.owin(X)) if(W$type != "rectangle") stop("Translation correction is only implemented for rectangular windows") x <- matrix(X$x[triid], nrow=ntri) y <- matrix(X$y[triid], nrow=ntri) dx <- apply(x, 1, function(z) diff(range(z))) dy <- apply(y, 1, function(z) diff(range(z))) wide <- diff(W$xrange) high <- diff(W$yrange) weight <- wide * high/((wide - dx) * (high - dy)) weight <- pmin.int(trim, weight) return(weight) } # helper function implemented.for.T <- function(correction, windowtype, explicit) { rect <- (windowtype == "rectangle") if(any(correction == "best")) { # select best available correction correction <- if(rect) "translate" else "border" } else { # available selection of edge corrections depends on window if(!rect) { tra <- (correction == "translate") if(any(tra)) { whinge <- "Translation correction is only implemented for rectangular windows" if(explicit) { if(all(tra)) stop(whinge) else warning(whinge) } correction <- 
correction[!tra] } } } return(correction) } # .......... main function .................... Tstat <- function(X, ..., r=NULL, rmax=NULL, correction=c("border", "translate"), ratio=FALSE, verbose=TRUE) { verifyclass(X, "ppp") rfixed <- !is.null(r) npts <- npoints(X) W <- X$window area <- area.owin(W) lambda <- npts/area lambda2 <- (npts * (npts - 1))/(area^2) lambda3 <- (npts * (npts - 1) * (npts - 2))/(area^3) rmaxdefault <- if(!is.null(rmax)) rmax else rmax.rule("K", W, lambda) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(!correction.given) correction <- c("border", "bord.modif", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.T(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame TT <- data.frame(r=r, theo= (pi/2) * (pi - 3 * sqrt(3)/4) * r^4) desc <- c("distance argument r", "theoretical Poisson %s") TT <- fv(TT, "r", quote(T(r)), "theo", , alim, c("r","%s[pois](r)"), desc, fname="T") # save numerator and denominator? 
if(ratio) { denom <- lambda2 * area numT <- eval.fv(denom * TT) denT <- eval.fv(denom + TT * 0) attributes(numT) <- attributes(denT) <- attributes(T) attr(numT, "desc")[2] <- "numerator for theoretical Poisson %s" attr(denT, "desc")[2] <- "denominator for theoretical Poisson %s" } # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax, ordered=FALSE) I <- close$i J <- close$j DIJ <- close$d nI <- length(I) # estimate computation time if(verbose) { nTmax <- nI * (nI-1) /2 esttime <- exp(1.25 * log(nTmax) - 21.5) message(paste("Searching", nTmax, "potential triangles;", "estimated time", codetime(esttime))) } # find triangles with their diameters tri <- trianglediameters(I, J, DIJ, nvert=npts) stopifnot(identical(colnames(tri), c("i", "j", "k", "diam"))) # reassemble so each triangle appears 3 times, once for each vertex II <- with(tri, c(i, j, k)) DD <- with(tri, rep.int(diam, 3)) if(any(correction == "none")) { # uncorrected! For demonstration purposes only! wh <- whist(DD, breaks$val) # no weights numTun <- cumsum(wh) denTun <- lambda3 * area # uncorrected estimate of T Tun <- numTun/denTun TT <- bind.fv(TT, data.frame(un=Tun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") if(ratio) { # save numerator and denominator numT <- bind.fv(numT, data.frame(un=numTun), "hat(%s)[un](r)", "numerator of uncorrected estimate of %s", "un") denT <- bind.fv(denT, data.frame(un=denTun), "hat(%s)[un](r)", "denominator of uncorrected estimate of %s", "un") } } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) bI <- b[II] # apply reduced sample algorithm RS <- Kount(DD, bI, b, breaks) if(any(correction == "bord.modif")) { # modified border correction denom.area <- eroded.areas(W, r) numTbm <- RS$numerator denTbm <- lambda3 * denom.area Tbm <- numTbm/denTbm TT <- bind.fv(TT, data.frame(bord.modif=Tbm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") 
if(ratio) { # save numerator and denominator numT <- bind.fv(numT, data.frame(bord.modif=numTbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denT <- bind.fv(denT, data.frame(bord.modif=denTbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "border")) { numTb <- RS$numerator denTb <- lambda2 * RS$denom.count Tb <- numTb/denTb TT <- bind.fv(TT, data.frame(border=Tb), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numT <- bind.fv(numT, data.frame(border=numTb), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denT <- bind.fv(denT, data.frame(border=denTb), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } } if(any(correction == "translate")) { # translation correction # apply to triangle list edgewt <- edgetri.Trans(X, tri[, 1:3]) wh <- whist(tri$diam, breaks$val, edgewt) numTtrans <- 3 * cumsum(wh) denTtrans <- lambda3 * area Ttrans <- numTtrans/denTtrans h <- diameter(W)/2 Ttrans[r >= h] <- NA TT <- bind.fv(TT, data.frame(trans=Ttrans), "hat(%s)[trans](r)", "translation-corrected estimate of %s", "trans") if(ratio) { numT <- bind.fv(numT, data.frame(trans=numTtrans), "hat(%s)[trans](r)", "numerator of translation-corrected estimate of %s", "trans") denT <- bind.fv(denT, data.frame(trans=denTtrans), "hat(%s)[trans](r)", "denominator of translation-corrected estimate of %s", "trans") } } # default plot will display all edge corrections formula(TT) <- . ~ r unitname(TT) <- unitname(X) # if(ratio) { # finish up numerator & denominator formula(numT) <- formula(denT) <- . 
~ r
    unitname(numT) <- unitname(denT) <- unitname(TT)
    # tack on to result
    TT <- rat(TT, numT, denT, check=FALSE)
  }
  return(TT)
}
Tstat
})
spatstat/R/linearpcf.R0000755000176000001440000001111212237642727014447 0ustar ripleyusers#
#
# linearpcf.R
#
# $Revision: 1.9 $ $Date: 2013/04/25 06:37:43 $
#
# pair correlation function for point pattern on linear network
#
#
# Estimate of the pair correlation function for a point pattern on a
# linear network (an 'lpp' object).
#   X          -- point pattern on a linear network
#   r          -- optional vector of distance values
#   correction -- "Ang" (Ang's edge correction) or "none"
# Returns an 'fv' object.
linearpcf <- function(X, r=NULL, ..., correction="Ang") {
  stopifnot(inherits(X, "lpp"))
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Ang="Ang",
                             best="Ang"),
                           multi=FALSE)
  # extract info about pattern
  sX <- summary(X)
  np <- sX$npoints
  lengthL <- sX$totlength
  # compute
  denom <- np * (np - 1)/lengthL
  g <- linearpcfengine(X, r=r, ..., denom=denom, correction=correction)
  # set appropriate y axis label
  switch(correction,
         Ang  = {
           ylab <- quote(g[L](r))
           fname <- "g[L]"
         },
         none = {
           ylab <- quote(g[net](r))
           fname <- "g[net]"
         })
  g <- rebadge.fv(g, new.ylab=ylab, new.fname=fname)
  return(g)
}

# Inhomogeneous version of linearpcf().
#   lambda    -- intensity values: a numeric vector (one value per point),
#                a function(x,y), a pixel image, or a fitted model
#                (ppm or lppm). If NULL, the homogeneous estimate is returned.
#   normalise -- if TRUE, denominator is sum(1/lambda); otherwise the
#                total network length.
linearpcfinhom <- function(X, lambda=NULL, r=NULL,  ...,
                           correction="Ang", normalise=TRUE) {
  stopifnot(inherits(X, "lpp"))
  correction <- pickoption("correction", correction,
                           c(none="none",
                             Ang="Ang",
                             best="Ang"),
                           multi=FALSE)
  # BUG FIX: the result of this call was previously discarded (no return),
  # so lambda=NULL fell through and triggered the stop() below.
  if(is.null(lambda))
    return(linearpcf(X, r=r, ..., correction=correction))
  # extract info about pattern
  sX <- summary(X)
  np <- sX$npoints
  lengthL <- sX$totlength
  #
  XX <- as.ppp(X)
  # evaluate intensity at each data point, whatever form 'lambda' takes
  lambdaX <-
    if(is.vector(lambda)) lambda
    else if(is.function(lambda)) lambda(XX$x, XX$y, ...)
    else if(is.im(lambda)) safelookup(lambda, XX)
    else if(is.ppm(lambda) || inherits(lambda, "lppm"))
      predict(lambda, locations=as.data.frame(XX))
    else stop("lambda should be a numeric vector, function, image or ppm object")
  # validate intensity values
  if(!is.numeric(lambdaX))
    stop("Values of lambda are not numeric")
  if((nv <- length(lambdaX)) != np)
    stop(paste("Obtained", nv, "values of lambda",
               "but point pattern contains", np, "points"))
  if(any(lambdaX < 0))
    stop("Negative values of lambda obtained")
  if(any(lambdaX == 0))
    stop("Zero values of lambda obtained")
  # pairwise weights 1/(lambda[i] * lambda[j])
  invlam <- 1/lambdaX
  invlam2 <- outer(invlam, invlam, "*")
  denom <- if(!normalise) lengthL else sum(invlam)
  g <- linearpcfengine(X, ..., r=r,
                       reweight=invlam2, denom=denom, correction=correction)
  # extract bandwidth
  bw <- attr(g, "bw")
  # set appropriate y axis label
  switch(correction,
         Ang  = {
           ylab <- quote(g[LI](r))
           fname <- "g[LI]"
         },
         none = {
           ylab <- quote(g[netI](r))
           fname <- "g[netI]"
         })
  g <- rebadge.fv(g, new.fname=fname, new.ylab=ylab)
  # reattach bandwidth
  attr(g, "bw") <- bw
  return(g)
}

# Workhorse for linearpcf() and linearpcfinhom().
#   reweight -- optional matrix of pairwise weights
#   denom    -- denominator of the estimator
linearpcfengine <- function(X, ..., r=NULL,
                            reweight=NULL, denom=1, correction="Ang") {
  # extract info about pattern
  sX <- summary(X)
  np <- sX$npoints
  lengthL <- sX$totlength
  # extract linear network
  L <- X$domain
  # extract points
  Y <- as.ppp(X)
  W <- Y$window
  # determine r values
  rmaxdefault <- 0.98 * circumradius(L)
  breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault)
  r <- breaks$r
  rmax <- breaks$max
  #
  if(np < 2) {
    # no pairs to count: return zero function
    zeroes <- numeric(length(r))
    df <- data.frame(r = r, est = zeroes)
    g <- fv(df, "r", substitute(linearpcf(r), NULL), "est", .
~ r, c(0, rmax), c("r", "%s(r)"), c("distance argument r", "estimated %s"), fname = "linearpcf") return(g) } # compute pairwise distances D <- pairdist(X) #--- compile into pcf --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) g <- compilepcf(D, r, denom=denom) unitname(g) <- unitname(X) return(g) } if(correction == "none") edgewt <- 1 else { # inverse m weights (Wei's correction) # compute m[i,j] m <- matrix(1, np, np) for(j in 1:np) m[ -j, j] <- countends(L, Y[-j], D[-j,j]) edgewt <- 1/m } # compute pcf wt <- if(!is.null(reweight)) edgewt * reweight else edgewt g <- compilepcf(D, r, weights=wt, denom=denom, ...) # extract bandwidth bw <- attr(g, "bw") # tack on theoretical value g <- bind.fv(g, data.frame(theo=rep.int(1,length(r))), "%s[theo](r)", "theoretical Poisson %s") # tweak g <- rebadge.fv(g, new.fname="linearpcfengine") unitname(g) <- unitname(X) fvnames(g, ".") <- rev(fvnames(g, ".")) # tack on bandwidth again attr(g, "bw") <- bw return(g) } spatstat/R/xysegment.R0000755000176000001440000001604212237642727014536 0ustar ripleyusers# # xysegment.S # # $Revision: 1.15 $ $Date: 2013/08/22 08:27:19 $ # # Low level utilities for analytic geometry for line segments # # author: Adrian Baddeley 2001 # from an original by Rob Foxall 1997 # # distpl(p, l) # distance from a single point p = (xp, yp) # to a single line segment l = (x1, y1, x2, y2) # # distppl(p, l) # distances from each of a list of points p[i,] # to a single line segment l = (x1, y1, x2, y2) # [uses only vector parallel ops] # # distppll(p, l) # distances from each of a list of points p[i,] # to each of a list of line segments l[i,] # [interpreted code uses large matrices and 'outer()'] # [Fortran implementation included!] 
distpl <- function(p, l) { xp <- p[1] yp <- p[2] dx <- l[3]-l[1] dy <- l[4]-l[2] leng <- sqrt(dx^2 + dy^2) # vector from 1st endpoint to p xpl <- xp - l[1] ypl <- yp - l[2] # distance from p to 1st & 2nd endpoints d1 <- sqrt(xpl^2 + ypl^2) d2 <- sqrt((xp-l[3])^2 + (yp-l[4])^2) dmin <- min(d1,d2) # test for zero length if(leng < .Machine$double.eps) return(dmin) # rotation sine & cosine co <- dx/leng si <- dy/leng # back-rotated coords of p xpr <- co * xpl + si * ypl ypr <- - si * xpl + co * ypl # test if(xpr >= 0 && xpr <= leng) dmin <- min(dmin, abs(ypr)) return(dmin) } distppl <- function(p, l) { xp <- p[,1] yp <- p[,2] dx <- l[3]-l[1] dy <- l[4]-l[2] leng <- sqrt(dx^2 + dy^2) # vector from 1st endpoint to p xpl <- xp - l[1] ypl <- yp - l[2] # distance from p to 1st & 2nd endpoints d1 <- sqrt(xpl^2 + ypl^2) d2 <- sqrt((xp-l[3])^2 + (yp-l[4])^2) dmin <- pmin.int(d1,d2) # test for zero length if(leng < .Machine$double.eps) return(dmin) # rotation sine & cosine co <- dx/leng si <- dy/leng # back-rotated coords of p xpr <- co * xpl + si * ypl ypr <- - si * xpl + co * ypl # ypr is perpendicular distance to infinite line # Applies only when xp, yp in the middle middle <- (xpr >= 0 & xpr <= leng) if(any(middle)) dmin[middle] <- pmin.int(dmin[middle], abs(ypr[middle])) return(dmin) } distppll <- function(p, l, mintype=0, method=c("Fortran", "C", "interpreted"), listit=FALSE) { np <- nrow(p) nl <- nrow(l) xp <- p[,1] yp <- p[,2] if(is.na(match(mintype,0:2))) stop(paste("Argument", sQuote("mintype"), "must be 0, 1 or 2.\n")) method <- match.arg(method) switch(method, interpreted={ dx <- l[,3]-l[,1] dy <- l[,4]-l[,2] # segment lengths leng <- sqrt(dx^2 + dy^2) # rotation sines & cosines co <- dx/leng si <- dy/leng co <- matrix(co, nrow=np, ncol=nl, byrow=TRUE) si <- matrix(si, nrow=np, ncol=nl, byrow=TRUE) # matrix of squared distances from p[i] to 1st endpoint of segment j xp.x1 <- outer(xp, l[,1], "-") yp.y1 <- outer(yp, l[,2], "-") d1 <- xp.x1^2 + yp.y1^2 # ditto for 2nd 
endpoint xp.x2 <- outer(xp, l[,3], "-") yp.y2 <- outer(yp, l[,4], "-") d2 <- xp.x2^2 + yp.y2^2 # for each (i,j) rotate p[i] around 1st endpoint of segment j # so that line segment coincides with x axis xpr <- xp.x1 * co + yp.y1 * si ypr <- - xp.x1 * si + yp.y1 * co d3 <- ypr^2 # test lenf <- matrix(leng, nrow=np, ncol=nl, byrow=TRUE) zero <- (lenf < .Machine$double.eps) outside <- (zero | xpr < 0 | xpr > lenf) if(any(outside)) d3[outside] <- Inf dsq <- matrix(pmin.int(d1, d2, d3),nrow=np, ncol=nl) d <- sqrt(dsq) if(mintype >= 1) min.d <- apply(d, 1, min) if(mintype == 2) min.which <- apply(d, 1, which.min) }, Fortran={ eps <- .Machine$double.eps if(mintype > 0) { big <- sqrt(2)*diff(range(c(p,l))) xmin <- rep.int(big,np) } else { xmin <- 1 } n2 <- if(mintype > 1) np else 1 temp <- .Fortran("dppll", x=as.double(xp), y=as.double(yp), l1=as.double(l[,1]), l2=as.double(l[,2]), l3=as.double(l[,3]), l4=as.double(l[,4]), np=as.integer(np), nl=as.integer(nl), eps=as.double(eps), mint=as.integer(mintype), rslt=double(np*nl), xmin=as.double(xmin), jmin=integer(n2)) # PACKAGE="spatstat") d <- matrix(temp$rslt, nrow=np, ncol=nl) if(mintype >= 1) min.d <- temp$xmin if(mintype == 2) min.which <- temp$jmin }, C = { eps <- .Machine$double.eps DUP <- spatstat.options("dupC") temp <- .C("prdist2segs", x=as.double(xp), y=as.double(yp), npoints =as.integer(np), x0=as.double(l[,1]), y0=as.double(l[,2]), x1=as.double(l[,3]), y1=as.double(l[,4]), nsegments=as.integer(nl), epsilon=as.double(eps), dist2=as.double(numeric(np * nl)), DUP=DUP) # PACKAGE="spatstat") d <- sqrt(matrix(temp$dist2, nrow=np, ncol=nl)) if(mintype == 2) { min.which <- apply(d, 1, which.min) min.d <- d[cbind(1:np, min.which)] } else if (mintype == 1) { min.d <- apply(d, 1, min) } }) ###### end switch ##### if(mintype==0) return(if(listit) list(d=d) else d) else if(mintype==1) return(list(d=d, min.d=min.d)) else if(mintype==2) return(list(d=d, min.d=min.d, min.which=min.which)) } # faster code if you don't want the n*m 
matrix 'd' distppllmin <- function(p, l, big=NULL) { np <- nrow(p) nl <- nrow(l) # initialise squared distances to large value if(is.null(big)) { xdif <- diff(range(c(p[,1],l[, c(1,3)]))) ydif <- diff(range(c(p[,2],l[, c(2,4)]))) big <- 2 * (xdif^2 + ydif^2) } dist2 <- rep.int(big, np) # DUP <- spatstat.options("dupC") z <- .C("nndist2segs", xp=as.double(p[,1]), yp=as.double(p[,2]), npoints=as.integer(np), x0=as.double(l[,1]), y0=as.double(l[,2]), x1=as.double(l[,3]), y1=as.double(l[,4]), nsegments=as.integer(nl), epsilon=as.double(.Machine$double.eps), dist2=as.double(dist2), index=as.integer(integer(np)), DUP=DUP) # PACKAGE="spatstat") min.d <- sqrt(z$dist2) min.which <- z$index+1L return(list(min.d=min.d, min.which=min.which)) } spatstat/R/hybrid.R0000755000176000001440000002525012237642727013775 0ustar ripleyusers# # # hybrid.R # # $Revision: 1.4 $ $Date: 2013/04/25 06:37:43 $ # # Hybrid of several interactions # # Hybrid() create a hybrid of several interactions # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Hybrid <- function(...) { interlist <- list(...) 
n <- length(interlist) if(n == 0) stop("No arguments given") # arguments may be interaction objects or ppm objects isinter <- unlist(lapply(interlist, is.interact)) isppm <- unlist(lapply(interlist, is.ppm)) if(any(nbg <- !(isinter | isppm))) stop(paste(ngettext(sum(nbg), "Argument", "Arguments"), paste(which(nbg), collapse=", "), ngettext(sum(nbg), "is not an interaction", "are not interactions"))) # ensure the list contains only interaction objects if(any(isppm)) interlist[isppm] <- lapply(interlist[isppm], as.interact) # recursively expand any components that are themselves hybrids while(any(ishybrid <- unlist(lapply(interlist, is.hybrid)))) { i <- min(which(ishybrid)) n <- length(interlist) expandi <- interlist[[i]]$par interlist <- c(if(i > 1) interlist[1:(i-1)] else NULL, expandi, if(i < n) interlist[(i+1):n] else NULL) } # ncomponents <- length(interlist) if(ncomponents == 1) { # single interaction - return it return(interlist[[1]]) } # ensure all components have names names(interlist) <- good.names(names(interlist), "HybridComponent", 1:ncomponents) out <- list( name = "Hybrid interaction", creator = "Hybrid", family = hybrid.family, pot = NULL, par = interlist, parnames = names(interlist), init = NULL, update = NULL, # default OK print = function(self, ..., family=FALSE, brief=FALSE) { if(family) print.isf(self$family) ncomponents <- length(self$par) clabs <- self$parnames cat(paste("Hybrid of", ncomponents, "components:", commasep(sQuote(clabs)), "\n\n")) for(i in 1:ncomponents) { cat(paste(clabs[i], ":\n", sep="")) print(self$par[[i]], ..., family=family, brief=brief) } cat("\n") NULL }, interpret = function(coeffs, self) { interlist <- self$par result <- list(param=list(), inames=character(0), printable=list()) for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) 
Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) # extract them if(any(relevant)) { Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # invoke the self-interpretation of interI interpretI <- interI$interpret if(is.function(interpretI)) { resultI <- interpretI(Crelevant, interI) paramI <- resultI$param prinI <- resultI$printable inamesI <- resultI$inames inamesI <- paste(nameI, inamesI) if(length(prinI) > 0) { result$param <- append(result$param, paramI) result$printable <- append(result$printable, list(prinI)) result$inames <- c(result$inames, inamesI) } } } } return(result) }, valid = function(coeffs, self) { # check validity via mechanism used for 'rmhmodel' siminfo <- .Spatstat.Rmhinfo[["Hybrid interaction"]] Z <- siminfo(coeffs, self) cifs <- Z$cif pars <- Z$par ntypes <- Z$ntypes if((Ncif <- length(cifs)) == 1) { # single cif pars <- append(pars, list(beta=rep.int(1, ntypes))) } else { for(i in 1:Ncif) pars[[i]] <- append(pars[[i]], list(beta=rep.int(1, ntypes[i]))) } RM <- rmhmodel(cif=cifs, par=pars, types=1:max(ntypes), stopinvalid=FALSE) return(RM$integrable) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) # separate into components spl <- splitHybridInteraction(coeffs, self) interlist <- spl$interlist coeflist <- spl$coeflist # compute projection for each component interaction Ncif <- length(interlist) projlist <- vector(mode="list", length=Ncif) nproj <- integer(Ncif) for(i in 1:Ncif) { coefsI <- coeflist[[i]] interI <- interlist[[i]] if(!is.interact(interI)) stop("Internal error: interlist entry is not an interaction") projI <- interI$project if(is.null(projI)) stop(paste("Projection is not yet implemented for a", interI$name)) p <- projI(coefsI, interI) # p can be NULL (indicating no projection required for interI) # or a single interaction or a list of interactions. 
if(is.null(p)) { if(Ncif == 1) return(NULL) # no projection required p <- list(NULL) nproj[i] <- 0 } else if(is.interact(p)) { p <- list(p) nproj[i] <- 1 } else if(is.list(p) && all(unlist(lapply(p, is.interact)))) { nproj[i] <- length(p) } else stop("Internal error: result of projection had wrong format") projlist[[i]] <- p } # for interaction i there are nproj[i] **new** interactions to try. if(all(nproj == 0)) return(NULL) if(spatstat.options("project.fast")) { # Single interaction required. # Extract first entry from each list # (there should be only one entry, but...) qlist <- lapply(projlist, function(z) z[[1]]) # replace NULL entries by corresponding original interactions isnul <- unlist(lapply(qlist, is.null)) if(all(isnul)) return(NULL) if(any(isnul)) qlist[isnul] <- interlist[isnul] names(qlist) <- names(interlist) # build hybrid and return result <- do.call("Hybrid", qlist) return(result) } # Full case result <- list() for(i in which(nproj > 0)) { ntry <- nproj[i] tries <- projlist[[i]] for(j in 1:ntry) { # assemble list of component interactions for hybrid qlist <- interlist qlist[[i]] <- tries[[j]] # eliminate Poisson ispois <- unlist(lapply(qlist, is.poisson)) if(all(ispois)) { # collapse to single Poisson h <- Poisson() } else { if(any(ispois)) qlist <- qlist[!ispois] h <- do.call("Hybrid", qlist) } result <- append(result, list(h)) } } # 'result' is a list of interactions, each a hybrid if(length(result) == 1) result <- result[[1]] return(result) }, irange = function(self, coeffs=NA, epsilon=0, ...) { interlist <- self$par answer <- 0 for(i in 1:length(interlist)) { interI <- interlist[[i]] nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. if(all(is.na(coeffs))) Crelevant <- NA else { Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) 
# extract them Crelevant <- coeffs[relevant] names(Crelevant) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) } # compute reach reachI <- interI$irange if(is.function(reachI)) { resultI <- reachI(interI, coeffs=Crelevant, epsilon=epsilon, ...) answer <- max(answer, resultI) } } return(answer) }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } is.hybrid <- function(x) { UseMethod("is.hybrid") } is.hybrid.interact <- function(x) { return(is.interact(x) && (x$name == "Hybrid interaction")) } is.hybrid.ppm <- function(x) { return(is.hybrid(as.interact(x))) } splitHybridInteraction <- function(coeffs, inte) { # For hybrids, $par is a list of the component interactions, # but coeffs is a numeric vector. # Split the coefficient vector into the relevant coeffs for each interaction interlist <- inte$par N <- length(interlist) coeflist <- vector(mode="list", length=N) for(i in 1:N) { interI <- interlist[[i]] # forbid hybrids-of-hybrids - these should not occur anyway if(interI$name == "Hybrid interaction") stop("A hybrid-of-hybrid interactions is not implemented") # nameI is the tag that identifies I-th component in hybrid nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) 
# extract coefficients # (there may be none, if this interaction is Poisson or an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to interaction if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # store coeflist[[i]] <- coeffsI } names(coeflist) <- names(interlist) return(list(coeflist=coeflist, interlist=interlist)) } spatstat/R/Kmodel.R0000755000176000001440000000032412237642727013722 0ustar ripleyusers# # Kmodel.R # # Kmodel and pcfmodel # # $Revision: 1.1 $ $Date: 2011/05/30 14:02:21 $ # Kmodel <- function(model, ...) { UseMethod("Kmodel") } pcfmodel <- function(model, ...) { UseMethod("pcfmodel") } spatstat/R/ho.R0000755000176000001440000000333512237642727013122 0ustar ripleyusers# # ho.R # # Huang-Ogata method # # $Revision: 1.13 $ $Date: 2010/05/07 12:18:59 $ # ho.engine <- function(model, ..., nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE) { verifyclass(model, "ppm") if(is.null(start)) start <- list(n.start=data.ppm(model)$n) # check that the model can be simulated if(!valid.ppm(model)) { warning("Fitted model is invalid - cannot be simulated") return(NULL) } # compute the observed value of the sufficient statistic X <- data.ppm(model) sobs <- suffstat(model, X) # generate 'nsim' realisations of the fitted model # and compute the sufficient statistics of the model rmhinfolist <- rmh(model, start, control, preponly=TRUE, verbose=FALSE) if(verb) cat("Simulating... 
") for(i in 1:nsim) { if(verb) progressreport(i, nsim) Xi <- rmhEngine(rmhinfolist, verbose=FALSE) v <- suffstat(model,Xi) if(i == 1) svalues <- matrix(, nrow=nsim, ncol=length(v)) svalues[i, ] <- v } if(verb) cat("Done.\n\n") # calculate the sample mean and variance of the # sufficient statistic for the simulations smean <- apply(svalues, 2, mean, na.rm=TRUE) svar <- var(svalues, na.rm=TRUE) # value of canonical parameter from MPL fit theta0 <- coef(model) # Newton-Raphson update Vinverse <- solve(svar) theta <- theta0 + as.vector(Vinverse %*% (sobs - smean)) # update model newmodel <- model newmodel$coef <- theta newmodel$coef.orig <- theta0 newmodel$method <- "ho" newmodel$fitter <- "ho" newmodel$fisher <- svar newmodel$varcov <- Vinverse # recompute fitted interaction newmodel$fitin <- NULL newmodel$fitin <- fitin(newmodel) return(newmodel) } spatstat/R/tess.R0000755000176000001440000003535512237642727013501 0ustar ripleyusers# # tess.R # # support for tessellations # # $Revision: 1.47 $ $Date: 2013/10/06 07:59:38 $ # tess <- function(..., xgrid=NULL, ygrid=NULL, tiles=NULL, image=NULL, window=NULL, keepempty=FALSE) { if(!is.null(window)) win <- as.owin(window) else win <- NULL isrect <- !is.null(xgrid) && !is.null(ygrid) istiled <- !is.null(tiles) isimage <- !is.null(image) if(isrect + istiled + isimage != 1) stop("Must specify either (xgrid, ygrid) or tiles or img") if(isrect) { stopifnot(is.numeric(xgrid) && all(diff(xgrid) > 0)) stopifnot(is.numeric(ygrid) && all(diff(ygrid) > 0)) if(is.null(win)) win <- owin(range(xgrid), range(ygrid)) ntiles <- (length(xgrid)-1) * (length(ygrid)-1) out <- list(type="rect", window=win, xgrid=xgrid, ygrid=ygrid, n=ntiles) } else if(istiled) { stopifnot(is.list(tiles)) if(!all(unlist(lapply(tiles, is.owin)))) stop("tiles must be a list of owin objects") if(!keepempty) { # remove empty tiles isempty <- unlist(lapply(tiles, is.empty)) if(all(isempty)) stop("All tiles are empty") if(any(isempty)) tiles <- tiles[!isempty] } 
ntiles <- length(tiles) nam <- names(tiles) lev <- if(!is.null(nam) && all(nzchar(nam))) nam else 1:ntiles if(is.null(win)) { for(i in 1:ntiles) { if(i == 1) win <- tiles[[1]] else win <- union.owin(win, tiles[[i]]) } } ismask <- function(x) {x$type == "mask"} if(ismask(win) || any(unlist(lapply(tiles, ismask)))) { # convert to pixel image tessellation win <- as.mask(win) ima <- as.im(win) ima$v[] <- NA for(i in 1:ntiles) ima[tiles[[i]]] <- i ima <- ima[win, drop=FALSE] ima <- eval.im(factor(ima, levels=1:ntiles)) levels(ima) <- lev out <- list(type="image", window=win, image=ima, n=length(lev)) } else { # tile list win <- rescue.rectangle(win) out <- list(type="tiled", window=win, tiles=tiles, n=length(tiles)) } } else if(isimage) { # convert to factor valued image image <- as.im(image) switch(image$type, logical={ # convert to factor if(keepempty) image <- eval.im(factor(image, levels=c(FALSE,TRUE))) else image <- eval.im(factor(image)) }, factor={ # eradicate unused levels if(!keepempty) image <- eval.im(factor(image)) }, { # convert to factor image <- eval.im(factor(image)) }) if(is.null(win)) win <- as.owin(image) out <- list(type="image", window=win, image=image, n=length(levels(image))) } else stop("Internal error: unrecognised format") class(out) <- c("tess", class(out)) return(out) } is.tess <- function(x) { inherits(x, "tess") } print.tess <- function(x, ..., brief=FALSE) { full <- !brief if(full) cat("Tessellation\n") win <- x$window switch(x$type, rect={ if(full) { unitinfo <- summary(unitname(win)) equispaced <- function(z) { dz <- diff(z) diff(range(dz))/mean(dz) < 0.01 } if(equispaced(x$xgrid) && equispaced(x$ygrid)) cat(paste("Tiles are equal rectangles, of dimension", signif(mean(diff(x$xgrid)), 5), "x", signif(mean(diff(x$ygrid)), 5), unitinfo$plural, " ", unitinfo$explain, "\n")) else cat(paste("Tiles are unequal rectangles\n")) } cat(paste(length(x$xgrid)-1, "by", length(x$ygrid)-1, "grid of tiles", "\n")) }, tiled={ if(full) { if(win$type == 
"polygonal") cat("Tiles are irregular polygons\n") else cat("Tiles are windows of general type\n") } cat(paste(length(x$tiles), "tiles (irregular windows)\n")) }, image={ nlev <- length(levels(x$image)) if(full) { cat(paste("Tessellation is determined by", "a factor-valued image", "with", nlev, "levels\n")) } else cat(paste(nlev, "tiles (levels of a pixel image)\n")) }) if(full) print(win) invisible(NULL) } plot.tess <- function(x, ..., main, add=FALSE, col=NULL) { xname <- short.deparse(substitute(x)) if(missing(main)) main <- xname switch(x$type, rect={ win <- x$window if(!add) do.call.matched("plot.owin", resolve.defaults(list(x=win, main=main), list(...)), extrargs=c("sub", "lty", "lwd")) xg <- x$xgrid yg <- x$ygrid do.call.matched("segments", resolve.defaults(list(x0=xg, y0=win$yrange[1], x1=xg, y1=win$yrange[2]), list(col=col), list(...), .StripNull=TRUE)) do.call.matched("segments", resolve.defaults(list(x0=win$xrange[1], y0=yg, x1=win$xrange[2], y1=yg), list(col=col), list(...), .StripNull=TRUE)) }, tiled={ if(!add) do.call.matched("plot.owin", resolve.defaults(list(x=x$window, main=main), list(...))) til <- tiles(x) plotem <- function(z, ..., col=NULL) { if(is.null(col)) plot(z, ..., add=TRUE) else if(z$type != "mask") plot(z, ..., border=col, add=TRUE) else plot(z, ..., col=col, add=TRUE) } lapply(til, plotem, ..., col=col) }, image={ plot(x$image, main=main, ..., add=add) }) return(invisible(NULL)) } "[<-.tess" <- function(x, ..., value) { switch(x$type, rect=, tiled={ til <- tiles(x) til[...] <- value ok <- !unlist(lapply(til, is.null)) x <- tess(tiles=til[ok]) }, image={ stop("Cannot assign new values to subsets of a pixel image") }) return(x) } "[.tess" <- function(x, ...) { switch(x$type, rect=, tiled={ til <- tiles(x)[...] 
return(tess(tiles=til)) }, image={ img <- x$image oldlev <- levels(img) newlev <- unique(oldlev[...]) img <- eval.im(factor(img, levels=newlev)) return(tess(image=img)) }) } tiles <- function(x) { switch(x$type, rect={ out <- list() xg <- x$xgrid yg <- x$ygrid nx <- length(xg) - 1 ny <- length(yg) - 1 for(j in rev(seq_len(ny))) for(i in seq_len(nx)) { winij <- owin(xg[c(i,i+1)], yg[c(j,j+1)]) dout <- list(winij) names(dout) <- paste("Tile row ", ny-j+1, ", col ", i, sep="") out <- append(out, dout) } }, tiled={ out <- x$tiles if(is.null(names(out))) names(out) <- paste("Tile", seq_along(out)) }, image={ out <- list() ima <- x$image lev <- levels(ima) for(i in seq_along(lev)) out[[i]] <- solutionset(ima == lev[i]) names(out) <- paste(lev) }) out <- as.listof(out) return(out) } tilenames <- function(x) { stopifnot(is.tess(x)) switch(x$type, rect={ nx <- length(x$xgrid) - 1 ny <- length(x$ygrid) - 1 nam <- outer(rev(seq_len(ny)), seq_len(nx), function(j,i,ny) { paste("Tile row ", ny-j+1, ", col ", i, sep="")}, ny=ny) return(nam) }, tiled={ til <- x$tiles if(!is.null(names(til))) nam <- names(til) else nam <- paste("Tile", seq_along(til)) }, image={ ima <- x$image lev <- levels(ima) nam <- paste(lev) }) return(nam) } "tilenames<-" <- function(x, value) { stopifnot(is.tess(x)) switch(x$type, rect = { warning("Cannot change names of the tiles in a rectangular grid") }, tiled = { names(x$tiles) <- value }, image = { levels(x$image) <- value }) return(x) } tile.areas <- function(x) { stopifnot(is.tess(x)) switch(x$type, rect={ xg <- x$xgrid yg <- x$ygrid nx <- length(xg) - 1 ny <- length(yg) - 1 a <- outer(rev(diff(yg)), diff(xg), "*") a <- as.vector(t(a)) names(a) <- as.vector(t(tilenames(x))) }, tiled={ a <- lapply(x$tiles, area.owin) }, image={ z <- x$image a <- table(z$v) * z$xstep * z$ystep }) return(a) } as.im.tess <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) { # if W is present, it may have to be converted if(!is.null(W)) { 
stopifnot(is.owin(W)) if(W$type != "mask") W <- as.mask(W, eps=eps, dimyx=dimyx, xy=xy) } switch(X$type, image={ out <- as.im(X$image, W=W, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace) }, tiled={ if(is.null(W)) W <- as.mask(as.owin(X), eps=eps, dimyx=dimyx, xy=xy) til <- X$tiles ntil <- length(til) nama <- names(til) if(is.null(nama) || !all(nzchar(nama))) nama <- paste(seq_len(ntil)) xy <- list(x=W$xcol, y=W$yrow) for(i in seq_len(ntil)) { indic <- as.mask(til[[i]], xy=xy) tag <- as.im(indic, value=i) if(i == 1) { out <- tag outv <- out$v } else { outv <- pmin.int(outv, tag$v, na.rm=TRUE) } } out <- im(factor(outv, levels=seq_len(ntil), labels=nama), out$xcol, out$yrow) unitname(out) <- unitname(W) }, rect={ if(is.null(W)) out <- as.im(as.rectangle(X), eps=eps, dimyx=dimyx, xy=xy) else out <- as.im(W) xg <- X$xgrid yg <- X$ygrid nrows <- length(yg) - 1 ncols <- length(xg) - 1 jx <- findInterval(out$xcol, xg, rightmost.closed=TRUE) iy <- findInterval(out$yrow, yg, rightmost.closed=TRUE) M <- as.matrix(out) Jcol <- jx[col(M)] Irow <- nrows - iy[row(M)] + 1 Ktile <- Jcol + ncols * (Irow - 1) Ktile <- factor(Ktile, levels=seq_len(nrows * ncols)) out <- im(Ktile, xcol=out$xcol, yrow=out$yrow, unitname=unitname(W)) } ) return(out) } as.tess <- function(X) { UseMethod("as.tess") } as.tess.tess <- function(X) { fields <- switch(X$type, rect={ c("xgrid", "ygrid") }, tiled={ "tiles" }, image={ "image" }, stop(paste("Unrecognised tessellation type", sQuote(X$type)))) fields <- c(c("type", "window"), fields) X <- unclass(X)[fields] class(X) <- c("tess", class(X)) return(X) } as.tess.im <- function(X) { return(tess(image = X)) } as.tess.list <- function(X) { W <- lapply(X, as.owin) return(tess(tiles=W)) } as.tess.owin <- function(X) { return(tess(tiles=list(X))) } intersect.tess <- function(X, Y, ...) 
{ X <- as.tess(X) if(is.owin(Y) && Y$type == "mask") { # special case # convert to pixel image result <- as.im(Y) Xtiles <- tiles(X) for(i in seq_along(Xtiles)) { tilei <- Xtiles[[i]] result[tilei] <- i } result <- result[Y, drop=FALSE] return(tess(image=result, window=Y)) } if(is.owin(Y)) { # efficient code when Y is a window, retaining names of tiles of X Ztiles <- lapply(tiles(X), intersect.owin, B=Y, ..., fatal=FALSE) isempty <- unlist(lapply(Ztiles, function(x) { is.null(x) || is.empty(x)})) Ztiles <- Ztiles[!isempty] Xwin <- as.owin(X) Ywin <- Y } else { # general case Y <- as.tess(Y) Xtiles <- tiles(X) Ytiles <- tiles(Y) Ztiles <- list() namesX <- names(Xtiles) for(i in seq_along(Xtiles)) { Xi <- Xtiles[[i]] Ti <- lapply(Ytiles, intersect.owin, B=Xi, ..., fatal=FALSE) isempty <- unlist(lapply(Ti, function(x) { is.null(x) || is.empty(x)})) Ti <- Ti[!isempty] names(Ti) <- paste(namesX[i], names(Ti), sep="x") Ztiles <- append(Ztiles, Ti) } Xwin <- as.owin(X) Ywin <- as.owin(Y) } Zwin <- intersect.owin(Xwin, Ywin) return(tess(tiles=Ztiles, window=Zwin)) } bdist.tiles <- local({ vdist <- function(x,w) { z <- as.ppp(vertices(x), W=w, check=FALSE) min(bdist.points(z)) } edist <- function(x,b) { xd <- crossdist(as.psp(x, check=FALSE), b, type="separation") min(xd) } bdist.tiles <- function(X) { if(!is.tess(X)) stop("X must be a tessellation") W <- as.owin(X) switch(X$type, rect=, tiled={ tt <- tiles(X) if(is.convex(W)) { # distance is minimised at a tile vertex d <- sapply(tt, vdist, w=W) } else { # coerce everything to polygons W <- as.polygonal(W) tt <- lapply(tt, as.polygonal) # compute min dist from tile edges to window edges d <- sapply(tt, edist, b=as.psp(W)) } }, image={ Xim <- X$image # compute boundary distance for each pixel bd <- bdist.pixels(as.owin(Xim), style="image") bd <- bd[W, drop=FALSE] # split over tiles bX <- split(bd, X) # compute minimum distance over each level of factor d <- sapply(bX, function(z) { summary(z)$min }) } ) return(d) } 
bdist.tiles }) spatstat/R/Jinhom.R0000644000176000001440000002352412237642727013737 0ustar ripleyusers# # Jinhom.R # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # Ginhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) area <- area.owin(W) # determine 'r' values rmaxdefault <- rmax.rule("G", W, npts/area) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) # Intensity values at data points if(is.null(lambda)) { # No intensity data provided # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(is.ppm(lambda)) lambdaX <- predict(lambda, locations=X, type="trend") else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { lambdaX <- lambda check.nvector(lambdaX, npts) } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may 
occur due to numerical error lmin <- max(lmin, .Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(is.ppm(lambda)) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin <- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # ensure lmin < lambdaX lmin <- min(lmin, lambdaX) } # Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio bad <- (lratio > 1) if((nbad <- sum(bad)) > 0) stop(paste("Value of", sQuote("lmin"), "exceeds", nbad, gettext(nbad, "value", "values"), "of", sQuote("lambda"))) # sort data points in order of increasing x coordinate xx <- X$x yy <- X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # compute local cumulative products DUP <- spatstat.options("dupC") z <- .C("locprod", n = as.integer(npts), x = as.double(xord), y = as.double(yord), v = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(npts * nr)), DUP=DUP) # PACKAGE="spatstat") ans <- matrix(z$ans, nrow=nr, ncol=npts) # revert to original ordering loccumprod <- matrix(, nrow=nr, ncol=npts) loccumprod[, oX] <- ans # border correction bX <- bdist.points(X) ok <- outer(r, bX, "<=") denom <- rowSums(ok) loccumprod[!ok] <- 0 numer <- rowSums(loccumprod) # pack up Gdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) G <- ratfv(Gdf, NULL, theo.denom, "r", quote(Ginhom(r)), "theo", NULL, c(0,rmax), c("r","%s[pois](r)"), desc, fname="Ginhom", ratio=ratio) G <- bind.ratfv(G, data.frame(bord=denom-numer), denom, "hat(%s)[bord](r)", "border estimate of %s", "bord", ratio=ratio) # formula(G) <- . 
~ r fvnames(G, ".") <- c("bord", "theo") unitname(G) <- unitname(X) if(ratio) G <- conform.ratfv(G) return(G) } Finhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL, ratio=FALSE) { stopifnot(is.ppp(X)) npts <- npoints(X) W <- as.owin(X) area <- area.owin(W) # determine 'r' values rmaxdefault <- rmax.rule("F", W, npts/area) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) if(!breaks$even) stop("r values must be evenly spaced") r <- breaks$r rmax <- breaks$max nr <- length(r) # Intensity values at data points if(is.null(lambda)) { # No intensity data provided # Estimate density at points by leave-one-out kernel smoothing lamX <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambdaX <- as.numeric(lamX) # negative or zero values are due to numerical error lambdaX <- pmax.int(lambdaX, .Machine$double.eps) } else { # lambda values provided if(is.im(lambda)) lambdaX <- safelookup(lambda, X) else if(is.ppm(lambda)) lambdaX <- predict(lambda, locations=X, type="trend") else if(is.function(lambda)) lambdaX <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) { lambdaX <- lambda check.nvector(lambdaX, npts) } else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # negative values are illegal minX <- min(lambdaX) if(minX < 0) stop("Negative values of lambda were encountered at data points") if(minX == 0) stop("Zero values of lambda were encountered at data points") } # Minimum intensity if(!is.null(lmin)) { check.1.real(lmin) stopifnot(lmin >= 0) } else { # Compute minimum value over window if(is.null(lambda)) { # extract previously selected smoothing bandwidth sigma <- attr(lamX, "sigma") varcov <- attr(lamX, "varcov") # estimate density on a pixel grid and minimise lam <- density(X, ..., sigma=sigma, varcov=varcov, at="pixels") lmin <- min(lam) # negative or zero values may occur due to numerical error lmin <- max(lmin, 
.Machine$double.eps) } else { if(is.im(lambda)) lmin <- min(lambda) else if(is.ppm(lambda)) lmin <- min(predict(lambda)) else if(is.function(lambda)) lmin <- min(as.im(lambda, W)) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) lmin <- min(lambdaX) } if(lmin < 0) stop("Negative values of intensity encountered") # ensure lmin < lambdaX lmin <- min(lmin, lambdaX) } # Compute intensity factor lratio <- lmin/lambdaX vv <- 1 - lratio bad <- (lratio > 1) if((nbad <- sum(bad)) > 0) stop(paste("Value of", sQuote("lmin"), "exceeds", nbad, gettext(nbad, "value", "values"), "of", sQuote("lambda"))) # sort data points in order of increasing x coordinate xx <- X$x yy <- X$y oX <- fave.order(xx) xord <- xx[oX] yord <- yy[oX] vord <- vv[oX] # determine pixel grid and compute distance to boundary M <- do.call.matched("as.mask", append(list(w=W), list(...))) bM <- bdist.pixels(M, style="matrix") bM <- as.vector(bM) # x, y coordinates of pixels are already sorted by increasing x xM <- as.vector(raster.x(M)) yM <- as.vector(raster.y(M)) nM <- length(xM) # compute local cumulative products DUP <- spatstat.options("dupC") z <- .C("locxprod", ntest = as.integer(nM), xtest = as.double(xM), ytest = as.double(yM), ndata = as.integer(npts), xdata = as.double(xord), ydata = as.double(yord), vdata = as.double(vord), nr = as.integer(nr), rmax = as.double(rmax), ans = as.double(numeric(nM * nr)), DUP=DUP) # PACKAGE="spatstat") loccumprod <- matrix(z$ans, nrow=nr, ncol=nM) # border correction ok <- outer(r, bM, "<=") denom <- rowSums(ok) loccumprod[!ok] <- 0 numer <- rowSums(loccumprod) # pack up Fdf <- data.frame(r=r, theo = 1 - exp(- lmin * pi * r^2)) desc <- c("distance argument r", "theoretical Poisson %s") theo.denom <- rep.int(npts, nr) FX <- ratfv(Fdf, NULL, theo.denom, "r", quote(Finhom(r)), "theo", NULL, c(0,rmax), c("r","%s[pois](r)"), desc, fname="Finhom", ratio=ratio) FX <- bind.ratfv(FX, data.frame(bord=denom-numer), denom, "hat(%s)[bord](r)", "border estimate of %s", 
"bord", ratio=ratio) # formula(FX) <- . ~ r fvnames(FX, ".") <- c("bord", "theo") unitname(FX) <- unitname(X) if(ratio) FX <- conform.ratfv(FX) return(FX) } Jinhom <- function(X, lambda=NULL, lmin=NULL, ..., sigma=NULL, varcov=NULL, r=NULL, breaks=NULL) { GX <- Ginhom(X, lambda=lambda, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, breaks=breaks, ratio=FALSE) r <- GX$r FX <- Finhom(X, lambda=lambda, lmin=lmin, ..., sigma=sigma, varcov=varcov, r=r, ratio=FALSE) JX <- eval.fv((1-GX)/(1-FX)) # relabel the fv object JX <- rebadge.fv(JX, quote(Jinhom(r)), "Jinhom", names(JX), new.labl=attr(GX, "labl")) # tack on extra info attr(JX, "G") <- GX attr(JX, "F") <- FX return(JX) } spatstat/R/nearestsegment.R0000755000176000001440000000473012237642727015540 0ustar ripleyusers# # nearestsegment.R # # $Revision: 1.8 $ $Date: 2013/05/01 07:27:08 $ # # Given a point pattern X and a line segment pattern Y, # for each point x of X, determine which segment of Y is closest to x # and find the point on Y closest to x. 
# nearestsegment <- function(X,Y) { return(ppllengine(X,Y,"identify")) } project2segment <- function(X, Y) { return(ppllengine(X,Y,"project")) } ppllengine <- function(X, Y, action="project", check=FALSE) { stopifnot(is.ppp(X)) stopifnot(is.psp(Y)) stopifnot(action %in% c("distance", "identify", "project")) # deal with empty patterns if(Y$n == 0) stop("Segment pattern Y contains 0 segments; projection undefined") if(X$n == 0) { nowt <- numeric(0) none <- integer(0) switch(action, identify = return(none), distance = return(list(dist=nowt, which=none)), project = return(list(Xproj=X, mapXY=none, d=nowt, tp=nowt))) } # XX <- as.matrix(as.data.frame(unmark(X))) YY <- as.matrix(as.data.frame(unmark(Y))) # determine which segment lies closest to each point huge <- max(diameter(as.rectangle(as.owin(X))), diameter(as.rectangle(as.owin(Y)))) d <- distppllmin(XX, YY, huge^2) mapXY <- d$min.which if(action == "identify") return(mapXY) else if(action == "distance") return(data.frame(dist=d$min.d, which=mapXY)) # combine relevant rows of data alldata <- as.data.frame(cbind(XX, YY[mapXY, ,drop=FALSE])) colnames(alldata) <- c("x", "y", "x0", "y0", "x1", "y1") # coordinate geometry dx <- with(alldata, x1-x0) dy <- with(alldata, y1-y0) leng <- sqrt(dx^2 + dy^2) # rotation sines & cosines co <- dx/leng si <- dy/leng # vector to point from first endpoint of segment xv <- with(alldata, x - x0) yv <- with(alldata, y - y0) # rotate coordinate system so that x axis is parallel to line segment xpr <- xv * co + yv * si ypr <- - xv * si + yv * co # determine whether projection is an endpoint or interior point of segment left <- (xpr <= 0) right <- (xpr >= leng) # location of projected point in rotated coordinates xr <- with(alldata, ifelseAX(left, 0, ifelseXY(right, leng, xpr))) # back to standard coordinates xproj <- with(alldata, x0 + xr * co) yproj <- with(alldata, y0 + xr * si) Xproj <- ppp(xproj, yproj, window=X$window, marks=X$marks, check=check) # parametric coordinates tp <- xr/leng 
tp[!is.finite(tp)] <- 0 # return(list(Xproj=Xproj, mapXY=mapXY, d=d$min.d, tp=tp)) } spatstat/R/Kest.R0000755000176000001440000006057512237642727013433 0ustar ripleyusers# # Kest.R Estimation of K function # # $Revision: 5.95 $ $Date: 2013/08/25 10:17:24 $ # # # -------- functions ---------------------------------------- # Kest() compute estimate of K # using various edge corrections # # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # # r distance values at which to compute K # # -------- standard output ------------------------------ # A data frame (class "fv") with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # using standard formula (denominator = count of points) # # bord.modif: K function estimated by border method # using modified formula # (denominator = area of eroded window # # ------------------------------------------------------------------------ "Lest" <- function(X, ...) { K <- Kest(X, ...) 
L <- eval.fv(sqrt(K/pi)) # handle variance estimates if(any(varcols <- colnames(K) %in% c("rip", "ls"))) { r <- with(L, .x) L[,varcols] <- as.data.frame(K)[,varcols]/(2 * pi * r)^2 # fix 0/0 n <- npoints(X) A <- area.owin(as.owin(X)) if(any(colnames(K) == "rip")) L[r == 0, "rip"] <- (2 * A/(n-1)^2)/(4 * pi) if(any(colnames(K) == "ls")) L[r == 0, "ls"] <- (2 * A/(n * (n-1)))/(4 * pi) } # relabel the fv object L <- rebadge.fv(L, quote(L(r)), "L", names(K), new.labl=attr(K, "labl")) # return(L) } "Kest"<- function(X, ..., r=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE) { verifyclass(X, "ppp") nlarge.given <- !missing(nlarge) && !is.null(nlarge) rfixed <- !is.null(r) || !is.null(breaks) npts <- npoints(X) W <- X$window area <- area.owin(W) lambda <- npts/area lambda2 <- (npts * (npts - 1))/(area^2) if(!is.null(domain)) { # estimate based on contributions from a subdomain domain <- as.owin(domain) if(!is.subset.owin(domain, X$window)) stop(paste(dQuote("domain"), "is not a subset of the window of X")) # trick Kdot() into doing it indom <- factor(inside.owin(X$x, X$y, domain), levels=c(FALSE,TRUE)) Kd <- Kdot(X %mark% indom, i="TRUE", r=r, breaks=breaks, correction=correction, ratio=ratio) # relabel and exit Kd <- rebadge.fv(Kd, quote(K(r)), "K") return(Kd) } rmaxdefault <- rmax.rule("K", W, lambda) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", good="good", best="best"), multi=TRUE) best.wanted <- ("best" %in% correction) # replace 'good' by the 
optimal choice for this size of dataset if("good" %in% correction) correction[correction == "good"] <- good.correction.K(X) # retain only corrections that are implemented for the window correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ########################################### # Efficient code for border method # Usable only if r values are evenly spaced from 0 to rmax # Invoked automatically if number of points is large can.do.fast <- breaks$even large.n <- (npts >= nlarge) demand.best <- correction.given && best.wanted large.n.trigger <- large.n && !correction.given fastcorrections <- c("border", "bord.modif", "none") fastdefault <- "border" correction.fast <- all(correction %in% fastcorrections) will.do.fast <- can.do.fast && (correction.fast || large.n.trigger) asked <- correction.fast || (nlarge.given && large.n.trigger) if(asked && !can.do.fast) warning("r values not evenly spaced - cannot use efficient code") if(will.do.fast) { # determine correction(s) ok <- correction %in% fastcorrections correction <- if(any(ok)) correction[ok] else fastdefault bord <- any(correction %in% c("border", "bord.modif")) none <- any(correction =="none") if(!all(ok)) { # some corrections were overridden; notify user corx <- c(if(bord) "border correction estimate" else NULL, if(none) "uncorrected estimate" else NULL) corx <- paste(corx, collapse=" and ") message(paste("number of data points exceeds", nlarge, "- computing", corx , "only")) } # restrict r values to recommended range, unless specifically requested if(!rfixed) r <- seq(from=0, to=alim[2], length.out=length(r)) if(bord) Kb <- Kborder.engine(X, max(r), length(r), correction, ratio=ratio) if(none) Kn <- Knone.engine(X, max(r), length(r), ratio=ratio) if(bord && none) return(cbind.fv(Kb, Kn[, names(Kn) != "theo"])) if(bord) return(Kb) if(none) return(Kn) } ########################################### # Slower code 
########################################### # this will be the output data frame Kdf <- data.frame(r=r, theo = pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") denom <- lambda2 * area K <- ratfv(Kdf, NULL, denom, "r", quote(K(r)), "theo", NULL, alim, c("r","%s[pois](r)"), desc, fname="K", ratio=ratio) # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax) DIJ <- close$d XI <- ppp(close$xi, close$yi, window=W, check=FALSE) if(any(correction == "none")) { # uncorrected! For demonstration purposes only! wh <- whist(DIJ, breaks$val) # no weights numKun <- cumsum(wh) denKun <- lambda2 * area # uncorrected estimate of K K <- bind.ratfv(K, data.frame(un=numKun), denKun, "hat(%s)[un](r)", "uncorrected estimate of %s", "un", ratio=ratio) } if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(X) I <- close$i bI <- b[I] # apply reduced sample algorithm RS <- Kount(DIJ, bI, b, breaks) if(any(correction == "bord.modif")) { # modified border correction denom.area <- eroded.areas(W, r) numKbm <- RS$numerator denKbm <- lambda2 * denom.area K <- bind.ratfv(K, data.frame(bord.modif=numKbm), data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif", ratio=ratio) } if(any(correction == "border")) { numKb <- RS$numerator denKb <- lambda * RS$denom.count K <- bind.ratfv(K, data.frame(border=numKb), data.frame(border=denKb), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border", ratio=ratio) } } if(any(correction == "translate")) { # translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) wh <- whist(DIJ, breaks$val, edgewt) numKtrans <- cumsum(wh) denKtrans <- lambda2 * area h <- diameter(as.rectangle(W))/2 numKtrans[r >= h] <- NA K <- bind.ratfv(K, data.frame(trans=numKtrans), denKtrans, "hat(%s)[trans](r)", "translation-corrected estimate of %s", 
"trans", ratio=ratio) } if(any(correction == "isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) numKiso <- cumsum(wh) denKiso <- lambda2 * area h <- diameter(W)/2 numKiso[r >= h] <- NA K <- bind.ratfv(K, data.frame(iso=numKiso), denKiso, "hat(%s)[iso](r)", "Ripley isotropic correction estimate of %s", "iso", ratio=ratio) } # if(var.approx) { # Compute variance approximations A <- area P <- perimeter(W) n <- npts # Ripley asymptotic approximation rip <- 2 * ((A/(n-1))^2) * (pi * r^2/A + 0.96 * P * r^3/A^2 + 0.13 * (n/A) * P * r^5/A^2) if(!ratio) { K <- bind.fv(K, data.frame(rip=rip), "vR(r)", "Ripley approximation to var(%s) under CSR", "iso") } else { den <- (n-1)^2 ripnum <- den * rip ripden <- rep.int(den, length(rip)) K <- bind.ratfv(K, data.frame(rip=ripnum), data.frame(rip=ripden), "vR(r)", "Ripley approximation to var(%s) under CSR", "iso") } if(W$type == "rectangle") { # Lotwick-Silverman a1r <- (0.21 * P * r^3 + 1.3 * r^4)/A^2 a2r <- (0.24 * P * r^5 + 2.62 * r^6)/A^3 # contains correction to typo on p52 of Diggle 2003 # cf Lotwick & Silverman 1982 eq (5) br <- (pi * r^2/A) * (1 - pi * r^2/A) + (1.0716 * P * r^3 + 2.2375 * r^4)/A^2 ls <- (A^2) * (2 * br - a1r + (n-2) * a2r)/(n*(n-1)) # add column if(!ratio) { K <- bind.fv(K, data.frame(ls=ls), "vLS(r)", "Lotwick-Silverman approx to var(%s) under CSR", "iso") } else { den <- n*(n-1) lsnum <- ls * den lsden <- rep.int(den, length(ls)) K <- bind.ratfv(K, data.frame(ls=lsnum), data.frame(ls=lsden), "vLS(r)", "Lotwick-Silverman approx to var(%s) under CSR", "iso") } } } # default plot will display all edge corrections formula(K) <- . 
~ r nama <- rev(colnames(K)) nama <- nama[!(nama %in% c("r", "rip", "ls"))] fvnames(K, ".") <- nama unitname(K) <- unitname(X) # copy to other components if(ratio) K <- conform.ratfv(K) return(K) } ################################################################ ############# SUPPORTING ALGORITHMS ########################### ################################################################ Kount <- function(dIJ, bI, b, breaks) { # # "internal" routine to compute border-correction estimate of K or Kij # # dIJ: vector containing pairwise distances for selected I,J pairs # bI: corresponding vector of boundary distances for I # b: vector of ALL distances to window boundary # # breaks : breakpts object # stopifnot(length(dIJ) == length(bI)) # determine which distances d_{ij} were observed without censoring uncen <- (dIJ <= bI) # histogram of noncensored distances nco <- whist(dIJ[uncen], breaks$val) # histogram of censoring times for noncensored distances ncc <- whist(bI[uncen], breaks$val) # histogram of censoring times (yes, this is a different total size) cen <- whist(b, breaks$val) # count censoring times beyond rightmost breakpoint uppercen <- sum(b > max(breaks$val)) # go RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen) # extract results numerator <- RS$numerator denom.count <- RS$denominator # check if(length(numerator) != breaks$ncells) stop("internal error: length(numerator) != breaks$ncells") if(length(denom.count) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") return(list(numerator=numerator, denom.count=denom.count)) } #### interface to C code for border method Kborder.engine <- function(X, rmax, nr=100, correction=c("border", "bord.modif"), weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) area <- area.owin(W) lambda <- npts/area lambda2 <- (npts * (npts - 1))/(area^2) if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output 
data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- fv(Kdf, "r", quote(K(r)), "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K") if(ratio) { # save numerator and denominator denom <- lambda2 * area numK <- eval.fv(denom * Kfv) denK <- eval.fv(denom + Kfv * 0) attributes(numK) <- attributes(denK) <- attributes(Kfv) numK <- rebadge.fv(numK, tags="theo", new.desc="numerator for theoretical Poisson %s") denK <- rebadge.fv(denK, tags="theo", new.desc="denominator for theoretical Poisson %s") } ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # boundary distances b <- bdist.points(Xsort) # call the C code DUP <- spatstat.options("dupC") if(is.null(weights)) { # determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { # yes - use faster integer arithmetic res <- .C("KborderI", nxy=as.integer(npts), x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), denom=as.integer(integer(nr)), DUP=DUP) # PACKAGE="spatstat") } else { # no - need double precision storage res <- .C("KborderD", nxy=as.integer(npts), x=as.double(x), y=as.double(y), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), DUP=DUP) # PACKAGE="spatstat") } if("bord.modif" %in% correction) { denom.area <- eroded.areas(W, r) numKbm <- res$numer denKbm <- lambda2 * denom.area bm <- numKbm/denKbm Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), 
"hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if("border" %in% correction) { numKb <- res$numer denKb <- lambda * res$denom bord <- numKb/denKb Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=numKb), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=denKb), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } } else { # weighted version if(is.numeric(weights)) { if(length(weights) != X$n) stop("length of weights argument does not match number of points in X") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(any(is.na(weights))) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C("Kwborder", nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), b=as.double(b), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), denom=as.double(numeric(nr)), DUP=DUP) # PACKAGE="spatstat") if("border" %in% correction) { bord <- res$numer/res$denom Kfv <- bind.fv(Kfv, data.frame(border=bord), "hat(%s)[bord](r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=res$numer), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=res$denom), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } if("bord.modif" %in% correction) { numKbm <- res$numer denKbm <- eroded.areas(W, r) bm <- numKbm/denKbm Kfv <- bind.fv(Kfv, data.frame(bord.modif=bm), "hat(%s)[bordm](r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "hat(%s)[bordm](r)", "numerator of modified 
border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } } ## # default is to display them all formula(Kfv) <- . ~ r unitname(Kfv) <- unitname(X) if(ratio) { # finish off numerator and denominator formula(numK) <- formula(denK) <- . ~ r unitname(denK) <- unitname(numK) <- unitname(X) # tack on to result Kfv <- rat(Kfv, numK, denK, check=FALSE) } return(Kfv) } Knone.engine <- function(X, rmax, nr=100, weights=NULL, ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) W <- as.owin(X) area <- area.owin(W) lambda <- npts/area lambda2 <- (npts * (npts - 1))/(area^2) denom <- lambda2 * area if(missing(rmax)) rmax <- diameter(W)/4 r <- seq(from=0, to=rmax, length.out=nr) # this will be the output data frame Kdf <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") Kfv <- fv(Kdf, "r", quote(K(r)), "theo", , c(0,rmax), c("r","%s[pois](r)"), desc, fname="K") if(ratio) { # save numerator and denominator numK <- eval.fv(denom * Kfv) denK <- eval.fv(denom + Kfv * 0) attributes(numK) <- attributes(denK) <- attributes(Kfv) numK <- rebadge.fv(numK, tags="theo", new.desc="numerator for theoretical Poisson %s") denK <- rebadge.fv(denK, tags="theo", new.desc="denominator for theoretical Poisson %s") } ####### start computing ############ # sort in ascending order of x coordinate orderX <- fave.order(X$x) Xsort <- X[orderX] x <- Xsort$x y <- Xsort$y # call the C code DUP <- spatstat.options("dupC") if(is.null(weights)) { # determine whether the numerator can be stored as an integer bigint <- .Machine$integer.max if(npts < sqrt(bigint)) { # yes - use faster integer arithmetic res <- .C("KnoneI", nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), numer=as.integer(integer(nr)), DUP=DUP) # PACKAGE="spatstat") } else { # no - need double precision storage res <- 
.C("KnoneD", nxy=as.integer(npts), x=as.double(x), y=as.double(y), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), DUP=DUP) # PACKAGE="spatstat") } numKun <- res$numer denKun <- denom # = lambda2 * area Kun <- numKun/denKun } else { # weighted version if(is.numeric(weights)) { if(length(weights) != X$n) stop("length of weights argument does not match number of points in X") } else { wim <- as.im(weights, W) weights <- wim[X, drop=FALSE] if(any(is.na(weights))) stop("domain of weights image does not contain all points of X") } weights.Xsort <- weights[orderX] res <- .C("Kwnone", nxy=as.integer(npts), x=as.double(x), y=as.double(y), w=as.double(weights.Xsort), nr=as.integer(nr), rmax=as.double(rmax), numer=as.double(numeric(nr)), DUP=DUP) # PACKAGE="spatstat") numKun <- res$numer denKun <- sum(weights) Kun <- numKun/denKun } # tack on to fv object Kfv <- bind.fv(Kfv, data.frame(un=Kun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") if(ratio) { numK <- bind.fv(numK, data.frame(un=numKun), "hat(%s)[un](r)", "numerator of uncorrected estimate of %s", "un") denK <- bind.fv(denK, data.frame(un=denKun), "hat(%s)[un](r)", "denominator of uncorrected estimate of %s", "un") } ## # default is to display them all formula(Kfv) <- . ~ r unitname(Kfv) <- unitname(X) if(ratio) { # finish off numerator and denominator formula(numK) <- formula(denK) <- . 
~ r unitname(denK) <- unitname(numK) <- unitname(X) # tack on to result Kfv <- rat(Kfv, numK, denK, check=FALSE) } return(Kfv) } rmax.rule <- function(fun="K", W, lambda) { verifyclass(W, "owin") switch(fun, K = { # Ripley's Rule ripley <- min(diff(W$xrange), diff(W$yrange))/4 # Count at most 1000 neighbours per point rlarge <- if(!missing(lambda)) sqrt(1000 /(pi * lambda)) else Inf rmax <- min(rlarge, ripley) }, F = , G = , J = { # rule of thumb rdiam <- diameter(as.rectangle(W))/2 # Poisson process has F(rlarge) = 1 - 10^(-5) rlarge <- if(!missing(lambda)) sqrt(log(1e5)/(pi * lambda)) else Inf rmax <- min(rlarge, rdiam) }, stop(paste("Unrecognised function type", sQuote(fun))) ) return(rmax) } implemented.for.K <- function(correction, windowtype, explicit) { pixels <- (windowtype == "mask") if(any(correction == "best")) { # select best available correction correction <- if(!pixels) "isotropic" else "translate" } else { # available selection of edge corrections depends on window if(pixels) { iso <- (correction == "isotropic") if(any(iso)) { whinge <- "Isotropic correction not implemented for binary masks" if(explicit) { if(all(iso)) stop(whinge) else warning(whinge) } correction <- correction[!iso] } } } return(correction) } good.correction.K <- function(X) { nX <- npoints(X) W <- as.owin(X) avail <- c("none", if(nX < 1e5) "border" else NULL, if(nX < 3000)"translate" else NULL, if(nX < 1000 && !is.mask(W)) "isotropic" else NULL) chosen <- rev(avail)[1] return(chosen) } spatstat/R/news.R0000755000176000001440000000056012237642727013465 0ustar ripleyusers# # news.R # # News and warnings # latest.news <- function(package="spatstat") { # get version number v <- read.dcf(file=system.file("DESCRIPTION", package=package), fields="Version") ne <- eval(substitute(news(Version >= v0, package=package), list(v0=v))) page(ne, method="print") return(invisible(ne)) } class(latest.news) <- "autoexec" spatstat/R/distan3D.R0000755000176000001440000002071712237642727014170 0ustar 
ripleyusers# # distan3D.R # # $Revision: 1.10 $ $Date: 2013/11/03 02:18:14 $ # # Interpoint distances for 3D points # # Methods for pairdist, nndist, nnwhich, crossdist # pairdist.pp3 <- function(X, ..., periodic=FALSE, squared=FALSE) { verifyclass(X, "pp3") # extract point coordinates xyz <- coords(X) n <- nrow(xyz) x <- xyz$x y <- xyz$y z <- xyz$z # # special cases if(n == 0) return(matrix(numeric(0), nrow=0, ncol=0)) else if(n == 1) return(matrix(0,nrow=1,ncol=1)) # DUP <- spatstat.options("dupC") if(!periodic) { Cout <- .C("D3pairdist", n = as.integer(n), x = as.double(x), y = as.double(y), z = as.double(z), squared = as.integer(squared), d = as.double(numeric(n*n)), DUP=DUP) } else { b <- as.box3(X) wide <- diff(b$xrange) high <- diff(b$yrange) deep <- diff(b$zrange) Cout <- .C("D3pairPdist", n = as.integer(n), x = as.double(x), y = as.double(y), z = as.double(z), xwidth=as.double(wide), yheight=as.double(high), zdepth=as.double(deep), squared = as.integer(squared), d= as.double(numeric(n*n)), DUP=DUP) } dout <- matrix(Cout$d, nrow=n, ncol=n) } nndist.pp3 <- function(X, ..., k=1) { verifyclass(X, "pp3") if((narg <- length(list(...))) > 0) warning(paste(narg, "unrecognised", ngettext(narg, "argument was", "arguments were"), "ignored")) # extract point coordinates xyz <- coords(X) n <- nrow(xyz) x <- xyz$x y <- xyz$y z <- xyz$z # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # trivial cases if(n <= 1) { # empty pattern => return numeric(0) # or pattern with only 1 point => return Inf nnd <- matrix(Inf, nrow=n, ncol=kmax) nnd <- nnd[,k, drop=TRUE] return(nnd) } # number of neighbours that are well-defined kmaxcalc <- min(n-1, kmax) # calculate k-nn distances for 
k <= kmaxcalc if(kmaxcalc == 1) { # calculate nearest neighbour distance only nnd<-numeric(n) o <- fave.order(z) big <- sqrt(.Machine$double.xmax) DUP <- spatstat.options("dupC") Cout <- .C("nnd3D", n= as.integer(n), x= as.double(x[o]), y= as.double(y[o]), z= as.double(z[o]), nnd= as.double(nnd), nnwhich = as.integer(integer(1)), huge=as.double(big), DUP=DUP) nnd[o] <- Cout$nnd } else { # case kmaxcalc > 1 nnd<-numeric(n * kmaxcalc) o <- fave.order(z) big <- sqrt(.Machine$double.xmax) DUP <- spatstat.options("dupC") Cout <- .C("knnd3D", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), z = as.double(z[o]), nnd = as.double(nnd), nnwhich = as.integer(integer(1)), huge = as.double(big), DUP=DUP) nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc) nnd[o, ] <- matrix(Cout$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE) } # post-processing if(kmax > kmaxcalc) { # add columns of Inf's infs <- matrix(as.numeric(Inf), nrow=n, ncol=kmax-kmaxcalc) nnd <- cbind(nnd, infs) } if(length(k) < kmax) { # select only the specified columns nnd <- nnd[, k, drop=TRUE] } return(nnd) } nnwhich.pp3 <- function(X, ..., k=1) { verifyclass(X, "pp3") if((narg <- length(list(...))) > 0) warning(paste(narg, "unrecognised", ngettext(narg, "argument was", "arguments were"), "ignored")) # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # extract point coordinates xyz <- coords(X) n <- nrow(xyz) x <- xyz$x y <- xyz$y z <- xyz$z # special cases if(n <= 1) { # empty pattern => return integer(0) # or pattern with only 1 point => return NA nnw <- matrix(as.integer(NA), nrow=n, ncol=kmax) nnw <- nnw[,k, drop=TRUE] return(nnw) } # number of neighbours that are well-defined kmaxcalc <- 
min(n-1, kmax) # identify k-nn for k <= kmaxcalc if(kmaxcalc == 1) { # identify nearest neighbour only nnw <- integer(n) o <- fave.order(z) big <- sqrt(.Machine$double.xmax) DUP <- spatstat.options("dupC") Cout <- .C("nnw3D", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), z = as.double(z[o]), nnd = as.double(numeric(1)), nnwhich = as.integer(nnw), huge = as.double(big), DUP=DUP) # [sic] Conversion from C to R indexing is done in C code. witch <- Cout$nnwhich if(any(witch <= 0)) stop("Internal error: illegal index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") nnw[o] <- o[witch] } else { # case kmaxcalc > 1 nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc) o <- fave.order(z) big <- sqrt(.Machine$double.xmax) DUP <- spatstat.options("dupC") Cout <- .C("knnw3D", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), z = as.double(z[o]), nnd = as.double(numeric(1)), nnwhich = as.integer(nnw), huge = as.double(big), DUP=DUP) # [sic] Conversion from C to R indexing is done in C code. 
witch <- Cout$nnwhich witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE) if(any(witch <= 0)) stop("Internal error: illegal index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") # convert back to original ordering nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc) } # post-processing if(kmax > kmaxcalc) { # add columns of NA's nas <- matrix(as.integer(NA), nrow=n, ncol=kmax-kmaxcalc) nnw <- cbind(nnw, nas) } if(length(k) < kmax) { # select only the specified columns nnw <- nnw[, k, drop=TRUE] } return(nnw) } crossdist.pp3 <- function(X, Y, ..., periodic=FALSE, squared=FALSE) { verifyclass(X, "pp3") verifyclass(Y, "pp3") cX <- coords(X) cY <- coords(Y) nX <- nrow(cX) nY <- nrow(cY) if(nX == 0 || nY == 0) return(matrix(numeric(0), nrow=nX, ncol=nY)) DUP <- spatstat.options("dupC") if(!periodic) { Cout <- .C("D3crossdist", nfrom = as.integer(nX), xfrom = as.double(cX$x), yfrom = as.double(cX$y), zfrom = as.double(cX$z), nto = as.integer(nY), xto = as.double(cY$x), yto = as.double(cY$y), zto = as.double(cY$z), squared = as.integer(squared), d = as.double(matrix(0, nrow=nX, ncol=nY)), DUP=DUP) } else { b <- as.box3(X) wide <- diff(b$xrange) high <- diff(b$yrange) deep <- diff(b$zrange) Cout <- .C("D3crossPdist", nfrom = as.integer(nX), xfrom = as.double(cX$x), yfrom = as.double(cX$y), zfrom = as.double(cX$z), nto = as.integer(nY), xto = as.double(cY$x), yto = as.double(cY$y), zto = as.double(cY$z), xwidth = as.double(wide), yheight = as.double(high), zheight = as.double(deep), squared = as.integer(squared), d = as.double(matrix(0, nrow=nX, ncol=nY)), DUP=DUP) } return(matrix(Cout$d, nrow=nX, ncol=nY)) } spatstat/R/is.cadlag.R0000755000176000001440000000031612237642727014335 0ustar ripleyusersis.cadlag <- function (s) { if(!is.stepfun(s)) stop("s is not a step function.\n") r <- knots(s) h <- s(r) n <- length(r) r1 <- c(r[-1],r[n]+1) rm <- (r+r1)/2 hm <- s(rm) identical(all.equal(h,hm),TRUE) } 
spatstat/R/dummify.R0000644000176000001440000000153412237642727014162 0ustar ripleyusers# # dummify.R # # Convert a factor to a matrix of dummy variables, etc. # # $Revision: 1.4 $ $Date: 2013/04/25 06:37:43 $ # dummify <- function(x) { if(is.matrix(x) || is.data.frame(x)) { x <- as.data.frame(x) y <- do.call("data.frame", lapply(x, dummify)) return(as.matrix(y)) } # x is 1-dimensional if(is.complex(x)) return(as.matrix(data.frame(Re=Re(x), Im=Im(x)))) # convert factors etc if(is.character(x)) x <- factor(x) if(is.logical(x)) x <- factor(x, levels=c(FALSE,TRUE)) if(is.factor(x)) { # convert to dummy variables nx <- length(x) lev <- levels(x) y <- matrix(0, nrow=nx, ncol=length(lev)) colnames(y) <- lev y[cbind(seq_len(nx), as.integer(x))] <- 1 return(y) } # convert to numeric y <- as.numeric(x) if(!is.matrix(y)) y <- matrix(y, ncol=1) return(y) } spatstat/R/pairwise.family.R0000755000176000001440000004113712237642727015621 0ustar ripleyusers# # # pairwise.family.S # # $Revision: 1.59 $ $Date: 2013/10/24 04:21:59 $ # # The pairwise interaction family of point process models # # pairwise.family: object of class 'isf' defining pairwise interaction # # # ------------------------------------------------------------------- # pairwise.family <- list( name = "pairwise", print = function(self) { cat("Pairwise interaction family\n") }, plot = function(fint, ..., d=NULL, plotit=TRUE) { verifyclass(fint, "fii") inter <- fint$interaction if(is.null(inter) || is.null(inter$family) || inter$family$name != "pairwise") stop("Tried to plot the wrong kind of interaction") # get fitted coefficients of interaction terms # and set coefficients of offset terms to 1 Vnames <- fint$Vnames IsOffset <- fint$IsOffset coeff <- rep.int(1, length(Vnames)) names(coeff) <- Vnames coeff[!IsOffset] <- fint$coefs[Vnames[!IsOffset]] # pairpot <- inter$pot potpars <- inter$par rmax <- reach(fint, epsilon=1e-3) xlim <- list(...)$xlim if(is.infinite(rmax)) { if(!is.null(xlim)) rmax <- max(xlim) else { 
warning("Reach of interaction is infinite; need xlim to plot it") return(invisible(NULL)) } } if(is.null(d)) { dmax <- 1.25 * rmax d <- seq(from=0, to=dmax, length.out=1024) } else { stopifnot(is.numeric(d) && all(is.finite(d)) && all(diff(d) > 0)) dmax <- max(d) } if(is.null(xlim)) xlim <- c(0, dmax) types <- potpars$types if(is.null(types)) { # compute potential function as `fv' object dd <- matrix(d, ncol=1) p <- pairpot(dd, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fun <- fv(data.frame(r=d, h=y, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1")) if(plotit) do.call("plot.fv", resolve.defaults(list(fun), list(...), list(ylab="Pairwise interaction", xlab="Distance", ylim=ylim))) return(invisible(fun)) } else{ # compute each potential and store in `fasp' object if(!is.factor(types)) types <- factor(types, levels=types) m <- length(types) nd <- length(d) dd <- matrix(rep.int(d, m), nrow=nd * m, ncol=m) tx <- rep.int(types, rep.int(nd, m)) ty <- types p <- pairpot(dd, tx, ty, potpars) if(length(dim(p))==2) p <- array(p, dim=c(dim(p),1), dimnames=NULL) if(dim(p)[3] != length(coeff)) stop("Dimensions of potential do not match coefficient vector") for(k in seq_len(dim(p)[3])) p[,,k] <- multiply.only.finite.entries( p[,,k] , coeff[k] ) y <- exp(apply(p, c(1,2), sum)) ylim <- range(0, 1.1, y, finite=TRUE) fns <- vector(m^2, mode="list") which <- matrix(, m, m) for(i in seq_len(m)) { for(j in seq_len(m)) { # relevant position in matrix ijpos <- i + (j-1) * m which[i,j] <- ijpos # extract values of potential yy <- y[tx == types[i], j] # make fv object fns[[ijpos]] <- 
fv(data.frame(r=d, h=yy, one=1), "r", substitute(h(r), NULL), "h", cbind(h,one) ~ r, xlim, c("r", "h(r)", "1"), c("distance argument r", "pairwise interaction term h(r)", "reference value 1")) # } } funz <- fasp(fns, which=which, formulae=list(cbind(h, one) ~ r), title="Fitted pairwise interactions", rowNames=paste(types), colNames=paste(types)) if(plotit) do.call("plot.fasp", resolve.defaults(list(funz), list(...), list(ylim=ylim, ylab="Pairwise interaction", xlab="Distance"))) return(invisible(funz)) } }, # end of function `plot' # ---------------------------------------------------- eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, pot.only=FALSE) { # # This is the eval function for the `pairwise' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairwise' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. 
# The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # # The pair potential function 'pairpot' should be either # pairpot(d, par) [for potentials that don't depend on marks] # or # pairpot(d, tx, tu, par) [for potentials that do depend on mark] # where d is a matrix of interpoint distances, # tx is the vector of types for the data points, # tu is the vector of types for all quadrature points # and # par is a list of parameters for the potential. # # It must return a matrix with the same dimensions as d # or an array with its first two dimensions the same as the dimensions of d. fop <- names(formals(pairpot)) if(identical(all.equal(fop, c("d", "par")), TRUE)) marx <- FALSE else if(identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) marx <- TRUE else stop("Formal arguments of pair potential function are not understood") ## edge correction argument if(length(correction) > 1) stop("Only one edge correction allowed at a time!") if(!any(correction == c("periodic", "border", "translate", "translation", "isotropic", "Ripley", "none"))) stop(paste("Unrecognised edge correction", sQuote(correction))) no.correction <- #### Compute basic data # Decide whether to apply faster algorithm using 'closepairs' use.closepairs <- (correction %in% c("none", "border", "translate", "translation")) && !is.null(Reach) && is.finite(Reach) && is.null(precomputed) && !savecomputed if(!is.null(precomputed)) { # precomputed X <- precomputed$X U <- precomputed$U EqualPairs <- precomputed$E M <- precomputed$M } else { U <- as.ppp(U, X$window) # i.e. 
X$window is DEFAULT window if(!use.closepairs) # Form the matrix of distances M <- crossdist(X, U, periodic=(correction=="periodic")) } nX <- npoints(X) nU <- npoints(U) dimM <- c(nX, nU) # Evaluate the pairwise potential without edge correction if(use.closepairs) POT <- evalPairPotential(X,U,EqualPairs,pairpot,potpars,Reach) else if(!marx) POT <- pairpot(M, potpars) else POT <- pairpot(M, marks(X), marks(U), potpars) # Determine whether each column of potential is an offset IsOffset <- attr(POT, "IsOffset") # Check errors and special cases if(!is.matrix(POT) && !is.array(POT)) { if(length(POT) == 0 && X$n == 0) # empty pattern POT <- array(POT, dim=c(dimM,1)) else stop("Pair potential did not return a matrix or array") } if(length(dim(POT)) == 1 || any(dim(POT)[1:2] != dimM)) { whinge <- paste0( "The pair potential function ",short.deparse(substitute(pairpot)), " must produce a matrix or array with its first two dimensions\n", "the same as the dimensions of its input.\n") stop(whinge) } # make it a 3D array if(length(dim(POT))==2) POT <- array(POT, dim=c(dim(POT),1), dimnames=NULL) if(correction == "translate" || correction == "translation") { edgewt <- edge.Trans(X, U) # sanity check ("everybody knows there ain't no...") if(!is.matrix(edgewt)) stop("internal error: edge.Trans() did not yield a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Trans() has wrong dimensions") POT <- c(edgewt) * POT } else if(correction == "isotropic" || correction == "Ripley") { # weights are required for contributions from QUADRATURE points edgewt <- t(edge.Ripley(U, t(M), X$window)) if(!is.matrix(edgewt)) stop("internal error: edge.Ripley() did not return a matrix") if(nrow(edgewt) != X$n || ncol(edgewt) != length(U$x)) stop("internal error: edge weights matrix returned by edge.Ripley() has wrong dimensions") POT <- c(edgewt) * POT } # No pair potential term between a point and itself if(length(EqualPairs) > 
0) { nplanes <- dim(POT)[3] for(k in 1:nplanes) POT[cbind(EqualPairs, k)] <- 0 } # Return just the pair potential? if(pot.only) return(POT) # Sum the pairwise potentials V <- apply(POT, c(2,3), sum) # attach the original pair potentials attr(V, "POT") <- POT # attach the offset identifier attr(V, "IsOffset") <- IsOffset # pass computed information out the back door if(savecomputed) attr(V, "computed") <- list(E=EqualPairs, M=M) return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairwise.family$suffstat") { # for pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"pairwise")) stop("Model is not a pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Xin <- X[contribute] Xout <- X[!contribute] # partial model matrix arising from ordered pairs of data points # which both contribute to the pseudolikelihood Empty <- X[numeric(0)] momINxIN <- partialModelMatrix(Xin, Empty, model, "suffstat") # partial model matrix arising from ordered pairs of data points # the second of which does not contribute to the pseudolikelihood mom <- partialModelMatrix(Xout, Xin, model, "suffstat") indx <- Xout$n + (1:(Xin$n)) momINxOUT <- mom[indx, , drop=FALSE] # parameters order2 <- names(coef(model)) %in% model$internal$Vnames order1 <- !order2 result <- 0 * coef(model) if(any(order1)) { # first order contributions can be determined from INxIN o1terms <- momINxIN[ , order1, drop=FALSE] o1sum <- apply(o1terms, 2, sum) result[order1] <- o1sum } if(any(order2)) { # adjust for double counting of ordered pairs in INxIN but not INxOUT o2termsINxIN <- momINxIN[, order2, drop=FALSE] o2termsINxOUT <- momINxOUT[, 
order2, drop=FALSE] o2sum <- apply(o2termsINxIN, 2, sum)/2 + apply(o2termsINxOUT, 2, sum) result[order2] <- o2sum } return(result) }, ######### end of function $suffstat delta2 = function(X, inte, correction, ...) { # Sufficient statistic for second order conditional intensity # for pairwise interaction processes # Equivalent to evaluating pair potential. X <- as.ppp(X) nX <- npoints(X) E <- cbind(1:nX, 1:nX) R <- reach(inte) result <- pairwise.family$eval(X,X,E, inte$pot,inte$par, correction, pot.only=TRUE, Reach=R) } ######### end of function $delta2 ) ######### end of list class(pairwise.family) <- "isf" # externally visible evalPairPotential <- function(X, P, E, pairpot, potpars, R) { # Evaluate pair potential without edge correction weights nX <- npoints(X) nP <- npoints(P) stopifnot(is.function(pairpot)) fop <- names(formals(pairpot)) if(identical(all.equal(fop, c("d", "par")), TRUE)) { unmarked <- TRUE } else if(identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) { unmarked <- FALSE } else stop("Formal arguments of pair potential function are not understood") # determine dimension of potential, etc fakePOT <- if(unmarked) pairpot(matrix(, 0, 0), potpars) else pairpot(matrix(, 0, 0), marks(X)[integer(0)], marks(P)[integer(0)], potpars) IsOffset <- attr(fakePOT, "IsOffset") fakePOT <- ensure3Darray(fakePOT) Vnames <- dimnames(fakePOT)[[3]] p <- dim(fakePOT)[3] # Identify close pairs X[i], P[j] cl <- crosspairs(X, P, R) I <- cl$i J <- cl$j D <- matrix(cl$d, ncol=1) # deal with empty cases if(nX == 0 || nP == 0 || length(I) == 0) { result <- array(0, dim=c(nX, nP, p), dimnames=list(NULL, NULL, Vnames)) attr(result, "IsOffset") <- IsOffset return(result) } # evaluate potential for close pairs # POT is a 1-column matrix or array, with rows corresponding to close pairs if(unmarked) { # unmarked POT <- pairpot(D, potpars) IsOffset <- attr(POT, "IsOffset") } else { # marked marX <- marks(X) marP <- marks(P) if(!identical(levels(marX), levels(marP))) 
stop("Internal error: marks of X and P have different levels") types <- levels(marX) mI <- marX[I] mJ <- marP[J] POT <- NULL # split data by type of P[j] for(k in types) { relevant <- which(mJ == k) if(length(relevant) > 0) { fk <- factor(k, levels=types) POTk <- pairpot(D[relevant, , drop=FALSE], mI[relevant], fk, potpars) POTk <- ensure3Darray(POTk) if(is.null(POT)) { # use first result of 'pairpot' to determine dimension POT <- array(, dim=c(length(I), 1, dim(POTk)[3])) # capture information about offsets, and names of interaction terms IsOffset <- attr(POTk, "IsOffset") Vnames <- dimnames(POTk)[[3]] } # insert values just computed POT[relevant, , ] <- POTk } } } POT <- ensure3Darray(POT) p <- dim(POT)[3] # create result array result <- array(0, dim=c(npoints(X), npoints(P), p), dimnames=list(NULL, NULL, Vnames)) # insert results II <- rep(I, p) JJ <- rep(J, p) KK <- rep(1:p, each=length(I)) result[cbind(II,JJ,KK)] <- POT # finally identify identical pairs and set value to 0 if(length(E) > 0) { E.rep <- apply(E, 2, rep, times=p) p.rep <- rep(1:p, each=nrow(E)) result[cbind(E.rep, p.rep)] <- 0 } attr(result, "IsOffset") <- IsOffset return(result) } spatstat/R/as.im.R0000755000176000001440000001555712237642727013534 0ustar ripleyusers# # as.im.R # # conversion to class "im" # # $Revision: 1.40 $ $Date: 2013/05/01 05:42:10 $ # # as.im() # as.im <- function(X, ...) 
{ UseMethod("as.im") } as.im.im <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) { X <- repair.old.factor.image(X) if(is.null(W)) { if(is.null(eps) && is.null(dimyx) && is.null(xy)) { X <- repair.image.xycoords(X) X <- na.handle.im(X, na.replace) return(X) } # pixel raster determined by dimyx etc W <- as.mask(as.rectangle(X), eps=eps, dimyx=dimyx, xy=xy) # invoke as.im.owin Y <- as.im(W) } else { # apply dimyx (etc) if present, # otherwise use W to determine pixel raster Y <- as.im(W, eps=eps, dimyx=dimyx, xy=xy) } # resample X onto raster of Y Y <- rastersample(X, Y) return(na.handle.im(Y, na.replace)) } as.im.owin <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, value=1) { if(!(is.null(eps) && is.null(dimyx) && is.null(xy))) { # raster dimensions determined by dimyx etc # convert X to a mask M <- as.mask(X, eps=eps, dimyx=dimyx, xy=xy) # convert mask to image d <- M$dim v <- matrix(value, d[1], d[2]) m <- M$m v[!m] <- if(is.null(na.replace)) NA else na.replace out <- im(v, M$xcol, M$yrow, xrange=M$xrange, yrange=M$yrange, unitname=unitname(X)) return(out) } if(!is.null(W) && is.owin(W) && W$type == "mask") { # raster dimensions determined by W # convert W to zero image d <- W$dim Z <- im(matrix(0, d[1], d[2]), W$xcol, W$yrow, unitname=unitname(X)) # adjust values to indicator of X Z[X] <- 1 if(missing(value) && is.null(na.replace)) { # done out <- Z } else { # map {0, 1} to {na.replace, value} v <- matrix(ifelseAB(Z$v == 0, na.replace, value), d[1], d[2]) out <- im(v, W$xcol, W$yrow, unitname=unitname(X)) } return(out) } if(X$type == "mask") { # raster dimensions determined by X # convert X to image d <- X$dim v <- matrix(value, d[1], d[2]) m <- X$m v[!m] <- if(is.null(na.replace)) NA else na.replace out <- im(v, X$xcol, X$yrow, unitname=unitname(X)) return(out) } # X is not a mask. # W is either missing, or is not a mask. 
# Convert X to a image using default settings M <- as.mask(X) # convert mask to image d <- M$dim v <- matrix(value, d[1], d[2]) m <- M$m v[!m] <- if(is.null(na.replace)) NA else na.replace out <- im(v, M$xcol, M$yrow, unitname=unitname(X)) return(out) } as.im.function <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) { f <- X if(is.null(W)) stop("A window W is required") W <- as.owin(W) W <- as.mask(W, eps=eps, dimyx=dimyx, xy=xy) m <- W$m funnywindow <- !all(m) xx <- as.vector(raster.x(W)) yy <- as.vector(raster.y(W)) # evaluate function value at each pixel if(!funnywindow) values <- f(xx, yy, ...) else { # evaluate only inside window inside <- as.vector(m) val <- f(xx[inside], yy[inside], ...) # create space for full matrix msize <- length(m) values <- if(!is.factor(val)) vector(mode=typeof(val), length=msize) else { lev <- levels(val) factor(rep.int(lev[1], msize), levels=lev) } # copy values, assigning NA outside window values[inside] <- val values[!inside] <- NA } nc <- length(W$xcol) nr <- length(W$yrow) if(nr == 1 || nc == 1) { # exception: can't determine pixel width/height from centres out <- im(matrix(values, nr, nc), xrange=W$xrange, yrange=W$yrange, unitname=unitname(W)) } else { out <- im(values, W$xcol, W$yrow, unitname=unitname(W)) } return(na.handle.im(out, na.replace)) } as.im.matrix <- function(X, W=NULL, ...) 
{ nr <- nrow(X) nc <- ncol(X) if(is.null(W)) return(im(X, ...)) W <- as.owin(W) if(W$type == "mask") { xcol <- W$xcol yrow <- W$yrow # pixel coordinate information if(length(xcol) == nc && length(yrow) == nr) return(im(X, xcol, yrow, unitname=unitname(W))) } # range information R <- as.rectangle(W) xrange <- R$xrange yrange <- R$yrange return(im(X, xrange=xrange, yrange=yrange, unitname=unitname(W))) } as.im.default <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) { if((is.vector(X) || is.factor(X)) && length(X) == 1) { # numerical value: interpret as constant function xvalue <- X X <- function(xx, yy, ...) { rep.int(xvalue, length(xx)) } return(as.im(X, W, ..., dimyx=dimyx, na.replace=na.replace)) } if(is.list(X) && checkfields(X, c("x","y","z"))) { stopifnot(is.matrix(X$z)) z <- X$z y <- X$y x <- X$x # Usual S convention as in contour.default() and image.default() # Rows of z correspond to x values. nr <- nrow(z) nc <- ncol(z) lx <- length(x) ly <- length(y) if(lx == nr + 1) x <- (x[-1] + x[-lx])/2 else if(lx != nr) stop("length of x coordinate vector does not match number of rows of z") if(ly == nc + 1) y <- (y[-1] + y[-ly])/2 else if(ly != nc) stop("length of y coordinate vector does not match number of columns of z") # convert to class "im" out <- im(t(z), x, y) # now apply W and dimyx if present if(is.null(W) && !(is.null(eps) && is.null(dimyx) && is.null(xy))) out <- as.im(out, eps=eps, dimyx=dimyx, xy=xy) else if(!is.null(W)) out <- as.im(out, W=W, eps=eps, dimyx=dimyx, xy=xy) return(na.handle.im(out, na.replace)) } stop("Can't convert X to a pixel image") } as.im.ppp <- function(X, ...) 
{ pixellate(X, ..., weights=NULL, zeropad=FALSE) } # convert to image from some other format, then do something do.as.im <- function(x, action, ..., W = NULL, eps = NULL, dimyx = NULL, xy = NULL, na.replace = NULL) { Z <- as.im(x, W=W, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace) Y <- do.call(action, list(Z, ...)) return(Y) } na.handle.im <- function(X, na.replace) { if(is.null(na.replace)) return(X) if(length(na.replace) != 1) stop("na.replace should be a single value") X$v[is.na(X$v)] <- na.replace return(X) } repair.old.factor.image <- function(x) { # convert from old to new representation of factor images if(x$type != "factor") return(x) v <- x$v isold <- !is.null(lev <- attr(x, "levels")) isnew <- is.factor(v) && is.matrix(v) if(isnew) return(x) if(!isold) stop("Internal error: unrecognised format for factor-valued image") v <- factor(v, levels=lev) dim(v) <- x$dim x$v <- v return(x) } repair.image.xycoords <- function(x) { im(x$v, xrange=x$xrange, yrange=x$yrange, unitname=unitname(x)) } spatstat/R/cut.ppp.R0000755000176000001440000000505012237642727014101 0ustar ripleyusers# # cut.ppp.R # # cut method for ppp objects # # $Revision: 1.12 $ $Date: 2013/04/25 06:37:43 $ # cut.ppp <- function(x, z=marks(x), ...) { x <- as.ppp(x) if(missing(z) || is.null(z)) { z <- marks(x, dfok=TRUE) if(is.null(z)) stop("x has no marks to cut") } if(is.character(z)) { if(length(z) == npoints(x)) { # interpret as a factor z <- factor(z) } else if((length(z) == 1) && (z %in% colnames(marks(x)))) { # interpret as the name of a column of marks zname <- z m <- marks(x) z <- m[, zname] } else stop("format of argument z not understood") } if(is.factor(z) || is.vector(z)) { stopifnot(length(z) == npoints(x)) g <- if(is.factor(z)) z else if(is.numeric(z)) cut(z, ...) else factor(z) marks(x) <- g return(x) } if(is.data.frame(z) || is.matrix(z)) { stopifnot(nrow(z) == npoints(x)) # take first column z <- z[,1] g <- if(is.numeric(z)) cut(z, ...) 
else factor(z) marks(x) <- g return(x) } if(is.im(z)) return(cut(x, z[x, drop=FALSE], ...)) if(is.tess(z)) { switch(z$type, rect={ jx <- findInterval(x$x, z$xgrid, rightmost.closed=TRUE) iy <- findInterval(x$y, z$ygrid, rightmost.closed=TRUE) nrows <- length(z$ygrid) - 1 ncols <- length(z$xgrid) - 1 jcol <- jx irow <- nrows - iy + 1 ktile <- jcol + ncols * (irow - 1) m <- factor(ktile, levels=seq_len(nrows*ncols)) ij <- expand.grid(j=seq_len(ncols),i=seq_len(nrows)) levels(m) <- paste("Tile row ", ij$i, ", col ", ij$j, sep="") }, tiled={ todo <- seq_len(npoints(x)) nt <- length(z$tiles) m <- integer(x$n) for(i in 1:nt) { ti <- z$tiles[[i]] hit <- inside.owin(x$x[todo], x$y[todo], ti) if(any(hit)) { m[todo[hit]] <- i todo <- todo[!hit] } if(length(todo) == 0) break } m[m == 0] <- NA nama <- names(z$tiles) lev <- seq_len(nt) lab <- if(!is.null(nama) && all(nzchar(nama))) nama else paste("Tile", lev) m <- factor(m, levels=lev, labels=lab) }, image={ zim <- z$image m <- factor(zim[x, drop=FALSE], levels=levels(zim)) } ) marks(x) <- m return(x) } stop("Format of z not understood") } spatstat/R/rmhsnoop.R0000644000176000001440000005122612250737567014362 0ustar ripleyusers# # rmhsnoop.R # # visual debug mechanism for rmh # # $Revision: 1.25 $ $Date: 2013/12/08 00:38:05 $ # # When rmh is called in visual debug mode (snooping = TRUE), # it calls e <- rmhSnoopEnv(...) to create an R environment 'e' # containing variables that will represent the current state # of the M-H algorithm with initial state X and model reach R. # # The environment 'e' is passed to the C routine xmethas. # This makes it possible for data to be exchanged between # the C and R code. # # When xmethas reaches the debugger's stopping time, # the current state of the simulation and the proposal # are copied from C into the R environment 'e'. # # Then to execute the visual display, the C code calls # 'eval' to execute the R function rmhsnoop(). 
# # The function rmhsnoop uses the 'simplepanel' class # to generate a plot showing the state of the simulation # and the proposal, and then wait for point-and-click input using # locator(). # # When rmhsnoop() exits, it returns an integer giving the # (user-specified) next stoppping time. This is read back into # the C code. Then xmethas resumes simulations. # # I said it was simple! %^] rmhSnoopEnv <- function(Xinit, Wclip, R) { stopifnot(is.ppp(Xinit)) # Create an environment that will be accessible to R and C code e <- new.env() # initial state (point pattern) X <- Xinit assign("Wsim", as.owin(X), envir=e) assign("xcoords", coords(X)[,1], envir=e) assign("ycoords", coords(X)[,2], envir=e) if(is.multitype(X)) { mcodes <- as.integer(marks(X)) - 1 mlevels <- levels(marks(X)) assign("mcodes", mcodes, envir=e) assign("mlevels", mlevels, envir=e) } else { assign("mcodes", NULL, envir=e) assign("mlevels", NULL, envir=e) } # clipping window assign("Wclip", Wclip, envir=e) # reach of model (could be infinite) assign("R", R, envir=e) # current iteration number assign("irep", 0L, envir=e) # next iteration to be inspected assign("inxt", 1L, envir=e) # next transition to be inspected assign("tnxt", 1L, envir=e) # proposal type assign("proptype", NULL, envir=e) # outcome of proposal assign("itype", NULL, envir=e) # proposal location assign("proplocn", NULL, envir=e) # proposal mark assign("propmark", NULL, envir=e) # index of proposal point in existing pattern assign("propindx", NULL, envir=e) # Hastings ratio assign("numerator", NULL, envir=e) assign("denominator", NULL, envir=e) # Expression actually evaluated to execute visual debug # Expression is evaluated in the environment 'e' snoopexpr <- expression({ rslt <- rmhsnoop(Wsim=Wsim, Wclip=Wclip, R=R, xcoords=xcoords, ycoords=ycoords, mlevels=mlevels, mcodes=mcodes, irep=irep, itype=itype, proptype=proptype, proplocn=proplocn, propmark=propmark, propindx=propindx, numerator=numerator, denominator=denominator) inxt <- 
rslt$inxt tnxt <- rslt$tnxt itype <- if(rslt$accepted) rslt$itype else 0 storage.mode(tnxt) <- storage.mode(inxt) <- storage.mode(itype) <- "integer" }) assign("snoopexpr", snoopexpr, envir=e) # callback expression assign("callbackexpr", quote(eval(snoopexpr)), envir=e) return(e) } # visual debug display using base graphics rmhsnoop <- local({ rmhsnoop <- function(..., Wsim, Wclip, R, xcoords, ycoords, mlevels, mcodes, irep, itype, proptype, proplocn, propmark, propindx, numerator, denominator) { trap.extra.arguments(..., .Context="In rmhsnoop") X <- ppp(xcoords, ycoords, window=Wsim) if(!missing(mlevels) && length(mlevels) > 0) marks(X) <- factor(mlevels[mcodes+1], levels=mlevels) Wclip.orig <- Wclip # determine plot arguments if(is.mask(Wclip)) { parg.Wclip <- list(invert=TRUE, col="grey") } else { Wclip <- as.psp(Wclip) parg.Wclip <- list(lty=3, lwd=2, col="grey") } parg.birth <- list(pch=16, cols="green") parg.death <- list(pch=4, cols="red", lwd=2) parg.birthcircle <- list(col="green", lty=3) parg.deathcircle <- list(col="red", lty=3) # assemble a layered object representing the state and the proposal if(is.null(proptype)) { # initial state L <- layered(Wsim, Wclip, X) layerplotargs(L)$Wclip <- parg.Wclip accepted <- TRUE } else { accepted <- (itype == proptype) # add proposal info switch(decode.proptype(proptype), Reject= { propname <- "rejected" L <- layered(Wsim=Wsim, Wclip=Wclip, X=X) layerplotargs(L)$Wclip <- parg.Wclip }, Birth = { propname <- "birth proposal" U <- ppp(proplocn[1], proplocn[2], window=Wsim) D <- if(is.finite(R) && R > 0) { as.psp(disc(R, proplocn))[Wsim] } else NULL L <- layered(Wsim=Wsim, Wclip=Wclip, PrevState=X, Reach=D, NewPoint=U) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$NewPoint <- parg.birth }, Death = { propname <- "death proposal" # convert from C to R indexing propindx <- propindx + 1 XminI <- X[-propindx] XI <- X[propindx] D <- if(is.finite(R) && R > 0) { as.psp(disc(R, c(XI$x, XI$y)))[Wsim] } else NULL L <- 
layered(Wsim=Wsim, Wclip=Wclip, RetainedPoints=XminI, Reach=D, Deletion=XI) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$Reach <- parg.deathcircle layerplotargs(L)$Deletion <- parg.death }, Shift = { propname <- "shift proposal" # convert from C to R indexing propindx <- propindx + 1 # make objects XminI <- X[-propindx] XI <- X[propindx] U <- ppp(proplocn[1], proplocn[2], window=Wsim) if(is.finite(R) && R > 0) { DU <- as.psp(disc(R, proplocn))[Wsim] DXI <- as.psp(disc(R, c(XI$x, XI$y)))[Wsim] } else { DU <- DXI <- NULL } # make layers L <- layered(Wsim=Wsim, Wclip=Wclip, OtherPoints=XminI, ReachAfter=DU, AfterShift=U, ReachBefore=DXI, BeforeShift=XI) layerplotargs(L)$Wclip <- parg.Wclip layerplotargs(L)$ReachAfter <- parg.birthcircle layerplotargs(L)$AfterShift <- parg.birth layerplotargs(L)$ReachBefore <- parg.deathcircle layerplotargs(L)$BeforeShift <- parg.death }, stop("Unrecognised proposal type") ) } header <- c(paste("Iteration", irep), propname, paste("Hastings ratio =", signif(numerator, 4), "/", signif(denominator, 4))) info <- list(irep=irep, Wsim=Wsim, Wclip=Wclip.orig, X=X, proptype=proptype, proplocn=proplocn, propindx=propindx, propmark=propmark, accepted=accepted, numerator=numerator, denominator=denominator) inspectProposal(L, info, title=header) } decode.proptype <- function(n) { if(n < 0 || n > 3) stop(paste("Unrecognised proposal type:", n)) switch(n+1, "Reject", "Birth", "Death", "Shift") } encode.proptype <- function(s) { switch(s, Reject=0, Birth=1, Death=2, Shift=3) } inspectProposal <- function(X, info, ..., title) { if(missing(title)) title <- short.deparse(substitute(X)) if(!inherits(X, "layered")) X <- layered(X) lnames <- names(X) if(sum(nzchar(lnames)) != length(X)) lnames <- paste("Layer", seq_len(length(X))) # Find window and bounding box (validates X) W <- as.owin(X) BX <- as.rectangle(W) # Initialise environment for state variables etc # This environment is accessible to the panel button functions en <- new.env() 
assign("X", X, envir=en) assign("W", W, envir=en) assign("BX", BX, envir=en) assign("zoomfactor", 1L, envir=en) midX <- unlist(centroid.owin(BX)) assign("midX", midX, envir=en) assign("zoomcentre", midX, envir=en) assign("irep", info$irep, envir=en) assign("inxt", info$irep+1, envir=en) assign("tnxt", -1, envir=en) assign("accepted", info$accepted, envir=en) assign("proplocn", info$proplocn, envir=en) assign("info", info, envir=en) # Build interactive panel # Start with data panel P <- simplepanel(title, BX, list(Data=BX), list(Data=dataclickfun), list(Data=dataredrawfun), snoopexit, en) # Add pan buttons margin <- max(sidelengths(BX))/4 panelwidth <- sidelengths(BX)[1]/2 P <- grow.simplepanel(P, "top", margin, navfuns["Up"], aspect=1) P <- grow.simplepanel(P, "bottom", margin, navfuns["Down"], aspect=1) P <- grow.simplepanel(P, "left", margin, navfuns["Left"], aspect=1) P <- grow.simplepanel(P, "right", margin, navfuns["Right"], aspect=1) # Zoom/Pan buttons at right P <- grow.simplepanel(P, "right", panelwidth, zoomfuns) # Accept/reject buttons at top P <- grow.simplepanel(P, "top", margin, accept.clicks, accept.redraws) # Dump/print buttons at bottom P <- grow.simplepanel(P, "bottom", margin, dumpfuns) # Jump controls at left maxchars <- max(4, nchar(names(jump.clicks))) P <- grow.simplepanel(P, "left", panelwidth * maxchars/6, jump.clicks) # go rslt <- run.simplepanel(P, popup=FALSE) clear.simplepanel(P) rm(en) return(rslt) } # button control functions zoomfuns <- rev(list( "Zoom In"=function(env, xy) { z <- get("zoomfactor", envir=env) assign("zoomfactor", z * 2, envir=env) return(TRUE) }, "Zoom Out"=function(env, xy) { z <- get("zoomfactor", envir=env) assign("zoomfactor", z / 2, envir=env) return(TRUE) }, "At Proposal"=function(env, xy) { proplocn <- get("proplocn", envir=env) assign("zoomcentre", proplocn, envir=env) return(TRUE) }, Reset=function(env, xy) { assign("zoomfactor", 1L, envir=env) midX <- get("midX", envir=env) assign("zoomcentre", midX, 
envir=env) return(TRUE) })) navfuns <- list( Left = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) width <- sidelengths(BX)[1] stepsize <- (width/4)/zoom ce <- ce - c(stepsize, 0) assign("zoomcentre", ce, envir=env) return(TRUE) }, Right = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) width <- sidelengths(BX)[1] stepsize <- (width/4)/zoom ce <- ce + c(stepsize, 0) assign("zoomcentre", ce, envir=env) return(TRUE) }, Up = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) height <- sidelengths(BX)[2] stepsize <- (height/4)/zoom ce <- ce + c(0, stepsize) assign("zoomcentre", ce, envir=env) return(TRUE) }, Down = function(env, xy) { zoom <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) BX <- get("BX", envir=env) height <- sidelengths(BX)[2] stepsize <- (height/4)/zoom ce <- ce - c(0, stepsize) assign("zoomcentre", ce, envir=env) return(TRUE) }) accept.clicks <- rev(list( Accept=function(env, xy) { assign("accepted", TRUE, envir=env) return(TRUE) }, Reject=function(env, xy) { assign("accepted", FALSE, envir=env) return(TRUE) })) accept.redraws <- rev(list( Accept=function(button, name, env) { accepted <- get("accepted", envir=env) if(accepted) { plot(button, add=TRUE, col="green") } else { plot(button, add=TRUE) } text(centroid.owin(button), labels=name) }, Reject=function(button, name, env) { accepted <- get("accepted", envir=env) if(accepted) { plot(button, add=TRUE) } else { plot(button, add=TRUE, col="pink") } text(centroid.owin(button), labels=name) })) jump.clicks <- rev(list( "Next Iteration"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+1, envir=env) return(FALSE) }, "Skip 10"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+10, envir=env) return(FALSE) }, "Skip 100"=function(env, xy) { 
irep <- get("irep", envir=env) assign("inxt", irep+100, envir=env) return(FALSE) }, "Skip 1000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+1000, envir=env) return(FALSE) }, "Skip 10,000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+10000, envir=env) return(FALSE) }, "Skip 100,000"=function(env, xy) { irep <- get("irep", envir=env) assign("inxt", irep+100000, envir=env) return(FALSE) }, "Next Birth"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Birth"), envir=env) return(FALSE) }, "Next Death"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Death"), envir=env) return(FALSE) }, "Next Shift"=function(env, xy) { assign("inxt", -1, envir=env) assign("tnxt", encode.proptype("Shift"), envir=env) return(FALSE) }, "Exit Debugger"=function(env, xy) { assign("inxt", -1L, envir=env) return(FALSE) })) dataclickfun <- function(env, xy) { # function for handling clicks in the data window z <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) midX <- get("midX", envir=env) ce <- ce + (unlist(xy) - midX)/z assign("zoomcentre", ce, envir=env) return(TRUE) } dataredrawfun <- function(button, name, env) { # redraw data window X <- get("X", envir=env) BX <- get("BX", envir=env) W <- get("W", envir=env) midX <- get("midX", envir=env) z <- get("zoomfactor", envir=env) ce <- get("zoomcentre", envir=env) scaleX <- shift(affine(shift(X, -ce), diag(c(z,z))), unlist(midX)) scaleW <- shift(affine(shift(W, -ce), diag(c(z,z))), unlist(midX)) scaleX <- scaleX[, BX] scaleW <- intersect.owin(scaleW, BX, fatal=FALSE) # redraw data in 'BX' if(!is.null(scaleW)) { if(z == 1 && is.rectangle(scaleW)) { plot(scaleW, add=TRUE, lwd=2) } else { plot(BX, add=TRUE, lty=3, border="red") if(!identical(BX, scaleW)) plot(scaleW, add=TRUE, invert=TRUE) } } if(!is.null(scaleX)) plot(scaleX, add=TRUE) invisible(NULL) } # functions to dump the current state, etc dumpfuns <- list( 
"Dump to file"=function(env, xy) { irep <- get("irep", envir=env) X <- get("X", envir=env) xname <- paste("dump", irep, sep="") assign(xname, X) fname <- paste(xname, ".rda", sep="") eval(substitute(save(x, file=y, compress=TRUE), list(x=xname, y=fname))) cat(paste("Saved to", sQuote(fname), "\n")) return(TRUE) }, "Print Info"=function(env, xy) { info <- get("info", envir=env) will.accept <- get("accepted", envir=env) with(info, { cat(paste("Iteration", irep, "\n")) cat("Simulation window:\n") print(Wsim) cat("Clipping window:\n") print(Wclip) cat("Current state:\n") print(X) propname <- decode.proptype(proptype) cat(paste("Proposal type:", propname, "\n")) prxy <- function(z) paren(paste(z, collapse=", ")) switch(propname, Reject = { }, Birth = { cat(paste("Birth of new point at location", prxy(proplocn), "\n")) }, Death = { Xi <- X[propindx] cat(paste("Death of data point", propindx, "located at", prxy(as.numeric(coords(Xi))), "\n")) }, Shift = { Xi <- X[propindx] cat(paste("Shift data point", propindx, "from current location", prxy(as.numeric(coords(Xi))), "to new location", prxy(proplocn), "\n")) }) cat(paste("Hastings ratio = ", numerator, "/", denominator, "=", numerator/denominator, "\n")) cat(paste("Fate of proposal:", if(will.accept) "Accepted" else "Rejected", "\n")) return(TRUE) }) }) # function to determine return value snoopexit <- function(env) { ans <- eval(quote(list(inxt=inxt, tnxt=tnxt, accepted=accepted)), envir=env) return(ans) } testit <- function() { rmhsnoop(Wsim=owin(), Wclip=square(0.7), R=0.1, xcoords=runif(40), ycoords=runif(40), mlevels=NULL, mcodes=NULL, irep=3, itype=1, proptype=1, proplocn=c(0.5, 0.5), propmark=0, propindx=0, numerator=42, denominator=24) } rmhsnoop }) spatstat/R/fiksel.R0000755000176000001440000001214712237642727013772 0ustar ripleyusers# # # fiksel.R # # $Revision: 1.8 $ $Date: 2012/07/14 06:36:26 $ # # Fiksel interaction # # ee Stoyan Kendall Mcke 1987 p 161 # # 
------------------------------------------------------------------- # Fiksel <- local({ # ......... auxiliary functions ........... fikselterms <- function(U, X, r, kappa, EqualPairs=NULL) { answer <- crossfikselterms(U, X, r, kappa) nU <- npoints(U) # subtract contrinbutions from identical pairs (exp(-0) = 1 for each) if(length(EqualPairs) > 0) { idcount <- as.integer(table(factor(EqualPairs[,2], levels=1:nU))) answer <- answer - idcount } return(answer) } crossfikselterms <- function(X, Y, r, kappa) { stopifnot(is.numeric(r)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine DUP <- spatstat.options("dupC") out <- .C("Efiksel", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), rrmax = as.double(r), kkappa = as.double(kappa), values = as.double(double(nX)), DUP=DUP) # PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$values return(answer) } # ........ template object .............. 
BlankFiksel <- list( name = "Fiksel process", creator = "Fiksel", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- (d <= par$r) * exp( - d * par$kappa) v[ d <= par$hc ] <- (-Inf) v }, par = list(r = NULL, hc = NULL, kappa=NULL), # filled in later parnames = c("interaction distance", "hard core distance", "rate parameter"), init = function(self) { r <- self$par$r hc <- self$par$hc kappa <- self$par$kappa if(!is.numeric(hc) || length(hc) != 1 || hc <= 0) stop("hard core distance hc must be a positive number") if(!is.numeric(r) || length(r) != 1 || r <= hc) stop("interaction distance r must be a number greater than hardcore dstance hc") if(!is.numeric(kappa) || length(kappa) != 1) stop("rate parameter kappa must be a single number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { a <- as.numeric(coeffs[1]) return(list(param=list(a=a), inames="interaction strength a", printable=round(a,2))) }, valid = function(coeffs, self) { a <- (self$interpret)(coeffs, self)$param$a return(is.finite(a)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) hc <- self$par$hc if(hc > 0) return(Hardcore(hc)) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r hc <- self$par$hc if(any(is.na(coeffs))) return(r) a <- coeffs[1] if(abs(a) <= epsilon) return(hc) else return(r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) 
{ # fast evaluator for Fiksel interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Fiksel") r <- potpars$r hc <- potpars$hc kappa <- potpars$kappa hclose <- strausscounts(U, X, hc, EqualPairs) fikselbit <- fikselterms(U, X, r, kappa, EqualPairs) answer <- ifelseXB(hclose == 0, fikselbit, -Inf) return(matrix(answer, ncol=1)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral a <- as.numeric(coeffs[1]) r <- self$par$r hc <- self$par$hc kappa <- self$par$kappa f <- function(x, kappa, a){ 2 * pi * x * (1 - exp(a * exp(-x * kappa))) } hardbit <- integrate(f=f, lower=hc, upper=r, a=a, kappa=kappa) mess <- hardbit[["message"]] if(!identical(mess, "OK")) { warning(mess) return(NA) } return(pi * hc^2 + hardbit$value) } ) class(BlankFiksel) <- "interact" Fiksel <- function(r, hc, kappa) { instantiate.interact(BlankFiksel, list(r = r, hc = hc, kappa=kappa)) } Fiksel }) spatstat/R/fitted.ppm.R0000755000176000001440000000513712237642727014570 0ustar ripleyusers# # fitted.ppm.R # # method for 'fitted' for ppm objects # # $Revision: 1.11 $ $Date: 2013/11/08 15:56:58 $ # fitted.ppm <- function(object, ..., type="lambda", dataonly=FALSE, new.coef=NULL, drop=FALSE, check=TRUE, repair=TRUE) { verifyclass(object, "ppm") if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } fitcoef <- coef(object) if(!is.null(new.coef)) { # validate if(length(new.coef) != length(fitcoef)) stop(paste("Argument new.coef has wrong length", length(new.coef), ": should be", length(fitcoef))) coeffs <- new.coef } else { coeffs <- fitcoef } uniform <- is.poisson.ppm(object) && no.trend.ppm(object) typelist <- c("lambda", "cif", "trend") typevalu <- c("lambda", "lambda", "trend") if(is.na(m <- pmatch(type, typelist))) stop(paste("Unrecognised choice 
of ", sQuote("type"), ": ", sQuote(type), sep="")) type <- typevalu[m] if(uniform) { lambda <- exp(coeffs[[1]]) Q <- quad.ppm(object, drop=drop) lambda <- rep.int(lambda, n.quad(Q)) } else { glmdata <- getglmdata(object, drop=drop) glmfit <- getglmfit(object) Vnames <- object$internal$Vnames interacting <- (length(Vnames) != 0) # Modification of `glmdata' may be required if(interacting) switch(type, trend={ # zero the interaction statistics glmdata[ , Vnames] <- 0 }, lambda={ # Find any dummy points with zero conditional intensity forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf) # exclude from predict.glm glmdata <- glmdata[!forbid, ] }) # Compute predicted [conditional] intensity values changecoef <- !is.null(new.coef) || (object$method != "mpl") lambda <- GLMpredict(glmfit, glmdata, coeffs, changecoef=changecoef) # Note: the `newdata' argument is necessary in order to obtain # predictions at all quadrature points. If it is omitted then # we would only get predictions at the quadrature points j # where glmdata$SUBSET[j]=TRUE. Assuming drop=FALSE. if(interacting && type=="lambda") { # reinsert zeroes lam <- numeric(length(forbid)) lam[forbid] <- 0 lam[!forbid] <- lambda lambda <- lam } } if(dataonly) lambda <- lambda[is.data(quad.ppm(object))] return(lambda) } spatstat/R/density.psp.R0000755000176000001440000000203712237642727014772 0ustar ripleyusers# # # density.psp.R # # $Revision: 1.5 $ $Date: 2011/05/18 01:42:11 $ # # density.psp <- function(x, sigma, ..., edge=TRUE) { verifyclass(x, "psp") w <- x$window n <- x$n if(missing(sigma)) sigma <- 0.1 * diameter(w) w <- as.mask(w, ...) 
len <- lengths.psp(x) if(n == 0 || all(len == 0)) return(as.im(0, w)) # ang <- angles.psp(x, directed=TRUE) coz <- cos(ang) zin <- sin(ang) xx <- as.vector(raster.x(w)) yy <- as.vector(raster.y(w)) # compute matrix contribution from each segment for(i in seq_len(n)) { en <- x$ends[i,] dx <- xx - en$x0 dy <- yy - en$y0 u1 <- dx * coz[i] + dy * zin[i] u2 <- - dx * zin[i] + dy * coz[i] value <- dnorm(u2, sd=sigma) * (pnorm(u1, sd=sigma) - pnorm(u1-len[i], sd=sigma)) totvalue <- if(i == 1) value else (value + totvalue) } dens <- im(totvalue, w$xcol, w$yrow) if(edge) { edg <- second.moment.calc(midpoints.psp(x), sigma, what="edge", ...) dens <- eval.im(dens/edg) } dens <- dens[x$window, drop=FALSE] return(dens) } spatstat/R/plot.fv.R0000755000176000001440000005476512237642727014121 0ustar ripleyusers# # plot.fv.R (was: conspire.S) # # $Revision: 1.94 $ $Date: 2013/10/03 03:35:23 $ # # conspire <- function(...) { .Deprecated("plot.fv", package="spatstat") plot.fv(...) } plot.fv <- function(x, fmla, ..., subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=NULL, shadecol="grey", add=FALSE, log="", limitsonly=FALSE) { xname <- if(is.language(substitute(x))) short.deparse(substitute(x)) else "" force(legendavoid) if(is.null(legend)) legend <- !add verifyclass(x, "fv") env.user <- parent.frame() indata <- as.data.frame(x) xlogscale <- (log %in% c("x", "xy", "yx")) ylogscale <- (log %in% c("y", "xy", "yx")) if(missing(shade) && !is.null(defaultshade <- attr(x, "shade"))) shade <- defaultshade # ---------------- determine plot formula ---------------- defaultplot <- missing(fmla) || is.null(fmla) if(defaultplot) fmla <- formula(x) # This *is* the last possible moment, so... 
fmla <- as.formula(fmla, env=env.user) # validate the variable names vars <- variablesinformula(fmla) reserved <- c(".", ".x", ".y") external <- !(vars %in% c(colnames(x), reserved)) if(any(external)) { sought <- vars[external] found <- unlist(lapply(sought, exists, envir=env.user, mode="numeric")) if(any(!found)) { nnot <- sum(!found) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!found])), ngettext(nnot, "was", "were"), "not found")) } else { # validate the found variables externvars <- lapply(sought, get, envir=env.user) ok <- unlist(lapply(externvars, function(z, n) { is.numeric(z) && length(z) %in% c(1,n) }, n=nrow(x))) if(!all(ok)) { nnot <- sum(!ok) stop(paste(ngettext(nnot, "Variable", "Variables"), commasep(sQuote(sought[!ok])), ngettext(nnot, "is", "are"), "not of the right format")) } } } # Extract left hand side as given lhs.original <- fmla[[2]] fmla.original <- fmla # expand "." dotnames <- fvnames(x, ".") u <- if(length(dotnames) == 1) as.name(dotnames) else { as.call(lapply(c("cbind", dotnames), as.name)) } ux <- as.name(fvnames(x, ".x")) uy <- as.name(fvnames(x, ".y")) fmla <- eval(substitute(substitute(fom, list(.=u, .x=ux, .y=uy)), list(fom=fmla))) # ------------------- extract data for plot --------------------- # extract LHS and RHS of formula lhs <- fmla[[2]] rhs <- fmla[[3]] # extract data lhsdata <- eval(lhs, envir=indata) rhsdata <- eval(rhs, envir=indata) # reformat if(is.vector(lhsdata)) { lhsdata <- matrix(lhsdata, ncol=1) lhsvars <- all.vars(as.expression(lhs)) lhsvars <- lhsvars[lhsvars %in% names(x)] colnames(lhsdata) <- if(length(lhsvars) == 1) lhsvars else paste(short.deparse(lhs), collapse="") } # check lhs names exist lnames <- colnames(lhsdata) nc <- ncol(lhsdata) lnames0 <- paste("V", seq_len(nc), sep="") if(length(lnames) != nc) colnames(lhsdata) <- lnames0 else if(any(uhoh <- !nzchar(lnames))) colnames(lhsdata)[uhoh] <- lnames0[uhoh] lhs.names <- colnames(lhsdata) # check whether each lhs column is 
associated with a single column of 'x' # that is one of the alternative versions of the function. # This is a bit mysterious as it depends on the # column names assigned to lhsdata by eval() nmatches <- function(a, b) { sum(all.vars(parse(text=a)) %in% b) } nstar <- unlist(lapply(lhs.names, nmatches, b=fvnames(x, "*"))) ndot <- unlist(lapply(lhs.names, nmatches, b=dotnames)) explicit.lhs.names <- ifelse(ndot == 1 & nstar == ndot, lhs.names, "") # check rhs data if(is.matrix(rhsdata)) stop("rhs of formula should yield a vector") rhsdata <- as.numeric(rhsdata) nplots <- ncol(lhsdata) allind <- 1:nplots # ---------- extra plots may be implied by 'shade' ----------------- extrashadevars <- NULL if(!is.null(shade)) { # select columns by name or number names(allind) <- explicit.lhs.names shind <- try(allind[shade]) if(inherits(shind, "try-error")) stop("The argument shade should be a valid subset index for columns of x") if(any(nbg <- is.na(shind))) { # columns not included in formula: add them morelhs <- try(as.matrix(indata[ , shade[nbg], drop=FALSE])) if(inherits(morelhs, "try-error")) stop("The argument shade should be a valid subset index for columns of x") nmore <- ncol(morelhs) extrashadevars <- colnames(morelhs) if(defaultplot) { success <- TRUE } else if("." %in% variablesinformula(fmla.original)) { # evaluate lhs of formula, expanding "." 
to shade names u <- if(length(extrashadevars) == 1) as.name(extrashadevars) else { as.call(lapply(c("cbind", extrashadevars), as.name)) } ux <- as.name(fvnames(x, ".x")) uy <- as.name(fvnames(x, ".y")) foo <- eval(substitute(substitute(fom, list(.=u, .x=ux, .y=uy)), list(fom=fmla.original))) lhsnew <- foo[[2]] morelhs <- eval(lhsnew, envir=indata) success <- identical(colnames(morelhs), extrashadevars) } else { success <- FALSE } if(success) { # add these columns to the plotting data lhsdata <- cbind(lhsdata, morelhs) shind[nbg] <- nplots + seq_len(nmore) extendifvector <- function(a, n, nmore) { if(is.null(a)) return(a) if(length(a) == 1) return(a) return(c(a, rep(a[1], nmore))) } lty <- extendifvector(lty, nplots, nmore) col <- extendifvector(col, nplots, nmore) lwd <- extendifvector(lwd, nplots, nmore) nplots <- nplots + nmore } else { # cannot add columns warning(paste("Shade", ngettext(sum(nbg), "column", "columns"), commasep(sQuote(shade[nbg])), "were missing from the plot formula, and were omitted")) shade <- NULL extrashadevars <- NULL } } } # -------------------- determine plotting limits ---------------------- # restrict data to subset if desired if(!is.null(subset)) { keep <- if(is.character(subset)) eval(parse(text=subset), envir=indata) else eval(subset, envir=indata) lhsdata <- lhsdata[keep, , drop=FALSE] rhsdata <- rhsdata[keep] } # determine x and y limits and clip data to these limits if(is.null(xlim) && add) { # x limits are determined by existing plot xlim <- par("usr")[1:2] } if(!is.null(xlim)) { ok <- (xlim[1] <= rhsdata & rhsdata <= xlim[2]) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { # if we're using the default argument, use its recommended range if(rhs == fvnames(x, ".x")) { xlim <- attr(x, "alim") if(xlogscale && xlim[1] <= 0) xlim[1] <- min(rhsdata[is.finite(rhsdata) & rhsdata > 0], na.rm=TRUE) rok <- is.finite(rhsdata) & rhsdata >= xlim[1] & rhsdata <= xlim[2] lok <- apply(is.finite(lhsdata), 1, any) ok <- lok & 
rok rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] } else { # actual range of values to be plotted rok <- is.finite(rhsdata) lok <- apply(is.finite(lhsdata), 1, any) ok <- lok & rok if(xlogscale) ok <- ok & (rhsdata > 0) & apply(lhsdata > 0, 1, any) rhsdata <- rhsdata[ok] lhsdata <- lhsdata[ok, , drop=FALSE] xlim <- range(rhsdata) } } if(is.null(ylim)) { yok <- is.finite(lhsdata) if(ylogscale) yok <- yok & (lhsdata > 0) ylim <- range(lhsdata[yok],na.rm=TRUE) } if(!is.null(ylim.covers)) ylim <- range(ylim, ylim.covers) # return x, y limits only? if(limitsonly) return(list(xlim=xlim, ylim=ylim)) # ------------- work out how to label the plot -------------------- # extract plot labels labl <- attr(x, "labl") # expand plot labels if(!is.null(fname <- attr(x, "fname"))) labl <- sprintf(labl, fname) # create plot label map (key -> algebraic expression) map <- fvlabelmap(x) # ......... label for x axis .................. if(is.null(xlab)) { argname <- fvnames(x, ".x") if(as.character(fmla)[3] == argname) { # the x axis is the default function argument. # Add name of unit of length ax <- summary(unitname(x))$axis xlab <- if(!is.null(ax)) paste(argname, ax) else as.expression(as.name(argname)) } else { # map ident to label xlab <- eval(substitute(substitute(rh, mp), list(rh=rhs, mp=map))) } } if(is.language(xlab) && !is.expression(xlab)) xlab <- as.expression(xlab) # ......... label for y axis ................... leftside <- lhs.original if(ncol(lhsdata) > 1 || length(dotnames) == 1) { # For labelling purposes only, simplify the LHS by # replacing 'cbind(.....)' by '.' # even if not all columns are included. leftside <- paste(as.expression(leftside)) eln <- explicit.lhs.names eln <- eln[nzchar(eln)] cb <- if(length(eln) == 1) eln else { paste("cbind(", paste(eln, collapse=", "), ")", sep="") } compactleftside <- gsub(cb, ".", leftside, fixed=TRUE) # Separately expand "." to cbind(.....) 
and ".x", ".y" to their real names dotdot <- c(dotnames, extrashadevars) cball <- if(length(dotdot) == 1) dotdot else { paste("cbind(", paste(dotdot, collapse=", "), ")", sep="") } expandleftside <- gsub(".x", fvnames(x, ".x"), leftside, fixed=TRUE) expandleftside <- gsub(".y", fvnames(x, ".y"), expandleftside, fixed=TRUE) expandleftside <- gsubdot(cball, expandleftside) # convert back to language compactleftside <- parse(text=compactleftside)[[1]] expandleftside <- parse(text=expandleftside)[[1]] } else { compactleftside <- expandleftside <- leftside } # construct label for y axis if(is.null(ylab)) { yl <- attr(x, "yexp") if(defaultplot && !is.null(yl)) { ylab <- yl } else { # replace "." and short identifiers by plot labels ylab <- eval(substitute(substitute(le, mp), list(le=compactleftside, mp=map))) } } if(is.language(ylab) && !is.expression(ylab)) ylab <- as.expression(ylab) # ------------------ start plotting --------------------------- # create new plot if(!add) do.call("plot.default", resolve.defaults(list(xlim, ylim, type="n", log=log), list(xlab=xlab, ylab=ylab), list(...), list(main=xname))) # handle 'type' = "n" giventype <- resolve.defaults(list(...), list(type=NA))$type if(identical(giventype, "n")) return(invisible(NULL)) # process lty, col, lwd arguments fixit <- function(a, n, a0, a00) { if(is.null(a)) a <- if(!is.null(a0)) a0 else a00 if(length(a) == 1) return(rep.int(a, n)) else if(length(a) != n) stop(paste("Length of", short.deparse(substitute(a)), "does not match number of curves to be plotted")) else return(a) } opt0 <- spatstat.options("par.fv") lty <- fixit(lty, nplots, opt0$lty, 1:nplots) col <- fixit(col, nplots, opt0$col, 1:nplots) lwd <- fixit(lwd, nplots, opt0$lwd, 1) if(!is.null(shade)) { # shade region between critical boundaries # extract relevant columns for shaded bands shdata <- lhsdata[, shind] if(!is.matrix(shdata) || ncol(shdata) != 2) stop("The argument shade should select two columns of x") # determine plot limits for shaded 
bands shdata1 <- shdata[,1] shdata2 <- shdata[,2] rhsOK <- is.finite(rhsdata) shade1OK <- rhsOK & is.finite(shdata1) shade2OK <- rhsOK & is.finite(shdata2) shadeOK <- shade1OK & shade2OK # work out which one is the upper limit up1 <- all(shdata1[shadeOK] > shdata2[shadeOK]) # half-infinite intervals if(!is.null(ylim)) { shdata1[shade2OK & !shade1OK] <- if(up1) ylim[2] else ylim[1] shdata2[shade1OK & !shade2OK] <- if(up1) ylim[1] else ylim[2] shadeOK <- shade1OK | shade2OK } # plot grey polygon xpoly <- c(rhsdata[shadeOK], rev(rhsdata[shadeOK])) ypoly <- c(shdata1[shadeOK], rev(shdata2[shadeOK])) polygon(xpoly, ypoly, border=shadecol, col=shadecol) # overwrite graphical parameters lty[shind] <- 1 # try to preserve the same type of colour specification if(is.character(col) && is.character(shadecol)) { # character representations col[shind] <- shadecol } else if(is.numeric(col) && !is.na(sc <- paletteindex(shadecol))) { # indices in colour palette col[shind] <- sc } else { # convert colours to hexadecimal and edit relevant values col <- col2hex(col) col[shind] <- col2hex(shadecol) } # remove these columns from further plotting allind <- allind[-shind] # } else xpoly <- ypoly <- numeric(0) # ----------------- plot lines ------------------------------ for(i in allind) lines(rhsdata, lhsdata[,i], lty=lty[i], col=col[i], lwd=lwd[i]) if(nplots == 1) return(invisible(NULL)) # ---------------- determine legend ------------------------- key <- colnames(lhsdata) mat <- match(key, names(x)) keyok <- !is.na(mat) matok <- mat[keyok] legdesc <- rep.int("constructed variable", length(key)) legdesc[keyok] <- attr(x, "desc")[matok] leglabl <- lnames0 leglabl[keyok] <- labl[matok] ylab <- attr(x, "ylab") if(!is.null(ylab)) { if(is.language(ylab)) ylab <- deparse(ylab) legdesc <- sprintf(legdesc, ylab) } # compute legend info legtxt <- key if(legendmath) { legtxt <- leglabl if(defaultplot) { # try to convert individual labels to expressions fancy <- try(parse(text=leglabl), 
assemble.plot.objects <- function(xlim, ylim, ..., lines=NULL, polygon=NULL) {
  ## Convert the data that would have been supplied to the graphics commands
  ## 'lines' and 'polygon' into spatial objects (psp segment patterns and an
  ## owin polygonal window) so their positions can be reasoned about
  ## geometrically, e.g. when searching for an empty spot to place a legend.
  geoms <- list()
  if(!is.null(lines)) {
    if(is.psp(lines)) {
      ## already a segment pattern: use it as-is
      geoms <- list(lines)
    } else {
      ## accept a single list(x,y), or a list of such lists
      if(checkfields(lines, c("x", "y"))) {
        lines <- list(lines)
      } else if(!all(unlist(lapply(lines, checkfields, L=c("x", "y")))))
        stop("lines should be a psp object, a list(x,y) or a list of list(x,y)")
      W <- owin(xlim, ylim)
      for(i in seq_along(lines)) {
        curve.i <- lines[[i]]
        xs <- curve.i$x
        ys <- curve.i$y
        npt <- length(xs)
        if(length(ys) != npt)
          stop(paste(paste("In lines[[", i, "]]", sep=""),
                     "the vectors x and y have unequal length"))
        ## discard non-finite vertices before forming segments
        finite <- is.finite(xs) & is.finite(ys)
        if(!all(finite)) {
          xs <- xs[finite]
          ys <- ys[finite]
          npt <- sum(finite)
        }
        ## join consecutive vertices into a segment pattern
        seg.i <- psp(xs[-npt], ys[-npt], xs[-1], ys[-1], W, check=FALSE)
        geoms <- append(geoms, list(seg.i))
      }
    }
  }
  if(!is.null(polygon)) {
    ## filled polygon: clean coordinates, ensure positive (anticlockwise)
    ## orientation, then convert to a polygonal window
    pol <- polygon[c("x", "y")]
    finite <- with(pol, is.finite(x) & is.finite(y))
    if(!all(finite))
      pol <- with(pol, list(x=x[finite], y=y[finite]))
    if(area.xypolygon(pol) < 0)
      pol <- lapply(pol, rev)
    ## owin() may reject a self-intersecting polygon; skip it in that case
    P <- try(owin(poly=pol, xrange=xlim, yrange=ylim, check=FALSE))
    if(!inherits(P, "try-error"))
      geoms <- append(geoms, list(P))
  }
  return(geoms)
}
thefunction <- function(objects, show=FALSE, aspect=1, bdryok=TRUE, preference="float", verbose=FALSE, legendspec=NULL) { # find bounding box W <- do.call("bounding.box", lapply(objects, as.rectangle)) # convert to common box objects <- lapply(objects, rebound, rect=W) # comp # rescale x and y axes so that bounding box has aspect ratio 'aspect' aspectW <- with(W, diff(yrange)/diff(xrange)) s <- aspect/aspectW mat <- diag(c(1, s)) invmat <- diag(c(1, 1/s)) scaled.objects <- lapply(objects, affine, mat=mat) scaledW <- affine(W, mat=mat) if(verbose) { cat("Scaled space:\n") print(scaledW) } # pixellate the scaled objects asma <- function(z) { if(is.owin(z)) as.mask(z) else if(is.psp(z)) as.mask.psp(z) else NULL } pix.scal.objects <- lapply(scaled.objects, asma) # apply distance transforms in scaled space D1 <- distmap(pix.scal.objects[[1]]) Dlist <- lapply(pix.scal.objects, distmap, xy=list(x=D1$xcol, y=D1$yrow)) # distance transform of superposition D <- Reduce(function(A,B){ eval.im(pmin.int(A,B)) }, Dlist) if(!bdryok) { # include distance to boundary B <- attr(D1, "bdry") D <- eval.im(pmin.int(D, B)) } if(show) { plot(affine(D, mat=invmat), add=TRUE) lapply(lapply(scaled.objects, affine, mat=invmat), plot, add=TRUE) } if(preference != "float") { # evaluate preferred location (check for collision) if(!is.null(legendspec)) { # pretend to plot the legend as specified legout <- do.call("legend", append(legendspec, list(plot=FALSE))) # determine bounding box legbox <- with(legout$rect, owin(c(left, left+w), c(top-h, top))) scaledlegbox <- affine(legbox, mat=mat) # check for collision Dmin <- min(D[scaledlegbox]) if(Dmin >= 0.02) { # no collision: stay at preferred location. Exit. return(list(x=preference)) } # collision occurred! } else { # no legend information. 
pairdist.ppx <- function(X, ...) {
  ## Matrix of pairwise Euclidean distances between the points of a
  ## multidimensional point pattern (class "ppx").
  ## Extra arguments '...' are passed to coords() to select coordinates.
  verifyclass(X, "ppx")
  ## coordinates as an n x d numeric matrix
  cm <- as.matrix(coords(X, ...))
  if(nrow(cm) == 0) {
    ## empty pattern: return a 0 x 0 matrix
    return(matrix(numeric(0), nrow=0, ncol=0))
  }
  ## stats::dist gives the lower triangle; expand to a full symmetric matrix
  as.matrix(dist(cm))
}
nndist.ppx <- function(X, ..., k=1) {
  # Nearest-neighbour distances for a multidimensional point pattern (ppx).
  # k may be a single positive integer or a vector of positive integers;
  # returns a vector (single k) or a matrix with one column per entry of k.
  verifyclass(X, "ppx")
  # extract point coordinates; '...' is passed to coords()
  coo <- as.matrix(coords(X, ...))
  n <- nrow(coo)
  m <- ncol(coo)
  if(m == 0) {
    # degenerate case: points have no coordinates at all
    warning("nndist.ppx: Zero-dimensional coordinates: returning NA")
    if(length(k) == 1)
      return(rep.int(NA_real_, n))
    else
      return(matrix(NA_real_, n, length(k)))
  }
  # k can be a single integer or an integer vector
  if(length(k) == 0)
    stop("k is an empty vector")
  else if(length(k) == 1) {
    if(k != round(k) || k <= 0)
      stop("k is not a positive integer")
  } else {
    if(any(k != round(k)) || any(k <= 0))
      stop(paste("some entries of the vector",
                 sQuote("k"), "are not positive integers"))
  }
  k <- as.integer(k)
  kmax <- max(k)
  # trivial cases
  if(n <= 1) {
    # empty pattern, or pattern with only 1 point:
    # the k-th nearest neighbour does not exist, so distance is Inf
    # (for n == 0 the result degenerates to a zero-length vector/matrix)
    nnd <- matrix(Inf, nrow=n, ncol=kmax)
    nnd <- nnd[,k, drop=TRUE]
    return(nnd)
  }
  # number of neighbours that are well-defined (at most n-1 in a pattern of n)
  kmaxcalc <- min(n-1, kmax)
  # calculate k-nn distances for k <= kmaxcalc
  if(kmaxcalc == 1) {
    # calculate nearest neighbour distance only
    nnd <- numeric(n)
    # points are presented to the C code sorted by first coordinate;
    # presumably the C routine exploits this ordering -- TODO confirm
    o <- fave.order(coo[,1])
    big <- sqrt(.Machine$double.xmax)
    DUP <- spatstat.options("dupC")
    Cout <- .C("nndMD",
               n= as.integer(n),
               m=as.integer(m),
               x= as.double(t(coo[o,])),
               nnd= as.double(nnd),
               as.double(big),
               DUP=DUP)
    #          PACKAGE="spatstat")
    # undo the sorting: map results back to the original point order
    nnd[o] <- Cout$nnd
  } else {
    # case kmaxcalc > 1: compute distances to the 1st..kmaxcalc-th neighbours
    nnd <- numeric(n * kmaxcalc)
    o <- fave.order(coo[,1])
    big <- sqrt(.Machine$double.xmax)
    DUP <- spatstat.options("dupC")
    Cout <- .C("knndMD",
               n = as.integer(n),
               m = as.integer(m),
               kmax = as.integer(kmaxcalc),
               x = as.double(t(coo[o,])),
               nnd = as.double(nnd),
               huge = as.double(big),
               DUP=DUP)
    #          PACKAGE="spatstat")
    nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc)
    # C output is row-major per point; reshape and restore original order
    nnd[o, ] <- matrix(Cout$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE)
  }
  # post-processing
  if(kmax > kmaxcalc) {
    # requested neighbours beyond n-1 do not exist: add columns of Inf's
    infs <- matrix(as.numeric(Inf), nrow=n, ncol=kmax-kmaxcalc)
    nnd <- cbind(nnd, infs)
  }
  if(length(k) < kmax) {
    # select only the specified columns
    nnd <- nnd[, k, drop=TRUE]
  }
  return(nnd)
}
########################################################
#  clipping operation (for subset)
########################################################

clip.psp <- function(x, window, check=TRUE) {
  # Clip a line segment pattern 'x' (class "psp") to a window (class "owin").
  # Dispatches on the window type; clipping to binary masks is not supported.
  verifyclass(x, "psp")
  verifyclass(window, "owin")
  if(check && !is.subset.owin(window, x$window))
    warning("The clipping window is not a subset of the window containing the line segment pattern x")
  if(x$n == 0) {
    # no segments: return an empty pattern in the new window
    emptypattern <- psp(numeric(0), numeric(0), numeric(0), numeric(0),
                        window=window, marks=x$marks)
    return(emptypattern)
  }
  switch(window$type,
         rectangle=cliprect.psp(x, window),
         polygonal=clippoly.psp(x, window),
         mask=stop("sorry, clipping is not implemented for masks"))
}

#####
#
#  clipping to a rectangle
#
cliprect.psp <- function(x, window) {
  # Clip segments of 'x' to a rectangular window, by clipping to the
  # x-range and then the y-range, using parametric coordinates along
  # each segment. Marks are carried along with the surviving pieces.
  verifyclass(x, "psp")
  verifyclass(window, "owin")
  ends <- x$ends
  marx <- marks(x, dfok=TRUE)
  # find segments which are entirely inside the window
  # (by convexity)
  in0 <- inside.owin(ends$x0, ends$y0, window)
  in1 <- inside.owin(ends$x1, ends$y1, window)
  ok <- in0 & in1
  # if all segments are inside, return them
  if(all(ok))
    return(as.psp(ends, window=window, marks=marx, check=FALSE))
  # otherwise, store those segments which are inside the window
  ends.inside <- ends[ok, , drop=FALSE]
  marks.inside <- marx %msub% ok
  x.inside <- as.psp(ends.inside, window=window, marks=marks.inside, check=FALSE)
  # now consider the rest
  ends <- ends[!ok, , drop=FALSE]
  in0 <- in0[!ok]
  in1 <- in1[!ok]
  marx <- marx %msub% (!ok)
  # first clip segments to the range x \in [xmin, xmax]
  # use parametric coordinates
  small <- function(x) { abs(x) <= .Machine$double.eps }
  # tvalue: parameter t in [0,1] at which the segment (z0 -> z1)
  # crosses the level zt; NA if the crossing lies outside the segment
  tvalue <- function(z0, z1, zt) {
    y1 <- z1 - z0
    yt <- zt - z0
    tval <- ifelseAX(small(y1), 0.5, yt/y1)
    betwee <- (yt * (zt - z1)) <= 0
    result <- ifelseXB(betwee, tval, NA)
    return(result)
  }
  between <- function(x, r) { ((x-r[1]) * (x-r[2])) <= 0 }
  # columns: t=0 if start inside range, t=1 if end inside range,
  # and the crossing parameters at the two range limits
  tx <- cbind(ifelse0NA(between(ends$x0, window$xrange)),
              ifelse1NA(between(ends$x1, window$xrange)),
              tvalue(ends$x0, ends$x1, window$xrange[1]),
              tvalue(ends$x0, ends$x1, window$xrange[2]))
  # discard segments which do not lie in the x range
  # (a segment intersecting the range yields at least 2 valid t values)
  nx <- apply(!is.na(tx), 1, sum)
  ok <- (nx >= 2)
  if(!any(ok))
    return(x.inside)
  ends <- ends[ok, , drop=FALSE]
  tx <- tx[ok, , drop=FALSE]
  in0 <- in0[ok]
  in1 <- in1[ok]
  marx <- marx %msub% ok
  # Clip the segments to the x range
  tmin <- apply(tx, 1, min, na.rm=TRUE)
  tmax <- apply(tx, 1, max, na.rm=TRUE)
  dx <- ends$x1 - ends$x0
  dy <- ends$y1 - ends$y0
  ends.xclipped <- data.frame(x0=ends$x0 + tmin * dx,
                              y0=ends$y0 + tmin * dy,
                              x1=ends$x0 + tmax * dx,
                              y1=ends$y0 + tmax * dy)
  # Now clip the segments to the range y \in [ymin, ymax]
  ends <- ends.xclipped
  in0 <- inside.owin(ends$x0, ends$y0, window)
  in1 <- inside.owin(ends$x1, ends$y1, window)
  ty <- cbind(ifelse0NA(in0),
              ifelse1NA(in1),
              tvalue(ends$y0, ends$y1, window$yrange[1]),
              tvalue(ends$y0, ends$y1, window$yrange[2]))
  # discard segments which do not lie in the y range
  ny <- apply(!is.na(ty), 1, sum)
  ok <- (ny >= 2)
  if(!any(ok))
    return(x.inside)
  ends <- ends[ok, , drop=FALSE]
  ty <- ty[ok, , drop=FALSE]
  in0 <- in0[ok]
  in1 <- in1[ok]
  marx <- marx %msub% ok
  # Clip the segments to the y range
  tmin <- apply(ty, 1, min, na.rm=TRUE)
  tmax <- apply(ty, 1, max, na.rm=TRUE)
  dx <- ends$x1 - ends$x0
  dy <- ends$y1 - ends$y0
  ends.clipped <- data.frame(x0=ends$x0 + tmin * dx,
                             y0=ends$y0 + tmin * dy,
                             x1=ends$x0 + tmax * dx,
                             y1=ends$y0 + tmax * dy)
  marks.clipped <- marx
  # OK - segments clipped
  # Put them together with the unclipped ones
  ends.all <- rbind(ends.inside, ends.clipped)
  marks.all <- marks.inside %mapp% marks.clipped
  as.psp(ends.all, window=window, marks=marks.all, check=FALSE)
}

############################
#
#   clipping to a polygonal window
#
clippoly.psp <- function(s, window) {
  # Clip segments of 's' to a polygonal window: chop each segment at its
  # intersections with the window boundary (found in C), then keep the
  # pieces whose midpoints fall inside the window.
  verifyclass(s, "psp")
  verifyclass(window, "owin")
  stopifnot(window$type == "polygonal")
  marx <- marks(s)
  has.marks <- !is.null(marx)
  eps <- .Machine$double.eps
  # find the intersection points between segments and window edges
  ns <- s$n
  es <- s$ends
  x0s <- es$x0
  y0s <- es$y0
  dxs <- es$x1 - es$x0
  dys <- es$y1 - es$y0
  bdry <- as.psp(window)
  nw <- bdry$n
  ew <- bdry$ends
  x0w <- ew$x0
  y0w <- ew$y0
  dxw <- ew$x1 - ew$x0
  dyw <- ew$y1 - ew$y0
  DUP <- spatstat.options("dupC")
  out <- .C("xysegint",
            na=as.integer(ns),
            x0a=as.double(x0s),
            y0a=as.double(y0s),
            dxa=as.double(dxs),
            dya=as.double(dys),
            nb=as.integer(nw),
            x0b=as.double(x0w),
            y0b=as.double(y0w),
            dxb=as.double(dxw),
            dyb=as.double(dyw),
            eps=as.double(eps),
            xx=as.double(numeric(ns * nw)),
            yy=as.double(numeric(ns * nw)),
            ta=as.double(numeric(ns * nw)),
            tb=as.double(numeric(ns * nw)),
            ok=as.integer(integer(ns * nw)),
            DUP=DUP)
  #         PACKAGE="spatstat")
  # ok[i,j]: segment i meets boundary edge j; ts[i,j]: parameter along segment i
  ok <- (matrix(out$ok, ns, nw) != 0)
  ts <- matrix(out$ta, ns, nw)
  # form all the chopped segments (whether in or out)
  chopped <- empty <- s[numeric(0)]
  chopped$window <- bounding.box(s$window, window)
  for(seg in seq_len(ns)) {
    segment <- s$ends[seg, , drop=FALSE]
    hit <- ok[seg, ]
    if(!any(hit)) {
      # no intersection with boundary - add single segment
      chopped$ends <- rbind(chopped$ends, segment)
      if(has.marks)
        chopped$marks <- (chopped$marks) %mapp% (marx %msub% seg)
    } else {
      # crosses boundary - add several pieces
      tvals <- ts[seg,]
      tvals <- sort(tvals[hit])
      x0 <- segment$x0
      dx <- segment$x1 - x0
      y0 <- segment$y0
      dy <- segment$y1 - y0
      # consecutive t values delimit the pieces; each piece inherits the
      # mark of the original segment
      newones <- data.frame(x0 = x0 + c(0,tvals) * dx,
                            y0 = y0 + c(0,tvals) * dy,
                            x1 = x0 + c(tvals,1) * dx,
                            y1 = y0 + c(tvals,1) * dy)
      chopped$ends <- rbind(chopped$ends, newones)
      if(has.marks) {
        hitmarks <- marx %msub% seg
        newmarks <- hitmarks %mrep% nrow(newones)
        chopped$marks <- (chopped$marks) %mapp% newmarks
      }
    }
  }
  chopped$n <- nrow(chopped$ends)
  # select those chopped segments which are inside the window
  # (a piece is either entirely inside or outside, so test its midpoint)
  mid <- midpoints.psp(chopped)
  ins <- inside.owin(mid$x, mid$y, window)
  retained <- chopped[ins]
  retained$window <- window
  return(retained)
}
markconnect <- function(X, i, j, r=NULL,
                        correction=c("isotropic", "Ripley", "translate"),
                        method="density", ..., normalise=FALSE) {
  ## Mark connection function p[ij](r) for a multitype point pattern:
  ## the mark correlation with test function f(m1,m2) = 1{m1 == i, m2 == j}.
  stopifnot(is.ppp(X) && is.multitype(X))
  if(missing(correction)) correction <- NULL
  marx <- marks(X)
  lev <- levels(marx)
  ## default to the first two types
  if(missing(i)) i <- lev[1]
  if(missing(j)) j <- lev[2]
  pairmatch <- function(m1, m2, i, j) { (m1 == i) & (m2 == j) }
  ## compute estimates via the generic mark correlation engine
  result <- markcorr(X, f=pairmatch, r=r,
                     correction=correction, method=method,
                     ..., fargs=list(i=i, j=j), normalise=normalise)
  ## adjust the theoretical (independent-marks) value:
  ## p_i p_j if unnormalised, 1 if normalised
  result$theo <- if(normalise) 1 else mean(marx==i) * mean(marx==j)
  ## relabel as p[i,j](r)
  result <- rebadge.fv(result,
                       substitute(p[i,j](r), list(i=paste(i),j=paste(j))),
                       sprintf("p[list(%s, %s)]", i, j),
                       new.yexp=substitute(p[list(i,j)](r),
                         list(i=paste(i),j=paste(j))))
  return(result)
}

Emark <- function(X, r=NULL,
                  correction=c("isotropic", "Ripley", "translate"),
                  method="density", ..., normalise=FALSE) {
  ## Conditional mean of the mark, E(r): mark correlation with
  ## test function f(m1,m2) = m1. Requires numeric marks.
  stopifnot(is.ppp(X) && is.marked(X) && is.numeric(marks(X)))
  if(missing(correction)) correction <- NULL
  firstmark <- function(m1, m2) { m1 }
  result <- markcorr(X, firstmark, r=r,
                     correction=correction, method=method,
                     ..., normalise=normalise)
  ## relabel as E(r)
  result <- rebadge.fv(result, substitute(E(r), NULL), "E")
  return(result)
}

Vmark <- function(X, r=NULL,
                  correction=c("isotropic", "Ripley", "translate"),
                  method="density", ..., normalise=FALSE) {
  ## Conditional variance of the mark, V(r) = E[M^2](r) - E[M](r)^2,
  ## optionally normalised by the marginal mark variance.
  if(missing(correction)) correction <- NULL
  ## first moment
  E <- Emark(X, r=r, correction=correction, method=method,
             ..., normalise=FALSE)
  ## second moment, evaluated at the same r values
  sqmark <- function(m1, m2) { m1^2 }
  E2 <- markcorr(X, sqmark, r=E$r,
                 correction=correction, method=method,
                 ..., normalise=FALSE)
  result <- eval.fv(E2 - E^2)
  if(normalise) {
    ## divide by the marginal variance of the marks
    result <- eval.fv(result/var(marks(X)))
  }
  ## relabel as V(r), keeping the column labels from E
  result <- rebadge.fv(result, substitute(V(r), NULL), "V")
  attr(result, "labl") <- attr(E, "labl")
  return(result)
}
markcorrint <- function(X, f=NULL, r=NULL,
                        correction=c("isotropic", "Ripley", "translate"),
                        ...,
                        f1=NULL, normalise=TRUE, returnL=FALSE, fargs=NULL) {
  # Mark-weighted K function: the analogue of Kest(X) where each pair
  # (x_i, x_j) is weighted by w(m_i, m_j).
  #
  # If the test function is multiplicative, w(u,v) = f1(u) f1(v);
  # otherwise w(u,v) = f(u,v).
  #
  # Arguments:
  #   X          marked point pattern (ppp); marks must not be a data frame
  #   f, f1      test function specification (see check.testfun)
  #   r          vector of distance values (optional)
  #   correction edge correction(s); border corrections are only valid
  #              for multiplicative test functions
  #   normalise  divide by E[f]^2 (or E[f]) so the Poisson value is pi r^2
  #   returnL    return the L-function transform sqrt(K/pi)
  #   fargs      extra arguments passed to f or f1
  # Returns an "fv" object.
  stopifnot(is.ppp(X) && is.marked(X))
  # marks(X, dfok=FALSE) errors if marks are a data frame; called for the
  # side effect of validation only
  is.marked(X, dfok=FALSE)
  # validate test function
  h <- check.testfun(f, f1, X)
  f <- h$f
  f1 <- h$f1
  ftype <- h$ftype
  multiplicative <- ftype %in% c("mul", "product")
  #
  # check corrections
  # (note: the unused local 'correction.given' has been removed; unlike in
  # markcorr(), it was never consulted in this function)
  if(is.null(correction))
    correction <- c("isotropic", "Ripley", "translate")
  correction <- pickoption("correction", correction,
                           c(none="none",
                             border="border",
                             "bord.modif"="bord.modif",
                             isotropic="isotropic",
                             Ripley="isotropic",
                             trans="translate",
                             translate="translate",
                             translation="translate",
                             best="best"),
                           multi=TRUE)
  isborder <- correction %in% c("border", "bord.modif")
  if(any(isborder) && !multiplicative) {
    # border correction requires w(u,v) = f1(u) f1(v)
    whinge <- paste("Border correction is not valid unless",
                    "test function is of the form f(u,v) = f1(u)*f1(v)")
    correction <- correction[!isborder]
    if(length(correction) == 0) stop(whinge) else warning(whinge)
  }
  # estimated intensity
  lambda <- X$n/area.owin(as.owin(X))
  mX <- marks(X)
  # reduce to a weighted Kinhom computation; the weights w(m_i,m_j)/lambda^2
  # are supplied via reciplambda (multiplicative case) or reciplambda2
  switch(ftype,
         mul={
           wt <- mX/lambda
           K <- Kinhom(X, reciplambda=wt, correction=correction, ...)
           Ef2 <- mean(mX)^2
         },
         equ={
           fXX <- outer(mX, mX, "==")
           wt <- fXX/lambda^2
           K <- Kinhom(X, reciplambda2=wt, correction=correction, ...)
           mtable <- table(mX)
           Ef2 <- sum(mtable^2)/length(mX)^2
         },
         product={
           f1X <- do.call(f1, append(list(mX), fargs))
           wt <- f1X/lambda
           K <- Kinhom(X, reciplambda=wt, correction=correction, ...)
           Ef2 <- mean(f1X)^2
         },
         general={
           fXX <- do.call("outer", append(list(mX, mX, f), fargs))
           wt <- fXX/lambda^2
           K <- Kinhom(X, reciplambda2=wt, correction=correction, ...)
           Ef2 <- mean(fXX)
         })
  # theoretical Poisson value scales by E[f]^2 (or its analogue Ef2)
  K$theo <- K$theo * Ef2
  labl <- attr(K, "labl")
  if(normalise)
    K <- eval.fv(K/Ef2)
  if(returnL)
    K <- eval.fv(sqrt(K/pi))
  attr(K, "labl") <- labl
  # choose the function name/label matching the transform applied
  if(normalise && !returnL) {
    ylab <- substitute(K[f](r), NULL)
    fnam <- "K[f]"
  } else if(normalise && returnL) {
    ylab <- substitute(L[f](r), NULL)
    fnam <- "L[f]"
  } else if(!normalise && !returnL) {
    ylab <- substitute(C[f](r), NULL)
    fnam <- "C[f]"
  } else {
    ylab <- substitute(sqrt(C[f](r)/pi), NULL)
    fnam <- "sqrt(C[f]/pi)"
  }
  K <- rebadge.fv(K, ylab, fnam)
  return(K)
}
border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) # Denominator # Ef = Ef(M,M') when M, M' are independent # Apply f to every possible pair of marks, and average Ef <- switch(ftype, mul = { mean(marx)^2 }, equ = { mtable <- table(marx) sum(mtable^2)/sum(mtable)^2 }, product={ f1m <- do.call(f1, append(list(marx), fargs)) mean(f1m)^2 }, general = { if(is.null(fargs)) mean(outer(marx, marx, f)) else mean(do.call("outer", append(list(marx,marx,f),fargs))) }, stop("Internal error: invalid ftype")) if(normalise) { theory <- 1 Efdenom <- Ef } else { theory <- Ef Efdenom <- 1 } if(normalise) { # check validity of denominator if(Efdenom == 0) stop("Cannot normalise the mark correlation; the denominator is zero") else if(Efdenom < 0) warning(paste("Problem when normalising the mark correlation:", "the denominator is negative")) } # this will be the output data frame result <- data.frame(r=r, theo= rep.int(theory,length(r))) desc <- c("distance argument r", "theoretical value (independent marks) for %s") alim <- c(0, min(rmax, rmaxdefault)) # determine conventional name of function if(ftype %in% c("mul", "equ")) { if(normalise) { ylab <- substitute(k[mm](r), NULL) fnam <- "k[mm]" } else { ylab <- substitute(c[mm](r), NULL) fnam <- "c[mm]" } } else { if(normalise) { ylab <- substitute(k[f](r), NULL) fnam <- "k[f]" } else { ylab <- substitute(c[f](r), NULL) fnam <- "c[f]" } } result <- fv(result, "r", ylab, "theo", , alim, c("r","{%s^{iid}}(r)"), desc, fname=fnam) # find close pairs of points close <- closepairs(X, rmax) dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=W, check=FALSE) # apply f to marks of close pairs of points # mI <- marx[I] mJ <- marx[J] ff <- switch(ftype, mul = mI * mJ, equ = (mI == mJ), product={ if(is.null(fargs)) { fI <- f1(mI) fJ <- f1(mJ) } 
else { fI <- do.call(f1, append(list(mI), fargs)) fJ <- do.call(f1, append(list(mJ), fargs)) } fI * fJ }, general={ if(is.null(fargs)) f(marx[I], marx[J]) else do.call(f, append(list(marx[I], marx[J]), fargs)) }) # check values of f(M1, M2) if(is.logical(ff)) ff <- as.numeric(ff) else if(!is.numeric(ff)) stop("function f did not return numeric values") if(any(is.na(ff))) switch(ftype, mul=, equ=stop("some marks were NA"), product=, general=stop("function f returned some NA values")) if(any(ff < 0)) switch(ftype, mul=, equ=stop("negative marks are not permitted"), product=, general=stop("negative values of function f are not permitted")) #### Compute estimates ############## if(any(correction == "translate")) { # translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) # get smoothed estimate of mark covariance Mtrans <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(trans=Mtrans), "hat(%s^{trans})(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) # get smoothed estimate of mark covariance Miso <- sewsmod(dIJ, ff, edgewt, Efdenom, r, method, ...) result <- bind.fv(result, data.frame(iso=Miso), "hat(%s^{iso})(r)", "Ripley isotropic correction estimate of %s", "iso") } # which corrections have been computed? nama2 <- names(result) corrxns <- rev(nama2[nama2 != "r"]) # default is to display them all formula(result) <- (. 
sewsmod <- function(d, ff, wt, Ef, rvals, method="smrep", ..., nwtsteps=500) {
  # Smooth Estimate of Weighted Second Moment Density
  # (engine for computing mark correlations, etc)
  # ------
  # Vectors containing one entry for each (close) pair of points
  #   d  = interpoint distance
  #   ff = f(M1, M2) where M1, M2 are marks at the two points
  #   wt = edge correction weight
  # -----
  #   Ef = E[f(M, M')] where M, M' are independent random marks
  #
  # Returns the smoothed ratio estimate evaluated at 'rvals'.
  # method selects the smoother:
  #   "density" - stats::density (requires evenly spaced rvals)
  #   "sm"      - sm::sm.density with weights (slow)
  #   "smrep"   - sm::sm.density, weights emulated by replicating data points
  #   "loess"   - stats::loess regression of ff on d
  d <- as.vector(d)
  ff <- as.vector(ff)
  wt <- as.vector(wt)
  switch(method,
         density={
           fw <- ff * wt
           sum.fw <- sum(fw)
           sum.wt <- sum(wt)
           # smooth estimate of kappa_f
           est <- density(d, weights=fw/sum.fw,
                          from=min(rvals), to=max(rvals), n=length(rvals),
                          ...)$y
           numerator <- est * sum.fw
           # smooth estimate of kappa_1
           est0 <- density(d, weights=wt/sum.wt,
                           from=min(rvals), to=max(rvals), n=length(rvals),
                           ...)$y
           denominator <- est0 * Ef * sum.wt
           result <- numerator/denominator
         },
         sm={
           # This is slow!
           # warnings suppressed while testing for package 'sm'
           oldopt <- options(warn=-1)
           smok <- require(sm)
           options(oldopt)
           if(!smok)
             stop(paste("Option method=sm requires package sm,",
                        "which is not available"))
           # smooth estimate of kappa_f
           fw <- ff * wt
           est <- sm.density(d, weights=fw,
                             eval.points=rvals,
                             display="none", nbins=0, ...)$estimate
           numerator <- est * sum(fw)/sum(est)
           # smooth estimate of kappa_1
           est0 <- sm.density(d, weights=wt,
                              eval.points=rvals,
                              display="none", nbins=0, ...)$estimate
           denominator <- est0 * (sum(wt)/ sum(est0)) * Ef
           result <- numerator/denominator
         },
         smrep={
           oldopt <- options(warn=-1)
           smok <- require(sm)
           options(oldopt)
           if(!smok)
             stop(paste("Option method=smrep requires package sm,",
                        "which is not available"))
           hstuff <- resolve.defaults(list(...), list(hmult=1, h.weights=NA))
           if(hstuff$hmult == 1 && all(is.na(hstuff$h.weights)))
             warning("default smoothing parameter may be inappropriate")
           # use replication to effect the weights (it's faster)
           # each datum is repeated in proportion to its weight,
           # with nwtsteps controlling the resolution of the approximation
           nw <- round(nwtsteps * wt/max(wt))
           drep.w <- rep.int(d, nw)
           fw <- ff * wt
           nfw <- round(nwtsteps * fw/max(fw))
           drep.fw <- rep.int(d, nfw)
           # smooth estimate of kappa_f
           est <- sm.density(drep.fw,
                             eval.points=rvals,
                             display="none", ...)$estimate
           numerator <- est * sum(fw)/sum(est)
           # smooth estimate of kappa_1
           est0 <- sm.density(drep.w,
                              eval.points=rvals,
                              display="none", ...)$estimate
           denominator <- est0 * (sum(wt)/ sum(est0)) * Ef
           result <- numerator/denominator
         },
         loess = {
           # set up data frame
           df <- data.frame(d=d, ff=ff, wt=wt)
           # fit curve to numerator using loess
           fitobj <- loess(ff ~ d, data=df, weights=wt, ...)
           # evaluate fitted curve at desired r values
           Eff <- predict(fitobj, newdata=data.frame(d=rvals))
           # normalise:
           # denominator is the sample mean of all ff[i,j],
           # an estimate of E(ff(M1,M2)) for M1,M2 independent marks
           result <- Eff/Ef
         },
         )
  return(result)
}

############## user interface bits ##################################

check.testfun <- function(f=NULL, f1=NULL, X) {
  # Validate f or f1 as a test function for point pattern X
  # Determine function type 'ftype'
  #   ("mul", "equ", "product" or "general"):
  #   "mul"     - f(m1,m2) = m1 * m2   (default for numeric marks)
  #   "equ"     - f(m1,m2) = (m1 == m2) (default for multitype patterns)
  #   "product" - f(u,v) = f1(u) * f1(v) for user-supplied f1
  #   "general" - arbitrary user-supplied f(m1,m2)
  # Returns list(f, f1, ftype).
  fmul <- function(m1, m2) { m1 * m2 }
  fequ <- function(m1, m2) { m1 == m2 }
  f1id <- function(m) { m }
  if(is.null(f) && is.null(f1)) {
    # no functions given
    # default depends on kind of marks
    if(is.multitype(X)) {
      f <- fequ
      ftype <- "equ"
    } else {
      f1 <- f1id
      ftype <- "mul"
    }
  } else if(!is.null(f1)) {
    # f1 given
    # specifies test function of the form f(u,v) = f1(u) f1(v)
    if(!is.null(f))
      warning("argument f ignored (overridden by f1)")
    stopifnot(is.function(f1))
    ftype <- "product"
  } else {
    # f given
    if(is.character(fname <- f)) {
      # f named by a character string: recognise the built-ins,
      # otherwise look the function up by name
      switch(fname,
             "mul" = {
               f1 <- f1id
               ftype <- "mul"
             },
             "equ" = {
               f <- fequ
               ftype <- "equ"
             },
             {
               f <- get(fname)
               ftype <- "general"
             })
    } else if(is.function(f)) {
      # compare the supplied function against the built-ins,
      # ignoring environment differences
      same <- function(f, g) {
        environment(g) <- environment(f)
        identical(f,g)
      }
      ftype <- if(same(f, fmul)) "mul" else if(same(f, fequ)) "equ" else "general"
      if(ftype == "mul" && is.multitype(X))
        stop(paste("Inappropriate choice of function f;",
                   "point pattern is multitype;",
                   "types cannot be multiplied."))
    } else
      stop("Argument f must be a function or the name of a function")
  }
  return(list(f=f, f1=f1, ftype=ftype))
}
expression to 'expression' object e <- as.expression(substitute(expr)) # convert syntactic expression to call elang <- substitute(expr) # find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") # get the actual variables if(missing(envir)) envir <- sys.parent() vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames # find out which ones are fv objects fvs <- unlist(lapply(vars, is.fv)) nfuns <- sum(fvs) if(nfuns == 0) stop("No fv objects in this expression") # extract them funs <- vars[fvs] # restrict to columns identified by 'dotnames' if(dotonly) funs <- lapply(funs, restrict.to.dot) # test whether the fv objects are compatible if(nfuns > 1 && !(ok <- do.call("compatible", unname(funs)))) stop(paste(if(nfuns > 2) "some of" else NULL, "the functions", commasep(sQuote(names(funs))), "are not compatible")) # copy first object as template result <- funs[[1]] labl <- attr(result, "labl") origdotnames <- fvnames(result, ".") origshadenames <- fvnames(result, ".s") # determine which function estimates are supplied argname <- fvnames(result, ".x") nam <- names(result) ynames <- nam[nam != argname] # for each function estimate, evaluate expression for(yn in ynames) { # extract corresponding estimates from each fv object funvalues <- lapply(funs, "[[", i=yn) # insert into list of argument values vars[fvs] <- funvalues # evaluate result[[yn]] <- eval(e, vars) } # determine mathematical labels. 
# 'yexp' determines y axis label # 'ylab' determines y label in printing and description # 'fname' is sprintf-ed into 'labl' for legend yexps <- lapply(funs, attr, which="yexp") ylabs <- lapply(funs, attr, which="ylab") fnames <- unlist(lapply(funs, getfname)) # Repair 'fname' attributes if blank if(any(blank <- !nzchar(fnames))) { # Set function names to be object names as used in the expression for(i in which(blank)) attr(funs[[i]], "fname") <- fnames[i] <- names(funs)[i] } # Remove duplicated names # Typically occurs when combining several K functions, etc. oldfnames <- fnames # Tweak fv objects so their function names are their object names # as used in the expression if(any(duplicated(fnames))) { newfnames <- names(funs) for(i in 1:nfuns) funs[[i]] <- rebadge.fv(funs[[i]], new.fname=newfnames[i]) fnames <- newfnames } if(any(duplicated(ylabs))) { for(i in 1:nfuns) { new.ylab <- substitute(f(r), list(f=as.name(fnames[i]))) funs[[i]] <- rebadge.fv(funs[[i]], new.ylab=new.ylab) } ylabs <- lapply(funs, attr, which="ylab") } if(any(duplicated(yexps))) { newfnames <- names(funs) for(i in 1:nfuns) { new.yexp <- substitute(f(r), list(f=as.name(newfnames[i]))) funs[[i]] <- rebadge.fv(funs[[i]], new.yexp=new.yexp) } yexps <- lapply(funs, attr, which="yexp") } # now compute y axis labels for the result attr(result, "yexp") <- eval(substitute(substitute(e, yexps), list(e=elang))) attr(result, "ylab") <- eval(substitute(substitute(e, ylabs), list(e=elang))) # compute fname equivalent to expression if(nfuns > 1) { # take original expression the.fname <- paren(flatten(deparse(elang))) } else if(nzchar(oldname <- oldfnames[1])) { # replace object name in expression by its function name namemap <- list(as.name(oldname)) names(namemap) <- names(funs)[1] the.fname <- deparse(eval(substitute(substitute(e, namemap), list(e=elang)))) } else the.fname <- names(funs)[1] attr(result, "fname") <- the.fname # now compute the [modified] y labels labelmaps <- lapply(funs, fvlabelmap, 
dot=FALSE) for(yn in ynames) { # labels for corresponding columns of each argument funlabels <- lapply(labelmaps, "[[", i=yn) # form expression involving these columns labl[match(yn, names(result))] <- flatten(deparse(eval(substitute(substitute(e, f), list(e=elang, f=funlabels))))) } attr(result, "labl") <- labl # copy dotnames and shade names from template fvnames(result, ".") <- origdotnames[origdotnames %in% names(result)] if(!is.null(origshadenames) && all(origshadenames %in% names(result))) fvnames(result, ".s") <- origshadenames return(result) } # helper functions restrict.to.dot <- function(z) { argu <- fvnames(z, ".x") dotn <- fvnames(z, ".") shadn <- fvnames(z, ".s") ok <- colnames(z) %in% unique(c(argu, dotn, shadn)) return(z[, ok]) } getfname <- function(x) { if(!is.null(y <- attr(x, "fname"))) y else "" } flatten <- function(x) { paste(x, collapse=" ") } eval.fv }) compatible <- function(A, B, ...) { UseMethod("compatible") } compatible.fv <- function(A, B, ...) { verifyclass(A, "fv") if(missing(B)) return(TRUE) verifyclass(B, "fv") # are columns the same? namesmatch <- identical(all.equal(names(A),names(B)), TRUE) && (fvnames(A, ".x") == fvnames(B, ".x")) && (fvnames(A, ".y") == fvnames(B, ".y")) if(!namesmatch) return(FALSE) # are 'r' values the same ? 
rA <- with(A, .x) rB <- with(B, .x) approx.equal <- function(x, y) { max(abs(x-y)) <= .Machine$double.eps } rmatch <- (length(rA) == length(rB)) && approx.equal(rA, rB) if(!rmatch) return(FALSE) # A and B are compatible if(length(list(...)) == 0) return(TRUE) # recursion return(compatible.fv(B, ...)) } spatstat/R/closepairs.R0000755000176000001440000003217612243543436014657 0ustar ripleyusers# # closepairs.R # # $Revision: 1.25 $ $Date: 2013/09/21 08:56:11 $ # # simply extract the r-close pairs from a dataset # # Less memory-hungry for large patterns # closepairs <- function(X, rmax, ordered=TRUE, what=c("all", "indices")) { verifyclass(X, "ppp") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1) stopifnot(is.finite(rmax)) stopifnot(rmax >= 0) npts <- npoints(X) null.answer <- switch(what, all = { list(i=integer(0), j=integer(0), xi=numeric(0), yi=numeric(0), xj=numeric(0), yj=numeric(0), dx=numeric(0), dy=numeric(0), d=numeric(0)) }, indices = { list(i=integer(0), j=integer(0)) }) if(npts == 0) return(null.answer) # sort points by increasing x coordinate oo <- fave.order(X$x) Xsort <- X[oo] # First make an OVERESTIMATE of the number of pairs nsize <- ceiling(4 * pi * (npts^2) * (rmax^2)/area.owin(X$window)) nsize <- max(1024, nsize) # Now extract pairs if(spatstat.options("closepairs.newcode")) { # ------------------- use new faster code --------------------- x <- Xsort$x y <- Xsort$y r <- rmax ng <- nsize storage.mode(x) <- "double" storage.mode(y) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" switch(what, all = { z <- .Call("Vclosepairs", xx=x, yy=y, rr=r, nguess=ng) # PACKAGE="spatstat") if(length(z) != 9) stop("Internal error: incorrect format returned from Vclosepairs") i <- z[[1]] # NB no increment required j <- z[[2]] xi <- z[[3]] yi <- z[[4]] xj <- z[[5]] yj <- z[[6]] dx <- z[[7]] dy <- z[[8]] d <- z[[9]] }, indices = { z <- .Call("VcloseIJpairs", xx=x, yy=y, rr=r, nguess=ng) # PACKAGE="spatstat") if(length(z) 
!= 2) stop("Internal error: incorrect format returned from VcloseIJpairs") i <- z[[1]] # NB no increment required j <- z[[2]] }) } else { # ------------------- use older code -------------------------- DUP <- spatstat.options("dupC") z <- .C("Fclosepairs", nxy=as.integer(npts), x=as.double(Xsort$x), y=as.double(Xsort$y), r=as.double(rmax), noutmax=as.integer(nsize), nout=as.integer(integer(1)), iout=as.integer(integer(nsize)), jout=as.integer(integer(nsize)), xiout=as.double(numeric(nsize)), yiout=as.double(numeric(nsize)), xjout=as.double(numeric(nsize)), yjout=as.double(numeric(nsize)), dxout=as.double(numeric(nsize)), dyout=as.double(numeric(nsize)), dout=as.double(numeric(nsize)), status=as.integer(integer(1)), DUP=DUP) # PACKAGE="spatstat") if(z$status != 0) { # Guess was insufficient # Obtain an OVERCOUNT of the number of pairs # (to work around gcc bug #323) rmaxplus <- 1.25 * rmax nsize <- .C("paircount", nxy=as.integer(npts), x=as.double(Xsort$x), y=as.double(Xsort$y), rmaxi=as.double(rmaxplus), count=as.integer(integer(1)), DUP=DUP)$count # PACKAGE="spatstat")$count if(nsize <= 0) return(null.answer) # add a bit more for safety nsize <- ceiling(1.1 * nsize) + 2 * npts # now extract points z <- .C("Fclosepairs", nxy=as.integer(npts), x=as.double(Xsort$x), y=as.double(Xsort$y), r=as.double(rmax), noutmax=as.integer(nsize), nout=as.integer(integer(1)), iout=as.integer(integer(nsize)), jout=as.integer(integer(nsize)), xiout=as.double(numeric(nsize)), yiout=as.double(numeric(nsize)), xjout=as.double(numeric(nsize)), yjout=as.double(numeric(nsize)), dxout=as.double(numeric(nsize)), dyout=as.double(numeric(nsize)), dout=as.double(numeric(nsize)), status=as.integer(integer(1)), DUP=DUP) # PACKAGE="spatstat") if(z$status != 0) stop(paste("Internal error: C routine complains that insufficient space was allocated:", nsize)) } # trim vectors to the length indicated npairs <- z$nout if(npairs <= 0) return(null.answer) actual <- seq_len(npairs) i <- z$iout[actual] # 
sic j <- z$jout[actual] if(what == "all") { xi <- z$xiout[actual] yi <- z$yiout[actual] xj <- z$xjout[actual] yj <- z$yjout[actual] dx <- z$dxout[actual] dy <- z$dyout[actual] d <- z$dout[actual] } # ------------------- end code switch ------------------------ } # convert i,j indices to original sequence i <- oo[i] j <- oo[j] # are (i, j) and (j, i) equivalent? if(!ordered) { ok <- (i < j) i <- i[ok] j <- j[ok] if(what == "all") { xi <- xi[ok] yi <- yi[ok] xj <- xj[ok] yj <- yj[ok] dx <- dx[ok] dy <- dy[ok] d <- d[ok] } } # done switch(what, all = { answer <- list(i=i, j=j, xi=xi, yi=yi, xj=xj, yj=yj, dx=dx, dy=dy, d=d) }, indices = { answer <- list(i = i, j = j) }) return(answer) } ####################### crosspairs <- function(X, Y, rmax, what=c("all", "indices")) { verifyclass(X, "ppp") verifyclass(Y, "ppp") what <- match.arg(what) stopifnot(is.numeric(rmax) && length(rmax) == 1 && rmax >= 0) null.answer <- switch(what, all = { list(i=integer(0), j=integer(0), xi=numeric(0), yi=numeric(0), xj=numeric(0), yj=numeric(0), dx=numeric(0), dy=numeric(0), d=numeric(0)) }, indices = { list(i=integer(0), j=integer(0)) }) nX <- npoints(X) nY <- npoints(Y) if(nX == 0 || nY == 0) return(null.answer) # order patterns by increasing x coordinate ooX <- fave.order(X$x) Xsort <- X[ooX] ooY <- fave.order(Y$x) Ysort <- Y[ooY] if(spatstat.options("crosspairs.newcode")) { # ------------------- use new faster code --------------------- # First (over)estimate the number of pairs nsize <- ceiling(2 * pi * (rmax^2) * nX * nY/area.owin(Y$window)) nsize <- max(1024, nsize) # .Call Xx <- Xsort$x Xy <- Xsort$y Yx <- Ysort$x Yy <- Ysort$y r <- rmax ng <- nsize storage.mode(Xx) <- storage.mode(Xy) <- "double" storage.mode(Yx) <- storage.mode(Yy) <- "double" storage.mode(r) <- "double" storage.mode(ng) <- "integer" switch(what, all = { z <- .Call("Vcrosspairs", xx1=Xx, yy1=Xy, xx2=Yx, yy2=Yy, rr=r, nguess=ng) # PACKAGE="spatstat") if(length(z) != 9) stop("Internal error: incorrect format 
returned from Vcrosspairs") i <- z[[1]] # NB no increment required j <- z[[2]] xi <- z[[3]] yi <- z[[4]] xj <- z[[5]] yj <- z[[6]] dx <- z[[7]] dy <- z[[8]] d <- z[[9]] }, indices = { z <- .Call("VcrossIJpairs", xx1=Xx, yy1=Xy, xx2=Yx, yy2=Yy, rr=r, nguess=ng) # PACKAGE="spatstat") if(length(z) != 2) stop("Internal error: incorrect format returned from VcrossIJpairs") i <- z[[1]] # NB no increment required j <- z[[2]] }) } else { # Older code # obtain upper estimate of number of pairs # (to work around gcc bug 323) DUP <- spatstat.options("dupC") rmaxplus <- 1.25 * rmax nsize <- .C("crosscount", nn1=as.integer(X$n), x1=as.double(Xsort$x), y1=as.double(Xsort$y), nn2=as.integer(Ysort$n), x2=as.double(Ysort$x), y2=as.double(Ysort$y), rmaxi=as.double(rmaxplus), count=as.integer(integer(1)), DUP=DUP)$count # PACKAGE="spatstat")$count if(nsize <= 0) return(null.answer) # allow slightly more space to work around gcc bug #323 nsize <- ceiling(1.1 * nsize) + X$n + Y$n # now extract pairs z <- .C("Fcrosspairs", nn1=as.integer(X$n), x1=as.double(Xsort$x), y1=as.double(Xsort$y), nn2=as.integer(Y$n), x2=as.double(Ysort$x), y2=as.double(Ysort$y), r=as.double(rmax), noutmax=as.integer(nsize), nout=as.integer(integer(1)), iout=as.integer(integer(nsize)), jout=as.integer(integer(nsize)), xiout=as.double(numeric(nsize)), yiout=as.double(numeric(nsize)), xjout=as.double(numeric(nsize)), yjout=as.double(numeric(nsize)), dxout=as.double(numeric(nsize)), dyout=as.double(numeric(nsize)), dout=as.double(numeric(nsize)), status=as.integer(integer(1)), DUP=DUP) # PACKAGE="spatstat") if(z$status != 0) stop(paste("Internal error: C routine complains that insufficient space was allocated:", nsize)) # trim vectors to the length indicated npairs <- z$nout if(npairs <= 0) return(null.answer) actual <- seq_len(npairs) i <- z$iout[actual] # sic j <- z$jout[actual] xi <- z$xiout[actual] yi <- z$yiout[actual] xj <- z$xjout[actual] yj <- z$yjout[actual] dx <- z$dxout[actual] dy <- z$dyout[actual] d <- 
z$dout[actual]
  }
  # convert i,j indices to original sequences
  i <- ooX[i]
  j <- ooY[j]
  # done
  switch(what,
         all = {
           answer <- list(i=i, j=j,
                          xi=xi, yi=yi,
                          xj=xj, yj=yj,
                          dx=dx, dy=dy,
                          d=d)
         },
         indices = {
           answer <- list(i=i, j=j)
         })
  return(answer)
}

closethresh <- function(X, R, S, ordered=TRUE) {
  # list all R-close pairs
  # and indicate which of them are S-close (S < R)
  # so that results are consistent with closepairs(X,S)
  verifyclass(X, "ppp")
  stopifnot(is.numeric(R) && length(R) == 1 && R >= 0)
  stopifnot(is.numeric(S) && length(S) == 1 && S >= 0)
  stopifnot(S < R)
  npts <- npoints(X)
  if(npts == 0)
    # BUG FIX: element name was 't'; renamed to 'th' so the empty result
    # has the same structure as the nonempty result returned below
    return(list(i=integer(0), j=integer(0), th=logical(0)))
  # sort points by increasing x coordinate
  oo <- fave.order(X$x)
  Xsort <- X[oo]
  # First make an OVERESTIMATE of the number of pairs
  nsize <- ceiling(4 * pi * (npts^2) * (R^2)/area.owin(X$window))
  nsize <- max(1024, nsize)
  # Now extract pairs
  x <- Xsort$x
  y <- Xsort$y
  r <- R
  # BUG FIX: 's' was never defined, so the .Call below failed with
  # "object 's' not found"; copy the threshold argument S and coerce it
  s <- S
  ng <- nsize
  storage.mode(x) <- "double"
  storage.mode(y) <- "double"
  storage.mode(r) <- "double"
  storage.mode(s) <- "double"
  storage.mode(ng) <- "integer"
  z <- .Call("Vclosethresh", xx=x, yy=y, rr=r, ss=s, nguess=ng)
  #           PACKAGE="spatstat")
  if(length(z) != 3)
    stop("Internal error: incorrect format returned from Vclosethresh")
  i <- z[[1]]  # NB no increment required
  j <- z[[2]]
  th <- as.logical(z[[3]])
  # convert i,j indices to original sequence
  i <- oo[i]
  j <- oo[j]
  # are (i, j) and (j, i) equivalent?
if(!ordered) {
    ok <- (i < j)
    i <- i[ok]
    j <- j[ok]
    th <- th[ok]
  }
  # done
  return(list(i=i, j=j, th=th))
}

crosspairquad <- function(Q, rmax, what=c("all", "indices")) {
  # find all close pairs X[i], U[j]
  stopifnot(inherits(Q, "quad"))
  what <- match.arg(what)
  X <- Q$data
  D <- Q$dummy
  clX <- closepairs(X=X, rmax=rmax, what=what)
  clXD <- crosspairs(X=X, Y=D, rmax=rmax, what=what)
  # convert all indices to serial numbers in union.quad(Q)
  # assumes data are listed first
  clXD$j <- npoints(X) + clXD$j
  # BUG FIX: was list(rbind(...)), which returned a length-1 list
  # containing the combined data frame; as.list() returns one list
  # entry per column (i, j, xi, yi, ...) in the same format as
  # closepairs()/crosspairs() above
  result <- as.list(rbind(as.data.frame(clX), as.data.frame(clXD)))
  return(result)
}
spatstat/R/rLGCP.R0000755000176000001440000000334112237642727013410 0ustar ripleyusers#
# rLGCP.R
#
# simulation of log-Gaussian Cox process
#
# original code by Abdollah Jalilian
#
# $Revision: 1.7 $ $Date: 2013/01/13 04:01:55 $
#

rLGCP <- function(model="exponential", mu = 0, param = NULL, ..., win=NULL) {
  if(!missing(mu)) {
    if (!(is.numeric(mu) || is.function(mu) || is.im(mu)))
      stop(paste(sQuote("mu"), "must be a constant, a function or an image"))
    if (is.numeric(mu) && !(length(mu) == 1))
      stop(paste(sQuote("mu"), "must be a single number"))
  }
  # NOTE(review): require() inside a function is discouraged
  # (requireNamespace() is preferred); left unchanged here because this
  # definition continues past the edited region
  if(!require(RandomFields))
    stop("Simulation of log-Gaussian Cox process requires the package RandomFields")
  win.given <- !is.null(win)
  mu.image <- is.im(mu)
  win <- if(win.given) as.owin(win) else if(mu.image) as.owin(mu) else owin()
  if(win.given && mu.image && !is.subset.owin(win, as.owin(mu)))
    stop(paste("The spatial domain of the pixel image", sQuote("mu"),
               "does not cover the simulation window", sQuote("win")))
  w <- as.mask(win)
  x <- w$xcol
  y <- w$yrow
  dim <- w$dim
  xy <- expand.grid(x=x, y=y)
  xx <- xy$x
  yy <- xy$y
  muxy <- if(is.numeric(mu)) mu else
          if (is.function(mu)) mu(xx,yy) else
          lookup.im(mu, xx, yy, naok=TRUE, strict=TRUE)
  muxy[is.na(muxy)] <- -Inf
  # generate Gaussian random field
  xgrid <- c(x[1], x[length(x)], w$xstep)
  ygrid <- c(y[1], y[length(y)], w$ystep)
  z <- RandomFields::GaussRF(xgrid, ygrid, grid = TRUE, gridtriple=TRUE,
                             model = model,
param = param, ...) logLambda <- muxy + z # convert to log-Gaussian image Lambda <- matrix(exp(logLambda), nrow=dim[1], ncol=dim[2], byrow=TRUE) Lambda <- as.im(Lambda, W=w) # generate Poisson points X <- rpoispp(Lambda)[win] # attr(X, "Lambda") <- Lambda return(X) } spatstat/R/parres.R0000755000176000001440000005064412237642727014015 0ustar ripleyusers# # parres.R # # code to plot transformation diagnostic # # $Revision: 1.1 $ $Date: 2013/04/25 06:37:43 $ # parres <- function(model, covariate, ..., smooth.effect=FALSE, subregion=NULL, bw="nrd0", adjust=1, from=NULL,to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname) { modelname <- deparse(substitute(model)) if(missing(covname)) covname <- sensiblevarname(deparse(substitute(covariate)), "X") callstring <- paste(deparse(sys.call()), collapse = "") if(!is.null(subregion)) stopifnot(is.owin(subregion)) if(is.null(adjust)) adjust <- 1 bw.input <- match.arg(bw.input) # validate model stopifnot(is.ppm(model)) modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) # extract spatial locations Q <- quad.ppm(model) datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- npoints(quadpoints) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Inverse lambda residuals rx <- residuals(model, type="inverse") resid <- with(rx, "increment") ################################################################# # identify the covariate # if(length(covariate) == 0) stop("No covariate specified") covtype <- "unknown" if(!is.character(covariate)) { # Covariate is some kind 
of data, treated as external covariate covtype <- "external" beta <- 0 covvalues <- evalCovariate(covariate, quadpoints) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # 'original covariates' orig.covars <- variablesinformula(formula(model)) # 'canonical covariates' canon.covars <- names(coef(model)) # offsets offset.covars <- offsetsinformula(formula(model)) # if(covname %in% orig.covars) { # one of the original covariates covtype <- "original" covvalues <- evalCovariate(findCovariate(covname, model), quadpoints) } else if(covname %in% canon.covars) { # one of the canonical covariates covtype <- "canonical" mm <- model.matrix(model) covvalues <- mm[, covname] ## extract the corresponding coefficient beta <- coef(model)[[covname]] } else if(covname %in% offset.covars) { # an offset term only covtype <- "offset" mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q))) if(!(covname %in% colnames(mf))) stop(paste("Internal error: offset term", covname, "not found in model frame")) covvalues <- mf[, covname] ## fixed coefficient (not an estimated parameter) beta <- 1 } else{ # must be an external covariate (i.e. 
not used in fitted model) covtype <- "external" beta <- 0 covvalues <- evalCovariate(findCovariate(covname, model), quadpoints) } } # validate covvalues # if(is.null(covvalues)) stop("Unable to extract covariate values") if(length(covvalues) != npoints(quadpoints)) stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) vtype <- typeof(covvalues) switch(vtype, real=, double = { }, integer = { warning("Covariate is integer-valued") }, stop(paste("Cannot handle covariate of type", sQuote(vtype)))) ################################################################# # Compute covariate effect if(covtype != "original") { effect <- beta * covvalues mediator <- covtype effectfundata <- list(beta=beta) effectFun <- function(x) { (effectfundata$beta) * x } isoffset <- (covtype == "offset") names(isoffset) <- covname } else { # `original' covariate (passed as argument to ppm) # may determine one or more canonical covariates and/or offsets # # Initialise termnames <- character(0) termbetas <- numeric(0) isoffset <- logical(0) mediator <- character(0) effect <- 0 effectFun <- function(x) { effectFun.can(x) + effectFun.off(x) } effectFun.can <- effectFun.off <- function(x) { 0 * x } # Identify relevant canonical covariates dmat <- model.depends(model) if(!(covname %in% colnames(dmat))) stop("Internal error: cannot match covariate names") othercov <- (colnames(dmat) != covname) relevant <- dmat[, covname] if(any(relevant)) { # original covariate determines one or more canonical covariates mediator <- "canonical" # check whether covariate is separable if(any(conflict <- dmat[relevant, othercov, drop=FALSE])) { conflictterms <- apply(conflict, 1, any) conflictcovs <- apply(conflict, 2, any) stop(paste("The covariate", sQuote(covname), "cannot be separated from the", ngettext(sum(conflictcovs), "covariate", "covariates"), commasep(sQuote(colnames(conflict)[conflictcovs])), "in the model", 
ngettext(sum(conflictterms), "term", "terms"), commasep(sQuote(rownames(conflict)[conflictterms])) )) } # termnames <- rownames(dmat)[relevant] isoffset <- rep.int(FALSE, length(termnames)) names(isoffset) <- termnames # Extract relevant canonical covariates mm <- model.matrix(model) termvalues <- mm[, relevant, drop=FALSE] # extract corresponding coefficients termbetas <- coef(model)[relevant] # evaluate model effect effect <- as.numeric(termvalues %*% termbetas) # check length if(length(effect) != npoints(quadpoints)) stop(paste("Internal error: number of values of fitted effect =", length(effect), "!=", npoints(quadpoints), "= number of quadrature points")) # Trap loglinear case if(length(termnames) == 1 && identical(termnames, covname)) { covtype <- "canonical" beta <- termbetas } # construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , drop=FALSE] effectfundata.can <- list(covname=covname, fmla = formula(model), termbetas = termbetas, defaultdata = defaultdata, relevant = relevant, termnames = termnames) effectFun.can <- function(x) { d <- effectfundata.can # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model matrix m <- model.matrix(d$fmla, df) # check it conforms to expected structure if(!identical(colnames(m)[d$relevant], d$termnames)) stop("Internal error: mismatch in term names in effectFun") me <- m[, d$relevant, drop=FALSE] y <- me %*% as.matrix(d$termbetas, ncol=1) return(y) } } if(!is.null(offmat <- attr(dmat, "offset")) && any(relevant <- offmat[, covname])) { # covariate appears in a model offset term mediator <- c(mediator, "offset") # check whether covariate is separable if(any(conflict <- offmat[relevant, othercov, drop=FALSE])) { conflictterms <- apply(conflict, 1, any) conflictcovs <- apply(conflict, 2, any) stop(paste("The covariate", 
sQuote(covname), "cannot be separated from the", ngettext(sum(conflictcovs), "covariate", "covariates"), commasep(sQuote(colnames(conflict)[conflictcovs])), "in the model", ngettext(sum(conflictterms), "term", "terms"), commasep(sQuote(rownames(conflict)[conflictterms])) )) } # collect information about relevant offset offnames <- rownames(offmat)[relevant] termnames <- c(termnames, offnames) noff <- length(offnames) termbetas <- c(termbetas, rep.int(1, noff)) isoffset <- c(isoffset, rep.int(TRUE, noff)) names(termbetas) <- names(isoffset) <- termnames # extract values of relevant offset mf <- model.frame(model, subset=rep.int(TRUE, n.quad(Q))) if(any(nbg <- !(offnames %in% colnames(mf)))) stop(paste("Internal error:", ngettext(sum(nbg), "offset term", "offset terms"), offnames[nbg], "not found in model frame")) effex <- mf[, offnames, drop=FALSE] effect <- effect + apply(effex, 1, sum) # # construct the corresponding function gd <- getglmdata(model) goodrow <- min(which(complete.cases(gd))) defaultdata <- gd[goodrow, , drop=FALSE] effectfundata.off <- list(covname=covname, fmla = formula(model), defaultdata = defaultdata, offnames = offnames) effectFun.off <- function(x) { d <- effectfundata.off # replicate default data to correct length df <- as.data.frame(lapply(d$defaultdata, rep, length(x))) # overwrite value of covariate with new data df[,covname] <- x # construct model FRAME mf <- model.frame(d$fmla, df) # check it conforms to expected structure if(!all(d$offnames %in% colnames(mf))) stop("Internal error: mismatch in term names in effectFun") moff <- mf[, d$offnames, drop=FALSE] y <- apply(moff, 1, sum) return(y) } } if(length(termnames) == 0) { # Sanity clause # (everyone knows there ain't no Sanity Clause...) warning(paste("Internal error: could not find any", "canonical covariates or offset terms", "that depended on the covariate", sQuote(covname))) # Assume it's an external covariate (i.e. 
not used in fitted model) covtype <- "external" beta <- 0 effect <- beta * covvalues effectFun <- function(x) { 0 * x } isoffset <- FALSE names(isoffset) <- covname } } #### Canonical covariates and coefficients switch(covtype, original={ cancovs <- termnames canbeta <- termbetas }, offset = , canonical={ cancovs <- covname canbeta <- beta }, external={ cancovs <- canbeta <- NA }) ################################################################# # Validate covariate values # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg.cov <- !is.finite(covvalues) if(any(offending <- nbg.cov & operative)) { warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } nbg.eff <- !is.finite(effect) if(any(offending <- nbg.eff & operative)) { warning(paste(sum(offending), "out of", length(offending), "values of fitted effect discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values nbg <- nbg.cov | nbg.eff ok <- !nbg & operative Q <- Q[ok] covvalues <- covvalues[ok] quadpoints <- quadpoints[ok] resid <- resid[ok] lam <- lam[ok] effect <- effect[ok] insubregion <- insubregion[ok] Z <- Z[ok] wts <- wts[ok] #################################################### # assemble data for smoothing x <- covvalues y <- resid/wts if(smooth.effect) y <- y + effect w <- wts # if(makefrom <- is.null(from)) from <- min(x) if(maketo <- is.null(to)) to <- max(x) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(x, weights=w*y, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) 
sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(x[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) }) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator x <- x[insubregion] y <- y[insubregion] w <- w[insubregion] Z <- Z[insubregion] lam <- lam[insubregion] if(makefrom) from <- min(x) if(maketo) to <- max(x) numer <- unnormdensity(x, weights=w*y, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(x, weights=w, bw=sigma, adjust=1, n=n,from=from,to=to, ...) #################################################### # Determine recommended plot range xr <- range(x[Z], finite=TRUE) alim <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, c(from, to)) #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x yyy <- numfun(xxx)/denfun(xxx) # variance estimation # smooth 1/lambda(u) with smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(x, weights=w/lam, bw=tau, adjust=1, n=n,from=from,to=to, ...) varnumfun <- interpolate(varnumer) varestxxx <- varnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sd <- sqrt(varestxxx) # alternative estimate of variance using data points only varXnumer <- unnormdensity(x[Z], weights=1/lam[Z]^2, bw=tau, adjust=1, n=n,from=from,to=to, ...) 
varXnumfun <- interpolate(varXnumer) varXestxxx <- varXnumfun(xxx)/(2 * sigma * sqrt(pi) * denfun(xxx)^2) sdX <- sqrt(varXestxxx) # fitted effect effxxx <- effectFun(xxx) # add fitted effect of covariate, if not added before smoothing if(!smooth.effect) yyy <- yyy + effxxx #################################################### # pack into fv object df <- data.frame(xxx=xxx, h =yyy, varh=varestxxx, hi=yyy+2*sd, lo=yyy-2*sd, hiX=yyy+2*sdX, loX=yyy-2*sdX, fit=effxxx) # remove any funny characters in name of covariate (e.g. if it is an offset) Covname <- make.names(covname) names(df)[1] <- Covname desc <- c(paste("covariate", sQuote(covname)), "Smoothed partial residual", "Variance", "Upper limit of pointwise 5%% significance band (integral)", "Lower limit of pointwise 5%% significance band (integral)", "Upper limit of pointwise 5%% significance band (sum)", "Lower limit of pointwise 5%% significance band (sum)", paste("Parametric fitted effect of", sQuote(covname))) rslt <- fv(df, argu=Covname, ylab=substitute(h(X), list(X=as.name(covname))), valu="h", fmla= as.formula(paste(". ~ ", Covname)), alim=alim, labl=c(covname, paste("%s", paren(covname), sep=""), paste("var", paren(covname), sep=""), paste("hi", paren(covname), sep=""), paste("lo", paren(covname), sep=""), paste("hiX", paren(covname), sep=""), paste("loX", paren(covname), sep=""), paste("fit", paren(covname), sep="")), desc=desc, fname="h", yexp=as.expression(substitute(hat(h)(X), list(X=covname)))) attr(rslt, "dotnames") <- c("h", "hi", "lo", "fit") # add special class data class(rslt) <- c("parres", class(rslt)) attr(rslt, "stuff") <- list(covname = paste(covname, collapse=""), covtype = covtype, mediator = mediator, cancovs = cancovs, canbeta = canbeta, isoffset = isoffset, modelname = modelname, modelcall = modelcall, callstring = callstring, sigma = sigma, smooth.effect = smooth.effect, restricted = !is.null(subregion), bw.input = bw.input) return(rslt) } print.parres <- function(x, ...) 
{ cat("Transformation diagnostic (class parres)\n") s <- attr(x, "stuff") cat(paste("for the", s$covtype, "covariate", sQuote(s$covname), if(s$covtype != "external") "in" else "for", "the fitted model", if(nchar(s$modelcall) < 30) "" else "\n\t", s$modelcall, "\n")) switch(s$covtype, original={ cancovs <- s$cancovs med <- s$mediator isoffset <- s$isoffset if(is.null(isoffset)) isoffset <- rep.int(FALSE, length(cancovs)) ncc <- length(cancovs) noff <- sum(isoffset) nother <- sum(!isoffset) explain <- paste(ngettext(ncc, "Fitted effect:", "Fitted effect: sum of"), if(noff == 0) { paste(paste(med, collapse=" and "), ngettext(ncc, "term", "terms"), commasep(dQuote(cancovs))) } else { paste(paste(med[med != "offset"], collapse=" and "), ngettext(nother, "term", "terms"), commasep(dQuote(cancovs[!isoffset])), "and offset", ngettext(noff, "term", "terms"), commasep(dQuote(cancovs[isoffset]))) }) cat(paste(explain, "\n")) }, external={ cat("Note: effect estimate not justified by delta method\n") }, offset={}, canonical={}) # earlier versions were equivalent to restricted=FALSE if(identical(s$restricted, TRUE)) cat("\t--Diagnostic computed for a subregion--\n") cat(paste("Call:", s$callstring, "\n")) cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n")) # earlier versions were equivalent to smooth.effect=TRUE sme <- !identical(s$smooth.effect, FALSE) if(sme) { cat("Algorithm: smooth(effect + residual)\n\n") } else { cat("Algorithm: effect + smooth(residual)\n\n") } NextMethod("print") } plot.parres <- function(x, ...) 
{ xname <- deparse(substitute(x)) do.call("plot.fv", resolve.defaults(list(x), list(...), list(main=xname, shade=c("hi", "lo")))) } spatstat/R/linearKmulti.R0000644000176000001440000002300612237642727015146 0ustar ripleyusers# # linearKmulti # # $Revision: 1.5 $ $Date: 2013/01/31 02:46:00 $ # # K functions for multitype point pattern on linear network # # linearKdot <- function(X, i, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points result <- linearKmulti(X, I, J, r=r, correction=correction, ...) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(linearK[i ~ dot](r), list(i=iname)), paste("linearK[", iname, "~ symbol(\"\\267\")]"), new.yexp=substitute(linearK[i ~ symbol("\267")](r), list(i=iname))) return(result) } linearKcross <- function(X, i, j, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j)) j <- lev[2] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { result <- linearK(X[marx == i], r=r, correction=correction, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearKmulti(X, I, J, r=r, correction=correction, ...) 
} # rebrand iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(linearKcross[i,j](r), list(i=iname,j=jname)), sprintf("linearK[list(%s,%s)]", iname, jname), new.yexp=substitute(linearK[list(i,j)](r), list(i=iname,j=jname))) return(result) } linearKmulti <- function(X, I, J, r=NULL, ..., correction="Ang") { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern sX <- summary(X) np <- sX$npoints lengthL <- sX$totlength # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # if(!any(J)) stop("no points satisfy J") nI <- sum(I) nJ <- sum(J) nIandJ <- sum(I & J) lambdaI <- nI/lengthL lambdaJ <- nJ/lengthL # compute K denom <- (nI * nJ - nIandJ)/lengthL K <- linearKmultiEngine(X, I, J, r=r, denom=denom, correction=correction, ...) # set appropriate y axis label switch(correction, Ang = { ylab <- quote(Kmulti[L](r)) fname <- "Kmulti[L]" }, none = { ylab <- quote(Kmulti[net](r)) fname <- "Kmulti[net]" }) K <- rebadge.fv(K, new.ylab=ylab, new.fname=fname) return(K) } # ................ inhomogeneous ............................ linearKdot.inhom <- function(X, i, lambdaI, lambdadot, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points # for better error messages lambdadot <- getlambda.lpp(lambdadot, X, ...) # compute result <- linearKmulti.inhom(X, I, J, lambdaI, lambdadot, r=r, correction=correction, normalise=normalise, ...) 
iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(linearK[inhom, i ~ dot](r), list(i=iname)), paste("linearK[list(inhom,", iname, "~ symbol(\"\\267\"))]"), new.yexp=substitute(linearK[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) return(result) } linearKcross.inhom <- function(X, i, j, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j)) j <- lev[2] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { I <- (marx == i) result <- linearKinhom(X[I], lambda=lambdaI, r=r, correction=correction, normalise=normalise, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearKmulti.inhom(X, I, J, lambdaI, lambdaJ, r=r, correction=correction, normalise=normalise, ...) } # rebrand iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(linearK[inhom,i,j](r), list(i=iname,j=jname)), sprintf("linearK[list(inhom,%s,%s)]", iname, jname), new.yexp=substitute(linearK[list(inhom,i,j)](r), list(i=iname,j=jname))) return(result) } linearKmulti.inhom <- function(X, I, J, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern sX <- summary(X) np <- sX$npoints lengthL <- sX$totlength # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # validate lambda vectors lambdaI <- getlambda.lpp(lambdaI, X[I], ...) lambdaJ <- getlambda.lpp(lambdaJ, X[J], ...) 
# compute K weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*") denom <- if(!normalise) lengthL else sum(1/lambdaI) K <- linearKmultiEngine(X, I, J, r=r, reweight=weightsIJ, denom=denom, correction=correction, ...) # set appropriate y axis label switch(correction, Ang = { ylab <- quote(Kmulti[L](r)) fname <- "Kmulti[L]" }, none = { ylab <- quote(Kmulti[net](r)) fname <- "Kmulti[net]" }) K <- rebadge.fv(K, new.ylab=ylab, new.fname=fname) return(K) } # .............. internal ............................... linearKmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", showworking=FALSE) { # extract info about pattern sX <- summary(X) np <- sX$npoints lengthL <- sX$totlength # extract linear network L <- X$domain # extract points XP <- as.ppp(X) W <- as.owin(XP) # determine r values rmaxdefault <- 0.98 * circumradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # if(np < 2) { # no pairs to count: return zero function zeroes <- rep(0, length(r)) df <- data.frame(r = r, est = zeroes) K <- fv(df, "r", substitute(linearK(r), NULL), "est", . 
~ r, c(0, rmax), c("r", "%s(r)"), c("distance argument r", "estimated %s"), fname = "linearK") return(K) } # nI <- sum(I) nJ <- sum(J) whichI <- which(I) whichJ <- which(J) clash <- I & J has.clash <- any(clash) # compute pairwise distances if(exists("crossdist.lpp")) { DIJ <- crossdist(X[I], X[J], check=FALSE) if(has.clash) { # exclude pairs of identical points from consideration Iclash <- which(clash[I]) Jclash <- which(clash[J]) DIJ[cbind(Iclash,Jclash)] <- Inf } } else { D <- pairdist(X) diag(D) <- Inf DIJ <- D[I, J] } #--- compile into K function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) K <- compileK(DIJ, r, denom=denom, check=FALSE) unitname(K) <- unitname(X) return(K) } if(correction == "none") edgewt <- 1 else { # inverse m weights (Ang's correction) # compute m[i,j] m <- matrix(1, nI, nJ) XPI <- XP[I] if(!has.clash) { for(k in seq_len(nJ)) { j <- whichJ[k] m[,k] <- countends(L, XPI, DIJ[, k]) } } else { # don't count identical pairs for(k in seq_len(nJ)) { j <- whichJ[k] inotj <- (whichI != j) m[inotj, k] <- countends(L, XPI[inotj], DIJ[inotj, k]) } } edgewt <- 1/m } # compute K wt <- if(!is.null(reweight)) edgewt * reweight else edgewt K <- compileK(DIJ, r, weights=wt, denom=denom, check=FALSE) # tack on theoretical value K <- bind.fv(K, data.frame(theo=r), "%s[theo](r)", "theoretical Poisson %s") unitname(K) <- unitname(X) fvnames(K, ".") <- rev(fvnames(K, ".")) # show working if(showworking) attr(K, "working") <- list(DIJ=DIJ, wt=wt) return(K) } spatstat/R/envelope3.R0000755000176000001440000000465612237642727014423 0ustar ripleyusers# # envelope3.R # # simulation envelopes for pp3 # # $Revision: 1.7 $ $Date: 2013/08/14 02:50:32 $ # envelope.pp3 <- function(Y, fun=K3est, nsim=99, nrank=1, ..., simulate=NULL, verbose=TRUE, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) { cl <- 
short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- K3est if("clipdata" %in% names(list(...))) stop(paste("The argument", sQuote("clipdata"), "is not available for envelope.pp3")) envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(is.null(simulate)) { # ................................................... # Realisations of complete spatial randomness # will be generated by rpoispp # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y sY <- summary(Y) Yintens <- sY$intensity Ydomain <- Y$domain # expression that will be evaluated simexpr <- if(!is.marked(Y)) { # unmarked point pattern expression(rpoispp3(Yintens, domain=Ydomain)) } else { stop("Sorry, simulation of marked 3D point patterns is not yet implemented") } # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE) } else { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } en <- envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., verbose=verbose, clipdata=FALSE, transform=transform, global=global, ginterval=ginterval, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, cl=cl, envir.user=envir.user, expected.arg=c("rmax", "nrval"), do.pwrong=do.pwrong) } spatstat/R/interact.R0000755000176000001440000002444612245370377014331 0ustar ripleyusers# # interact.S # # # $Revision: 1.22 $ $Date: 2013/11/27 13:16:35 $ # # Class 'interact' representing the interpoint interaction # of a point process model # (e.g. Strauss process with a given threshold r) # # Class 'isf' representing a generic interaction structure # (e.g. 
pairwise interactions) # # These do NOT specify the "trend" part of the model, # only the "interaction" component. # # The analogy is: # # glm() ppm() # # model formula trend formula # # family interaction # # That is, the 'systematic' trend part of a point process # model is specified by a 'trend' formula argument to ppm(), # and the interpoint interaction is specified as an 'interact' # object. # # You only need to know about these classes if you want to # implement a new point process model. # # THE DISTINCTION: # An object of class 'isf' describes an interaction structure # e.g. pairwise interaction, triple interaction, # pairwise-with-saturation, Dirichlet interaction. # Think of it as determining the "order" of interaction # but not the specific interaction potential function. # # An object of class 'interact' completely defines the interpoint # interactions in a specific point process model, except for the # regular parameters of the interaction, which are to be estimated # by ppm() or otherwise. An 'interact' object specifies the values # of all the 'nuisance' or 'irregular' parameters. An example # is the Strauss process with a given, fixed threshold r # but with the parameters beta and gamma undetermined. # # DETAILS: # # An object of class 'isf' contains the following: # # $name Name of the interaction structure # e.g. "pairwise" # # $print How to 'print()' this object # [A function; invoked by the 'print' method # 'print.isf()'] # # $eval A function which evaluates the canonical # sufficient statistic for an interaction # of this general class (e.g. any pairwise # interaction.) # # If lambda(u,X) denotes the conditional intensity at a point u # for the point pattern X, then we assume # log lambda(u, X) = theta . S(u,X) # where theta is the vector of regular parameters, # and we call S(u,X) the sufficient statistic. 
# # A typical calling sequence for the $eval function is # # (f$eval)(X, U, E, potentials, potargs, correction) # # where X is the data point pattern, U is the list of points u # at which the sufficient statistic S(u,X) is to be evaluated, # E is a logical matrix equivalent to (X[i] == U[j]), # $potentials defines the specific potential function(s) and # $potargs contains any nuisance/irregular parameters of these # potentials [the $potargs are passed to the $potentials without # needing to be understood by $eval.] # $correction is the name of the edge correction method. # # # An object of class 'interact' contains the following: # # # $name Name of the specific potential # e.g. "Strauss" # # $family Object of class "isf" describing # the interaction structure # # $pot The interaction potential function(s) # -- usually a function or list of functions. # (passed as an argument to $family$eval) # # $par list of any nuisance/irregular parameters # (passed as an argument to $family$eval) # # $parnames vector of long names/descriptions # of the parameters in 'par' # # $init() initialisation action # or NULL indicating none required # # $update() A function to modify $par # [Invoked by 'update.interact()'] # or NULL indicating a default action # # $print How to 'print()' this object # [Invoked by 'print' method 'print.interact()'] # or NULL indicating a default action # # -------------------------------------------------------------------------- print.isf <- function(x, ...) 
{ if(is.null(x)) return(invisible(NULL)) verifyclass(x, "isf") if(!is.null(x$print)) (x$print)(x) invisible(NULL) } print.interact <- function(x, ..., family=TRUE, brief=FALSE) { verifyclass(x, "interact") if(family && !brief && !is.null(xf <- x$family)) print.isf(xf) if(!brief) cat("Interaction:") cat(paste(x$name, "\n")) # Now print the parameters if(!is.null(x$print)) { (x$print)(x) } else { # default # just print the parameter names and their values cat(paste(x$parnames, ":\t", x$par, "\n", sep="")) } invisible(NULL) } is.interact <- function(x) { inherits(x, "interact") } update.interact <- function(object, ...) { verifyclass(object, "interact") if(!is.null(object$update)) (object$update)(object, ...) else { # Default # First update the version if(outdated.interact(object)) object <- reincarnate.interact(object) # just match the arguments in "..." # with those in object$par and update them want <- list(...) if(length(want) > 0) { m <- match(names(want),names(object$par)) nbg <- is.na(m) if(any(nbg)) { which <- paste((names(want))[nbg]) warning(paste("Arguments not matched: ", which)) } m <- m[!nbg] object$par[m] <- want } # call object's own initialisation routine if(!is.null(object$init)) (object$init)(object) object } } is.poisson.interact <- function(x) { verifyclass(x, "interact") is.null(x$family) } # Test whether interact object was made by an older version of spatstat outdated.interact <- function(object) { ver <- object$version older <- is.null(ver) || (package_version(ver) < versionstring.spatstat()) return(older) } # Test whether the functions in the interaction object # expect the coefficient vector to contain ALL coefficients, # or only the interaction coefficients. # This change was introduced in 1.11-0, at the same time # as interact objects were given version numbers. 
# Test whether the functions in an 'interact' object expect the coefficient
# vector to contain ALL model coefficients, or only the interaction
# coefficients. This convention changed in spatstat 1.11-0, at the same time
# as 'interact' objects were first given version numbers, so an object with
# no version stamp is treated as pre-1.11 (old convention).
# Returns TRUE if 'object' uses the new (>= 1.11) convention.
newstyle.coeff.handling <- function(object) {
  stopifnot(inherits(object, "interact"))
  ver <- object$version
  # no version stamp => object predates 1.11 => old convention
  old <- is.null(ver) || (package_version(ver) < "1.11")
  return(!old)
}

# ######
#
# Re-create an interact object in the current version of spatstat
#
#

# Rebuild an 'interact' object saved by an older version of spatstat,
# producing an equivalent object in the current format.
# Strategy: (1) if the object carries its own $update function, delegate;
# (2) otherwise locate the creator function that originally built it and
# call the creator again with the argument values recovered from the
# old object's components.
reincarnate.interact <- function(object) {
  # re-creates an interact object in the current version of spatstat
  if(!is.null(object$update)) {
    newobject <- (object$update)(object)
    return(newobject)
  }
  par <- object$par
  pot <- object$pot   # NOTE(review): extracted but apparently unused below;
                      # possibly referenced by eval'd creator expressions -- confirm
  name <- object$name
  # get creator function
  creator <- object$creator
  if(is.null(creator)) {
    # old version: the creator name was not recorded;
    # look it up by interaction name in the conversion table below
    creator <- .Spatstat.Old.InteractionList[[name]]
    if(is.null(creator))
      stop(paste("Don't know how to update", sQuote(name),
                 "to current version of spatstat"))
  }
  # the creator may be recorded as a function name; resolve it to the function
  if(is.character(creator))
    creator <- get(creator)
  if(!is.function(creator) && !is.expression(creator))
    stop("Internal error: creator is not a function or expression")
  # call creator
  if(is.expression(creator))
    # 'weird' cases (user-defined potentials): evaluate a stored expression
    # that rebuilds the object from components of 'object' in this frame
    newobject <- eval(creator)
  else {
    # creator is a function
    # It's assumed that the creator function's arguments are
    # either identical to components of 'par' (the usual case)
    # or to one of the components of the object itself (Ord, Saturated)
    # or to printfun=object$print (Pairwise).
    argnames <- names(formals(creator))
    # pool of candidate argument values, matched to formals by name
    available <- append(par, object)
    available <- append(available, list(printfun=object$print))
    ok <- argnames %in% names(available)
    if(!all(ok))
      stop(paste("Internal error:",
                 ngettext(sum(!ok), "argument", "arguments"),
                 paste(sQuote(argnames[!ok]), collapse=", "),
                 "in creator function were not understood"))
    newobject <- do.call(creator, available[argnames])
  }
  if(!inherits(newobject, "interact"))
    stop("Internal error: creator did not return an object of class interact")
  return(newobject)
}

# This list is necessary to deal with older formats of 'interact' objects
# which did not include the creator name.
# It maps an interaction's printable name either to the name of its creator
# function, or (for interactions built from user-supplied potentials) to an
# expression, evaluated inside reincarnate.interact, that rebuilds the
# interaction from the old object's components.

.Spatstat.Old.InteractionList <-
  list("Diggle-Gratton process" = "DiggleGratton",
       "Geyer saturation process" = "Geyer",
       "Lennard-Jones potential" = "LennardJones",
       "Multitype Strauss process" = "MultiStrauss",
       "Multitype Strauss Hardcore process" = "MultiStraussHard",
       "Ord process with threshold potential"="OrdThresh",
       "Piecewise constant pairwise interaction process"="PairPiece",
       "Poisson process" = "Poisson",
       "Strauss process" = "Strauss",
       "Strauss - hard core process" = "StraussHard",
       "Soft core process" = "Softcore",
       # weird ones:
       "Ord process with user-defined potential" =
         expression(Ord(object$pot)),
       "Saturated process with user-defined potential" =
         expression(Saturated(object$pot)),
       "user-defined pairwise interaction process"=
         expression(
           Pairwise(object$pot, par=object$par,
                    parnames=object$parnames,
                    printfun=object$print))
       )

# Generic: coerce an object to class 'interact'
as.interact <- function(object) { UseMethod("as.interact") }

# Method for objects already of class 'interact': validate and return as-is
as.interact.interact <- function(object) {
  verifyclass(object, "interact")
  return(object)
}

#### internal code for streamlining initialisation of interactions
#
# x should be a partially-completed 'interact' object
#

# Complete a template 'interact' object: resolve the family (which may be
# stored as a name), install the irregular parameters 'par', run the
# object's own validation hook, and stamp the current spatstat version.
instantiate.interact <- function(x, par) {
  # family may be recorded by name; resolve it to the 'isf' object
  if(is.character(x$family)) x$family <- get(x$family)
  # set parameter values
  x$par <- par
  # validate parameters
  x$init(x)
  x$version <- versionstring.spatstat()
  return(x)
}
spatstat/R/deldir.R0000755000176000001440000001673612237642727013770 0ustar ripleyusers# # deldir.R # # Interface to deldir package # # $Revision: 1.16 $ $Date: 2013/04/25 06:37:43 $ # .spst.triEnv <- new.env() assign("use.trigraf", TRUE, envir=.spst.triEnv) assign("use.trigrafS", TRUE, envir=.spst.triEnv) assign("debug.delaunay", FALSE, envir=.spst.triEnv) dirichlet <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir", warn=TRUE) w <- X$window dd <- deldir(X$x, X$y, rw=c(w$xrange,w$yrange)) pp <- lapply(tile.list(dd), function(z) { owin(poly=z[c("x","y")]) }) if(length(pp) == npoints(X)) names(pp) <- seq_len(npoints(X)) dir <- tess(tiles=pp, window=as.rectangle(w)) if(w$type != "rectangle") dir <- intersect.tess(dir, w) return(dir) } delaunay <- function(X) { stopifnot(is.ppp(X)) X <- unique(X, rule="deldir", warn=TRUE) nX <- npoints(X) if(nX < 3) return(NULL) w <- X$window dd <- deldir(X$x, X$y, rw=c(w$xrange, w$yrange)) a <- dd$delsgs[,5] b <- dd$delsgs[,6] use.trigraf <- get("use.trigraf", envir=.spst.triEnv) use.trigrafS <- get("use.trigrafS", envir=.spst.triEnv) debug.delaunay <- get("debug.delaunay", envir=.spst.triEnv) if(use.trigrafS) { # first ensure a[] < b[] swap <- (a > b) if(any(swap)) { oldb <- b b[swap] <- a[swap] a[swap] <- oldb[swap] } # next ensure a is sorted o <- order(a, b) a <- a[o] b <- b[o] # nv <- nX ne <- length(a) ntmax <- ne z <- .C("trigrafS", nv = as.integer(nv), ne = as.integer(ne), ie = as.integer(a), je = as.integer(b), ntmax = as.integer(ntmax), nt = as.integer(integer(1)), it = as.integer(integer(ne)), jt = as.integer(integer(ne)), kt = as.integer(integer(ne)), status = as.integer(integer(1))) # PACKAGE="spatstat") if(z$status != 0) stop("Internal error: overflow in trigrafS") tlist <- with(z, cbind(it, jt, kt)[1:nt, ]) } else if(use.trigraf) { nv <- nX ne <- length(a) ntmax <- ne z <- .C("trigraf", nv = as.integer(nv), ne = as.integer(ne), ie = as.integer(a), je = as.integer(b), ntmax = as.integer(ntmax), nt = 
as.integer(integer(1)), it = as.integer(integer(ntmax)), jt = as.integer(integer(ntmax)), kt = as.integer(integer(ntmax)), status = as.integer(integer(1))) # PACKAGE="spatstat") if(z$status != 0) stop("Internal error: overflow in trigraf") tlist <- with(z, cbind(it, jt, kt)[1:nt, ]) } else { tlist <- matrix(integer(0), 0, 3) for(i in seq_len(nX)) { # find all Delaunay neighbours of i jj <- c(b[a==i], a[b==i]) jj <- sort(unique(jj)) # select those with a higher index than i jj <- jj[jj > i] # find pairs of neighbours which are Delaunay neighbours # (thus, triangles where the first numbered vertex is i) if(length(jj) > 0) for(j in jj) { kk <- c(b[a == j], a[b == j]) kk <- kk[(kk %in% jj) & (kk > j)] if(length(kk) > 0) for(k in kk) # add (i,j,k) to list of triangles (i < j < k) tlist <- rbind(tlist, c(i, j, k)) } } } # At this point, `tlist' contains all triangles formed by the Delaunay edges, # with vertices given in ascending order i < j < k in the 3 columns of tlist. # Some of these triangles may not belong to the Delaunay triangulation. # They will be weeded out later. # Assemble coordinates of triangles x <- X$x y <- X$y xtri <- matrix(x[tlist], nrow(tlist), 3) ytri <- matrix(y[tlist], nrow(tlist), 3) # ensure triangle vertices are in anticlockwise order ztri <- ytri - min(y) dx <- cbind(xtri[,2]-xtri[,1], xtri[,3]-xtri[,2], xtri[,1]-xtri[,3]) zm <- cbind(ztri[,1]+ztri[,2], ztri[,2]+ztri[,3], ztri[,3]+ztri[,1]) negareas <- apply(dx * zm, 1, sum) clockwise <- (negareas > 0) # if(any(clockwise)) { xc <- xtri[clockwise, , drop=FALSE] yc <- ytri[clockwise, , drop=FALSE] tc <- tlist[clockwise, , drop=FALSE] xtri[clockwise,] <- xc[,c(1,3,2)] ytri[clockwise,] <- yc[,c(1,3,2)] tlist[clockwise,] <- tc[, c(1,3,2)] } # At this point, triangle vertices are listed in anticlockwise order. # The same directed edge (i, j) cannot appear twice. 
# To weed out invalid triangles, check for such duplication triedges <- rbind(tlist[, c(1,2)], tlist[, c(2,3)], tlist[, c(3,1)]) if(any(bad <- duplicated(triedges))) { badedges <- unique(triedges[bad, , drop=FALSE]) ntri <- nrow(tlist) triid <- rep.int(seq_len(ntri), 3) illegal <- rep.int(FALSE, ntri) for(j in seq_len(nrow(badedges))) { from <- badedges[j, 1] to <- badedges[j, 2] if(debug.delaunay) cat(paste("Suspect edge from vertex", from, "to vertex", to, "\n")) # find all triangles sharing this edge in this orientation sustri <- triid[(triedges[,1] == from) & (triedges[,2] == to)] if(debug.delaunay) cat(paste("\tInvestigating triangles", commasep(sustri), "\n")) # list all vertices associated with the suspect triangles susvert <- sort(unique(as.vector(tlist[sustri, ]))) if(debug.delaunay) cat(paste("\tInvestigating vertices", commasep(susvert), "\n")) xsusvert <- x[susvert] ysusvert <- y[susvert] # take each triangle in turn and check whether it contains a data point for(k in sustri) { if(!illegal[k] && any(inside.triangle(xsusvert, ysusvert, xtri[k,], ytri[k,]))) { if(debug.delaunay) cat(paste("Triangle", k, "is illegal\n")) illegal[k] <- TRUE } } } if(!any(illegal)) { if(debug.delaunay) cat("No illegal triangles found\n") } else { if(debug.delaunay) cat(paste("Removing", sum(illegal), "triangles\n")) tlist <- tlist[!illegal, , drop=FALSE] xtri <- xtri[!illegal, , drop=FALSE] ytri <- ytri[!illegal, , drop=FALSE] } } # make tile list tiles <- list() for(m in seq_len(nrow(tlist))) { p <- list(x=xtri[m,], y=ytri[m,]) tiles[[m]] <- owin(poly=p, check=FALSE) } wc <- convexhull.xy(x, y) del <- tess(tiles=tiles, window=wc) if(w$type != "rectangle") del <- intersect.tess(del, w) return(del) } delaunay.distance <- function(X) { stopifnot(is.ppp(X)) nX <- npoints(X) w <- as.owin(X) ok <- !duplicated(X, rule="deldir") Y <- X[ok] nY <- npoints(Y) if(nY < 3) return(matrix(Inf, nX, nX)) dd <- deldir(Y$x, Y$y, rw=c(w$xrange,w$yrange)) joins <- as.matrix(dd$delsgs[,5:6]) 
joins <- rbind(joins, joins[,2:1]) d <- matrix(-1L, nY, nY) diag(d) <- 0 d[joins] <- 1 adj <- matrix(FALSE, nY, nY) diag(adj) <- TRUE adj[joins] <- TRUE z <- .C("Idist2dpath", nv = as.integer(nY), d = as.integer(d), adj = as.integer(adj), dpath = as.integer(integer(nY * nY)), tol = as.integer(0), niter = as.integer(integer(1)), status = as.integer(integer(1))) # PACKAGE = "spatstat") if (z$status == -1) warning(paste("graph connectivity algorithm did not converge after", z$niter, "iterations", "on", nY, "vertices and", sum(adj) - nY, "edges")) dpathY <- matrix(z$dpath, nY, nY) if(all(ok)) { dpathX <- dpathY } else { dpathX <- matrix(NA_integer_, nX, nX) dpathX[ok, ok] <- dpathY } return(dpathX) } spatstat/R/Kres.R0000755000176000001440000000526712237642727013426 0ustar ripleyusers# # Kres.R # # Residual K # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # ############################################################################# # Kres <- function(object, ...) { if(!is.fv(object)) { # usual case where 'object' is a ppm, ppp or quad K <- Kcom(object, ...) } else { # case where 'object' is the output of 'Kcom' a <- attr(object, "maker") if(is.null(a) || a != "Kcom") stop("fv object was not created by Kcom") K <- object if(length(list(...)) > 0) warning("Extra arguments ignored") } # initialise fv object df <- data.frame(r=K$r, theo=numeric(length(K$r))) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~hat(K)(r), NULL), "theo", . 
~ r , attr(K, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="K") # add residual functions nam <- names(K) if("border" %in% nam) ans <- bind.fv(ans, data.frame(bres=with(K, border-bcom)), "bold(R)~hat(%s)[bord](r)", "residual function %s based on border correction", "bres") if(all(c("trans","tcom") %in% nam)) ans <- bind.fv(ans, data.frame(tres=with(K, trans-tcom)), "bold(R)~hat(%s)[trans](r)", "residual function %s based on translation correction", "tres") if(all(c("iso","icom") %in% nam)) ans <- bind.fv(ans, data.frame(ires=with(K, iso-icom)), "bold(R)~hat(%s)[iso](r)", "residual function %s based on isotropic correction", "ires") if("ivar" %in% nam) { savedotnames <- fvnames(ans, ".") ans <- bind.fv(ans, as.data.frame(K)[, c("ivar", "isd", "ihi", "ilo")], c("bold(C)^2~hat(%s)[iso](r)", "sqrt(bold(C)^2~hat(%s)[iso](r))", "bold(R)~hat(%s)[Hi](r)", "bold(R)~hat(%s)[Lo](r)"), c("pseudovariance of isotropic-corrected residual %s", "pseudo-SD of isotropic-corrected residual %s", "upper critical band for isotropic-corrected residual %s", "lower critical band for isotropic-corrected residual %s"), "ires") ans <- bind.fv(ans, data.frame(istdres=with(ans, ires/isd)), "bold(T)~hat(%s)[iso](r)", "standardised isotropic-corrected residual %s", "ires") fvnames(ans, ".") <- c(savedotnames, c("ihi", "ilo")) } unitname(ans) <- unitname(K) return(ans) } spatstat/R/multihard.R0000755000176000001440000001244212240721046014466 0ustar ripleyusers# # # multihard.R # # $Revision: 1.9 $ $Date: 2013/05/01 10:17:27 $ # # The Hard core process # # Hardcore() create an instance of the Hard Core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # MultiHard <- local({ # .... 
multitype hard core potential MHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[i] type (mark) of point U[j] # # get matrices of interaction radii h <- par$hradii # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be checked # (the interaction must be symmetric in type, and scored as such) uptri <- (row(h) <= col(h)) & (!is.na(h)) mark1 <- (lx[row(h)])[uptri] mark2 <- (lx[col(h)])[uptri] # corresponding names mark1name <- (lxname[row(h)])[uptri] mark2name <- (lxname[col(h)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be checked # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create numeric array for result z <- array(0, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { # apply the relevant hard core distance to each pair of points hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- array(0, dim=dim(d)) value[forbid] <- -Inf # assign value[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub] } } attr(z, "IsOffset") <- TRUE return(z) } #### end of 'pot' function #### # ............ template object ................... BlankMH <- list( name = "Multitype Hardcore process", creator = "MultiHard", family = "pairwise.family", # evaluated later pot = MHpotential, par = list(types=NULL, hradii = NULL), # filled in later parnames = c("possible types", "hardcore distances"), selfstart = function(X, self) { if(!is.null(self$par$types)) return(self) types <- levels(marks(X)) MultiHard(types=types,hradii=self$par$hradii) }, init = function(self) { types <- self$par$types if(!is.null(types)) { h <- self$par$hradii nt <- length(types) MultiPair.checkmatrix(h, nt, sQuote("hradii")) if(length(types) == 0) stop(paste("The", sQuote("types"), "argument should be", "either NULL or a vector of all possible types")) if(any(is.na(types))) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { h <- self$par$hradii cat(paste(nrow(h), "types of points\n")) types <- self$par$types if(!is.null(types)) { cat("Possible types: \n") print(types) } else cat("Possible types: \t not yet determined\n") cat("Hardcore radii:\n") print(h) invisible() }, interpret = function(coeffs, self) { # there are no regular parameters (woo-hoo!) 
return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { h <- self$par$hradii return(max(0, h, na.rm=TRUE)) }, version=NULL # fix later ) class(BlankMH) <- "interact" MultiHard <- function(types=NULL, hradii) { out <- instantiate.interact(BlankMH, list(types=types, hradii = hradii)) if(!is.null(types)) dimnames(out$par$hradii) <- list(types, types) return(out) } MultiHard }) spatstat/R/infline.R0000755000176000001440000001255112237642727014140 0ustar ripleyusers# # infline.R # # Infinite lines # # $Revision: 1.19 $ $Date: 2013/10/06 08:26:59 $ # infline <- function(a=NULL, b=NULL, h=NULL, v=NULL, p=NULL, theta=NULL) { if(is.null(a) != is.null(b)) stop("invalid specification of a,b") if(is.null(p) != is.null(theta)) stop("invalid specification of p,theta") if(!is.null(h)) out <- data.frame(a=h, b=0, h=h, v=NA, p=h, theta=pi/2) else if(!is.null(v)) out <- data.frame(a=NA,b=NA,h=NA,v=v,p=v,theta=ifelseAB(v < 0, pi, 0)) else if(!is.null(a)) { # a, b specified z <- data.frame(a=a,b=b) a <- z$a b <- z$b theta <- ifelseAX(b == 0, pi/2, atan(-1/b)) theta <- theta %% pi p <- a * sin(theta) out <- data.frame(a=a, b=b, h=ifelseXB(b==0, a, NA), v=NA, p=p, theta=theta) } else if(!is.null(p)) { # p, theta specified z <- data.frame(p=p,theta=theta) p <- z$p theta <- z$theta theta <- theta %% (2*pi) if(any(reverse <- (theta >= pi))) { theta[reverse] <- theta[reverse] - pi p[reverse] <- -p[reverse] } vert <- (theta == 0) horz <- (cos(theta) == 0) gene <- !(vert | horz) v <- ifelseXB(vert, p, NA) h <- ifelseXB(horz, p, NA) a <- ifelseXB(gene, p/sin(theta), NA) b <- ifelseXB(gene, -cos(theta)/sin(theta), NA) out <- data.frame(a=a,b=b,h=h,v=v,p=p,theta=theta) } else stop("No data given!") class(out) <- c("infline", class(out)) return(out) } is.infline <- function(x) { inherits(x, "infline") } plot.infline <- function(x, ...) 
{ for(i in seq_len(nrow(x))) { xi <- x[i, 1:4] xi <- lapply(as.list(xi), function(z){if(is.na(z)) NULL else z}) do.call("abline", append(xi, list(...))) } return(invisible(NULL)) } print.infline <- function(x, ...) { n <- nrow(x) cat(paste(if(n > 1) n else NULL, "infinite ", ngettext(n, "line", "lines"), "\n")) print(as.data.frame(x), ...) return(invisible(NULL)) } clip.infline <- function(L, win) { # clip a set of infinite straight lines to a window win <- as.owin(win) stopifnot(inherits(L, "infline")) # determine circumcircle of win xr <- win$xrange yr <- win$yrange xmid <- mean(xr) ymid <- mean(yr) width <- diff(xr) height <- diff(yr) rmax <- sqrt(width^2 + height^2)/2 boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax) # compute intersection points with circumcircle p <- L$p theta <- L$theta hit <- (abs(p) < rmax) if(!any(hit)) return(psp(numeric(0),numeric(0),numeric(0),numeric(0), window=win)) p <- p[hit] theta <- theta[hit] q <- sqrt(rmax^2 - p^2) co <- cos(theta) si <- sin(theta) X <- psp(x0= xmid + p * co + q * si, y0= ymid + p * si - q * co, x1= xmid + p * co - q * si, y1= ymid + p * si + q * co, window=boundbox, check=FALSE) # clip to window X <- X[win] return(X) } chop.tess <- function(X, L) { stopifnot(is.infline(L)) stopifnot(is.tess(X)||is.owin(X)) X <- as.tess(X) if(X$type == "image") { Xim <- X$image xr <- Xim$xrange yr <- Xim$yrange # extract matrices of pixel values and x, y coordinates Zmat <- as.integer(as.matrix(Xim)) xmat <- rasterx.im(Xim) ymat <- rastery.im(Xim) # process lines for(i in seq_len(nrow(L))) { # line i chops window into two pieces if(!is.na(h <- L[i, "h"])) { # horizontal line if(h > yr[1] && h < yr[2]) Zmat <- 2 * Zmat + (ymat > h) } else if(!is.na(v <- L[i, "v"])) { # vertical line if(v > xr[1] && v < xr[2]) Zmat <- 2 * Zmat + (xmat < h) } else { # generic line y = a + bx a <- L[i, "a"] b <- L[i, "b"] Zmat <- 2 * Zmat + (ymat > a + b * xmat) } } # Now just put back as factor image Zim <- im(Zmat, xcol=Xim$xcol, 
yrow=Xim$yrow, unitname=unitname(Xim)) Z <- tess(image=Zim) return(Z) } #---- polygonal computation -------- # get bounding box B <- as.rectangle(as.owin(X)) xr <- B$xrange yr <- B$yrange # get coordinates for(i in seq_len(nrow(L))) { # line i chops box B into two pieces if(!is.na(h <- L[i, "h"])) { # horizontal line if(h < yr[1] || h > yr[2]) Z <- NULL else { lower <- owin(xr, c(yr[1], h)) upper <- owin(xr, c(h, yr[2])) Z <- tess(tiles=list(lower,upper), window=B) } } else if(!is.na(v <- L[i, "v"])) { # vertical line if(v < xr[1] || v > xr[2]) Z <- NULL else { left <- owin(c(xr[1], v), yr) right <- owin(c(v, xr[2]), yr) Z <- tess(tiles=list(left,right), window=B) } } else { # generic line a <- L[i, "a"] b <- L[i, "b"] # Intersect with extended left and right sides of B yleft <- a + b * xr[1] yright <- a + b * xr[2] ylo <- min(yleft, yright, yr[1]) - 1 yhi <- max(yleft, yright, yr[2]) + 1 lower <- owin(poly=list(x=xr[c(1,1,2,2)], y=c(yleft,ylo,ylo,yright))) upper <- owin(poly=list(x=xr[c(1,2,2,1)], y=c(yleft,yright,yhi,yhi))) Bplus <- owin(xr, c(ylo, yhi)) Z <- tess(tiles=list(lower,upper), window=Bplus) } # intersect this simple tessellation with X if(!is.null(Z)) { X <- intersect.tess(X, Z) tilenames(X) <- paste("Tile", seq_len(length(tiles(X)))) } } return(X) } spatstat/R/intensity.R0000644000176000001440000000436412237642727014542 0ustar ripleyusers# # intensity.R # # Code related to intensity and intensity approximations # # $Revision: 1.5 $ $Date: 2013/04/25 06:37:43 $ # intensity <- function(X, ...) { UseMethod("intensity") } intensity.ppp <- function(X, ...) { sX <- summary(X, quick="no variances") if(is.multitype(X)) { answer <- sX$marks$intensity names(answer) <- row.names(sX$marks) } else { answer <- unname(sX$intensity) } return(answer) } intensity.ppm <- function(X, ...) 
{ if(!identical(valid.ppm(X), TRUE)) { warning("Model is invalid - projecting it") X <- project.ppm(X) } if(is.poisson(X)) { if(is.stationary(X)) { # stationary univariate/multivariate Poisson sX <- summary(X, quick="no variances") return(sX$trend$value) } # Nonstationary Poisson return(predict(X, ...)) } # Gibbs process if(!is.stationary(X)) stop("Not yet implemented for non-stationary Gibbs models") if(is.multitype(X)) stop("Not yet implemented for multitype Gibbs processes") inte <- as.interact(X) if(!identical(inte$family$name, "pairwise")) stop("Intensity approximation is only available for pairwise interaction models") # Stationary, pairwise interaction Mayer <- inte$Mayer if(is.null(Mayer)) stop(paste("Sorry, not yet implemented for", inte$name)) # interaction coefficients co <- with(fitin(X), coefs[Vnames[!IsOffset]]) # compute second Mayer cluster integral G <- Mayer(co, inte) if(is.null(G) || !is.finite(G)) stop("Internal error in computing Mayer cluster integral") if(G < 0) stop(paste("Unable to apply Poisson-saddlepoint approximation:", "Mayer cluster integral is negative")) # activity parameter sX <- summary(X, quick="no variances") beta <- sX$trend$value # solve lambda <- if(G == 0) numeric(length(beta)) else LambertW(G * beta)/G if(length(lambda) == 1) lambda <- unname(lambda) return(lambda) } # Lambert's W-function LambertW <- local({ yexpyminusx <- function(y,x){y*exp(y)-x} W <- function(x) { if(require(gsl, quietly=TRUE)) return(gsl::lambert_W0(x)) result <- rep.int(NA_real_, length(x)) for(i in which(is.finite(x) & (x >= 0))) result[i] <- uniroot(yexpyminusx, c(0, x[i]), x=x[i])$root return(result) } W }) spatstat/R/iplot.R0000755000176000001440000002421412237642727013642 0ustar ripleyusers# # interactive plot for ppp objects using rpanel # # $Revision: 1.13 $ $Date: 2013/04/25 06:37:43 $ # # # Effect: # when the user types # iplot(x) # a pop-up panel displays a standard plot of x and # buttons allowing control of the plot parameters. 
# Coding: # The panel 'p' contains the following internal variables # x Original point pattern # w Window of point pattern # xname Name of x (for main title) # mtype Type of marks of x # bb frame of x # bbmid midpoint of frame # The following variables in 'p' are controlled by panel buttons etc # split Logical: whether to split multitype pattern # pointmap Plot character, or "marks" indicating that marks are used # zoomfactor Zoom factor # zoomcentre Centre point for zoom # charsize Character expansion factor cex # markscale Mark scale factor markscale # iplot <- function(x, ...) { UseMethod("iplot") } iplot.ppp <- local({ iplot.ppp <- function(x, ..., xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "ppp") require(rpanel) if(markformat(x) %in% c("hyperframe", "listof")) marks(x) <- as.data.frame(as.hyperframe(marks(x))) if(markformat(x) == "dataframe" && ncol(marks(x)) > 1) { warning("Using only the first column of marks") marks(x) <- marks(x)[,1] } mtype <- if(is.multitype(x)) "multitype" else if(is.marked(x)) "marked" else "unmarked" bb <- as.rectangle(as.owin(x)) bbmid <- unlist(centroid.owin(bb)) ## p <- rp.control(paste("iplot(", xname, ")", sep=""), x=x, w=as.owin(x), xname=xname, mtype=mtype, bb=bb, bbmid=bbmid, split=FALSE, pointmap=if(is.marked(x)) "marks" else "o", zoomfactor=1, zoomcentre=bbmid, size=c(700, 400)) # Split panel into three # Left: plot controls # Middle: data # Right: navigation/zoom rp.grid(p, "gcontrols", pos=list(row=0,column=0)) rp.grid(p, "gdisplay", pos=list(row=0,column=1)) rp.grid(p, "gnavigate", pos=list(row=0,column=2)) #----- Data display ------------ # This line is to placate the package checker mytkr <- NULL # Create data display panel rp.tkrplot(p, mytkr, plotfun=do.iplot.ppp, action=click.iplot.ppp, pos=list(row=0,column=0,grid="gdisplay")) #----- Plot controls ------------ nextrow <- 0 pozzie <- function(n=nextrow, ...) 
append(list(row=n,column=0,grid="gcontrols"), list(...)) # main title rp.textentry(p, xname, action=redraw.iplot.ppp, title="Plot title", pos=pozzie(0)) nextrow <- 1 # split ? if(mtype == "multitype") { rp.checkbox(p, split, initval=FALSE, title="Split according to marks", action=redraw.iplot.ppp, pos=pozzie(1)) nextrow <- 2 } # plot character or mark style ptvalues <- c("o", "bullet", "plus") ptlabels <- c("open circles", "filled circles", "crosshairs") if(is.marked(x)) { ptvalues <- c("marks", ptvalues) ptlabels <- if(mtype == "multitype") c("Symbols depending on mark", ptlabels) else c("Circles proportional to mark", ptlabels) } pointmap <- ptvalues[1] rp.radiogroup(p, pointmap, values=ptvalues, labels=ptlabels, title="how to plot points", action=redraw.iplot.ppp, pos=pozzie(nextrow)) nextrow <- nextrow+1 # plot character size charsize <- 1 rp.slider(p, charsize, 0, 5, action=redraw.iplot.ppp, title="symbol expansion factor (cex)", initval=1, showvalue=TRUE, pos=pozzie(nextrow, sticky="")) nextrow <- nextrow+1 # mark scale if(mtype == "marked") { marx <- x$marks marx <- marx[is.finite(marx)] scal <- mark.scale.default(marx, x$window) markscale <- scal rp.slider(p, markscale, from=scal/10, to = 10*scal, action=redraw.iplot.ppp, initval=scal, title="mark scale factor (markscale)", showvalue=TRUE, pos=pozzie(nextrow)) nextrow <- nextrow+1 } # button to print a summary at console rp.button(p, title="Print summary information", pos=pozzie(nextrow), action=function(panel) { print(summary(panel$x)); panel} ) # #----- Navigation controls ------------ nextrow <- 0 navpos <- function(n=nextrow,cc=0, ...) 
append(list(row=n,column=cc,grid="gnavigate"), list(...)) rp.button(p, title="Up", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2] stepsize <- (height/4)/zo panel$zoomcentre <- ce + c(0, stepsize) redraw.iplot.ppp(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Left", pos=navpos(nextrow,0,sticky="w"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1] stepsize <- (width/4)/zo panel$zoomcentre <- ce - c(stepsize, 0) redraw.iplot.ppp(panel) return(panel) }) rp.button(p, title="Right", pos=navpos(nextrow,2,sticky="e"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1] stepsize <- (width/4)/zo panel$zoomcentre <- ce + c(stepsize, 0) redraw.iplot.ppp(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Down", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2] stepsize <- (height/4)/zo panel$zoomcentre <- ce - c(0, stepsize) redraw.iplot.ppp(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Zoom In", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor * 2 redraw.iplot.ppp(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Zoom Out", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor / 2 redraw.iplot.ppp(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Reset", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- 1 panel$zoomcentre <- panel$bbmid redraw.iplot.ppp(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Redraw", pos=navpos(nextrow,1,sticky=""), action=redraw.iplot.ppp) nextrow <- nextrow+1 # quit button rp.button(p, title="Quit", 
quitbutton=TRUE, pos=navpos(nextrow, 1, sticky=""), action= function(panel) { panel }) invisible(NULL) } # Function to redraw the whole shebang redraw.iplot.ppp <- function(panel) { rp.tkrreplot(panel, mytkr) panel } # Function executed when data display is clicked click.iplot.ppp <- function(panel, x, y) { if(panel$split) { cat("Mouse interaction is not supported when the point pattern is split\n") } else { panel$zoomcentre <- panel$zoomcentre + (c(x,y) - panel$bbmid)/panel$zoomfactor redraw.iplot.ppp(panel) } return(panel) } # function that updates the plot when the control panel is operated do.iplot.ppp <- function(panel) { use.marks <- TRUE pch <- 16 switch(panel$pointmap, marks={ use.marks <- TRUE pch <- NULL }, o = { use.marks <- FALSE pch <- 1 }, bullet = { use.marks <- FALSE pch <- 16 }, plus = { use.marks <- FALSE pch <- 3 }) # scale and clip the pattern x <- panel$x w <- panel$w z <- panel$zoomfactor if(is.null(z)) z <- 1 ce <- panel$zoomcentre bb <- panel$bb bbmid <- panel$bbmid scalex <- shift(affine(shift(x, -ce), diag(c(z,z))), bbmid) scalew <- shift(affine(shift(w, -ce), diag(c(z,z))), bbmid) scalex <- scalex[, bb] scalew <- intersect.owin(scalew, bb, fatal=FALSE) # determine what is plotted under the clipped pattern blankargs <- list(type="n") dashargs <- list(lty=3, border="red") panel.begin <- if(is.null(scalew)) { # empty intersection; just create the plot space layered(bb, plotargs=list(blankargs)) } else if(identical(bb, scalew)) { if(z == 1) { # original state # window is rectangular # plot the data window as a solid black rectangle layered(bb, scalew, plotargs=list(blankargs, list(lwd=2))) } else { # zoom view is entirely inside window # plot the clipping region as a red dashed rectangle layered(bb, plotargs=list(dashargs)) } } else { # field of view is not a subset of window # plot the clipping region as a red dashed rectangle # Then add the data window layered(bb, scalew, plotargs=list(dashargs, list(invert=TRUE))) } # draw it opa <- 
par(ask=FALSE) if(panel$mtype == "multitype" && panel$split) { scalex <- split(scalex, un=(panel$pointmap != "marks")) plot(scalex, main=panel$xname, use.marks=use.marks, pch=pch, cex=panel$charsize, panel.begin=panel.begin) } else { # draw scaled & clipped window plot(panel.begin, main=panel$xname) # add points if(panel$mtype == "marked" && panel$pointmap == "marks") { plot(scalex, add=TRUE, use.marks=use.marks, markscale=panel$markscale) } else { plot(scalex, add=TRUE, use.marks=use.marks, pch=pch, cex=panel$charsize) } } par(opa) panel } iplot.ppp }) spatstat/R/mincontrast.R0000755000176000001440000011344512251543754015055 0ustar ripleyusers# # mincontrast.R # # Functions for estimation by minimum contrast # ################## base ################################ mincontrast <- local({ # objective function (in a format that is re-usable by other code) contrast.objective <- function(par, objargs, ...) { with(objargs, { theo <- theoretical(par=par, rvals, ...) if(!is.vector(theo) || !is.numeric(theo)) stop("theoretical function did not return a numeric vector") if(length(theo) != nrvals) stop("theoretical function did not return the correct number of values") discrep <- (abs(theo^qq - obsq))^pp return(sum(discrep)) }) } mincontrast <- function(observed, theoretical, startpar, ..., ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL, fname=NULL)) { verifyclass(observed, "fv") stopifnot(is.function(theoretical)) if(!any("par" %in% names(formals(theoretical)))) stop(paste("Theoretical function does not include an argument called", sQuote("par"))) # enforce defaults ctrl <- resolve.defaults(ctrl, list(q = 1/4, p = 2, rmin=NULL, rmax=NULL)) fvlab <- resolve.defaults(fvlab, list(label=NULL, desc="minimum contrast fit")) explain <- resolve.defaults(explain, list(dataname=NULL, modelname=NULL, fname=NULL)) # determine range of r values rmin <- ctrl$rmin rmax <- ctrl$rmax 
if(!is.null(rmin) && !is.null(rmax)) stopifnot(rmin < rmax && rmin >= 0) else { alim <- attr(observed, "alim") if(is.null(rmin)) rmin <- alim[1] if(is.null(rmax)) rmax <- alim[2] } # extract vector of r values argu <- fvnames(observed, ".x") rvals <- observed[[argu]] # extract vector of observed values of statistic valu <- fvnames(observed, ".y") obs <- observed[[valu]] # restrict to [rmin, rmax] if(max(rvals) < rmax) stop(paste("rmax=", signif(rmax,4), "exceeds the range of available data", "= [", signif(min(rvals),4), ",", signif(max(rvals),4), "]")) sub <- (rvals >= rmin) & (rvals <= rmax) rvals <- rvals[sub] obs <- obs[sub] # sanity clause if(!all(ok <- is.finite(obs))) { whinge <- paste("Some values of the empirical function", sQuote(explain$fname), "were infinite or NA.") iMAX <- max(which(ok)) iMIN <- min(which(!ok)) + 1 if(iMAX > iMIN && all(ok[iMIN:iMAX])) { rmin <- rvals[iMIN] rmax <- rvals[iMAX] obs <- obs[iMIN:iMAX] rvals <- rvals[iMIN:iMAX] sub[sub] <- ok warning(paste(whinge, "Range of r values was reset to", prange(c(rmin, rmax))), call.=FALSE) } else stop(paste(whinge, "Please choose a narrower range [rmin, rmax]"), call.=FALSE) } # pack data into a list objargs <- list(theoretical = theoretical, rvals = rvals, nrvals = length(rvals), obsq = obs^(ctrl$q), # for efficiency qq = ctrl$q, pp = ctrl$p, rmin = rmin, rmax = rmax) # go minimum <- optim(startpar, fn=contrast.objective, objargs=objargs, ...) # if convergence failed, issue a warning signalStatus(optimStatus(minimum), errors.only=TRUE) # evaluate the fitted theoretical curve fittheo <- theoretical(minimum$par, rvals, ...) 
# pack it up as an `fv' object label <- fvlab$label desc <- fvlab$desc if(is.null(label)) label <- paste("fit(", argu, ")", collapse="") fitfv <- bind.fv(observed[sub, ], data.frame(fit=fittheo), label, desc) result <- list(par = minimum$par, fit = fitfv, opt = minimum, ctrl = list(p=ctrl$p,q=ctrl$q,rmin=rmin,rmax=rmax), info = explain, startpar = startpar, objfun = contrast.objective, objargs = objargs, dotargs = list(...)) class(result) <- c("minconfit", class(result)) return(result) } mincontrast }) print.minconfit <- function(x, ...) { # explanatory cat(paste("Minimum contrast fit ", "(", "object of class ", dQuote("minconfit"), ")", "\n", sep="")) mo <- x$info$modelname fu <- x$info$fname da <- x$info$dataname cm <- x$covmodel if(!is.null(mo)) cat(paste("Model:", mo, "\n")) if(!is.null(cm)) { # Covariance/kernel model and nuisance parameters cat(paste("\t", cm$type, "model:", cm$model, "\n")) margs <- cm$margs if(!is.null(margs)) { nama <- names(margs) tags <- ifelse(nzchar(nama), paste(nama, "="), "") tagvalue <- paste(tags, margs) cat(paste("\t", cm$type, "parameters:", paste(tagvalue, collapse=", "), "\n")) } } if(!is.null(fu) && !is.null(da)) cat(paste("Fitted by matching theoretical", fu, "function to", da)) else { if(!is.null(fu)) cat(paste(" based on", fu)) if(!is.null(da)) cat(paste(" fitted to", da)) } cat("\n") # Values cat("Parameters fitted by minimum contrast ($par):\n") print(x$par, ...) mp <- x$modelpar if(!is.null(mp)) { cat(paste("Derived parameters of", if(!is.null(mo)) mo else "model", "($modelpar):\n")) print(mp) } # Diagnostics printStatus(optimStatus(x$opt)) # Starting values cat("Starting values of parameters:\n") print(x$startpar) # Algorithm parameters ct <- x$ctrl cat(paste("Domain of integration:", "[", signif(ct$rmin,4), ",", signif(ct$rmax,4), "]\n")) cat(paste("Exponents:", "p=", paste(signif(ct$p, 3), ",", sep=""), "q=", signif(ct$q,3), "\n")) invisible(NULL) } plot.minconfit <- function(x, ...) 
{ xname <- short.deparse(substitute(x)) do.call("plot.fv", resolve.defaults(list(x$fit), list(...), list(main=xname))) } unitname.minconfit <- function(x) { unitname(x$fit) } "unitname<-.minconfit" <- function(x, value) { unitname(x$fit) <- value return(x) } as.fv.minconfit <- function(x) x$fit ###### convergence status of 'optim' object optimStatus <- function(x, call=NULL) { cgce <- x$convergence switch(paste(cgce), "0" = { simpleMessage( paste("Converged successfully after", x$counts[["function"]], "iterations"), call) }, "1" = simpleWarning("Iteration limit maxit was reached", call), "10" = simpleWarning("Nelder-Mead simplex was degenerate", call), "51"= { simpleWarning( paste("Warning message from L-BGFS-B method:", sQuote(x$message)), call) }, "52"={ simpleError( paste("Error message from L-BGFS-B method:", sQuote(x$message)), call) }, simpleWarning(paste("Unrecognised error code", cgce), call) ) } signalStatus <- function(x, errors.only=FALSE) { stopifnot(inherits(x, "condition")) if(inherits(x, "error")) stop(x) if(inherits(x, "warning")) warning(x) if(inherits(x, "message") && !errors.only) message(x) return(invisible(NULL)) } printStatus <- function(x, errors.only=FALSE) { prefix <- if(inherits(x, "error")) "error: " else if(inherits(x, "warning")) "warning: " else NULL if(!is.null(prefix) || !errors.only) cat(paste(prefix, conditionMessage(x), "\n", sep="")) return(invisible(NULL)) } accumulateStatus <- function(x, stats=NULL) { if(is.null(stats)) stats <- list(values=list(), frequencies=integer(0)) if(!inherits(x, c("error", "warning", "message"))) return(stats) with(stats, { same <- unlist(lapply(values, identical, y=x)) if(any(same)) { i <- min(which(same)) frequencies[i] <- frequencies[i] + 1 } else { values <- append(values, list(x)) frequencies <- c(frequencies, 1) } }) stats <- list(values=values, frequencies=frequencies) return(stats) } printStatusList <- function(stats) { with(stats, { for(i in seq_along(values)) { printStatus(values[i]) 
cat(paste("\t", paren(paste(frequencies[i], "times")), "\n")) } } ) invisible(NULL) } ############### applications (specific models) ################## # lookup table of explicitly-known K functions and pcf # and algorithms for computing sensible starting parameters .Spatstat.ClusterModelInfoTable <- list( Thomas=list( # Thomas process: par = (kappa, sigma2) modelname = "Thomas process", isPCP=TRUE, # K-function K = function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) pi*rvals^2+(1-exp(-rvals^2/(4*par[2])))/par[1] }, # pair correlation function pcf= function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) 1 + exp(-rvals^2/(4 * par[2]))/(4 * pi * par[1] * par[2]) }, # sensible starting parameters selfstart = function(X) { kappa <- intensity(X) sigma2 <- 4 * mean(nndist(X))^2 c(kappa=kappa, sigma2=sigma2) }, # meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] sigma <- sqrt(par[["sigma2"]]) mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, sigma=sigma, mu=mu) } ), # ............................................... 
MatClust=list( # Matern Cluster process: par = (kappa, R) modelname = "Matern cluster process", isPCP=TRUE, K = function(par,rvals, ..., funaux){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) kappa <- par[1] R <- par[2] Hfun <- funaux$Hfun y <- pi * rvals^2 + (1/kappa) * Hfun(rvals/(2 * R)) return(y) }, pcf= function(par,rvals, ..., funaux){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) kappa <- par[1] R <- par[2] g <- funaux$g y <- 1 + (1/(pi * kappa * R^2)) * g(rvals/(2 * R)) return(y) }, funaux=list( Hfun=function(zz) { ok <- (zz < 1) h <- numeric(length(zz)) h[!ok] <- 1 z <- zz[ok] h[ok] <- 2 + (1/pi) * ( (8 * z^2 - 4) * acos(z) - 2 * asin(z) + 4 * z * sqrt((1 - z^2)^3) - 6 * z * sqrt(1 - z^2) ) return(h) }, DOH=function(zz) { ok <- (zz < 1) h <- numeric(length(zz)) h[!ok] <- 0 z <- zz[ok] h[ok] <- (16/pi) * (z * acos(z) - (z^2) * sqrt(1 - z^2)) return(h) }, # g(z) = DOH(z)/z has a limit at z=0. g=function(zz) { ok <- (zz < 1) h <- numeric(length(zz)) h[!ok] <- 0 z <- zz[ok] h[ok] <- (2/pi) * (acos(z) - z * sqrt(1 - z^2)) return(h) }), # sensible starting paramters selfstart = function(X) { kappa <- intensity(X) R <- 2 * mean(nndist(X)) c(kappa=kappa, R=R) }, # meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] R <- par[["R"]] mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, R=R, mu=mu) } ), # ............................................... 
Cauchy=list( # Neyman-Scott with Cauchy clusters: par = (kappa, eta2) modelname = "Neyman-Scott process with Cauchy kernel", isPCP=TRUE, K = function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) pi*rvals^2 + (1 - 1/sqrt(1 + rvals^2/par[2]))/par[1] }, pcf= function(par,rvals, ...){ if(any(par <= 0)) return(rep.int(Inf, length(rvals))) 1 + ((1 + rvals^2/par[2])^(-1.5))/(2 * pi * par[2] * par[1]) }, selfstart = function(X) { kappa <- intensity(X) eta2 <- 4 * mean(nndist(X))^2 c(kappa = kappa, eta2 = eta2) }, # meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] omega <- sqrt(par[["eta2"]])/2 mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, omega=omega, mu=mu) } ), # ............................................... VarGamma=list( # Neyman-Scott with VarianceGamma/Bessel clusters: par = (kappa, eta) modelname = "Neyman-Scott process with Variance Gamma kernel", isPCP=TRUE, K = local({ # K function requires integration of pair correlation xgx <- function(x, par, nu.pcf) { # x * pcf(x) without check on par values numer <- (x/par[2])^nu.pcf * besselK(x/par[2], nu.pcf) denom <- 2^(nu.pcf+1) * pi * par[2]^2 * par[1] * gamma(nu.pcf + 1) return(x * (1 + numer/denom)) } vargammaK <- function(par,rvals, ..., margs){ # margs = list(.. nu.pcf.. ) if(any(par <= 0)) return(rep.int(Inf, length(rvals))) nu.pcf <- margs$nu.pcf out <- numeric(length(rvals)) ok <- (rvals > 0) rvalsok <- rvals[ok] outok <- numeric(sum(ok)) for (i in 1:length(rvalsok)) outok[i] <- 2 * pi * integrate(xgx, lower=0, upper=rvalsok[i], par=par, nu.pcf=nu.pcf)$value out[ok] <- outok return(out) } vargammaK }), # end of 'local' pcf= function(par,rvals, ..., margs){ # margs = list(..nu.pcf..) 
if(any(par <= 0)) return(rep.int(Inf, length(rvals))) nu.pcf <- margs$nu.pcf sig2 <- 1 / (4 * pi * (par[2]^2) * nu.pcf * par[1]) denom <- 2^(nu.pcf - 1) * gamma(nu.pcf) rr <- rvals / par[2] # Matern correlation function fr <- ifelseXB(rr > 0, (rr^nu.pcf) * besselK(rr, nu.pcf) / denom, 1) return(1 + sig2 * fr) }, parhandler = function(..., nu.ker = -1/4) { check.1.real(nu.ker) stopifnot(nu.ker > -1/2) nu.pcf <- 2 * nu.ker + 1 return(list(type="Kernel", model="VarGamma", margs=list(nu.ker=nu.ker, nu.pcf=nu.pcf))) }, # sensible starting values selfstart = function(X) { kappa <- intensity(X) eta <- 2 * mean(nndist(X)) c(kappa=kappa, eta=eta) }, # meaningful model parameters interpret = function(par, lambda) { kappa <- par[["kappa"]] omega <- par[["eta"]] mu <- if(is.numeric(lambda) && length(lambda) == 1) lambda/kappa else NA c(kappa=kappa, omega=omega, mu=mu) } ), # ............................................... LGCP=list( # Log Gaussian Cox process: par = (sigma2, alpha) modelname = "Log-Gaussian Cox process", isPCP=FALSE, # calls Covariance() from RandomFields package K = function(par, rvals, ..., model, margs) { if(any(par <= 0)) return(rep.int(Inf, length(rvals))) if(model == "exponential") { # For efficiency integrand <- function(r,par,...) 
2*pi*r*exp(par[1]*exp(-r/par[2])) } else { # RandomFields must be loaded (this is checked by parhandler) integrand <- function(r,par,model,margs) 2*pi *r *exp(Covariance(r,model=model, param=c(0.0,par[1],0.0,par[2],margs))) } nr <- length(rvals) th <- numeric(nr) if(spatstat.options("fastK.lgcp")) { # integrate using Simpson's rule fvals <- integrand(r=rvals, par=par, model=model, margs=margs) th[1] <- rvals[1] * fvals[1]/2 if(nr > 1) for(i in 2:nr) th[i] <- th[i-1] + (rvals[i] - rvals[i-1]) * (fvals[i] + fvals[i-1])/2 } else { # integrate using 'integrate' th[1] <- if(rvals[1] == 0) 0 else integrate(integrand,lower=0,upper=rvals[1], par=par,model=model,margs=margs)$value for (i in 2:length(rvals)) { delta <- integrate(integrand, lower=rvals[i-1],upper=rvals[i], par=par,model=model,margs=margs) th[i]=th[i-1]+delta$value } } return(th) }, pcf= function(par, rvals, ..., model, margs) { if(any(par <= 0)) return(rep.int(Inf, length(rvals))) if(model == "exponential") { # For efficiency and to avoid need for RandomFields package gtheo <- exp(par[1]*exp(-rvals/par[2])) } else { gtheo <- exp(Covariance(rvals,model=model, param=c(0.0,par[1],0.0,par[2],margs))) } return(gtheo) }, parhandler=function(model = "exponential", ...) { if(!is.character(model)) stop("Covariance function model should be specified by name") margs <- c(...) 
if(model != "exponential") { if(!require(RandomFields)) stop("The package RandomFields is required") # check validity ok <- try(Covariance(0, model=model,param=c(0,1,0,1,margs))) if(inherits(ok, "try-error")) stop("Error in evaluating Covariance") } return(list(type="Covariance",model=model, margs=margs)) }, # sensible starting values selfstart = function(X) { alpha <- 2 * mean(nndist(X)) c(sigma2=1, alpha=alpha) }, # meaningful model parameters interpret = function(par, lambda) { sigma2 <- par[["sigma2"]] alpha <- par[["alpha"]] mu <- if(is.numeric(lambda) && length(lambda) == 1 && lambda > 0) log(lambda) - sigma2/2 else NA c(sigma2=sigma2, alpha=alpha, mu=mu) } ) ) spatstatClusterModelInfo <- function(name) { if(!is.character(name) || length(name) != 1) stop("Argument must be a single character string", call.=FALSE) nama2 <- names(.Spatstat.ClusterModelInfoTable) if(!(name %in% nama2)) stop(paste(sQuote(name), "is not recognised;", "valid names are", commasep(sQuote(nama2))), call.=FALSE) out <- .Spatstat.ClusterModelInfoTable[[name]] return(out) } getdataname <- function(defaultvalue, ..., dataname=NULL) { if(!is.null(dataname)) dataname else defaultvalue } thomas.estK <- function(X, startpar=c(kappa=1,sigma2=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { K <- X if(!(attr(K, "fname") %in% c("K", "K[inhom]"))) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("kappa","sigma2")) info <- spatstatClusterModelInfo("Thomas") theoret <- info$K result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Thomas process"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Thomas process"), ...) # imbue with meaning par <- result$par names(par) <- c("kappa", "sigma2") result$par <- par # infer meaningful model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Thomas") return(result) } lgcp.estK <- function(X, startpar=c(sigma2=1,alpha=1), covmodel=list(model="exponential"), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { K <- X if(!(attr(K, "fname") %in% c("K", "K[inhom]"))) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("sigma2","alpha")) info <- spatstatClusterModelInfo("LGCP") # digest parameters of Covariance model and test validity ph <- info$parhandler cmodel <- do.call(ph, covmodel) theoret <- info$K result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) # imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel # infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") return(result) } matclust.estK <- function(X, startpar=c(kappa=1,R=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { K <- X if(!(attr(K, "fname") %in% c("K", "K[inhom]"))) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("kappa","R")) info <- spatstatClusterModelInfo("MatClust") theoret <- info$K funaux <- info$funaux result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Matern Cluster process"), ..., funaux=funaux) # imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par # infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") return(result) } ## versions using pcf (suggested by Jan Wild) thomas.estpcf <- function(X, startpar=c(kappa=1,sigma2=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!(attr(g, "fname") %in% c("g", "g[inhom]"))) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call("pcf.ppp", append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("kappa","sigma2")) info <- spatstatClusterModelInfo("Thomas") theoret <- info$pcf # avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list( label="%s[fit](r)", desc="minimum contrast fit of Thomas process"), explain=list( dataname=dataname, fname=attr(g, "fname"), modelname="Thomas process"), ...) # imbue with meaning par <- result$par names(par) <- c("kappa", "sigma2") result$par <- par # infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Thomas") return(result) } matclust.estpcf <- function(X, startpar=c(kappa=1,R=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()){ dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!(attr(g, "fname") %in% c("g", "g[inhom]"))) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call("pcf.ppp", append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("kappa","R")) info <- spatstatClusterModelInfo("MatClust") theoret <- info$pcf funaux <- info$funaux # avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of Matern Cluster process"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Matern Cluster process"), ..., funaux=funaux) # imbue with meaning par <- result$par names(par) <- c("kappa", "R") result$par <- par # infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="MatClust") return(result) } lgcp.estpcf <- function(X, startpar=c(sigma2=1,alpha=1), covmodel=list(model="exponential"), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!(attr(g, "fname") %in% c("g", "g[inhom]"))) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call("pcf.ppp", append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("sigma2","alpha")) info <- spatstatClusterModelInfo("LGCP") # digest parameters of Covariance model and test validity ph <- info$parhandler cmodel <- do.call(ph, covmodel) theoret <- info$pcf result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p, rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc="minimum contrast fit of LGCP"), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="log-Gaussian Cox process"), ..., model=cmodel$model, margs=cmodel$margs) # imbue with meaning par <- result$par names(par) <- c("sigma2", "alpha") result$par <- par result$covmodel <- cmodel # infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="lgcp") return(result) } cauchy.estK <- function(X, startpar=c(kappa=1,eta2=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ...) { # omega: scale parameter of Cauchy kernel function # eta: scale parameter of Cauchy pair correlation function # eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { K <- X if(!(attr(K, "fname") %in% c("K", "K[inhom]"))) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("kappa","eta2")) info <- spatstatClusterModelInfo("Cauchy") theoret <- info$K desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Cauchy process"), ...) # imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par # infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") return(result) } cauchy.estpcf <- function(X, startpar=c(kappa=1,eta2=1), lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, ..., pcfargs=list()) { # omega: scale parameter of Cauchy kernel function # eta: scale parameter of Cauchy pair correlation function # eta = 2 * omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) 
if(inherits(X, "fv")) { g <- X if(!(attr(g, "fname") %in% c("g", "g[inhom]"))) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call("pcf.ppp", append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("kappa","eta2")) info <- spatstatClusterModelInfo("Cauchy") theoret <- info$pcf # avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Cauchy kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Cauchy process"), ...) # imbue with meaning par <- result$par names(par) <- c("kappa", "eta2") result$par <- par # infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="Cauchy") return(result) } # user-callable resolve.vargamma.shape <- function(..., nu.ker=NULL, nu.pcf=NULL) { if(is.null(nu.ker) && is.null(nu.pcf)) stop("Must specify either nu.ker or nu.pcf", call.=FALSE) if(!is.null(nu.ker) && !is.null(nu.pcf)) stop("Only one of nu.ker and nu.pcf should be specified", call.=FALSE) if(!is.null(nu.ker)) { check.1.real(nu.ker) stopifnot(nu.ker > -1/2) nu.pcf <- 2 * nu.ker + 1 } else { check.1.real(nu.pcf) stopifnot(nu.pcf > 0) nu.ker <- (nu.pcf - 1)/2 } return(list(nu.ker=nu.ker, nu.pcf=nu.pcf)) } vargamma.estK <- function(X, startpar=c(kappa=1,eta=1), nu.ker = -1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, nu.pcf=NULL, ...) 
{ # nu.ker: smoothness parameter of Variance Gamma kernel function # omega: scale parameter of kernel function # nu.pcf: smoothness parameter of Variance Gamma pair correlation function # eta: scale parameter of Variance Gamma pair correlation function # nu.pcf = 2 * nu.ker + 1 and eta = omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(missing(nu.ker) && !is.null(nu.pcf)) nu.ker <- NULL if(inherits(X, "fv")) { K <- X if(!(attr(K, "fname") %in% c("K", "K[inhom]"))) warning("Argument X does not appear to be a K-function") } else if(inherits(X, "ppp")) { K <- Kest(X) dataname <- paste("Kest(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("kappa","eta")) info <- spatstatClusterModelInfo("VarGamma") theoret <- info$K # test validity of parameter nu and digest ph <- info$parhandler cmodel <- ph(nu.ker=nu.ker, nu.pcf=nu.pcf) margs <- cmodel$margs desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel" result <- mincontrast(K, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(K, "fname"), modelname="Variance Gamma process"), margs=margs, ...) 
# imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel # infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") return(result) } vargamma.estpcf <- function(X, startpar=c(kappa=1,eta=1), nu.ker=-1/4, lambda=NULL, q=1/4, p=2, rmin=NULL, rmax=NULL, nu.pcf=NULL, ..., pcfargs=list()) { # nu.ker: smoothness parameter of Variance Gamma kernel function # omega: scale parameter of kernel function # nu.pcf: smoothness parameter of Variance Gamma pair correlation function # eta: scale parameter of Variance Gamma pair correlation function # nu.pcf = 2 * nu.ker + 1 and eta = omega dataname <- getdataname(short.deparse(substitute(X), 20), ...) if(missing(nu.ker) && !is.null(nu.pcf)) nu.ker <- NULL if(inherits(X, "fv")) { g <- X if(!(attr(g, "fname") %in% c("g", "g[inhom]"))) warning("Argument X does not appear to be a pair correlation function") } else if(inherits(X, "ppp")) { g <- do.call("pcf.ppp", append(list(X), pcfargs)) dataname <- paste("pcf(", dataname, ")", sep="") if(is.null(lambda)) lambda <- summary(X)$intensity } else stop("Unrecognised format for argument X") startpar <- check.named.vector(startpar, c("kappa","eta")) info <- spatstatClusterModelInfo("VarGamma") theoret <- info$pcf # test validity of parameter nu and digest ph <- info$parhandler cmodel <- ph(nu.ker=nu.ker, nu.pcf=nu.pcf) margs <- cmodel$margs # avoid using g(0) as it may be infinite argu <- fvnames(g, ".x") rvals <- g[[argu]] if(rvals[1] == 0 && (is.null(rmin) || rmin == 0)) { rmin <- rvals[2] } desc <- "minimum contrast fit of Neyman-Scott process with Variance Gamma kernel" result <- mincontrast(g, theoret, startpar, ctrl=list(q=q, p=p,rmin=rmin, rmax=rmax), fvlab=list(label="%s[fit](r)", desc=desc), explain=list(dataname=dataname, fname=attr(g, "fname"), modelname="Variance Gamma process"), margs=margs, ...) 
# imbue with meaning par <- result$par names(par) <- c("kappa", "eta") result$par <- par result$covmodel <- cmodel # infer model parameters result$modelpar <- info$interpret(par, lambda) result$internal <- list(model="VarGamma") return(result) } spatstat/R/update.ppm.R0000755000176000001440000002066612237642727014577 0ustar ripleyusers# # update.ppm.R # # # $Revision: 1.38 $ $Date: 2013/04/25 06:37:43 $ # # # update.ppm <- function(object, ..., fixdummy=TRUE, use.internal=NULL, envir=parent.frame()) { verifyclass(object, "ppm") aargh <- list(...) call <- getCall(object) if(!is.call(call)) stop(paste("Internal error - getCall(object) is not of class", sQuote("call"))) callstring <- short.deparse(sys.call()) newformula <- function(old, change, eold=object$callframe, enew=envir) { old <- if(is.null(old)) ~1 else eval(old, eold) change <- if(is.null(change)) ~1 else eval(change, enew) old <- as.formula(old, env=eold) change <- as.formula(change, env=enew) update.formula(old, change) } # Special cases # (1) no new information if(length(aargh) == 0) return(eval(call, as.list(envir), enclos=object$callframe)) # (2) model can be updated using existing covariate data frame ismpl <- with(object, method == "mpl" && !is.null(fitter) && fitter %in% c("gam", "glm")) only.fmla <- length(aargh) == 1 && inherits(fmla <- aargh[[1]], "formula") if(ismpl && only.fmla) { # This is a dangerous hack! glmdata <- object$internal$glmdata # check whether data for new variables are available # (this doesn't work with things like 'pi') vars.available <- c(colnames(glmdata), names(object$covfunargs)) if(all(variablesinformula(fmla) %in% c(".", vars.available))) { # we can update using internal data FIT <- object$internal$glmfit orig.env <- environment(FIT$terms) # ensure fmla has only a right hand side if(!is.null(lh <- lhs.of.formula(fmla))) warning("Ignored left side of formula") fmla <- rhs.of.formula(fmla) # update formulae using "." 
rules trend <- newformula(object$trend, fmla) fmla <- newformula(formula(FIT), fmla) # update GLM/GAM fit upd.glm.call <- update(FIT, fmla, evaluate=FALSE) FIT <- eval(upd.glm.call, envir=orig.env) environment(FIT$terms) <- orig.env object$internal$glmfit <- FIT # update entries of object object$trend <- trend object$terms <- terms(fmla) object$coef <- co <- FIT$coef object$callstring <- callstring object$callframe <- parent.frame() object$internal$fmla <- fmla if(is.finite(object$maxlogpl)) { # Update maxlogpl provided it is finite # (If the likelihood is infinite, this is due to the interaction; # if we update the trend, the likelihood will remain infinite.) W <- glmdata$.mpl.W SUBSET <- glmdata$.mpl.SUBSET Z <- is.data(object$Q) object$maxlogpl <- -(deviance(FIT)/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) } # update the model call upd.call <- call upd.call$trend <- trend object$call <- upd.call # update fitted interaction (depends on coefficients, if not Poisson) if(!is.null(inter <- object$interaction) && !is.poisson(inter)) object$fitin <- fii(inter, co, object$internal$Vnames, object$internal$IsOffset) return(object) } } # general case. 
undecided <- is.null(use.internal) || !is.logical(use.internal) force.int <- !undecided && use.internal force.ext <- !undecided && !use.internal if(!force.int) { # check for validity of format badformat <- damaged.ppm(object) } if(undecided) { use.internal <- badformat if(badformat) message("object format corrupted; repairing it") } else if(force.ext && badformat) warning("object format corrupted; try update(object, use.internal=TRUE)") if(use.internal) { # reset the main arguments in the call using the internal data call$Q <- data.ppm(object) namobj <- names(call) if("trend" %in% namobj) call$trend <- newformula(call$trend, object$trend) if("interaction" %in% namobj) call$interaction <- object$interaction if("covariates" %in% namobj) call$covariates <- object$covariates } Q.is.new <- FALSE # split named and unnamed arguments nama <- names(aargh) named <- if(is.null(nama)) rep.int(FALSE, length(aargh)) else (nama != "") namedargs <- aargh[named] unnamedargs <- aargh[!named] nama <- names(namedargs) if(any(named)) { # any named arguments that were also present in the original call # override their original values existing <- !is.na(match(nama, names(call))) for (a in nama[existing]) call[[a]] <- aargh[[a]] # add any named arguments not present in the original call if (any(!existing)) { call <- c(as.list(call), namedargs[!existing]) call <- as.call(call) } # is the point pattern or quadscheme new ? 
if("Q" %in% nama) Q.is.new <- TRUE } if(any(!named)) { # some objects identified by their class if(n <- sp.foundclasses(c("ppp", "quad"), unnamedargs, "Q", nama)) { call$Q <- unnamedargs[[n]] Q.is.new <- TRUE } if(n<- sp.foundclass("interact", unnamedargs, "interaction", nama)) call$interaction <- unnamedargs[[n]] if(n<- sp.foundclasses(c("data.frame", "im"), unnamedargs, "covariates", nama)) call$covariates <- unnamedargs[[n]] if(n<- sp.foundclass("formula", unnamedargs, "trend", nama)) call$trend <- newformula(call$trend, unnamedargs[[n]]) else if(n <- sp.foundclass("character", unnamedargs, "trend", nama)) { # string that might be interpreted as a formula strg <- unnamedargs[[n]] if(!is.na(charmatch("~", strg))) { fo <- as.formula(strg) call$trend <- newformula(call$trend, fo) } } } # ************************************************************* # ****** Special action when Q is a point pattern ************* # ************************************************************* if(Q.is.new && fixdummy && inherits((X <- eval(call$Q)), "ppp")) { # Instead of allowing default.dummy(X) to occur, # explicitly create a quadrature scheme from X, # using the same dummy points and weight parameters # as were used in the fitted model Qold <- quad.ppm(object) if(is.marked(Qold)) { dpar <- Qold$param$dummy wpar <- Qold$param$weight Qnew <- do.call("quadscheme", append(list(X), append(dpar, wpar))) } else { Dum <- Qold$dummy wpar <- Qold$param$weight Qnew <- do.call("quadscheme", append(list(X, Dum), wpar)) } # replace X by new Q call$Q <- Qnew } # finally call ppm return(eval(call, as.list(envir), enclos=object$callframe)) } sp.foundclass <- function(cname, inlist, formalname, argsgiven) { ok <- unlist(lapply(inlist, inherits, what=cname)) nok <- sum(ok) if(nok > 1) stop(paste("I am confused: there are two unnamed arguments", "of class", sQuote(cname))) if(nok == 0) return(0) absent <- !(formalname %in% argsgiven) if(!absent) stop(paste("I am confused: there is an unnamed 
argument", "of class", sQuote(cname), "which conflicts with the", "named argument", sQuote(formalname))) theposition <- seq_along(ok)[ok] return(theposition) } sp.foundclasses <- function(cnames, inlist, formalname, argsgiven) { ncn <- length(cnames) pozzie <- logical(ncn) for(i in seq_len(ncn)) pozzie[i] <- sp.foundclass(cnames[i], inlist, formalname, argsgiven) found <- (pozzie > 0) nfound <- sum(found) if(nfound == 0) return(0) else if(nfound == 1) return(pozzie[found]) else stop(paste("I am confused: there are", nfound, "unnamed arguments of different classes (", paste(sQuote(cnames(pozzie[found])), collapse=", "), ") which could be interpreted as", sQuote(formalname))) } damaged.ppm <- function(object) { # guess whether the object format has been damaged # e.g. by dump/restore gf <- getglmfit(object) badfit <- !is.null(gf) && !inherits(gf$terms, "terms") if(badfit) return(TRUE) # escape clause for fake models if(identical(object$fake, TRUE)) return(FALSE) # otherwise it was made by ppm Qcall <- object$call$Q cf <- object$callframe if(is.null(cf)) { # Old format of ppm objects if(is.name(Qcall) && !exists(paste(Qcall))) return(TRUE) Q <- eval(Qcall) } else { # New format of ppm objects if(is.name(Qcall) && !exists(paste(Qcall), cf)) return(TRUE) Q <- eval(Qcall, cf) } badQ <- is.null(Q) || !(inherits(Q, "ppp") || inherits(Q,"quad")) return(badQ) } spatstat/R/psp.R0000755000176000001440000005021712237642727013317 0ustar ripleyusers# # psp.R # # $Revision: 1.68 $ $Date: 2013/04/25 06:37:43 $ # # Class "psp" of planar line segment patterns # # ################################################# # creator ################################################# psp <- function(x0, y0, x1, y1, window, marks=NULL, check=spatstat.options("checksegments")) { stopifnot(is.numeric(x0)) stopifnot(is.numeric(y0)) stopifnot(is.numeric(x1)) stopifnot(is.numeric(y1)) stopifnot(is.vector(x0)) stopifnot(is.vector(y0)) stopifnot(is.vector(x1)) stopifnot(is.vector(y1)) 
stopifnot(length(x0) == length(y0)) stopifnot(length(x1) == length(y1)) stopifnot(length(x0) == length(x1)) ends <- data.frame(x0=x0,y0=y0,x1=x1,y1=y1) if(!missing(window)) verifyclass(window,"owin") if(check) { ok <- inside.owin(x0,y0, window) & inside.owin(x1,y1,window) if((nerr <- sum(!ok)) > 0) stop(paste(nerr, ngettext(nerr, "segment does not", "segments do not"), "lie entirely inside the window.\n"), call.=FALSE) } out <- list(ends=ends, window=window, n = nrow(ends)) # add marks if any if(!is.null(marks)) { if(is.matrix(marks)) marks <- as.data.frame(marks) if(is.data.frame(marks)) { omf <- "dataframe" nmarks <- nrow(marks) rownames(marks) <- seq_len(nmarks) whinge <- "The number of rows of marks" } else { omf <- "vector" names(marks) <- NULL nmarks <- length(marks) whinge <- "The length of the marks vector" } if(nmarks != out$n) stop(paste(whinge, "!= length of x and y.\n")) out$marks <- marks out$markformat <- omf } else { out$markformat <- "none" } class(out) <- c("psp", class(out)) return(out) } ###################################################### # conversion ###################################################### is.psp <- function(x) { inherits(x, "psp") } as.psp <- function(x, ..., from=NULL, to=NULL) { # special case: two point patterns if(is.null(from) != is.null(to)) stop(paste("If one of", sQuote("from"), "and", sQuote("to"), "is specified, then both must be specified.\n")) if(!is.null(from) && !is.null(to)) { verifyclass(from, "ppp") verifyclass(to, "ppp") if(from$n != to$n) stop(paste("The point patterns", sQuote("from"), "and", sQuote("to"), "have different numbers of points.\n")) uni <- union.owin(from$window, to$window) Y <- do.call("psp", resolve.defaults(list(from$x, from$y, to$x, to$y), list(...), list(window=uni))) return(Y) } UseMethod("as.psp") } as.psp.psp <- function(x, ..., check=FALSE, fatal=TRUE) { if(!verifyclass(x, "psp", fatal=fatal)) return(NULL) ends <- x$ends psp(ends$x0, ends$y0, ends$x1, ends$y1, window=x$window, 
marks=x$marks, check=check) } as.psp.data.frame <- function(x, ..., window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) { window <- suppressWarnings(as.owin(window,fatal=FALSE)) if(!is.owin(window)) { if(fatal) stop("Cannot interpret \"window\" as an object of class owin.\n") return(NULL) } if(checkfields(x,"marks")) { if(is.null(marks)) marks <- x$marks else warning(paste("Column named \"marks\" ignored;\n", "argument named \"marks\" has precedence.\n",sep="")) x$marks <- NULL } if(checkfields(x, c("x0", "y0", "x1", "y1"))) { out <- psp(x$x0, x$y0, x$x1, x$y1, window=window, check=check) x <- x[-match(c("x0","y0","x1","y1"),names(x))] } else if(checkfields(x, c("xmid", "ymid", "length", "angle"))) { rr <- x$length/2 dx <- cos(x$angle) * rr dy <- sin(x$angle) * rr bb <- bounding.box(window) rmax <- max(rr) bigbox <- owin(bb$xrange + c(-1,1) * rmax, bb$yrange + c(-1,1) * rmax) pattern <- psp(x$x - dx, x$y - dy, x$x + dx, x$y + dy, window=bigbox,check=FALSE) out <- pattern[window] x <- x[-match(c("xmid","ymid","length","angle"),names(x))] } else if(ncol(x) >= 4) { out <- psp(x[,1], x[,2], x[,3], x[,4], window=window, check=check) x <- x[-(1:4)] } else if(fatal) stop("Unable to interpret x as a line segment pattern.\n") else out <- NULL if(!is.null(out)) { if(is.null(marks) & ncol(x) > 0) marks <- x if(is.null(marks)) { out$markformat <- "none" } else { out$marks <- marks out$markformat <- if(is.data.frame(marks)) "dataframe" else "vector" out <- as.psp(out,check=FALSE) } } return(out) } as.psp.matrix <- function(x, ..., window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) { x <- as.data.frame(x) as.psp(x,...,window=window,marks=marks,check=check,fatal=fatal) } as.psp.default <- function(x, ..., window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) { if(checkfields(x,"marks")) { if(is.null(marks)) marks <- x$marks else warning(paste("Component of \"x\" named \"marks\" ignored;\n", "argument 
named \"marks\" has precedence.\n",sep="")) } if(checkfields(x, c("x0", "y0", "x1", "y1"))) return(psp(x$x0, x$y0, x$x1, x$y1, window=window, marks=marks, check=check)) else if(checkfields(x, c("xmid", "ymid", "length", "angle"))) { rr <- x$length/2 dx <- cos(x$angle) * rr dy <- sin(x$angle) * rr window <- as.owin(window) bb <- bounding.box(window) rmax <- max(rr) bigbox <- owin(bb$xrange + c(-1,1) * rmax, bb$yrange + c(-1,1) * rmax) pattern <- psp(x$x - dx, x$y - dy, x$x + dx, x$y + dy, window=bigbox, marks=marks, check=FALSE) clipped <- pattern[window] return(clipped) } else if(fatal) stop("Unable to interpret x as a line segment pattern") return(NULL) } as.psp.owin <- function(x, ..., check=spatstat.options("checksegments"), fatal=TRUE) { verifyclass(x, "owin") # can't use as.rectangle here; still testing validity xframe <- owin(x$xrange, x$yrange) switch(x$type, rectangle = { xx <- x$xrange[c(1,2,2,1)] yy <- x$yrange[c(1,1,2,2)] nxt <- c(2,3,4,1) out <- psp(xx, yy, xx[nxt], yy[nxt], window=x, check=check) return(out) }, polygonal = { x0 <- y0 <- x1 <- y1 <- numeric(0) bdry <- x$bdry for(i in seq_along(bdry)) { po <- bdry[[i]] ni <- length(po$x) nxt <- c(2:ni, 1) x0 <- c(x0, po$x) y0 <- c(y0, po$y) x1 <- c(x1, po$x[nxt]) y1 <- c(y1, po$y[nxt]) } out <- psp(x0, y0, x1, y1, window=xframe, check=check) return(out) }, mask = { if(fatal) stop("x is a mask") else warning("x is a mask - no line segments returned") return(psp(numeric(0), numeric(0), numeric(0), numeric(0), window=xframe, check=FALSE)) }) return(NULL) } ################# as.data.frame.psp <- function(x, row.names=NULL, ...) 
{ df <- as.data.frame(x$ends, row.names=row.names) if(is.marked(x)) df <- cbind(df, if(x$markformat=="dataframe") marks(x) else data.frame(marks=marks(x))) return(df) } ####### manipulation ########################## append.psp <- function(A,B) { verifyclass(A, "psp") verifyclass(B, "psp") stopifnot(identical(A$window, B$window)) marks <- marks(A) %mapp% marks(B) ends <- rbind(A$ends, B$ends) out <- as.psp(ends,window=A$window,marks=marks,check=FALSE) return(out) } rebound.psp <- function(x, rect) { verifyclass(x, "psp") x$window <- rebound.owin(x$window, rect) return(x) } ################################################# # marks ################################################# is.marked.psp <- function(X, ...) { marx <- marks(X, ...) return(!is.null(marx)) } marks.psp <- function(x, ..., dfok = TRUE) { # data frames of marks are as of 19/March 2011 implemented for psp ma <- x$marks if ((is.data.frame(ma) || is.matrix(ma)) && !dfok) stop("Sorry, not implemented when the marks are a data frame.\n") return(ma) } "marks<-.psp" <- function(x, ..., value) { stopifnot(is.psp(x)) if(is.null(value)) { return(unmark(x)) } m <- value if(!(is.vector(m) || is.factor(m) || is.data.frame(m) || is.matrix(m))) stop("Incorrect format for marks") if (is.hyperframe(m)) stop("Hyperframes of marks are not supported in psp objects.\n") nseg <- nsegments(x) if (!is.data.frame(m) && !is.matrix(m)) { if (length(m) == 1) m <- rep.int(m, nseg) else if (nseg == 0) m <- rep.int(m, 0) else if (length(m) != nseg) stop("Number of marks != number of line segments.\n") marx <- m } else { m <- as.data.frame(m) if (ncol(m) == 0) { marx <- NULL } else { if (nrow(m) == nseg) { marx <- m } else { if (nrow(m) == 1 || nseg == 0) { marx <- as.data.frame(lapply(as.list(m),function(x,k) { rep.int(x, k)}, k = nseg)) } else stop("Number of rows of data frame != number of points.\n") } } } Y <- as.psp(x$ends, window = x$window, marks = marx, check = FALSE) return(Y) } markformat.psp <- function(x) { mf <- 
x$markformat if(is.null(mf)) mf <- markformat(marks(x)) return(mf) } unmark.psp <- function(X) { X$marks <- NULL X$markformat <- "none" return(X) } ################################################# # plot and print methods ################################################# plot.psp <- function(x, ..., add=FALSE, which.marks=1, ribbon=TRUE, ribsep=0.15, ribwid=0.05, ribn=1024) { main <- short.deparse(substitute(x)) verifyclass(x, "psp") # n <- nsegments(x) marx <- marks(x) # use.colour <- !is.null(marx) && (n != 0) do.ribbon <- identical(ribbon, TRUE) && use.colour && !add # if(!add) { # create plot region if(!do.ribbon) { # window of x do.call.matched("plot.owin", resolve.defaults(list(x=x$window), list(...), list(main=main))) } else { # enlarged window with room for colour ribbon # x at left, ribbon at right bb <- as.rectangle(as.owin(x)) xwidth <- diff(bb$xrange) xheight <- diff(bb$yrange) xsize <- max(xwidth, xheight) bb.rib <- owin(bb$xrange[2] + c(ribsep, ribsep+ribwid) * xsize, bb$yrange) bb.all <- bounding.box(bb.rib, bb) # establish coordinate system do.call.matched("plot.default", resolve.defaults(list(x=0, y=0, type="n", axes=FALSE, asp=1, xlim=bb.all$xrange, ylim=bb.all$yrange), list(...), list(main=main, xlab="", ylab=""))) # now plot window of x do.call.matched("plot.owin", resolve.defaults(list(x=x$window, add=TRUE), list(...))) } } # plot segments if(n == 0) return(invisible(NULL)) # determine colours if any if(!use.colour) { # black col <- colmap <- NULL } else { # multicoloured marx <- as.data.frame(marx)[, which.marks] if(is.character(marx) || length(unique(marx)) == 1) marx <- factor(marx) if(is.factor(marx)) { lev <- levels(marx) colmap <- colourmap(col=rainbow(length(lev)), inputs=factor(lev)) } else { if(!all(is.finite(marx))) warning("Some mark values are infinite or NaN or NA") colmap <- colourmap(col=rainbow(ribn), range=range(marx, finite=TRUE)) } col <- colmap(marx) } # plot segments do.call("segments", resolve.defaults(as.list(x$ends), 
list(...), list(col=col), .StripNull=TRUE)) # plot ribbon if(do.ribbon) plot(colmap, vertical=TRUE, add=TRUE, xlim=bb.rib$xrange, ylim=bb.rib$yrange) # return colour map return(invisible(colmap)) } print.psp <- function(x, ...) { verifyclass(x, "psp") ism <- is.marked(x, dfok = TRUE) cat(paste(if(ism) "marked" else NULL, "planar line segment pattern:", x$n, "line", ngettext(x$n, "segment", "segments"), "\n")) if (ism) { mks <- marks(x, dfok = TRUE) if (is.data.frame(mks)) { cat(paste("Mark variables: ", paste(names(mks), collapse = ", "), "\n")) } else { if (is.factor(mks)) { cat("multitype, with ") cat(paste("levels =", paste(levels(mks), collapse = "\t"), "\n")) } else { cat(paste("marks are", if (is.numeric(mks)) "numeric,", "of type", sQuote(typeof(mks)), "\n")) } } } print(x$window) return(invisible(NULL)) } unitname.psp <- function(x) { return(unitname(x$window)) } "unitname<-.psp" <- function(x, value) { w <- x$window unitname(w) <- value x$window <- w return(x) } #################################################### # summary information #################################################### endpoints.psp <- function(x, which="both") { verifyclass(x, "psp") ends <- x$ends n <- x$n switch(which, both={ first <- second <- rep.int(TRUE, n) }, first={ first <- rep.int(TRUE, n) second <- rep.int(FALSE, n) }, second={ first <- rep.int(FALSE, n) second <- rep.int(TRUE, n) }, left={ first <- (ends$x0 < ends$x1) second <- !first }, right={ first <- (ends$x0 > ends$x1) second <- !first }, lower={ first <- (ends$y0 < ends$y1) second <- !first }, upper={ first <- (ends$y0 > ends$y1) second <- !first }, stop(paste("Unrecognised option: which=", sQuote(which))) ) ok <- rbind(first, second) xmat <- rbind(ends$x0, ends$x1) ymat <- rbind(ends$y0, ends$y1) idmat <- col(ok) xx <- as.vector(xmat[ok]) yy <- as.vector(ymat[ok]) id <- as.vector(idmat[ok]) result <- ppp(xx, yy, window=x$window, check=FALSE) attr(result, "id") <- id return(result) } midpoints.psp <- function(x) { 
verifyclass(x, "psp") xm <- eval(expression((x0+x1)/2), envir=x$ends) ym <- eval(expression((y0+y1)/2), envir=x$ends) win <- x$window ok <- inside.owin(xm, ym, win) if(any(!ok)) { warning(paste("Some segment midpoints lie outside the original window;", "window replaced by bounding box")) win <- bounding.box(win) } ppp(x=xm, y=ym, window=win, check=FALSE) } lengths.psp <- function(x) { verifyclass(x, "psp") eval(expression(sqrt((x1-x0)^2 + (y1-y0)^2)), envir=x$ends) } angles.psp <- function(x, directed=FALSE) { verifyclass(x, "psp") a <- eval(expression(atan2(y1-y0, x1-x0)), envir=x$ends) if(!directed) a <- a %% pi return(a) } summary.psp <- function(object, ...) { verifyclass(object, "psp") len <- lengths.psp(object) out <- list(n = object$n, len = summary(len), totlen = sum(len), ang= summary(angles.psp(object)), w = summary.owin(object$window), marks=if(is.null(object$marks)) NULL else summary(object$marks), unitinfo=summary(unitname(object))) class(out) <- c("summary.psp", class(out)) return(out) } print.summary.psp <- function(x, ...) { cat(paste(x$n, "line segments\n")) cat("Lengths:\n") print(x$len) unitblurb <- paste(x$unitinfo$plural, x$unitinfo$explain) cat(paste("Total length:", x$totlen, unitblurb, "\n")) cat(paste("Length per unit area:", x$totlen/x$w$area, "\n")) cat("Angles (radians):\n") print(x$ang) print(x$w) if(!is.null(x$marks)) { cat("Marks:\n") print(x$marks) } return(invisible(NULL)) } ######################################################## # subsets ######################################################## "[.psp" <- function(x, i, j, drop, ...) 
{ verifyclass(x, "psp") if(missing(i) && missing(j)) return(x) if(!missing(i)) { style <- if(inherits(i, "owin")) "window" else "index" switch(style, window={ x <- clip.psp(x, window=i, check=FALSE) }, index={ enz <- x$ends[i, ] win <- x$window marx <- marksubset(x$marks, i, markformat(x)) x <- with(enz, psp(x0, y0, x1, y1, window=win, marks=marx, check=FALSE)) }) } if(!missing(j)) x <- x[j] # invokes code above return(x) } #################################################### # affine transformations #################################################### affine.psp <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) { verifyclass(X, "psp") W <- affine.owin(X$window, mat=mat, vec=vec, ...) E <- X$ends ends0 <- affinexy(list(x=E$x0,y=E$y0), mat=mat, vec=vec) ends1 <- affinexy(list(x=E$x1,y=E$y1), mat=mat, vec=vec) psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W, marks=marks(X, dfok=TRUE), check=FALSE) } shift.psp <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "psp") if(!is.null(origin)) { stopifnot(is.character(origin)) if(!missing(vec)) warning("Argument vec ignored; argument origin has precedence.\n") origin <- pickoption("origin", origin, c(centroid="centroid", midpoint="midpoint", bottomleft="bottomleft")) W <- as.owin(X) locn <- switch(origin, centroid={ unlist(centroid.owin(W)) }, midpoint={ c(mean(W$xrange), mean(W$yrange)) }, bottomleft={ c(W$xrange[1], W$yrange[1]) }) return(shift(X, -locn)) } # perform shift W <- shift.owin(X$window, vec=vec, ...) E <- X$ends ends0 <- shiftxy(list(x=E$x0,y=E$y0), vec=vec, ...) ends1 <- shiftxy(list(x=E$x1,y=E$y1), vec=vec, ...) Y <- psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W, marks=marks(X, dfok=TRUE), check=FALSE) # tack on shift vector attr(Y, "lastshift") <- vec return(Y) } rotate.psp <- function(X, angle=pi/2, ...) { verifyclass(X, "psp") W <- rotate.owin(X$window, angle=angle, ...) 
E <- X$ends ends0 <- rotxy(list(x=E$x0,y=E$y0), angle=angle) ends1 <- rotxy(list(x=E$x1,y=E$y1), angle=angle) psp(ends0$x, ends0$y, ends1$x, ends1$y, window=W, marks=marks(X, dfok=TRUE), check=FALSE) } is.empty.psp <- function(x) { return(x$n == 0) } identify.psp <- function(x, ..., labels=seq_len(nsegments(x)), n=nsegments(x), plot=TRUE) { Y <- x W <- as.owin(Y) mids <- midpoints.psp(Y) if(!(is.numeric(n) && (length(n) == 1) && (n %% 1 == 0) && (n >= 0))) stop("n should be a single integer") out <- integer(0) while(length(out) < n) { xy <- locator(1) # check for interrupt exit if(length(xy$x) == 0) return(out) # find nearest segment X <- ppp(xy$x, xy$y, window=W) ident <- project2segment(X, Y)$mapXY # add to list if(ident %in% out) { cat(paste("Segment", ident, "already selected\n")) } else { if(plot) { # Display mi <- mids[ident] li <- labels[ident] text(mi$x, mi$y, labels=li) } out <- c(out, ident) } } # exit if max n reached return(out) } nsegments <- function(x) { UseMethod("nsegments") } nobjects.psp <- nsegments.psp <- function(x) { x$n } as.ppp.psp <- function (X, ..., fatal=TRUE) { Y <- endpoints.psp(X, which="both") m <- marks(X) marks(Y) <- markappend(m, m) return(Y) } spatstat/R/profilepl.R0000755000176000001440000001736712237642727014522 0ustar ripleyusers# # profilepl.R # # $Revision: 1.18 $ $Date: 2013/06/18 01:38:42 $ # # computes profile log pseudolikelihood # profilepl <- function(s, f, ..., rbord=NULL, verbose=TRUE) { s <- as.data.frame(s) n <- nrow(s) fname <- paste(short.deparse(substitute(f)), collapse="") stopifnot(is.function(f)) # validate 's' parms <- names(s) fargs <- names(formals(f)) if(!all(fargs %in% parms)) stop("Some arguments of f are not provided in s") # extra columns in 's' are assumed to be parameters of covariate functions is.farg <- parms %in% fargs pass.cfa <- any(!is.farg) got.cfa <- "covfunargs" %in% names(list(...)) if(pass.cfa && got.cfa) stop("Some columns in s are superfluous") # interlist <- list() logmpl <- 
numeric(n) # make a fake call pseudocall <- match.call() pseudocall[[1]] <- as.symbol("ppm") namcal <- names(pseudocall) # remove arguments 's' and 'verbose' retain <- !(namcal %in% c("s", "verbose")) pseudocall <- pseudocall[retain] namcal <- namcal[retain] # place 'f' argument third np <- length(pseudocall) fpos <- (1:np)[namcal == "f"] indices <- (1:np)[-fpos] if(length(indices) < 3) indices <- c(indices, fpos) else indices <- c(indices[1:3], fpos, indices[-(1:3)]) pseudocall <- pseudocall[indices] namcal <- names(pseudocall) namcal[namcal=="f"] <- "interaction" names(pseudocall) <- namcal # Determine edge correction # with partial matching, avoiding collisions with # other arguments to ppm that have similar names. getppmcorrection <- function(..., correction = "border", covariates = NULL, covfunargs = NULL, control = NULL) { return(correction) } correction <- getppmcorrection(...) if(correction == "border") { # determine border correction distance if(is.null(rbord)) { # compute rbord = max reach of interactions if(verbose) message("(computing rbord)") for(i in 1:n) { fi <- do.call("f", as.list(s[i, is.farg, drop=FALSE])) if(!inherits(fi, "interact")) stop(paste("f did not yield an object of class", sQuote("interact"))) re <- reach(fi) if(is.null(rbord)) rbord <- re else if(rbord < re) rbord <- re } } } # determine whether computations can be saved if(pass.cfa || got.cfa) { savecomp <- FALSE } else { Q <- ppm(..., rbord=rbord, justQ=TRUE) savecomp <- !oversize.quad(Q) } # fit one model and extract quadscheme if(verbose) message(paste("Comparing", n, "models...")) for(i in 1:n) { if(verbose) progressreport(i, n) fi <- do.call("f", as.list(s[i, is.farg, drop=FALSE])) if(!inherits(fi, "interact")) stop(paste("f did not yield an object of class", sQuote("interact"))) if(pass.cfa) cfai <- list(covfunargs=as.list(s[i, !is.farg, drop=FALSE])) # fit model if(i == 1) { # fit from scratch arg1 <- list(interaction=fi, ..., rbord=rbord, savecomputed=savecomp, 
warn.illegal=FALSE, callstring="", skip.border=TRUE) if(pass.cfa) arg1 <- append(arg1, cfai) fiti <- do.call("ppm", arg1) # save intermediate computations (pairwise distances, etc) precomp <- fiti$internal$computed savedargs <- list(..., rbord=rbord, precomputed=precomp, warn.illegal=FALSE, callstring="", skip.border=TRUE) } else { # use precomputed data argi <- append(savedargs, list(interaction=fi)) if(pass.cfa) argi <- append(argi, cfai) fiti <- do.call("ppm", argi) } # save log PL for each fit logmpl[i] <- as.numeric(logLik(fiti, warn=FALSE)) # save fitted coefficients for each fit co <- coef(fiti) if(i == 1) { allcoef <- data.frame(matrix(co, nrow=1)) names(allcoef) <- names(co) } else allcoef <- rbind(allcoef, co) } if(verbose) message("Fitting optimal model...") opti <- which.max(logmpl) optint <- do.call("f", as.list(s[opti, is.farg, drop=FALSE])) optarg <- list(interaction=optint, ..., rbord=rbord) if(pass.cfa) { optcfa <- list(covfunargs=as.list(s[opti, !is.farg, drop=FALSE])) optarg <- append(optarg, optcfa) } optfit <- do.call("ppm", optarg) if(verbose) message("done.") result <- list(param=s, prof=logmpl, iopt=opti, fit=optfit, rbord=rbord, fname=fname, allcoef=allcoef, otherstuff=list(...), pseudocall=pseudocall) class(result) <- c("profilepl", class(result)) return(result) } # # print method # print.profilepl <- function(x, ...) 
{ cat("Profile log pseudolikelihood values\n") cat("for model:\t") print(x$pseudocall) cat(paste("fitted with rbord=", x$rbord, "\n")) nparm <- ncol(x$param) cat(paste("Interaction:", x$fname, "\n", "with irregular", ngettext(nparm, "parameter ", "parameters\n"))) for(na in names(x$param)) { ra <- range(x$param[[na]]) cat(paste(sQuote(na), "in", paste("[",ra[1],", ",ra[2],"]",sep=""), "\n")) } cat(paste("Optimum", ngettext(nparm, "value", "values"), "of irregular", ngettext(nparm, "parameter: ", "parameters:\n"))) popt <- x$param[x$iopt,, drop=FALSE] cat(commasep(paste(names(popt), "=", popt))) cat("\n") } # # summary method # summary.profilepl <- function(object, ...) { print(object) cat("\n\nOptimal model:\n") print(object$fit) } as.ppm.profilepl <- function(object) { object$fit } # # plot method # plot.profilepl <- function(x, ..., add=FALSE, main=NULL, tag=TRUE, coeff=NULL, xvariable=NULL) { para <- x$param npara <- ncol(para) # main header if(is.null(main)) main <- short.deparse(x$pseudocall) # x variable for plot if(is.null(xvariable)) { xvalues <- para[,1] xname <- names(para)[1] } else { stopifnot(is.character(xvariable)) if(!(xvariable %in% names(para))) stop("There is no irregular parameter named", sQuote(xvariable)) xvalues <- para[[xvariable]] xname <- xvariable } # y variable for plot if(is.null(coeff)) { yvalues <- x$prof ylab <- "log PL" } else { stopifnot(is.character(coeff)) allcoef <- x$allcoef if(!(coeff %in% names(allcoef))) stop(paste("There is no coefficient named", sQuote(coeff), "in the fitted model")) yvalues <- allcoef[[coeff]] ylab <- paste("coefficient:", coeff) } # start plot if(!add) do.call.matched("plot.default", resolve.defaults(list(x=range(xvalues), y=range(yvalues)), list(type="n", main=main), list(...), list(ylab=ylab, xlab=xname))) # single curve if(npara == 1) { do.call.matched("lines.default", list(x=xvalues, y=yvalues, ...)) abline(v = xvalues[x$iopt], lty=2, col="green") return(invisible(NULL)) } # multiple curves other <- 
para[, -1, drop=FALSE] tapply(1:nrow(para), as.list(other), function(z, xvalues, yvalues, other, tag) { fz <- xvalues[z] pz<- yvalues[z] lines(fz, pz) if(tag) { oz <- other[z, , drop=FALSE] uniques <- apply(oz, 2, unique) labels <- paste(names(uniques), "=", uniques, sep="") label <- paste(labels, sep=",") ii <- which.max(pz) text(fz[ii], pz[ii], label) } }, xvalues=xvalues, yvalues=yvalues, other=other, tag=tag) abline(v = xvalues[x$iopt], lty=2, col="green") return(invisible(NULL)) } spatstat/R/rmhmodel.R0000755000176000001440000011444512237642727014330 0ustar ripleyusers# # # rmhmodel.R # # $Revision: 1.59 $ $Date: 2013/04/25 06:37:43 $ # # rmhmodel <- function(...) { UseMethod("rmhmodel") } rmhmodel.rmhmodel <- function(model, ...) { # Check for outdated internal format # C.par was replaced by C.beta and C.ipar in spatstat 1.22-3 if(outdated <- !is.null(model$C.par)) warning("Outdated internal format of rmhmodel object; rebuilding it") if(outdated || (length(list(...)) > 0)) model <- rmhmodel.list(unclass(model), ...) return(model) } rmhmodel.list <- function(model, ...) 
{ argnames <- c("cif","par","w","trend","types") ok <- argnames %in% names(model) do.call("rmhmodel.default", resolve.defaults(list(...), model[argnames[ok]])) } rmhmodel.default <- function(..., cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL) { extractsecret <- function(..., stopinvalid=TRUE) { if(length(list(...)) > 0) stop(paste("rmhmodel.default: syntax should be", "rmhmodel(cif, par, w, trend, types)", "with arguments given by name if they are present"), call.=FALSE) return(list(stopinvalid=stopinvalid)) } stopinvalid <- extractsecret(...)$stopinvalid # Validate parameters if(is.null(cif)) stop("cif is missing or NULL") if(is.null(par)) stop("par is missing or NULL") if(!is.null(w)) w <- as.owin(w) if(!is.character(cif)) stop("cif should be a character string") betamultiplier <- 1 Ncif <- length(cif) if(Ncif > 1) { # hybrid # check for Poisson components ispois <- (cif == 'poisson') if(any(ispois)) { # validate Poisson components Npois <- sum(ispois) poismodels <- vector(mode="list", length=Npois) parpois <- par[ispois] for(i in 1:Npois) poismodels[[i]] <- rmhmodel(cif='poisson', par=parpois[[i]], w=w, trend=NULL, types=types, stopinvalid=FALSE) # consolidate Poisson intensity parameters poisbetalist <- lapply(poismodels, function(x){x$C.beta}) poisbeta <- Reduce("*", poisbetalist) if(all(ispois)) { # model collapses to a Poisson process cif <- 'poisson' Ncif <- 1 par <- list(beta=poisbeta) betamultiplier <- 1 } else { # remove Poisson components cif <- cif[!ispois] Ncif <- sum(!ispois) par <- par[!ispois] if(Ncif == 1) # revert to single-cif format par <- par[[1]] # absorb beta parameters betamultiplier <- poisbeta } } } if(Ncif > 1) { # genuine hybrid models <- vector(mode="list", length=Ncif) check <- vector(mode="list", length=Ncif) for(i in 1:Ncif) models[[i]] <- rmhmodel(cif=cif[i], par=par[[i]], w=w, trend=NULL, types=types, stopinvalid=FALSE) C.id <- unlist(lapply(models, function(x){x$C.id})) C.betalist <- lapply(models, function(x){x$C.beta}) 
C.iparlist <- lapply(models, function(x){x$C.ipar}) # absorb beta multiplier into beta parameter of first component C.betalist[[1]] <- C.betalist[[1]] * betamultiplier # concatenate for use in C C.beta <- unlist(C.betalist) C.ipar <- unlist(C.iparlist) check <- lapply(models, function(x){x$check}) maxr <- max(unlist(lapply(models, function(x){x$reach}))) ismulti <- unlist(lapply(models, function(x){x$multitype.interact})) multi <- any(ismulti) # determine whether model exists integ <- unlist(lapply(models, function(x) { x$integrable })) stabi <- unlist(lapply(models, function(x) { x$stabilising })) integrable <- all(integ) || any(stabi) stabilising <- any(stabi) # string explanations of conditions for validity integ.ex <- unlist(lapply(models, function(x){ x$explainvalid$integrable })) stabi.ex <- unlist(lapply(models, function(x){ x$explainvalid$stabilising})) stabi.oper <- !(stabi.ex %in% c("TRUE", "FALSE")) integ.oper <- !(integ.ex %in% c("TRUE", "FALSE")) compnames <- if(!any(duplicated(C.id))) paste("cif", sQuote(C.id)) else paste("component", 1:Ncif, paren(sQuote(C.id))) if(!integrable && stopinvalid) { # model is not integrable: explain why ifail <- !integ & integ.oper ireason <- paste(compnames[ifail], "should satisfy", paren(integ.ex[ifail], "{")) ireason <- verbalogic(ireason, "and") if(sum(ifail) <= 1) { # There's only one offending cif, so stability is redundant sreason <- "FALSE" } else { sfail <- !stabi & stabi.oper sreason <- paste(compnames[sfail], "should satisfy", paren(stabi.ex[sfail], "{")) sreason <- verbalogic(sreason, "or") } reason <- verbalogic(c(ireason, sreason), "or") stop(paste("rmhmodel: hybrid model is not integrable; ", reason), call.=FALSE) } else { # construct strings summarising conditions for validity if(!any(integ.oper)) ireason <- as.character(integrable) else { ireason <- paste(compnames[integ.oper], "should satisfy", paren(integ.ex[integ.oper], "{")) ireason <- verbalogic(ireason, "and") } if(!any(stabi.oper)) sreason <- 
as.character(stabilising) else { sreason <- paste(compnames[stabi.oper], "should satisfy", paren(stabi.ex[stabi.oper], "{")) sreason <- verbalogic(sreason, "or") } ireason <- verbalogic(c(ireason, sreason), "or") explainvalid <- list(integrable=ireason, stabilising=sreason) } out <- list(cif=cif, par=par, w=w, trend=trend, types=types, C.id=C.id, C.beta=C.beta, C.ipar=C.ipar, C.betalist=C.betalist, C.iparlist=C.iparlist, check=check, multitype.interact=multi, integrable=integrable, stabilising=stabilising, explainvalid=explainvalid, reach=maxr) class(out) <- c("rmhmodel", class(out)) return(out) } # non-hybrid # Check that this is a recognised model # and look up the rules for this model rules <- spatstatRmhInfo(cif) # Map the name of the cif from R to C # (the names are normally identical in R and C, # except "poisson" -> NA) C.id <- rules$C.id # Check that the C name is recognised in C if(!is.na(C.id)) { z <- .C("knownCif", cifname=as.character(C.id), answer=as.integer(0)) # PACKAGE="spatstat") ok <- as.logical(z$answer) if(!ok) stop(paste("Internal error: the cif", sQuote(C.id), "is not recognised in the C code")) } # Validate the model parameters and reformat them check <- rules$parhandler checkedpar <- if(!rules$multitype) check(par) else if(!is.null(types)) check(par, types) else # types vector not given - defer checking NULL if(!is.null(checkedpar)) { stopifnot(is.list(checkedpar)) stopifnot(!is.null(names(checkedpar)) && all(nzchar(names(checkedpar)))) stopifnot(names(checkedpar)[[1]] == "beta") C.beta <- unlist(checkedpar[[1]]) C.beta <- C.beta * betamultiplier C.ipar <- as.numeric(unlist(checkedpar[-1])) } else { C.beta <- C.ipar <- NULL } # Determine whether model is integrable integrable <- rules$validity(par, "integrable") explainvalid <- rules$explainvalid if(!integrable && stopinvalid) stop(paste("rmhmodel: the model is not integrable; it should satisfy", explainvalid$integrable), call.=FALSE) # Determine whether cif is stabilising # (i.e. 
any hybrid including this cif will be integrable) stabilising <- rules$validity(par, "stabilising") # Calculate reach of model mreach <- rules$reach(par) ################################################################### # return augmented list out <- list(cif=cif, par=par, w=w, trend=trend, types=types, C.id=C.id, C.beta=C.beta, C.ipar=C.ipar, check= if(is.null(C.ipar)) check else NULL, multitype.interact=rules$multitype, integrable=integrable, stabilising=stabilising, explainvalid=explainvalid, reach=mreach ) class(out) <- c("rmhmodel", class(out)) return(out) } print.rmhmodel <- function(x, ...) { verifyclass(x, "rmhmodel") cat("Metropolis-Hastings algorithm, model parameters\n") Ncif <- length(x$cif) cat(paste("Conditional intensity:", if(Ncif == 1) "cif=" else "hybrid of cifs", commasep(sQuote(x$cif)), "\n")) if(!is.null(x$types)) { if(length(x$types) == 1) cat("Univariate process.\n") else { cat("Multitype process with types =\n") print(x$types) if(!x$multitype.interact) cat("Interaction does not depend on type\n") } } else if(x$multitype.interact) cat("Multitype process, types not yet specified.\n") cat("Numerical parameters: par =\n") print(x$par) if(is.null(x$C.ipar)) cat("Parameters have not yet been checked for compatibility with types.\n") if(is.owin(x$w)) print(x$w) else cat("Window: not specified.\n") cat("Trend: ") if(!is.null(x$trend)) print(x$trend) else cat("none.\n") if(!is.null(x$integrable) && !x$integrable) { cat("\n*Warning: model is not integrable and cannot be simulated*\n") } invisible(NULL) } reach.rmhmodel <- function(x, ...) { if(length(list(...)) == 0) return(x$reach) # reach must be recomputed cif <- x$cif Ncif <- length(cif) pars <- if(Ncif == 1) list(x$par) else x$par maxr <- 0 for(i in seq_len(Ncif)) { cif.i <- cif[i] par.i <- pars[[i]] rules <- spatstatRmhInfo(cif.i) rchfun <- rules$reach if(!is.function(rchfun)) stop(paste("Internal error: reach is unknown for cif=", sQuote(cif.i)), call.=FALSE) r.i <- rchfun(par.i, ...) 
maxr <- max(maxr, r.i, na.rm=TRUE) } return(maxr) } is.poisson.rmhmodel <- function(x) { verifyclass(x, "rmhmodel") identical(x$cif, 'poisson') } is.stationary.rmhmodel <- function(x) { verifyclass(x, "rmhmodel") tren <- x$trend return(is.null(tren) || is.numeric(tren)) } as.owin.rmhmodel <- function(W, ..., fatal=FALSE) { # W is the rmhmodel object. It contains a window w ans <- W$w if(is.owin(ans)) return(ans) if(fatal) stop("rmhmodel object does not contain a window") return(NULL) } is.expandable.rmhmodel <- function(x) { tren <- x$tren ok <- function(z) { is.null(z) || is.numeric(z) || is.function(z) } return(if(!is.list(tren)) ok(tren) else all(unlist(lapply(tren, ok)))) } ##### Table of rules for handling rmh models ################## spatstatRmhInfo <- function(cifname) { rules <- .Spatstat.RmhTable[[cifname]] if(is.null(rules)) stop(paste("Unrecognised cif:", sQuote(cifname)), call.=FALSE) return(rules) } .Spatstat.RmhTable <- list( # # 0. Poisson (special case) # 'poisson'= list( C.id=NA, multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the Poisson process" with(par, forbidNA(beta, ctxt)) par <- check.named.list(par, "beta", ctxt) with(par, explain.ifnot(all(beta >= 0), ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach = function(par, ...) { return(0) } ), # # 1. Strauss. # 'strauss'= list( C.id="strauss", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the strauss cif" par <- check.named.list(par, c("beta","gamma","r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else r) } ), # # 2. Strauss with hardcore. # 'straush' = list( C.id="straush", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the straush cif" par <- check.named.list(par, c("beta","gamma","r","hc"), ctxt) # treat hc=NA as absence of hard core par <- within(par, if(is.na(hc)) { hc <- 0 } ) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- hc; gamma <- 1 } ) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(hc, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(hc, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) with(par, explain.ifnot(hc >= 0, ctxt)) with(par, explain.ifnot(hc <= r, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc gamma <- par$gamma switch(kind, integrable=(hc > 0 || gamma <= 1), stabilising=(hc > 0) ) }, explainvalid=list( integrable="hc > 0 or gamma <= 1", stabilising="hc > 0"), reach = function(par, ...) { h <- par[["hc"]] r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) h else r) } ), # # 3. Softcore. 
# 'sftcr' = list( C.id="sftcr", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the sftcr cif" par <- check.named.list(par, c("beta","sigma","kappa"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(sigma, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.1.real(sigma, ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(sigma >= 0, ctxt)) with(par, explain.ifnot(kappa >= 0 && kappa <= 1, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach = function(par, ..., epsilon=0) { if(epsilon==0) return(Inf) kappa <- par[["kappa"]] sigma <- par[["sigma"]] return(sigma/(epsilon^(kappa/2))) } ), # # 4. Multitype Strauss. # 'straussm' = list( C.id="straussm", multitype=TRUE, parhandler=function(par, types) { ctxt <- "For the straussm cif" par <- check.named.list(par, c("beta","gamma","radii"), ctxt) beta <- par$beta gamma <- par$gamma r <- par$radii ntypes <- length(types) check.finite(beta, ctxt) check.nvector(beta, ntypes, TRUE, "types") MultiPair.checkmatrix(gamma, ntypes, "par$gamma") gamma[is.na(gamma)] <- 1 check.finite(gamma, ctxt) MultiPair.checkmatrix(r, ntypes, "par$radii") if(any(nar <- is.na(r))) { r[nar] <- 0 gamma[nar] <- 1 } check.finite(r, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(gamma >= 0), ctxt) explain.ifnot(all(r >= 0), ctxt) par <- list(beta=beta, gamma=gamma, r=r) return(par) }, validity=function(par, kind) { gamma <- par$gamma radii <- par$radii dg <- diag(gamma) dr <- diag(radii) hard <-!is.na(dg) & (dg == 0) & !is.na(dr) & (dr > 0) operative <- !is.na(gamma) & !is.na(radii) & (radii > 0) switch(kind, stabilising=all(hard), integrable=all(hard) || all(gamma[operative] <= 1)) }, explainvalid=list( integrable=paste( "gamma[i,j] <= 1 for all i and j,", "or gamma[i,i] = 0 for all i"), stabilising="gamma[i,i] = 0 for all 
i"), reach = function(par, ...) { r <- par$radii g <- par$gamma operative <- ! (is.na(r) | (g == 1)) return(max(0, r[operative])) } ), # # 5. Multitype Strauss with hardcore. # 'straushm' = list( C.id="straushm", multitype=TRUE, parhandler=function(par, types) { ctxt="For the straushm cif" par <- check.named.list(par, c("beta","gamma","iradii","hradii"), ctxt) beta <- par$beta gamma <- par$gamma iradii <- par$iradii hradii <- par$hradii ntypes <- length(types) check.nvector(beta, ntypes, TRUE, "types") check.finite(beta, ctxt) MultiPair.checkmatrix(gamma, ntypes, "par$gamma") gamma[is.na(gamma)] <- 1 check.finite(gamma, ctxt) MultiPair.checkmatrix(iradii, ntypes, "par$iradii") if(any(nar <- is.na(iradii))) { iradii[nar] <- 0 gamma[nar] <- 1 } check.finite(iradii, ctxt) MultiPair.checkmatrix(hradii, ntypes, "par$hradii") hradii[is.na(hradii)] <- 0 check.finite(hradii, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(gamma >= 0), ctxt) explain.ifnot(all(iradii >= 0), ctxt) explain.ifnot(all(hradii >= 0), ctxt) explain.ifnot(all(iradii >= hradii), ctxt) par <- list(beta=beta,gamma=gamma,iradii=iradii,hradii=hradii) return(par) }, validity=function(par, kind) { gamma <- par$gamma iradii <- par$iradii hradii <- par$hradii dh <- diag(hradii) dg <- diag(gamma) dr <- diag(iradii) hhard <- !is.na(dh) & (dh > 0) ihard <- !is.na(dr) & (dr > 0) & !is.na(dg) & (dg == 0) hard <- hhard | ihard operative <- !is.na(gamma) & !is.na(iradii) & (iradii > 0) switch(kind, stabilising=all(hard), integrable={ all(hard) || all(gamma[operative] <= 1) }) }, explainvalid=list( integrable=paste( "hradii[i,i] > 0 or gamma[i,i] = 0 for all i, or", "gamma[i,j] <= 1 for all i and j"), stabilising="hradii[i,i] > 0 or gamma[i,i] = 0 for all i"), reach=function(par, ...) { r <- par$iradii h <- par$hradii g <- par$gamma roperative <- ! (is.na(r) | (g == 1)) hoperative <- ! is.na(h) return(max(0, r[roperative], h[hoperative])) } ), # # 6. 
Diggle-Gates-Stibbard interaction # (function number 1 from Diggle, Gates, and Stibbard) 'dgs' = list( C.id="dgs", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the dgs cif" par <- check.named.list(par, c("beta","rho"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(rho, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(rho, ctxt)) with(par, explain.ifnot(rho >= 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach=function(par, ...) { return(par[["rho"]]) } ), # # 7. Diggle-Gratton interaction # (function number 2 from Diggle, Gates, and Stibbard). 'diggra' = list( C.id="diggra", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the diggra cif" par <- check.named.list(par, c("beta","kappa","delta","rho"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.finite(delta, ctxt)) with(par, check.finite(rho, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, check.1.real(delta, ctxt)) with(par, check.1.real(rho, ctxt)) with(par, explain.ifnot(kappa >= 0, ctxt)) with(par, explain.ifnot(delta >= 0, ctxt)) with(par, explain.ifnot(rho >= 0, ctxt)) with(par, explain.ifnot(delta < rho, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE",stabilising="FALSE"), reach=function(par, ...) { return(par[["rho"]]) } ), # # 8. Geyer saturation model # 'geyer' = list( C.id="geyer", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the geyer cif" par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(sat, ctxt)) par <- within(par, sat <- min(sat, .Machine$integer.max-100)) par <- within(par, if(is.na(gamma)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(sat, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else 2 * r) } ), # # 9. The ``lookup'' device. This permits simulating, at least # approximately, ANY pairwise interaction function model # with isotropic pair interaction (i.e. depending only on distance). # The pair interaction function is provided as a vector of # distances and corresponding function values which are used # as a ``lookup table'' by the C code. # 'lookup' = list( C.id="lookup", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the lookup cif" par <- check.named.list(par, c("beta","h"), ctxt, "r") with(par, check.finite(beta, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) beta <- par[["beta"]] h.init <- par[["h"]] r <- par[["r"]] if(is.null(r)) { if(!is.stepfun(h.init)) stop(paste("For cif=lookup, if component r of", "par is absent then component h must", "be a stepfun object.")) if(!is.cadlag(h.init)) stop(paste("The lookup pairwise interaction step", "function must be right continuous,\n", "i.e. 
built using the default values of the", sQuote("f"), "and", sQuote("right"), "arguments for stepfun.")) r <- knots(h.init) h0 <- get("yleft",envir=environment(h.init)) h <- h.init(r) nlook <- length(r) if(!identical(all.equal(h[nlook],1),TRUE)) stop(paste("The lookup interaction step function", "must be equal to 1 for", dQuote("large"), "distances.")) if(r[1] <= 0) stop(paste("The first jump point (knot) of the lookup", "interaction step function must be", "strictly positive.")) h <- c(h0,h) } else { h <- h.init nlook <- length(r) if(length(h) != nlook) stop("Mismatch of lengths of h and r lookup vectors.") if(any(is.na(r))) stop("Missing values not allowed in r lookup vector.") if(is.unsorted(r)) stop("The r lookup vector must be in increasing order.") if(r[1] <= 0) stop(paste("The first entry of the lookup vector r", "should be strictly positive.")) h <- c(h,1) } if(any(h < 0)) stop(paste("Negative values in the lookup", "pairwise interaction function.")) if(h[1] > 0 & any(h > 1)) stop(paste("Lookup pairwise interaction function does", "not define a valid point process.")) rmax <- r[nlook] r <- c(0,r) nlook <- nlook+1 deltar <- mean(diff(r)) if(identical(all.equal(diff(r),rep.int(deltar,nlook-1)),TRUE)) { par <- list(beta=beta,nlook=nlook, equisp=1, deltar=deltar,rmax=rmax, h=h) } else { par <- list(beta=beta,nlook=nlook, equisp=0, deltar=deltar,rmax=rmax, h=h, r=r) } return(par) }, validity=function(par, kind) { h <- par$h if(is.stepfun(h)) h <- eval(expression(c(yleft,y)),envir=environment(h)) switch(kind, integrable={ (h[1] == 0) || all(h <= 1) }, stabilising={ h[1] == 0 }) }, explainvalid=list( integrable="h[1] == 0 or h[i] <= 1 for all i", stabilising="h[1] == 0"), reach = function(par, ...) { r <- par[["r"]] h <- par[["h"]] if(is.null(r)) r <- knots(h) return(max(r)) } ), # # 10. Area interaction # 'areaint'= list( C.id="areaint", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the areaint cif" par <- check.named.list(par, c("beta","eta","r"), ctxt) par <- within(par, if(is.na(r)) { r <- 0 }) with(par, check.finite(beta, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(eta, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.finite(eta, ctxt)) with(par, check.finite(r, ctxt)) with(par, explain.ifnot(eta >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) { r <- par[["r"]] eta <- par[["eta"]] return(if(eta == 1) 0 else (2 * r)) } ), # # 11. The ``badgey'' (Baddeley-Geyer) model. # 'badgey' = list( C.id="badgey", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the badgey cif" par <- check.named.list(par, c("beta","gamma","r","sat"), ctxt) par <- within(par, sat <- pmin(sat, .Machine$integer.max-100)) par <- within(par, gamma[is.na(gamma) | is.na(r)] <- 1) par <- within(par, r[is.na(r)] <- 0) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(sat, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(all(gamma >= 0), ctxt)) with(par, explain.ifnot(all(r >= 0), ctxt)) with(par, explain.ifnot(all(sat >= 0), ctxt)) with(par, explain.ifnot(length(gamma) == length(r), ctxt)) gamma <- par[["gamma"]] r <- par[["r"]] sat <- par[["sat"]] if(length(sat)==1) sat <- rep.int(sat,length(gamma)) else explain.ifnot(length(sat) == length(gamma), ctxt) mmm <- cbind(gamma,r,sat) mmm <- mmm[fave.order(r),] ndisc <- length(r) par <- list(beta=par$beta,ndisc=ndisc,parms=as.vector(t(mmm))) return(par) }, validity=function(par, kind) { switch(kind, integrable=TRUE, stabilising=FALSE) }, explainvalid=list(integrable="TRUE", stabilising="FALSE"), reach = function(par, ...) 
{ r <- par[["r"]] gamma <- par[["gamma"]] operative <- (gamma != 1) return(if(!any(operative)) 0 else (2 * max(r[operative]))) } ), # # 12. The hard core process 'hardcore' = list( C.id="hardcore", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the hardcore cif" par <- check.named.list(par, c("beta", "hc"), ctxt) par <- within(par, if(is.na(hc)) { hc <- 0 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(hc, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(hc, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc switch(kind, integrable=TRUE, stabilising=(hc > 0)) }, explainvalid=list(integrable="TRUE", stabilising="hc > 0"), reach = function(par, ...) { hc <- par[["hc"]] return(hc) } ), # # Lucky 13. Fiksel process 'fiksel' = list( C.id="fiksel", multitype=FALSE, parhandler=function(par, ...) { ctxt <- "For the Fiksel cif" par <- check.named.list(par, c("beta", "r", "hc", "kappa", "a"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.finite(hc, ctxt)) with(par, check.finite(kappa, ctxt)) with(par, check.finite(a, ctxt)) with(par, check.1.real(r, ctxt)) with(par, check.1.real(hc, ctxt)) with(par, check.1.real(kappa, ctxt)) with(par, check.1.real(a, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(hc >= 0, ctxt)) with(par, explain.ifnot(r > hc, ctxt)) return(par) }, validity=function(par, kind) { hc <- par$hc a <- par$a switch(kind, integrable=(hc > 0 || a <= 0), stabilising=(hc > 0)) }, explainvalid=list( integrable="hc > 0 or a <= 0", stabilising="hc > 0"), reach = function(par, ...) { r <- par[["r"]] hc <- par[["hc"]] a <- par[["a"]] return(if(a != 0) r else hc) } ), # # 14. Lennard-Jones 'lennard' = list( C.id="lennard", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the Lennard-Jones cif" par <- check.named.list(par, c("beta", "sigma", "epsilon"), ctxt) with(par, check.finite(beta, ctxt)) with(par, check.finite(sigma, ctxt)) with(par, check.finite(epsilon, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, check.1.real(sigma, ctxt)) with(par, check.1.real(epsilon, ctxt)) with(par, explain.ifnot(sigma > 0, ctxt)) with(par, explain.ifnot(epsilon > 0, ctxt)) return(par) }, validity=function(par, kind) { switch(kind, integrable=(par$sigma > 0), stabilising=FALSE) }, explainvalid=list( integrable="sigma > 0", stabilising="FALSE"), reach = function(par, ...) { sigma <- par[["sigma"]] return(2.5 * sigma) } ), # # 15. Multitype hardcore. # 'multihard' = list( C.id="multihard", multitype=TRUE, parhandler=function(par, types) { ctxt="For the multihard cif" par <- check.named.list(par, c("beta","hradii"), ctxt) beta <- par$beta hradii <- par$hradii ntypes <- length(types) check.nvector(beta, ntypes, TRUE, "types") check.finite(beta, ctxt) MultiPair.checkmatrix(hradii, ntypes, "par$hradii") hradii[is.na(hradii)] <- 0 check.finite(hradii, ctxt) explain.ifnot(all(beta >= 0), ctxt) explain.ifnot(all(hradii >= 0), ctxt) par <- list(beta=beta,hradii=hradii) return(par) }, validity=function(par, kind) { switch(kind, integrable=return(TRUE), stabilising={ hself <- diag(par$hradii) repel <- !is.na(hself) & (hself > 0) return(all(repel)) }) }, explainvalid=list( integrable="TRUE", stabilising="hradii[i,i] > 0 for all i"), reach=function(par, ...) { return(max(0, par$hradii, na.rm=TRUE)) } ), # # 16. Triplets. # 'triplets'= list( C.id="triplets", multitype=FALSE, parhandler=function(par, ...) 
{ ctxt <- "For the triplets cif" par <- check.named.list(par, c("beta","gamma","r"), ctxt) # treat r=NA as absence of interaction par <- within(par, if(is.na(r)) { r <- 0; gamma <- 1 }) with(par, check.finite(beta, ctxt)) with(par, check.finite(gamma, ctxt)) with(par, check.finite(r, ctxt)) with(par, check.1.real(gamma, ctxt)) with(par, check.1.real(r, ctxt)) with(par, explain.ifnot(all(beta >= 0), ctxt)) with(par, explain.ifnot(gamma >= 0, ctxt)) with(par, explain.ifnot(r >= 0, ctxt)) return(par) }, validity=function(par, kind) { gamma <- par$gamma switch(kind, integrable=(gamma <= 1), stabilising=(gamma == 0) ) }, explainvalid=list( integrable="gamma <= 1", stabilising="gamma == 0"), reach = function(par, ...) { r <- par[["r"]] g <- par[["gamma"]] return(if(g == 1) 0 else r) } ) # end of list '.Spatstat.RmhTable' ) spatstat/R/linfun.R0000644000176000001440000000340612237642727014003 0ustar ripleyusers# # linfun.R # # Class of functions of location on a linear network # # $Revision: 1.5 $ $Date: 2012/10/21 02:03:31 $ # linfun <- function(f, L) { stopifnot(is.function(f)) stopifnot(inherits(L, "linnet")) needargs <- c("x", "y", "seg", "tp") if(!all(needargs %in% names(formals(f)))) stop(paste("f must have arguments named", commasep(sQuote(needargs)))) class(f) <- c("linfun", class(f)) attr(f, "L") <- L return(f) } print.linfun <- function(x, ...) { L <- as.linnet(x) if(!is.null(explain <- attr(x, "explain"))) { explain(x) } else { cat("Function on linear network\n") print(as.function(x), ...) 
cat("Function domain:\n") print(L) } invisible(NULL) } as.linim.linfun <- function(X, L, ..., eps = NULL, dimyx = NULL, xy = NULL) { if(missing(L) || is.null(L)) L <- as.linnet(X) # create template Y <- as.linim(1, L, eps=eps, dimyx=dimyx, xy=xy) # extract (x,y) and local coordinates df <- attr(Y, "df") coo <- df[, c("x", "y", "mapXY", "tp")] colnames(coo)[3] <- "seg" # evaluate function vals <- do.call(X, append(as.list(coo), list(...))) # replace values df$values <- vals attr(Y, "df") <- df Y[!is.na(Y$v)] <- vals return(Y) } plot.linfun <- function(x, ..., L=NULL, eps = NULL, dimyx = NULL, xy = NULL, main="") { xname <- short.deparse(substitute(x)) if(is.null(L)) L <- as.linnet(x) Z <- as.linim(x, eps=eps, dimyx=dimyx, xy=xy, L=L) plot(Z, ..., main=main) } as.owin.linfun <- function(W, ...) { as.owin(as.linnet(W)) } as.linnet.linfun <- function(X, ...) { attr(X, "L") } as.function.linfun <- function(x, ...) { nax <- names(attributes(x)) if(!is.null(nax)) { retain <- (nax == "srcref") attributes(x)[!retain] <- NULL } return(x) } spatstat/R/effectfun.R0000755000176000001440000001204012237642727014452 0ustar ripleyusers# # effectfun.R # # $Revision: 1.11 $ $Date: 2013/04/25 06:37:43 $ # effectfun <- function(model, covname, ..., se.fit=FALSE) { stopifnot(is.ppm(model)) dotargs <- list(...) 
# determine names of covariates involved intern.names <- if(is.marked.ppm(model)) c("x", "y", "marks") else c("x", "y") co <- model$covariates extern.names <- names(co) # find the relevant covariate if(missing(covname)) stop("covname must be provided") if(!(covname %in% c(intern.names, extern.names))) stop(paste("model does not have a covariate called", sQuote(covname))) # check that fixed values for all other covariates are provided given.covs <- names(dotargs) if(any(uhoh <- !(extern.names %in% c(given.covs, covname)))) { nuh <- sum(uhoh) stop(paste(ngettext(nuh, "A value for the covariate", "Values for the covariates"), commasep(dQuote(extern.names[uhoh])), "must be provided (as", ngettext(nuh, "an argument", "arguments"), "to effectfun)")) } # establish type and range of covariate values N0 <- 256 if(covname == "x") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$xrange Zvals <- seq(from=Zr[1], to=Zr[2], length.out=N0) } else if(covname == "y") { covtype <- "real" W <- as.owin(data.ppm(model)) Zr <- W$yrange Zvals <- seq(from=Zr[1], to=Zr[2], length.out=N0) } else if(covname == "marks") { covtype <- "factor" Zvals <- levels(marks(data.ppm(model))) } else { # covariate is external if(is.data.frame(co)) { Z <- co$covname covtype <- typeof(Z) if(covtype == "double") covtype <- "real" switch(covtype, real={ Zr <- range(Z) Zvals <- seq(from=Zr[1], to=Zr[2], length.out=N0) }, integer={ Zr <- range(Z) Zvals <- seq(from=Zr[1], to=Zr[2], by=ceiling((diff(Zr)+1)/N0)) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } else if(is.list(co)) { Z <- co[[covname]] # convert to image if(!is.im(Z)) Z <- as.im(Z, W=as.owin(model)) covtype <- Z$type switch(covtype, real={ Zr <- summary(Z)$range Zvals <- seq(from=Zr[1], to=Zr[2], length.out=N0) }, factor={ Zvals <- levels(Z) }, logical={ Zvals <- c(FALSE, TRUE) }, stop(paste("Cannot handle covariate of type", dQuote(covtype))) ) } else 
stop("Unrecognised format for covariates in model") } # set up data frames of fake data for predict method # First set up default, constant value for each covariate N <- length(Zvals) fakeloc <- resolve.defaults(dotargs, list(x=0, y=0))[c("x","y")] if(is.marked.ppm(model)) { lev <- levels(marks(data.ppm(model))) fakeloc$marks <- lev[1] } fakeloc <- lapply(fakeloc, function(x,N) { rep.int(x[1],N)}, N=N) fakecov <- lapply(dotargs, function(x,N) { rep.int(x[1],N)}, N=N) # Overwrite value for covariate of interest if(covname %in% intern.names) fakeloc[[covname]] <- Zvals else fakecov[[covname]] <- Zvals # convert to data frame fakeloc <- do.call("data.frame", fakeloc) fakecov <- if(length(fakecov) > 0) do.call("data.frame", fakecov) else NULL # # Now predict lambda <- predict(model, locations=fakeloc, covariates=fakecov) if(se.fit) { se <- predict(model, locations=fakeloc, covariates=fakecov, type="se") sedf <- data.frame(se =se, hi = lambda + 2 * se, lo = lambda - 2 * se) } # dfin <- if(!is.null(fakecov)) cbind(fakeloc, fakecov) else fakeloc dfin <- dfin[covname] df <- cbind(dfin, data.frame(lambda=lambda)) # if(covtype != "real") { result <- df if(se.fit) result <- cbind(result, sedf) } else { bc <- paren(covname) result <- fv(df, argu=covname, ylab=substitute(lambda(X), list(X=as.name(covname))), labl=c(covname, paste("hat(%s)", bc)), valu="lambda", alim=Zr, desc=c(paste("value of covariate", covname), "fitted intensity"), fname="lambda") if(se.fit) { result <- bind.fv(result, sedf, labl=c(paste("se[%s]", bc), paste("%s[hi]", bc), paste("%s[lo]", bc)), desc=c("standard error of fitted trend", "upper limit of pointwise 95%% CI for trend", "lower limit of pointwise 95%% CI for trend")) fvnames(result, ".") <- c("lambda", "hi", "lo") formula(result) <- paste(". 
~ ", covname) } } return(result) } spatstat/R/Kcom.R0000755000176000001440000003072512237642727013410 0ustar ripleyusers# # Kcom.R # # model compensated K-function # # $Revision: 1.9 $ $Date: 2013/08/21 09:28:16 $ # Kcom <- local({ Kcom <- function(object, r=NULL, breaks=NULL, ..., correction=c("border", "isotropic", "translate"), conditional=!is.poisson(object), restrict=FALSE, trend=~1, interaction=Poisson(), rbord=reach(interaction), compute.var=TRUE, truecoef=NULL, hi.res=NULL) { if(inherits(object, "ppm")) fit <- object else if(inherits(object, "ppp") || inherits(object, "quad")) fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord) else stop("object should be a fitted point process model or a point pattern") if(missing(conditional) || is.null(conditional)) conditional <- !is.poisson(fit) rfixed <- !is.null(r) || !is.null(breaks) # Extract data and window Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) Win <- X$window # selection of edge corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", isotropic="isotropic", Ripley="isotropic", ripley="isotropic", trans="translation", translate="translation", translation="translation", best="best"), multi=TRUE) correction <- implemented.for.K(correction, Win$type, correction.given) opt <- list(bord = any(correction == "border"), tran = any(correction == "translation"), ripl = any(correction == "isotropic")) if(sum(unlist(opt)) == 0) stop("No corrections selected") # edge correction algorithm algo <- if(!conditional) "classical" else if(restrict) "restricted" else "reweighted" # conditioning on border region? 
if(!conditional) { Wfree <- Win } else { rbord <- fit$rbord Wfree <- erosion(Win, rbord) if(restrict) { retain <- inside.owin(union.quad(Q), , Wfree) # Throw away boundary data Q <- Q[Wfree] X <- X[Wfree] Win <- Wfree } } # Extract quadrature info U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # quadrature points used USED <- if(algo == "reweighted") (bdist.points(U) > rbord) else rep.int(TRUE, U$n) # basic statistics npoints <- X$n area <- area.owin(Win) lambda <- npoints/area lambda2 <- npoints * (npoints - 1)/(area^2) # adjustments to account for restricted domain of pseudolikelihood if(algo == "reweighted") { npoints.used <- sum(Z & USED) area.used <- sum(WQ[USED]) lambda.used <- npoints.used/area.used lambda2.used <- npoints.used * (npoints.used - 1)/(area.used^2) } else { npoints.used <- npoints area.used <- area lambda.used <- lambda lambda2.used <- lambda2 } # 'r' values rmaxdefault <- rmax.rule("K", if(restrict) Wfree else Win, npoints/area) breaks <- handle.r.b.args(r, breaks, Wfree, rmaxdefault=rmaxdefault) r <- breaks$r nr <- length(r) rmax <- breaks$max # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame K <- data.frame(r=r, pois=pi * r^2) desc <- c("distance argument r", "expected %s for CSR") K <- fv(K, "r", substitute(K(r), NULL), "pois", , alim, c("r","%s[pois](r)"), desc, fname="K") ############### start computing ################## # residuals resid <- residuals(fit, type="raw",drop=FALSE, coefs=truecoef, quad=hi.res) resval <- with(resid, "increment") rescts <- with(resid, "continuous") if(restrict) { # keep only data inside Wfree resval <- resval[retain] rescts <- rescts[retain] } # close pairs of points # (quadrature point to data point) clos <- crosspairs(U, X, rmax) dIJ <- clos$d I <- clos$i J <- clos$j UI <- U[I] XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] 
is a data point DD <- ZI & !EIJ # TRUE for pairs of distinct data points only nDD <- sum(DD) # determine whether a quadrature point will be used in integral okI <- USED[I] if(spatstat.options("Kcom.remove.zeroes")) okI <- okI & !EIJ # residual weights wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] #################################################### if(opt$bord) { # border method # Compute distances to boundary # (in restricted case, the window of U has been adjusted) b <- bdist.points(U) bI <- b[I] # reduced sample for K(r) of data only RSX <- Kount(dIJ[DD & okI], bI[DD & okI], b[Z & USED], breaks) # Kb <- RSX$numerator/(lambda.used * RSX$denom.count) Kb <- RSX$numerator/(lambda * RSX$denom.count) K <- bind.fv(K, data.frame(border=Kb), "hat(%s)[bord](r)", nzpaste(algo, "border-corrected nonparametric estimate of %s"), "border") # reduced sample for adjustment integral RSD <- Kwtsum(dIJ[okI], bI[okI], wcIJ[okI], b[Z & USED], rep.int(1, npoints.used), breaks) # lambdaU <- (npoints.used + 1)/area.used lambdaU <- (npoints + 1)/area Kb <- RSD$numerator/((RSD$denominator + 1) * lambdaU) K <- bind.fv(K, data.frame(bcom=Kb), "bold(C)~hat(%s)[bord](r)", nzpaste("model compensator of", algo, "border-corrected %s"), "border") } if(opt$tran) { # translation correction edgewt <- switch(algo, classical = edge.Trans(UI, XJ, paired=TRUE), restricted = edge.Trans(UI, XJ, paired=TRUE), reweighted = edge.Trans.modif(UI, XJ, Win, Wfree, paired=TRUE)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) Ktrans <- cumsum(whDD)/(lambda2 * area.used) Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "hat(%s)[trans](r)", nzpaste(algo, "translation-corrected nonparametric estimate of %s"), "trans") # lambda2U <- (npoints.used + 1) * npoints.used/(area.used^2) lambda2U <- (npoints + 1) * npoints/(area^2) Ktrans <- cumsum(wh)/(lambda2U * 
area.used) Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(tcom=Ktrans), "bold(C)~hat(%s)[trans](r)", nzpaste("model compensator of", algo, "translation-corrected %s"), "trans") } if(opt$ripl) { # Ripley isotropic correction edgewt <- edge.Ripley(UI, matrix(dIJ, ncol=1)) wh <- whist(dIJ[okI], breaks$val, (edgewt * wcIJ)[okI]) whDD <- whist(dIJ[DD & okI], breaks$val, edgewt[DD & okI]) # Kiso <- cumsum(whDD)/(lambda2.used * area.used) Kiso <- cumsum(whDD)/(lambda2 * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "hat(%s)[iso](r)", nzpaste(algo, "isotropic-corrected nonparametric estimate of %s"), "iso") # lambda2U <- (npoints.used + 1) * npoints.used/(area.used^2) lambda2U <- (npoints + 1) * npoints/(area^2) Kiso <- cumsum(wh)/(lambda2U * area.used) Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(icom=Kiso), "bold(C)~hat(%s)[iso](r)", nzpaste("model compensator of", algo, "isotropic-corrected %s"), "iso") # if(compute.var) { savedotnames <- fvnames(K, ".") # compute contribution to compensator from each quadrature point dOK <- dIJ[okI] eOK <- edgewt[okI] iOK <- I[okI] denom <- lambda2U * area.used variso <- varsumiso <- 0 * Kiso for(i in sort(unique(iOK))) { relevant <- (iOK == i) tincrem <- whist(dOK[relevant], breaks$val, eOK[relevant]) localterm <- cumsum(tincrem)/denom variso <- variso + wc[i] * localterm^2 if(Z[i]) varsumiso <- varsumiso + localterm^2 } sdiso <- sqrt(variso) K <- bind.fv(K, data.frame(ivar=variso, isd =sdiso, ihi = 2*sdiso, ilo = -2*sdiso, ivarsum=varsumiso), c("bold(C)^2~hat(%s)[iso](r)", "sqrt(bold(C)^2~hat(%s)[iso](r))", "bold(R)~hat(%s)[hi](r)", "bold(R)~hat(%s)[lo](r)", "hat(C)^2~hat(%s)[iso](r)"), c("Poincare variance of isotropic-corrected %s", "sqrt(Poincare variance) of isotropic-corrected %s", "upper critical band for isotropic-corrected %s", "lower critical band for isotropic-corrected %s", "data estimate of Poincare variance of %s"), "iso") # fvnames(K, ".") <- c(savedotnames, "isd") fvnames(K, ".") <- 
savedotnames
      }
    }
    # default is to display all corrections
    formula(K) <- . ~ r
    unitname(K) <- unitname(X)
    # secret tag used by 'Kres'
    attr(K, "maker") <- "Kcom"
    return(K)
  }

  # `reweighted' translation edge correction
  #
  # Computes the translation edge-correction weight for each (X[i], Y[j]) pair
  # (or for each matched pair i when paired=TRUE), allowing X and Y to live in
  # DIFFERENT windows WX, WY -- unlike the standard edge.Trans.
  # Intended weight (per the comment below):
  #     f = area(WY) / area(intersect.owin(WY, shift(WX, X[i] - Y[j])))
  # Weights are clipped at 'trim' to avoid huge values near the boundary.
  edge.Trans.modif <- function(X, Y=X,
                               WX=X$window, WY=Y$window,
                               exact=FALSE, paired=FALSE,
                               trim=spatstat.options("maxedgewt"))
  {
    # computes edge correction factor
    # f = area(WY)/area(intersect.owin(WY, shift(WX, X[i] - Y[j])))
    X <- as.ppp(X, WX)
    W <- X$window
    x <- X$x
    y <- X$y
    Y <- as.ppp(Y, WY)
    xx <- Y$x
    yy <- Y$y
    nX <- npoints(X)
    nY <- npoints(Y)
    if(paired && (nX != nY))
      stop("X and Y should have equal length when paired=TRUE")
    # For irregular polygons, exact evaluation is very slow;
    # so use pixel approximation, unless exact=TRUE
    if(!exact) {
      if(WX$type == "polygonal")
        WX <- as.mask(WX)
      if(WY$type == "polygonal")
        WY <- as.mask(WY)  # BUG FIX: was as.mask(WX), which silently replaced
                           # WY by a discretisation of the WRONG window whenever
                           # WX != WY (the 'reweighted' case that uses this code)
    }
    typeX <- WX$type
    typeY <- WY$type
    if(typeX == "rectangle" && typeY == "rectangle") {
      # Fast code for this case
      if(!paired) {
        DX <- abs(outer(x,xx,"-"))
        DY <- abs(outer(y,yy,"-"))
      } else {
        DX <- abs(xx - x)
        DY <- abs(yy - y)
      }
      A <- WX$xrange
      B <- WX$yrange
      a <- WY$xrange
      b <- WY$yrange
      # compute width and height of intersection
      wide <- pmin.int(a[2], A[2]+DX) - pmax(a[1], A[1]+DX)
      high <- pmin.int(b[2], B[2]+DY) - pmax(b[1], B[1]+DY)
      # edge correction weight
      weight <- diff(a) * diff(b) / (wide * high)
      if(!paired)
        weight <- matrix(weight, nrow=X$n, ncol=Y$n)
    } else if(typeX %in% c("rectangle", "polygonal") &&
              typeY %in% c("rectangle", "polygonal")) {
      # This code is SLOW
      WX <- as.polygonal(WX)
      WY <- as.polygonal(WY)
      a <- area.owin(W)
      # NOTE(review): 'W' is the window of X (i.e. WX); the rectangle and mask
      # branches use area(WY) as the numerator instead -- confirm this is
      # intentional when WX != WY.
      if(!paired) {
        weight <- matrix(, nrow=nX, ncol=nY)
        if(nX > 0 && nY > 0) {
          for(i in seq_len(nX)) {
            X.i <- c(x[i], y[i])
            for(j in seq_len(nY)) {
              shiftvector <- X.i - c(xx[j],yy[j])
              WXshift <- shift(WX, shiftvector)
              b <- overlap.owin(WY, WXshift)
              weight[i,j] <- a/b
            }
          }
        }
      } else {
        nX <- npoints(X)
        weight <- numeric(nX)
        if(nX > 0) {
          for(i in seq_len(nX)) {
            shiftvector <- c(x[i],y[i]) - c(xx[i],yy[i])
WXshift <- shift(WX, shiftvector) b <- overlap.owin(WY, WXshift) weight[i] <- a/b } } } } else { WX <- as.mask(WX) WY <- as.mask(WY) # make difference vectors if(!paired) { DX <- outer(x,xx,"-") DY <- outer(y,yy,"-") } else { DX <- x - xx DY <- y - yy } # compute set cross-covariance g <- setcov(WY,WX) # evaluate set cross-covariance at these vectors gvalues <- lookup.im(g, as.vector(DX), as.vector(DY), naok=TRUE, strict=FALSE) weight <- area.owin(WY)/gvalues } # clip high values if(length(weight) > 0) weight <- pmin.int(weight, trim) if(!paired) weight <- matrix(weight, nrow=X$n, ncol=Y$n) return(weight) } Kcom }) spatstat/R/superimpose.R0000755000176000001440000001627712237642727015100 0ustar ripleyusers# superimpose.R # # $Revision: 1.25 $ $Date: 2013/04/25 06:37:43 $ # # ############################# superimpose <- function(...) { # remove any NULL arguments arglist <- list(...) if(any(isnull <- sapply(arglist, is.null))) return(do.call("superimpose", arglist[!isnull])) UseMethod("superimpose") } superimpose.ppp <- superimpose.default <- function(..., W=NULL, check=TRUE) { arglist <- list(...) # Check that all "..." 
arguments have x, y coordinates hasxy <- unlist(lapply(arglist, checkfields, L=c("x", "y"))) if(!all(hasxy)) { nbad <- sum(bad <- !hasxy) stop(paste(ngettext(nbad, "Argument", "Arguments"), commasep(which(bad)), ngettext(nbad, "does not", "do not"), "have components x and y")) } # concatenate lists of (x,y) coordinates XY <- do.call("concatxy", arglist) needcheck <- TRUE # determine whether there is any window information if(!is.owin(W)) { # we have to compute the final window WXY <- NULL Wppp <- NULL if(any(isppp <- unlist(lapply(arglist, is.ppp)))) { # extract windows from ppp objects wins <- unname(lapply(arglist[isppp], as.owin)) # take union Wppp <- if(length(wins) == 1) wins[[1]] else do.call(union.owin, wins) } if(is.function(W)) { # W is a function like bounding.box.xy or ripras # Apply function to the x,y coordinates; it should return an owin WXY <- W(XY) if(!is.owin(WXY)) stop("Function W did not return an owin object") } if(is.character(W)) { # character string identifies a function pW <- pmatch(W, c("convex", "rectangle", "bbox", "none")) if(is.na(pW)) stop(paste("Unrecognised option W=", sQuote(W))) WXY <- switch(pW, convex=ripras(XY), rectangle=ripras(XY, shape="rectangle"), bbox=bounding.box.xy(XY), none=NULL) # in these cases we don't need to verify that the points are inside. needcheck <- !is.null(WXY) } if(is.null(WXY) && is.null(Wppp)) { # no window information return(XY) } W <- union.owin(WXY, Wppp) } # extract the marks if any nobj <- sapply(arglist, function(x) { length(x$x) }) marx <- superimposeMarks(arglist, nobj) # ppp(XY$x, XY$y, window=W, marks=marx, check=check & needcheck) } superimpose.psp <- function(..., W=NULL, check=TRUE) { # superimpose any number of line segment patterns arglist <- list(...) 
if(!all(sapply(arglist, is.psp)))
    stop("Patterns to be superimposed must all be psp objects")
  # extract segment coordinates
  matlist <- lapply(arglist, function(x) { as.matrix(x$ends) })
  # tack them together
  mat <- do.call("rbind", matlist)
  # determine whether there is any window information
  needcheck <- FALSE
  if(!is.owin(W)) {
    # we have to compute the final window
    WXY <- NULL
    Wppp <- NULL  # BUG FIX: was 'Wpsp <- NULL', a dead initialiser; the variable
                  # actually assigned below and consumed by union.owin() is
                  # 'Wppp', which would be unbound if the next branch were
                  # skipped.  (Matches the naming used in superimpose.ppp.)
    if(any(ispsp <- unlist(lapply(arglist, is.psp)))) {
      # extract windows from psp objects
      wins <- unname(lapply(arglist[ispsp], as.owin))
      # take union
      Wppp <- if(length(wins) == 1) wins[[1]] else do.call(union.owin, wins)
    }
    if(is.function(W) || is.character(W)) {
      # guess window from x, y coordinates
      XY <- list(x=cbind(mat[,1], mat[,3]),
                 y=cbind(mat[,2], mat[,4]))
      if(is.function(W)) {
        # W is a function like bounding.box.xy or ripras
        # Apply function to the x,y coordinates; it should return an owin
        WXY <- W(XY)
        if(!is.owin(WXY))
          stop("Function W did not return an owin object")
      }
      if(is.character(W)) {
        # character string identifies a function
        pW <- pmatch(W, c("convex", "rectangle", "bbox", "none"))
        if(is.na(pW))
          stop(paste("Unrecognised option W=", sQuote(W)))
        WXY <- switch(pW,
                      convex=ripras(XY),
                      rectangle=ripras(XY, shape="rectangle"),
                      bbox=bounding.box.xy(XY),
                      none=NULL)
        # in these cases we don't need to verify that the points are inside.
needcheck <- !is.null(WXY) } } W <- union.owin(WXY, Wppp) } # extract marks, if any nobj <- sapply(arglist, nsegments) marx <- superimposeMarks(arglist, nobj) return(as.psp(mat, window=W, marks=marx, check=check)) } superimposeMarks <- function(arglist, nobj) { # combine marks from the objects in the argument list marxlist <- lapply(arglist, marks) marx <- do.call(markappend, unname(marxlist)) nama <- names(arglist) if(length(nama) == length(arglist) && all(nzchar(nama))) { # arguments are named: use names as (extra) marks newmarx <- factor(rep.int(nama, nobj)) marx <- markcbind(marx, newmarx) } return(marx) } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # This function is now deprecated. superimposePSP <- function(..., W=NULL, check=TRUE) { .Deprecated("superimpose","spatstat") # superimpose any number of line segment patterns arglist <- list(...) nargue <- length(arglist) if(nargue == 0) stop("No line segment patterns given") # catch possible abuses if(is.null(W) && any(suspicious <- (names(arglist) == "window"))) { id <- min(which(suspicious)) Win <- arglist[[id]] if(is.owin(Win) || is.null(Win)) { W <- Win arglist <- arglist[-id] nargue <- length(arglist) } } # unpack a list if(nargue == 1) { X <- arglist[[1]] if(!inherits(X, "psp") && inherits(X, "list")) arglist <- X } isnull <- unlist(lapply(arglist, is.null)) arglist <- arglist[!isnull] if(!all(unlist(lapply(arglist, is.psp)))) stop("Some of the arguments are not psp objects") # extract segment coordinates matlist <- lapply(arglist, function(x) { as.matrix(x$ends) }) # tack them together mat <- do.call("rbind", matlist) # extract marks if any marxlist <- lapply(arglist, marks) # check on compatibility of marks mkfmt <- sapply(marxlist,markformat) if(length(unique(mkfmt))>1) stop(paste("Marks of some patterns are of different format\n", " from those of other patterns.\n",sep="")) mkfmt <- mkfmt[1] if(mkfmt=="dataframe") { mcnms <- lapply(marxlist,names) cdim <- 
sapply(mcnms,length) OK <- length(unique(cdim)) == 1 if(OK) { allInOne <- sapply(mcnms,paste,collapse="") OK <- length(unique(allInOne)) == 1 if(!OK) stop("Data frames of marks have different names.\n") } else stop("Data frames of marks have different column dimensions.\n") } # combine the marks marx <- switch(mkfmt, none = NULL, vector = { marxlist <- lapply(marxlist, function(x){as.data.frame.vector(x,nm="v1")}) do.call("rbind", marxlist)[,1] }, dataframe = do.call("rbind", marxlist)) # determine window if(!is.null(W)) W <- as.owin(W) else { # extract windows from psp objects Wlist <- lapply(arglist, as.owin) # take the union of all the windows W <- NULL for(i in seq_along(Wlist)) W <- union.owin(W, Wlist[[i]]) } return(as.psp(mat, window=W, marks=marx, check=check)) } spatstat/R/randomlpp.R0000755000176000001440000000063612237642727014511 0ustar ripleyusers# # random.R # # Random point pattern generators for a linear network # # $Revision: 1.3 $ $Date: 2012/10/20 06:56:01 $ # rpoislpp <- function(lambda, L, ...) { verifyclass(L, "linnet") X <- datagen.rpoisppOnLines(lambda, as.psp(L), ...) 
Y <- lpp(X, L)
  return(Y)
}

# runiflpp: generate n independent uniformly-distributed random points on a
# linear network.
#   n : number of points to generate
#   L : linear network, an object of class "linnet"
# Returns a point pattern on the network (object of class "lpp").
runiflpp <- function(n, L) {
  verifyclass(L, "linnet")
  # generate uniform points on the underlying segment pattern,
  # then attach the network structure
  X <- datagen.runifpointOnLines(n, as.psp(L))
  Y <- lpp(X, L)
  return(Y)
}
spatstat/R/derivfv.R0000644000176000001440000000441112237642727014152 0ustar ripleyusers#
#  derivfv.R
#
#  differentiation for fv objects
#
#  $Revision: 1.4 $ $Date: 2013/08/05 07:08:31 $
#

# deriv.fv: numerically differentiate the selected columns of a function
# value table ("fv" object) with respect to the function argument.
#   expr   : the fv object to differentiate
#   which  : names of columns to differentiate, or an abbreviation
#            recognised by fvnames (e.g. "*")
#   method : "spline" (derivative of a smoothing spline) or "numeric"
#            (first differences)
#   kinks  : optional vector of x values at which the function is not
#            differentiable; the x axis is split at these points and each
#            piece is differentiated separately
deriv.fv <- function(expr, which="*", ...,
                     method=c("spline", "numeric"),
                     kinks=NULL) {
  f <- expr
  method <- match.arg(method)
  # select columns
  if(length(which) == 1 && which %in% .Spatstat.FvAbbrev) {
    if(which == ".x")
      # NOTE(review): message says "smooth" but this is differentiation --
      # looks copy-pasted from Smooth.fv; confirm.
      stop("Cannot smooth the function argument")
    which <- fvnames(f, which)
  }
  if(any(nbg <- !(which %in% names(f))))
    stop(paste("Unrecognised column",
               ngettext(sum(nbg), "name", "names"),
               commasep(sQuote(which[nbg])),
               "in argument", sQuote("which")))
  relevant <- names(f) %in% which
  # get
  rname <- fvnames(f, ".x")
  df <- as.data.frame(f)
  rpos <- which(colnames(df) == rname)
  rvals <- df[,rpos]
  yvals <- df[,relevant,drop=FALSE]
  nr <- length(rvals)
  # cut x axis into intervals?
  if(is.null(kinks)) {
    cutx <- factor(rep(1, nr))
  } else {
    rr <- range(rvals)
    breaks <- sort(unique(kinks))
    # ensure the breakpoints span the full range of x values
    if(breaks[1] > rr[1]) breaks <- c(rr[1], breaks)
    if(max(breaks) < rr[2]) breaks <- c(breaks, rr[2])
    cutx <- cut(rvals, breaks=breaks, include.lowest=TRUE)
  }
  # process each smooth piece separately
  for(segment in levels(cutx)) {
    ii <- (cutx == segment)
    yy <- yvals[ii, , drop=FALSE]
    switch(method,
           numeric = {
             # first differences; last row duplicated to keep length nr
             dydx <- apply(yy, 2, diff)/diff(rvals[ii])
             nd <- nrow(dydx)
             dydx <- rbind(dydx, dydx[nd, ])
           },
           spline = {
             # derivative of a smoothing spline fitted to each column
             dydx <- apply(yy, 2,
                           function(y, r, ...) {
                             ss <- smooth.spline(r, y, ...)
                             predict(ss, r, deriv=1)$y
                           },
                           r=rvals[ii], ...)
}) df[ii, relevant] <- dydx } desc <- attr(expr, "desc") desc[relevant] <- paste("derivative of", desc[relevant]) result <- fv(df, argu=rname, ylab=NULL, valu=fvnames(expr, ".y"), fmla=attr(expr, "fmla"), alim=attr(expr, "alim"), desc=desc, unitname=unitname(expr), fname=paste("D", attr(expr, "fname"), sep="")) fvnames(result, ".") <- fvnames(f, ".") return(result) } spatstat/R/colourschemes.R0000644000176000001440000000241512237642727015362 0ustar ripleyusers# # colourschemes.R # # $Revision: 1.3 $ $Date: 2013/07/17 04:53:48 $ # beachcolourmap <- function(range, ...) { col <- beachcolours(range, ...) z <- colourmap(col, range=range) return(z) } beachcolours <- function(range, sealevel = 0, monochrome=FALSE, ncolours=if(monochrome) 16 else 64, nbeach=1) { if(monochrome) return(grey(seq(from=0,to=1,length.out=ncolours))) stopifnot(is.numeric(range) && length(range) == 2) stopifnot(all(is.finite(range))) depths <- range[1] peaks <- range[2] dv <- diff(range)/(ncolours - 1) epsilon <- nbeach * dv/2 lowtide <- max(sealevel - epsilon, depths) hightide <- min(sealevel + epsilon, peaks) countbetween <- function(a, b, delta) { max(0, round((b-a)/delta)) } nsea <- countbetween(depths, lowtide, dv) nbeach <- countbetween(lowtide, hightide, dv) nland <- countbetween(hightide, peaks, dv) colours <- character(0) if(nsea > 0) colours <- rev(rainbow(nsea, start=3/6,end=4/6)) # cyan/blue if(nbeach > 0) colours <- c(colours, rev(rainbow(nbeach, start=3/12,end=5/12))) # green if(nland > 0) colours <- c(colours, rev(rainbow(nland, start=0, end=1/6))) # red/yellow return(colours) } spatstat/R/rlabel.R0000755000176000001440000000161212237642727013751 0ustar ripleyusers# # rlabel.R # # random (re)labelling # # $Revision: 1.7 $ $Date: 2013/02/27 01:01:20 $ # # rlabel <- function(X, labels=marks(X), permute=TRUE) { stopifnot(is.ppp(X) || is.lpp(X) || is.pp3(X) || is.ppx(X)) if(is.null(labels)) stop("labels not given and marks not present") npts <- npoints(X) if(is.vector(labels) || 
is.factor(labels)) { nlabels <- length(labels) if(permute && (nlabels != npts)) stop("length of labels vector does not match number of points") Y <- X %mark% sample(labels, npts, replace=!permute) } else if(is.data.frame(labels) || is.hyperframe(labels)) { nlabels <- nrow(labels) if(permute && (nlabels != npts)) stop("number of rows of data frame does not match number of points") Y <- X %mark% labels[sample(1:nlabels, npts, replace=!permute), ] } else stop("Format of labels argument is not understood") return(Y) } spatstat/R/connected.R0000755000176000001440000001226512237642727014460 0ustar ripleyusers# # connected.R # # connected component transform # # $Revision: 1.16 $ $Date: 2013/08/23 07:34:24 $ # # Interpreted code for pixel images by Julian Burgos # Rewritten in C by Adrian Baddeley # # Code for point patterns by Adrian Baddeley connected <- function(X, ...) { UseMethod("connected") } connected.im <- function(X, ..., background=NA, method="C") { method <- pickoption("algorithm choice", method, c(C="C", interpreted="interpreted")) if(!is.na(background)) X <- solutionset(X != background) else X <- as.mask(as.owin(X)) connected.owin(X, method=method) } connected.owin <- function(X, ..., method="C") { method <- pickoption("algorithm choice", method, c(C="C", interpreted="interpreted")) # convert X to binary mask X <- as.mask(X) # Y <- X$m nr <- X$dim[1] nc <- X$dim[2] if(method == "C") { ################ COMPILED CODE ######################### # Pad border with FALSE M <- rbind(FALSE, Y, FALSE) M <- cbind(FALSE, M, FALSE) # assign unique label to each foreground pixel L <- M L[M] <- seq_len(sum(M)) L[!M] <- 0 # resolve labels # (warning: 'mat' is overwritten, so DUP=TRUE) z <- .C("cocoImage", mat=as.integer(t(L)), nr=as.integer(nr), nc=as.integer(nc)) # PACKAGE="spatstat") # unpack Z <- matrix(z$mat, nr+2, nc+2, byrow=TRUE) } else { ################ INTERPRETED CODE ######################### # by Julian Burgos # # Pad border with zeros padY <- rbind(0, Y, 0) 
padY <- cbind(0, padY, 0) # Initialise Z <- matrix(0, nrow(padY), ncol(padY)) currentlab <- 1 todo <- as.vector(t(Y)) equiv <- NULL # ........ main loop .......................... while(any(todo)){ # pick first unresolved pixel one <- which(todo)[1] onerow <- ceiling(one/nc) onecol <- one -((onerow-1)*nc) parow=onerow+1 # Equivalent rows & column in padded matrix pacol=onecol+1 #Examine four previously scanned neighbors # (use padded matrix to avoid edge issues) nbrs <- rbind(c(parow-1,pacol-1), c(parow-1,pacol), c(parow, pacol-1), c(parow-1,pacol+1)) px <- sum(padY[nbrs]) if (px==0){ # no neighbours: new component Z[parow,pacol] <- currentlab currentlab <- currentlab+1 todo[one] <- FALSE } else if(px==1) { # one neighbour: assign existing label labs <- unique(Z[nbrs], na.rm=TRUE) labs <- labs[labs != 0] Z[parow,pacol] <- labs[1] currentlab <- max(Z)+1 todo[one] <- FALSE } else { # more than one neighbour: possible merger of labels labs <- unique(Z[nbrs], na.rm=TRUE) labs <- labs[labs != 0] labs <- sort(labs) equiv <- rbind(equiv,c(labs,rep.int(0,times=4-length(labs)))) Z[parow,pacol] <- labs[1] currentlab <- max(Z)+1 todo[one] <- FALSE } } # ........... end of loop ............ # Resolve equivalences ................ 
if(length(equiv)>1){ merges <- (equiv[,2] > 1) nmerge <- sum(merges) if(nmerge==1) equiv <- equiv[which(merges), , drop=FALSE] else if(nmerge > 1) { relevant <- (equiv[,2] > 0) equiv <- equiv[relevant, , drop=FALSE] equiv <- equiv[fave.order(equiv[,1]),] } for (i in 1:nrow(equiv)){ current <- equiv[i, 1] for (j in 2:4){ twin <- equiv[i,j] if (twin>0){ # Change labels matrix Z[which(Z==twin)] <- current # Update equivalence table equiv[which(equiv==twin)] <- current } } } } } ########### COMMON CODE ############################ # Renumber labels sequentially mapped <- (Z != 0) usedlabs <- sort(unique(as.vector(Z[mapped]))) nlabs <- length(usedlabs) labtable <- cumsum(seq_len(max(usedlabs)) %in% usedlabs) Z[mapped] <- labtable[Z[mapped]] # banish zeroes Z[!mapped] <- NA # strip borders Z <- Z[2:(nrow(Z)-1),2:(ncol(Z)-1)] # dress up Z <- im(factor(Z, levels=1:nlabs), xcol=X$xcol, yrow=X$yrow, unitname=unitname(X)) return(Z) } connected.ppp <- function(X, R, ...) { stopifnot(is.ppp(X)) check.1.real(R, "In connected.ppp") stopifnot(R >= 0) internal <- resolve.1.default("internal", list(...), list(internal=FALSE)) nv <- npoints(X) cl <- closepairs(X, R, what="indices") ie <- cl$i - 1L je <- cl$j - 1L ne <- length(ie) DUP <- spatstat.options("dupC") zz <- .C("cocoGraph", nv=as.integer(nv), ne=as.integer(ne), ie=as.integer(ie), je=as.integer(je), label=as.integer(integer(nv)), status=as.integer(integer(1)), DUP=DUP) # PACKAGE="spatstat") if(zz$status != 0) stop("Internal error: connected.ppp did not converge") if(internal) return(zz$label) lab <- zz$label + 1L # Renumber labels sequentially lab <- as.integer(factor(lab)) # Convert labels to factor lab <- factor(lab) # Apply to points Y <- X %mark% lab return(Y) } spatstat/R/evalcovar.R0000644000176000001440000003112012237642727014464 0ustar ripleyusers# # evalcovar.R # # evaluate covariate values at data points and at pixels # # $Revision: 1.10 $ $Date: 2013/04/25 06:37:43 $ # evalCovar <- function(model, covariate, ...) 
{ UseMethod("evalCovar") } evalCovar.ppm <- function(model, covariate, ..., dimyx=NULL, eps=NULL, jitter=TRUE, modelname=NULL, covname=NULL, dataname=NULL) { # evaluate covariate values at data points and at pixels csr <- is.poisson.ppm(model) && is.stationary.ppm(model) # determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$Qname info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, spacename="two dimensions") X <- data.ppm(model) W <- as.owin(model) # explicit control of pixel resolution if(!is.null(dimyx) || !is.null(eps)) W <- as.mask(W, dimyx=dimyx, eps=eps) # evaluate covariate if(is.character(covariate)) { # One of the characters 'x' or 'y' # Turn it into a function. ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=function(x,y,m){x}, y=function(x,y,m){y}, stop(paste("Unrecognised covariate", dQuote(covariate)))) } if(!is.marked(model)) { # ................... unmarked ....................... 
if(is.im(covariate)) { type <- "im" # evaluate at data points by interpolation ZX <- interp.im(covariate, X$x, X$y) # fix boundary glitches if(any(uhoh <- is.na(ZX))) ZX[uhoh] <- safelookup(covariate, X[uhoh]) # covariate values for pixels inside window Z <- covariate[W, drop=FALSE] # corresponding mask W <- as.owin(Z) } else if(is.function(covariate)) { type <- "function" # evaluate exactly at data points ZX <- covariate(X$x, X$y) if(!all(is.finite(ZX))) warning("covariate function returned NA or Inf values") # window W <- as.mask(W) # covariate in window Z <- as.im(covariate, W=W) # collapse function body to single string covname <- singlestring(covname) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y"))) # values of covariate in window Zvalues <- as.vector(Z[W, drop=TRUE]) # corresponding fitted intensity values lambda <- as.vector(predict(model, locations=W)[W, drop=TRUE]) # pixel area (constant) pixelarea <- with(Z, xstep * ystep) } else { # ................... marked ....................... 
if(!is.multitype(model)) stop("Only implemented for multitype models (factor marks)") marx <- marks(X, dfok=FALSE) possmarks <- levels(marx) npts <- npoints(X) # single image: replicate if(is.im(covariate)) covariate <- lapply(possmarks, function(x,v){v}, v=covariate) # if(is.list(covariate) && all(unlist(lapply(covariate, is.im)))) { # list of images type <- "im" if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") # evaluate covariate at each data point by interpolation ZX <- numeric(npts) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] values <- interp.im(covariate.k, x=X$x[ii], y=X$y[ii]) # fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, X[ii][uhoh]) ZX[ii] <- values } # restrict covariate images to window Z <- lapply(covariate, function(x,W){x[W, drop=FALSE]}, W=W) # extract pixel locations and pixel values Zframes <- lapply(Z, as.data.frame) # covariate values at each pixel inside window Zvalues <- unlist(lapply(Zframes, function(df) { df[ , 3] })) # pixel locations locn <- lapply(Zframes, function(df) { df[ , 1:2] }) # tack on mark values for(k in seq_along(possmarks)) locn[[k]] <- cbind(locn[[k]], data.frame(marks=possmarks[k])) loc <- do.call("rbind", locn) # corresponding fitted intensity values lambda <- predict(model, locations=loc) # pixel areas pixelarea <- unlist(lapply(Z, function(z) { with(z, rep.int(xstep * ystep, sum(!is.na(v)))) })) } else if(is.function(covariate)) { type <- "function" # evaluate exactly at data points ZX <- covariate(X$x, X$y, marx) # same window W <- as.mask(W) # covariate in window Z <- list() g <- function(x,y,m,f) { f(x,y,m) } for(k in seq_along(possmarks)) Z[[k]] <- as.im(g, m=possmarks[k], f=covariate, W=W) Zvalues <- unlist(lapply(Z, function(z) { as.data.frame(z)[,3] })) # corresponding fitted intensity values lambda <- predict(model, locations=W) lambda <- 
unlist(lapply(lambda, function(z) { as.data.frame(z)[,3] })) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") # collapse function body to single string covname <- singlestring(covname) # pixel areas pixelarea <- unlist(lapply(Z, function(z) { with(z, rep.int(xstep * ystep, sum(!is.na(v)))) })) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", sQuote("x"), "or", sQuote("y"))) } # .......................................................... # apply jittering to avoid ties if(jitter) { nX <- length(ZX) dZ <- 0.3 * quantile(diff(sort(unique(c(ZX, Zvalues)))), 1/min(20, nX)) ZX <- ZX + rnorm(nX, sd=dZ) Zvalues <- Zvalues + rnorm(length(Zvalues), sd=dZ) } check.finite(lambda, xname="the fitted intensity lambda", usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) # wrap up values <- list(Zimage = Z, Zvalues = Zvalues, lambda = lambda, weights = pixelarea, ZX = ZX, type = type) return(list(values=values, info=info)) } evalCovar.lppm <- function(model, covariate, ..., eps=NULL, nd=1000, jitter=TRUE, modelname=NULL, covname=NULL, dataname=NULL) { # evaluate covariate values at data points and at pixels csr <- is.poisson(model) && is.stationary(model) # determine names if(is.null(modelname)) modelname <- if(csr) "CSR" else short.deparse(substitute(model)) if(is.null(covname)) { covname <- singlestring(short.deparse(substitute(covariate))) if(is.character(covariate)) covname <- covariate } if(is.null(dataname)) dataname <- model$Xname info <- list(modelname=modelname, covname=covname, dataname=dataname, csr=csr, spacename="linear network") # convert character covariate to function if(is.character(covariate)) { # One of the characters 'x' or 'y' # Turn it into a function. 
ns <- length(covariate) if(ns == 0) stop("covariate is empty") if(ns > 1) stop("more than one covariate specified") covname <- covariate covariate <- switch(covariate, x=function(x,y,m){x}, y=function(x,y,m){y}, stop(paste("Unrecognised covariate", dQuote(covariate)))) } # extract model components X <- model$X fit <- model$fit # L <- as.linnet(X) Q <- quad.ppm(fit) isdat <- is.data(Q) U <- union.quad(Q) wt <- w.quad(Q) # evaluate covariate if(!is.marked(model)) { # ................... unmarked ....................... if(is.im(covariate)) { if(inherits(covariate, "linim")) { type <- "linim" Zimage <- covariate } else { type <- "im" Zimage <- as.linim(covariate, L) } # evaluate at quadrature points by interpolation Zvalues <- interp.im(covariate, U$x, U$y) # fix boundary glitches if(any(uhoh <- is.na(Zvalues))) Zvalues[uhoh] <- safelookup(covariate, U[uhoh]) # extract data values ZX <- Zvalues[isdat] } else if(is.function(covariate)) { type <- "function" Zimage <- as.linim(covariate, L) # evaluate exactly at quadrature points Zvalues <- covariate(U$x, U$y) if(!all(is.finite(Zvalues))) warning("covariate function returned NA or Inf values") # extract data values ZX <- Zvalues[isdat] # collapse function body to single string covname <- singlestring(covname) } else stop(paste("The covariate should be", "an image, a function(x,y)", "or one of the characters", sQuote("x"), "or", sQuote("y"))) # corresponding fitted intensity values lambda <- as.vector(predict(model, locations=U)) } else { # ................... marked ....................... 
if(!is.multitype(model)) stop("Only implemented for multitype models (factor marks)") marx <- marks(U, dfok=FALSE) possmarks <- levels(marx) npts <- npoints(X) # single image: replicate if(is.im(covariate)) covariate <- lapply(possmarks, function(x,v){v}, v=covariate) # if(is.list(covariate) && all(unlist(lapply(covariate, is.im)))) { # list of images if(length(covariate) != length(possmarks)) stop("Number of images does not match number of possible marks") # determine type of data islinim <- unlist(lapply(covariate, inherits, what="linim")) type <- if(all(islinim)) "linim" else "im" Zimage <- covariate Zimage[!islinim] <- lapply(Zimage[!islinim], as.linim, L=L) # evaluate covariate at each data point by interpolation Zvalues <- numeric(npoints(U)) for(k in seq_along(possmarks)) { ii <- (marx == possmarks[k]) covariate.k <- covariate[[k]] values <- interp.im(covariate.k, x=U$x[ii], y=U$y[ii]) # fix boundary glitches if(any(uhoh <- is.na(values))) values[uhoh] <- safelookup(covariate.k, U[ii][uhoh]) Zvalues[ii] <- values } # extract data values ZX <- Zvalues[isdat] # corresponding fitted intensity values lambda <- predict(model, locations=U) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") } else if(is.function(covariate)) { type <- "function" # evaluate exactly at quadrature points Zvalues <- covariate(U$x, U$y, marx) # extract data values ZX <- Zvalues[isdat] # corresponding fitted intensity values lambda <- predict(model, locations=U) if(length(lambda) != length(Zvalues)) stop("Internal error: length(lambda) != length(Zvalues)") # images Zimage <- list() g <- function(x,y,m,f) { f(x,y,m) } for(k in seq_along(possmarks)) Zimage[[k]] <- as.linim(g, L=L, m=possmarks[k], f=covariate) # collapse function body to single string covname <- singlestring(covname) } else stop(paste("For a multitype point process model,", "the covariate should be an image, a list of images,", "a function(x,y,m)", "or one of the characters", 
sQuote("x"), "or", sQuote("y"))) } # .......................................................... # apply jittering to avoid ties if(jitter) { nX <- length(ZX) dZ <- 0.3 * quantile(diff(sort(unique(c(ZX, Zvalues)))), 1/min(20, nX)) ZX <- ZX + rnorm(nX, sd=dZ) Zvalues <- Zvalues + rnorm(length(Zvalues), sd=dZ) } check.finite(lambda, xname="the fitted intensity lambda", usergiven=FALSE) check.finite(Zvalues, xname="the covariate", usergiven=TRUE) # wrap up values <- list(Zimage = Zimage, Zvalues = Zvalues, lambda = lambda, weights = wt, ZX = ZX, type = type) return(list(values=values, info=info)) } spatstat/R/smooth.ppp.R0000755000176000001440000003621512237642727014626 0ustar ripleyusers# # smooth.ppp.R # # Smooth the marks of a point pattern # # $Revision: 1.9 $ $Date: 2013/08/29 04:17:05 $ # smooth.ppp <- function(X, ..., weights=rep(1, npoints(X)), at="pixels") { message("smooth.ppp will soon be deprecated: use the generic Smooth with a capital S") # .Deprecated("Smooth.ppp", package="spatstat", # msg="smooth.ppp is deprecated: use the generic Smooth with a capital S") Smooth(X, ..., weights=weights, at=at) } Smooth <- function(X, ...) { UseMethod("Smooth") } Smooth.ppp <- function(X, ..., weights=rep(1, npoints(X)), at="pixels") { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE)) stop("X should be a marked point pattern") at <- pickoption("output location type", at, c(pixels="pixels", points="points")) # determine smoothing parameters ker <- resolve.2D.kernel(..., x=X, bwfun=bw.smoothppp, allow.zero=TRUE) sigma <- ker$sigma varcov <- ker$varcov # ..... 
if(ker$cutoff < min(nndist(X))) { # very small bandwidth leaveoneout <- resolve.1.default("leaveoneout", list(...), list(leaveoneout=TRUE)) if(!leaveoneout && at=="points") { warning(paste("Bandwidth is close to zero:", "original values returned")) Y <- marks(X) } else { warning(paste("Bandwidth is close to zero:", "nearest-neighbour interpolation performed")) Y <- nnmark(X, ..., k=1, at=at) } return(Y) } # if(weightsgiven <- !missing(weights)) { check.nvector(weights, npoints(X)) # rescale weights to avoid numerical gremlins weights <- weights/median(abs(weights)) } # get marks marx <- marks(X, dfok=TRUE) # if(!is.data.frame(marx)) { # ........ vector of marks ................... values <- marx if(is.factor(values)) { warning("Factor valued marks were converted to integers") values <- as.numeric(values) } switch(at, points={ if(!weightsgiven) weights <- NULL result <- do.call("smoothpointsEngine", resolve.defaults(list(x=X), list(values=values, weights=weights), list(sigma=sigma, varcov=varcov), list(...))) }, pixels={ numerator <- do.call("density.ppp", resolve.defaults(list(x=X, at="pixels"), list(weights = values * weights), list(sigma=sigma, varcov=varcov), list(...), list(edge=FALSE))) denominator <- do.call("density.ppp", resolve.defaults(list(x=X, at="pixels"), list(weights = weights), list(sigma=sigma, varcov=varcov), list(...), list(edge=FALSE))) result <- eval.im(numerator/denominator) # trap small values of denominator # trap NaN and +/- Inf values of result, but not NA eps <- .Machine$double.eps nbg <- eval.im(is.infinite(result) | is.nan(result) | (denominator < eps)) if(any(as.matrix(nbg), na.rm=TRUE)) { warning("Numerical underflow detected: sigma is probably too small") # l'Hopital's rule distX <- distmap(X, xy=numerator) whichnn <- attr(distX, "index") nnvalues <- eval.im(values[whichnn]) result[nbg] <- nnvalues[nbg] } attr(result, "warnings") <- attr(numerator, "warnings") }) } else { # ......... data frame of marks .................. 
nmarx <- ncol(marx) # compute denominator denominator <- do.call("density.ppp", resolve.defaults(list(x=X, at=at), list(weights = weights), list(sigma=sigma, varcov=varcov), list(...), list(edge=FALSE))) # compute numerator for each column of marks numerators <- do.call("density.ppp", resolve.defaults(list(x=X, at=at), list(weights = marx * weights), list(sigma=sigma, varcov=varcov), list(...), list(edge=FALSE))) uhoh <- attr(numerators, "warnings") # calculate ratios switch(at, points={ if(is.null(uhoh)) { # numerators is a matrix ratio <- numerators/denominator if(any(badpoints <- apply(!is.finite(ratio), 1, any))) { whichnnX <- nnwhich(X) ratio[badpoints,] <- marx[whichnnX[badpoints], ] } } else { warning("returning original values") ratio <- marx } result <- as.data.frame(ratio) colnames(result) <- colnames(marx) }, pixels={ ratio <- lapply(numerators, function(a,b) eval.im(a/b), b=denominator) if(!is.null(uhoh)) { # compute nearest neighbour map on same raster distX <- distmap(X, xy=denominator) whichnnX <- attr(distX, "index") # fix images for(j in 1:length(ratio)) { ratj <- ratio[[j]] valj <- marx[,j] ratio[[j]] <- eval.im(ifelseXY(is.finite(ratj), ratj, valj[whichnnX])) } attr(ratio, "warnings") <- uhoh } result <- as.listof(ratio) names(result) <- colnames(marx) }) } # wrap up attr(result, "warnings") <- unlist(lapply(result, function(x){ attr(x, "warnings") })) attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } smoothpointsEngine <- function(x, values, sigma, ..., weights=NULL, varcov=NULL, leaveoneout=TRUE, sorted=FALSE) { stopifnot(is.logical(leaveoneout)) if(is.null(varcov)) { const <- 1/(2 * pi * sigma^2) } else { detSigma <- det(varcov) Sinv <- solve(varcov) const <- 1/(2 * pi * sqrt(detSigma)) } # Contributions from pairs of distinct points # closer than 8 standard deviations sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) cutoff <- 8 * sd # detect very small bandwidth nnd <- nndist(x) nnrange <- range(nnd) 
if(cutoff < nnrange[1]) { if(leaveoneout && (npoints(x) > 1)) { warning("Very small bandwidth; values of nearest neighbours returned") result <- values[nnwhich(x)] } else { warning("Very small bandwidth; original values returned") result <- values } attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov attr(result, "warnings") <- "underflow" return(result) } if(leaveoneout) { # ensure cutoff includes at least one point cutoff <- max(1.1 * nnrange[2], cutoff) } if(spatstat.options("densityC")) { # .................. new C code ........................... npts <- npoints(x) result <- numeric(npts) # sort into increasing order of x coordinate (required by C code) if(sorted) { xx <- x$x yy <- x$y vv <- values } else { oo <- fave.order(x$x) xx <- x$x[oo] yy <- x$y[oo] vv <- values[oo] } DUP <- spatstat.options("dupC") if(is.null(varcov)) { # isotropic kernel if(is.null(weights)) { zz <- .C("smoopt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort <- weights[oo] zz <- .C("wtsmoopt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sig = as.double(sd), weight = as.double(wtsort), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } } else { # anisotropic kernel flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C("asmoopt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else { wtsort 
<- weights[oo] zz <- .C("awtsmoopt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), v = as.double(vv), self = as.integer(!leaveoneout), rmaxi = as.double(cutoff), sinv = as.double(flatSinv), weight = as.double(wtsort), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } } if(any(nbg <- (is.infinite(result) | is.nan(result)))) { # NaN or +/-Inf can occur if bandwidth is small # Use mark of nearest neighbour (by l'Hopital's rule) result[nbg] <- values[nnwhich(x)[nbg]] } } else { # previous, partly interpreted code # compute weighted densities if(is.null(weights)) { # weights are implicitly equal to 1 numerator <- do.call("density.ppp", resolve.defaults(list(x=x, at="points"), list(weights = values), list(sigma=sigma, varcov=varcov), list(leaveoneout=leaveoneout), list(sorted=sorted), list(...), list(edge=FALSE))) denominator <- do.call("density.ppp", resolve.defaults(list(x=x, at="points"), list(sigma=sigma, varcov=varcov), list(leaveoneout=leaveoneout), list(sorted=sorted), list(...), list(edge=FALSE))) } else { numerator <- do.call("density.ppp", resolve.defaults(list(x=x, at="points"), list(weights = values * weights), list(sigma=sigma, varcov=varcov), list(leaveoneout=leaveoneout), list(sorted=sorted), list(...), list(edge=FALSE))) denominator <- do.call("density.ppp", resolve.defaults(list(x=x, at="points"), list(weights = weights), list(sigma=sigma, varcov=varcov), list(leaveoneout=leaveoneout), list(sorted=sorted), list(...), list(edge=FALSE))) } if(is.null(uhoh <- attr(numerator, "warnings"))) { result <- numerator/denominator result <- ifelseXB(is.finite(result), result, NA) } else { warning("returning original values") result <- values attr(result, "warnings") <- uhoh } } # pack up and return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } markmean <- function(X, ...) { Smooth(X, ...) } markvar <- function(X, ...) 
{ if(!is.marked(X, dfok=FALSE)) stop("X should have (one column of) marks") E1 <- Smooth(X, ...) X2 <- X %mark% marks(X)^2 E2 <- Smooth(X2, ...) V <- eval.im(E2 - E1^2) return(V) } bw.smoothppp <- function(X, nh=spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) { stopifnot(is.ppp(X)) stopifnot(is.marked(X)) # rearrange in ascending order of x-coordinate (for C code) X <- X[fave.order(X$x)] # marx <- marks(X) # determine a range of bandwidth values n <- npoints(X) if(is.null(hmin) || is.null(hmax)) { W <- as.owin(X) a <- area.owin(W) d <- diameter(as.rectangle(W)) # Stoyan's rule of thumb stoyan <- bw.stoyan(X) # rule of thumb based on nearest-neighbour distances nnd <- nndist(X) nnd <- nnd[nnd > 0] if(is.null(hmin)) { hmin <- max(1.1 * min(nnd), stoyan/5) hmin <- min(d/8, hmin) } if(is.null(hmax)) { hmax <- max(stoyan * 20, 3 * mean(nnd), hmin * 2) hmax <- min(d/2, hmax) } } else stopifnot(hmin < hmax) # h <- exp(seq(from=log(hmin), to=log(hmax), length.out=nh)) cv <- numeric(nh) # # compute cross-validation criterion for(i in seq_len(nh)) { yhat <- Smooth(X, sigma=h[i], at="points", leaveoneout=TRUE, sorted=TRUE) cv[i] <- mean((marx - yhat)^2) } # optimize iopt <- which.min(cv) hopt <- h[iopt] # if(warn && (iopt == nh || iopt == 1)) warning(paste("Cross-validation criterion was minimised at", if(iopt == 1) "left-hand" else "right-hand", "end of interval", paste(prange(signif(c(hmin, hmax), 3)), ";", sep=""), "use arguments hmin, hmax to specify a wider interval"), call.=FALSE) # result <- bw.optim(cv, h, iopt, hname="sigma", creator="bw.smoothppp", criterion="Least Squares Cross-Validation") return(result) } spatstat/R/distmap.R0000755000176000001440000000750112237642727014154 0ustar ripleyusers# # # distmap.R # # $Revision: 1.18 $ $Date: 2013/04/25 06:37:43 $ # # # Distance transforms # # distmap <- function(X, ...) { UseMethod("distmap") } distmap.ppp <- function(X, ...) { verifyclass(X, "ppp") e <- exactdt(X, ...) 
W <- e$w uni <- unitname(W) dmat <- e$d imat <- e$i V <- im(dmat, W$xcol, W$yrow, unitname=uni) I <- im(imat, W$xcol, W$yrow, unitname=uni) if(X$window$type == "rectangle") { # distance to frame boundary bmat <- e$b B <- im(bmat, W$xcol, W$yrow, unitname=uni) } else { # distance to window boundary, not frame boundary bmat <- bdist.pixels(W, style="matrix") B <- im(bmat, W$xcol, W$yrow, unitname=uni) # clip all to window V <- V[W, drop=FALSE] I <- I[W, drop=FALSE] B <- B[W, drop=FALSE] } attr(V, "index") <- I attr(V, "bdry") <- B return(V) } distmap.owin <- function(X, ..., discretise=FALSE, invert=FALSE) { verifyclass(X, "owin") uni <- unitname(X) if(X$type == "rectangle") { M <- as.mask(X, ...) Bdry <- im(bdist.pixels(M, style="matrix"), M$xcol, M$yrow, unitname=uni) if(!invert) Dist <- as.im(M, value=0) else Dist <- Bdry } else if(X$type == "polygonal" && !discretise) { Edges <- as.psp(X) Dist <- distmap(Edges, ...) Bdry <- attr(Dist, "bdry") if(!invert) Dist[X] <- 0 else { bb <- as.rectangle(X) bigbox <- grow.rectangle(bb, diameter(bb)/4) Dist[complement.owin(X, bigbox)] <- 0 } } else { X <- as.mask(X, ...) 
if(invert) X <- complement.owin(X) xc <- X$xcol yr <- X$yrow nr <- X$dim[1] nc <- X$dim[2] # pad out the input image with a margin of width 1 on all sides mat <- X$m pad <- invert # boundary condition is opposite of value inside W mat <- cbind(pad, mat, pad) mat <- rbind(pad, mat, pad) # call C routine DUP <- spatstat.options("dupC") res <- .C("distmapbin", as.double(X$xrange[1]), as.double(X$yrange[1]), as.double(X$xrange[2]), as.double(X$yrange[2]), nr = as.integer(nr), nc = as.integer(nc), as.logical(t(mat)), distances = as.double(matrix(0, ncol = nc + 2, nrow = nr + 2)), boundary = as.double(matrix(0, ncol = nc + 2, nrow = nr + 2)), DUP=DUP) # PACKAGE="spatstat") # strip off margins again dist <- matrix(res$distances, ncol = nc + 2, byrow = TRUE)[2:(nr + 1), 2:(nc +1)] bdist <- matrix(res$boundary, ncol = nc + 2, byrow = TRUE)[2:(nr + 1), 2:(nc +1)] # cast as image objects Dist <- im(dist, xc, yr, unitname=uni) Bdry <- im(bdist, xc, yr, unitname=uni) } attr(Dist, "bdry") <- Bdry return(Dist) } distmap.psp <- function(X, ...) { verifyclass(X, "psp") W <- as.mask(X$window, ...) 
uni <- unitname(W) xp <- as.vector(raster.x(W)) yp <- as.vector(raster.y(W)) np <- length(xp) E <- X$ends big <- 2 * diameter(as.rectangle(W))^2 dist2 <- rep.int(big, np) DUP <- spatstat.options("dupC") z <- .C("nndist2segs", xp=as.double(xp), yp=as.double(yp), npoints=as.integer(np), x0=as.double(E$x0), y0=as.double(E$y0), x1=as.double(E$x1), y1=as.double(E$y1), nsegments=as.integer(nrow(E)), epsilon=as.double(.Machine$double.eps), dist2=as.double(dist2), index=as.integer(integer(np)), DUP=DUP) # PACKAGE="spatstat") xc <- W$xcol yr <- W$yrow Dist <- im(array(sqrt(z$dist2), dim=W$dim), xc, yr, unitname=uni) Indx <- im(array(z$index + 1, dim=W$dim), xc, yr, unitname=uni) Bdry <- im(bdist.pixels(W, style="matrix"), xc, yr, unitname=uni) attr(Dist, "index") <- Indx attr(Dist, "bdry") <- Bdry return(Dist) } spatstat/R/psp2pix.R0000755000176000001440000000475412237642727014127 0ustar ripleyusers# # psp2pix.R # # $Revision: 1.6 $ $Date: 2013/04/25 06:37:43 $ # # as.mask.psp <- function(x, W=NULL, ...) { L <- as.psp(x) if(is.null(W)) W <- as.owin(L) else W <- as.owin(W) W <- as.mask(W, ...) ends <- L$ends nseg <- nrow(ends) if(nseg == 0) { # empty W$m[] <- FALSE return(W) } x0 <- (ends$x0 - W$xrange[1])/W$xstep x1 <- (ends$x1 - W$xrange[1])/W$xstep y0 <- (ends$y0 - W$yrange[1])/W$ystep y1 <- (ends$y1 - W$yrange[1])/W$ystep nr <- W$dim[1] nc <- W$dim[2] DUP <- spatstat.options("dupC") zz <- .C("seg2pixI", ns=as.integer(nseg), x0=as.double(x0), y0=as.double(y0), x1=as.double(x1), y1=as.double(y1), nx=as.integer(nc), ny=as.integer(nr), out=as.integer(integer(nr * nc)), DUP=DUP) # PACKAGE="spatstat") mm <- matrix(zz$out, nr, nc) # intersect with existing window W$m <- W$m & mm W } pixellate.psp <- function(x, W=NULL, ..., weights=NULL) { L <- as.psp(x) if(is.null(W)) W <- as.owin(L) else W <- as.owin(W) W <- as.mask(W, ...) 
Z <- as.im(W) ends <- L$ends nseg <- nrow(ends) if(nseg == 0) { # empty Z$v[] <- 0 return(Z) } if(is.null(weights)) weights <- rep.int(1, nseg) else { if(!is.numeric(weights)) stop("weights must be numeric") if(any(is.na(weights))) stop("weights must not be NA") if(!all(is.finite(weights))) stop("weights must not be infinite") if(length(weights) == 1) weights <- rep.int(weights, nseg) else if(length(weights) != nseg) stop(paste("weights vector has length", length(weights), "but there are", nseg, "line segments")) } x0 <- (ends$x0 - Z$xrange[1])/Z$xstep x1 <- (ends$x1 - Z$xrange[1])/Z$xstep y0 <- (ends$y0 - Z$yrange[1])/Z$ystep y1 <- (ends$y1 - Z$yrange[1])/Z$ystep nr <- Z$dim[1] nc <- Z$dim[2] DUP <- spatstat.options("dupC") zz <- .C("seg2pixL", ns=as.integer(nseg), x0=as.double(x0), y0=as.double(y0), x1=as.double(x1), y1=as.double(y1), weights=as.double(weights), pixwidth=as.double(Z$xstep), pixheight=as.double(Z$ystep), nx=as.integer(nc), ny=as.integer(nr), out=as.double(numeric(nr * nc)), DUP=DUP) # PACKAGE="spatstat") mm <- matrix(zz$out, nr, nc) mm[is.na(Z$v)] <- NA # intersect with existing window Z$v <- mm Z } spatstat/R/nnmark.R0000644000176000001440000000175712237642727014005 0ustar ripleyusers# # nnmark.R # # $Revision: 1.3 $ $Date: 2013/07/04 09:17:14 $ nnmark <- function(X, ..., k=1, at=c("pixels", "points")) { stopifnot(is.ppp(X)) stopifnot(is.marked(X)) at <- match.arg(at) mX <- marks(X) switch(at, pixels = { Y <- nnmap(X, k=k, what="which", ...) 
switch(markformat(X), vector={ result <- eval.im(mX[Y]) }, dataframe = { result <- as.listof(lapply(mX, function(z) eval.im(z[Y]))) }, stop("Marks must be a vector or dataframe")) }, points = { Y <- nnwhich(X, k=k) switch(markformat(X), vector={ result <- eval.im(mX[Y]) }, dataframe = { result <- mX[Y,, drop=FALSE] }, stop("Marks must be a vector or dataframe")) }) return(result) } spatstat/R/rescale.R0000755000176000001440000000231712237642727014131 0ustar ripleyusers# # # rescale.R # # $Revision: 1.5 $ $Date: 2013/04/25 06:37:43 $ # # rescale <- function(X, s) { UseMethod("rescale") } rescale.ppp <- function(X, s) { if(missing(s)) s <- 1/unitname(X)$multiplier Y <- affine.ppp(X, mat=diag(c(1/s,1/s))) unitname(Y) <- rescale(unitname(X), s) return(Y) } rescale.owin <- function(X, s) { if(missing(s)) s <- 1/unitname(X)$multiplier Y <- affine.owin(X, mat=diag(c(1/s,1/s))) unitname(Y) <- rescale(unitname(X), s) return(Y) } rescale.im <- function(X, s) { if(missing(s)) s <- 1/unitname(X)$multiplier Y <- X Y$xrange <- X$xrange/s Y$yrange <- X$yrange/s Y$xstep <- X$xstep/s Y$ystep <- X$ystep/s Y$xcol <- X$xcol/s Y$yrow <- X$yrow/s unitname(Y) <- rescale(unitname(X), s) return(Y) } rescale.psp <- function(X, s) { if(missing(s)) s <- 1/unitname(X)$multiplier Y <- affine.psp(X, mat=diag(c(1/s,1/s))) unitname(Y) <- rescale(unitname(X), s) return(Y) } rescale.units <- function(X, s) { if(summary(X)$vanilla) return(X) if(missing(s)) { X$multiplier <- 1 } else { if(!is.numeric(s) || length(s) != 1 || s <= 0) stop("s should be a positive number") X$multiplier <- s * X$multiplier } return(X) } spatstat/R/bw.optim.R0000755000176000001440000000577512237642727014265 0ustar ripleyusers# # bw.optim.R # # Class of optimised bandwidths # Plotting the object displays the optimisation criterion # # $Revision: 1.21 $ $Date: 2013/07/08 04:54:41 $ # bw.optim <- function(cv, h, iopt=which.min(cv), ..., cvname, hname, criterion="cross-validation") { if(missing(cvname) || is.null(cvname)) 
cvname <- deparse(substitute(cv)) if(missing(hname) || is.null(hname)) hname <- deparse(substitute(h)) stopifnot(is.numeric(cv)) stopifnot(is.numeric(h)) stopifnot(length(h) == length(cv)) result <- h[iopt] attr(result, "cv") <- cv attr(result, "h") <- h attr(result, "iopt") <- iopt attr(result, "labels") <- list(hname=hname, cvname=cvname) attr(result, "info") <- list(...) attr(result, "criterion") <- criterion class(result) <- "bw.optim" return(result) } print.bw.optim <- function(x, ...) { y <- as.numeric(x) names(y) <- attr(x, "labels")$hname print(y, ...) return(invisible(NULL)) } as.data.frame.bw.optim <- function(x, ...) { h <- attr(x, "h") cv <- attr(x, "cv") df <- data.frame(h, cv) labels <- attr(x, "labels") colnames(df) <- labels[c("hname", "cvname")] info <- attr(x, "info") if(length(info) > 0) { lengths <- unlist(lapply(info, length)) if(any(ok <- (lengths == nrow(df)))) { df <- cbind(df, as.data.frame(info[ok])) } } return(df) } as.fv.bw.optim <- function(x) { # convert to fv object df <- as.data.frame(x) dfnames <- colnames(df) hname <- dfnames[1] cvname <- dfnames[2] descrip <- c("smoothing parameter", paste(attr(x, "criterion"), "criterion")) if(ncol(df) > 2) descrip <- c(descrip, paste("Additional variable", sQuote(dfnames[-(1:2)]))) labl <- c(hname, paste0(dfnames[-1], paren(hname))) yexp <- substitute(CV(h), list(CV=as.name(cvname), h=as.name(hname))) xfv <- fv(df, argu=hname, ylab=yexp, valu=cvname, labl=labl, desc=descrip, fname=cvname, yexp=yexp) fvnames(xfv, ".") <- cvname return(xfv) } plot.bw.optim <- function(x, ..., showopt=TRUE, optargs=list(lty=3, col="blue")) { xname <- short.deparse(substitute(x)) # convert to fv object xfv <- as.fv(x) # plot cross-validation criterion out <- do.call("plot.fv", resolve.defaults(list(x=xfv), list(...), list(main=xname))) # Turn off 'showopt' if the x-variable is not the bandwidth if(missing(showopt)) { argh <- list(...) 
isfmla <- unlist(lapply(argh, inherits, what="formula")) if(any(isfmla)) { fmla <- argh[[min(which(isfmla))]] xvar <- deparse(rhs.of.formula(fmla, tilde=FALSE)) if(!(identical(xvar, fvnames(xfv, ".x")) || identical(xvar, ".x"))) showopt <- FALSE } } # show optimal value? if(showopt) { hopt <- as.numeric(x) do.call("abline", append(list(v=hopt), optargs)) } if(is.null(out)) return(invisible(NULL)) return(out) } spatstat/R/breakpts.R0000755000176000001440000001306512237642727014330 0ustar ripleyusers# # breakpts.S # # A simple class definition for the specification # of histogram breakpoints in the special form we need them. # # even.breaks() # # $Revision: 1.12 $ $Date: 2011/05/18 01:27:46 $ # # # Other functions in this directory use the standard Splus function # hist() to compute histograms of distance values. # One argument of hist() is the vector 'breaks' # of breakpoints for the histogram cells. # # The breakpoints must # (a) span the range of the data # (b) be given in increasing order # (c) satisfy breaks[2] = 0, # # The function make.even.breaks() will create suitable breakpoints. # # Condition (c) means that the first histogram cell has # *right* endpoint equal to 0. # # Since all our distance values are nonnegative, the effect of (c) is # that the first histogram cell counts the distance values which are # exactly equal to 0. Hence F(0), the probability P{X = 0}, # is estimated without a discretisation bias. # # We assume the histograms have followed the default counting rule # in hist(), which is such that the k-th entry of the histogram # counts the number of data values in # I_k = ( breaks[k],breaks[k+1] ] for k > 1 # I_1 = [ breaks[1],breaks[2] ] # # The implementations of estimators of c.d.f's in this directory # produce vectors of length = length(breaks)-1 # with value[k] = estimate of F(breaks[k+1]), # i.e. value[k] is an estimate of the c.d.f. at the RIGHT endpoint # of the kth histogram cell. 
#
# An object of class 'breakpts' contains:
#
#       $val     the actual breakpoints
#       $max     the maximum value (= last breakpoint)
#       $ncells  total number of histogram cells
#       $r       right endpoints, r = val[-1]
#       $even    logical = TRUE if cells known to be evenly spaced
#       $npos    number of histogram cells on the positive halfline
#                = length(val) - 2,
#                or NULL if cells not evenly spaced
#       $step    histogram cell width
#                or NULL if cells not evenly spaced
#
# --------------------------------------------------------------------

# Low-level constructor: wrap precomputed components in a 'breakpts' object.
# No validation is performed here; callers are expected to have checked 'val'.
breakpts <- function(val, maxi, even=FALSE, npos=NULL, step=NULL) {
  out <- list(val=val, max=maxi, ncells=length(val)-1,
              r = val[-1], even=even, npos=npos, step=step)
  class(out) <- "breakpts"
  out
}

# Create evenly-spaced breakpoints spanning [0, bmax],
# prepended with one extra cell (-bstep, 0] so that the first cell
# counts the distance values exactly equal to 0.
# Specify either 'npos' (number of cells on the positive halfline)
# or 'bstep' (cell width); 'npos' takes precedence if both are given.
"make.even.breaks" <- function(bmax, npos, bstep) {
  if(bmax <= 0)
    stop("bmax must be positive")
  if(missing(bstep) && missing(npos))
    stop(paste("Must specify either", sQuote("bstep"),
               "or", sQuote("npos")))
  if(!missing(npos)) {
    # cell width determined by the requested number of cells
    bstep <- bmax/npos
    val <- seq(from=0, to=bmax, length.out=npos+1)
    val <- c(-bstep,val)
    right <- bmax
  } else {
    # number of cells determined by the requested cell width;
    # round up so the cells cover [0, bmax]
    npos <- ceiling(bmax/bstep)
    right <- bstep * npos
    val <- seq(from=0, to=right, length.out=npos+1)
    val <- c(-bstep,val)
  }
  breakpts(val, right, TRUE, npos, bstep)
}

# Convert various specifications to a 'breakpts' object:
#   - a single 'breakpts' object (returned unchanged)
#   - a single numeric vector of breakpoints with breaks[2] = 0
#   - two unnamed arguments, or arguments named 'max' and 'npos',
#     passed on to make.even.breaks()
"as.breakpts" <- function(...) {
  XL <- list(...)
  if(length(XL) == 1) {
    # single argument
    X <- XL[[1]]
    if(inherits(X, "breakpts"))
      # X already in correct form
      # (was: class(X) == "breakpts", which fails for objects with
      #  multiple classes; inherits() is the robust S3 test)
      return(X)
    if(is.vector(X) && length(X) > 2) {
      # it's a vector
      if(X[2] != 0)
        stop("breakpoints do not satisfy breaks[2] = 0")
      # The following test for equal spacing is used in hist.default
      steps <- diff(X)
      if(diff(range(steps)) < 1e-07 * mean(steps))
        # equally spaced
        return(breakpts(X, max(X), TRUE, length(X)-2, steps[1]))
      else
        # unknown spacing
        return(breakpts(X, max(X), FALSE))
    }
  } else {
    # There are multiple arguments.
    # exactly two arguments - interpret as even.breaks()
    if(length(XL) == 2)
      return(make.even.breaks(XL[[1]], XL[[2]]))
    # two arguments 'max' and 'npos'
    if(!is.null(XL$max) && !is.null(XL$npos))
      return(make.even.breaks(XL$max, XL$npos))
    # otherwise
    stop("Don't know how to convert these data to breakpoints")
  }
  # previously fell through and silently returned NULL here
  # (e.g. for a single scalar argument); now fail loudly
  stop("Don't know how to convert these data to breakpoints")
}

# Verify that a histogram (vector of counts) has one entry
# per cell of the given 'breakpts' object.
check.hist.lengths <- function(hist, breaks) {
  verifyclass(breaks, "breakpts")
  nh <- length(hist)
  nb <- breaks$ncells
  if(nh != nb)
    stop(paste("Length of histogram =", nh,
               "not equal to number of histogram cells =", nb))
}

# Build a 'breakpts' object from a vector of right endpoints 'r'
# starting at 0, by prepending the extra cell (-dr, 0].
breakpts.from.r <- function(r) {
  if(!is.numeric(r) || !is.vector(r))
    # was: !is.numeric(r) && !is.vector(r), which let non-numeric
    # vectors through to fail later with a misleading message
    stop("r must be a numeric vector")
  if(length(r) < 2)
    stop(paste("r has length", length(r), "- must be at least 2"))
  if(r[1] != 0)
    stop("First r value must be 0")
  if(any(diff(r) <= 0))
    stop("successive values of r must be increasing")
  dr <- r[2] - r[1]
  b <- c(-dr, r)
  return(as.breakpts(b))
}

# Resolve the user-level arguments 'r' and 'breaks' into a single
# 'breakpts' object, supplying sensible defaults from the window
# geometry when neither is given.  At most one of 'r', 'breaks'
# may be specified.
handle.r.b.args <- function(r=NULL, breaks=NULL, window, eps=NULL,
                            rmaxdefault) {
  if(!is.null(r) && !is.null(breaks))
    stop(paste("Do not specify both", sQuote("r"), "and", sQuote("breaks")))
  if(!is.null(breaks)) {
    breaks <- as.breakpts(breaks)
  } else if(!is.null(r)) {
    breaks <- breakpts.from.r(r)
  } else {
    # both 'r' and 'breaks' are missing
    rmax <- if(missing(rmaxdefault))
      diameter(as.rectangle(window)) else rmaxdefault
    if(is.null(eps)) {
      # default step: quarter of the pixel width if known,
      # otherwise rmax/512
      if(!is.null(window$xstep))
        eps <- window$xstep/4
      else
        eps <- rmax/512
    }
    # warning(paste("step size for argument \'r\' defaults to", eps/4))
    breaks <- make.even.breaks(rmax, bstep=eps)
  }
  return(breaks)
}
spatstat/R/nndensity.R0000644000176000001440000000172512247336365014520 0ustar ripleyusers#
#
#  nndensity.R
#
#  Density estimation based on nn distance
#
#  $Revision: 1.2 $  $Date: 2013/12/03 10:51:20 $
#

# Generic: intensity estimation based on the distance to the
# k-th nearest neighbour.
nndensity <- function(x, ...) {
  UseMethod("nndensity")
}

# Method for point patterns (class 'ppp').
# Estimates the intensity at each pixel as k divided by the area
# actually searched to find the k-th nearest data point, correcting
# for search discs that extend outside the window.
nndensity.ppp <- function(x, k, ..., verbose=TRUE) {
  if(missing(k) || is.null(k)) {
    # default: k of the order of sqrt(number of points)
    k <- round(sqrt(npoints(x)))
    if(verbose) cat(paste("k=", k, "\n"))
  } else if(k == 1)
    warning("k=1 will produce strange results")
  # distance to k-th nearest neighbour
  D <- nnmap(x, k=k, what="dist", ...)
  # area searched
  A <- eval.im(pi * D^2)
  # distance to boundary
  B <- bdist.pixels(as.owin(D))
  # handle edge effects
  edge <- solutionset(B < D)
  # centres of all pixels where edge effect occurs
  xy <- raster.xy(edge, drop=TRUE)
  # corresponding values of distance
  rr <- D[edge, drop=TRUE]
  # compute actual search area
  X <- as.ppp(xy, W=as.owin(x), check=FALSE)
  A[edge] <- discpartarea(X, matrix(rr, ncol=1))
  # finally compute intensity estimate
  L <- eval.im(k/A)
  return(L)
}
spatstat/R/crossdistlpp.R0000644000176000001440000000603512237642727015240 0ustar ripleyusers#
#
#  crossdistlpp.R
#
#  $Revision: 1.3 $ $Date: 2013/08/23 07:35:36 $
#
#  crossdist.lpp
#     Calculates the shortest-path distance from each point of X
#     to each point of Y, where X and Y are point patterns
#     on the same linear network.
# crossdist.lpp <- function(X, Y, ..., method="C") { stopifnot(inherits(X, "lpp")) stopifnot(method %in% c("C", "interpreted")) check <- resolve.defaults(list(...), list(check=TRUE))$check # nX <- npoints(X) nY <- npoints(Y) # L <- as.linnet(X) if(check) { LY <- as.linnet(Y) if(!identical(L, LY)) stop("X and Y are on different linear networks") } P <- as.ppp(X) Q <- as.ppp(Y) # Lseg <- L$lines Lvert <- L$vertices from <- L$from to <- L$to dpath <- L$dpath # nearest segment for each point Xpro <- coords(X, local=TRUE, spatial=FALSE, temporal=FALSE)$seg Ypro <- coords(Y, local=TRUE, spatial=FALSE, temporal=FALSE)$seg crossdistmat <- matrix(0,nX,nY) if(method == "interpreted") { # loop through all pairs of data points for (i in 1:nX) { Xproi <- Xpro[i] Xi <- P[i] nbi1 <- from[Xproi] nbi2 <- to[Xproi] vi1 <- Lvert[nbi1] vi2 <- Lvert[nbi2] dXi1 <- crossdist(Xi, vi1) dXi2 <- crossdist(Xi, vi2) for (j in 1:nY) { Yj <- Q[j] Yproj <- Ypro[j] if(Xproi == Yproj) { # points i and j lie on the same segment # use Euclidean distance d <- crossdist(Xi, Yj) } else { # shortest path from i to j passes through ends of segments nbj1 <- from[Yproj] nbj2 <- to[Yproj] vj1 <- Lvert[nbj1] vj2 <- Lvert[nbj2] # Calculate shortest of 4 possible paths from i to j d1Yj <- crossdist(vj1,Yj) d2Yj <- crossdist(vj2,Yj) d11 <- dXi1 + dpath[nbi1,nbj1] + d1Yj d12 <- dXi1 + dpath[nbi1,nbj2] + d2Yj d21 <- dXi2 + dpath[nbi2,nbj1] + d1Yj d22 <- dXi2 + dpath[nbi2,nbj2] + d2Yj d <- min(d11,d12,d21,d22) } # store result crossdistmat[i,j] <- d } } } else { # C code # convert indices to start at 0 from0 <- from - 1L to0 <- to - 1L Xsegmap <- Xpro - 1L Ysegmap <- Ypro - 1L DUP <- spatstat.options("dupC") zz <- .C("lincrossdist", np = as.integer(nX), xp = as.double(P$x), yp = as.double(P$y), nq = as.integer(nY), xq = as.double(Q$x), yq = as.double(Q$y), nv = as.integer(Lvert$n), xv = as.double(Lvert$x), yv = as.double(Lvert$y), ns = as.double(L$n), from = as.integer(from0), to = as.integer(to0), dpath = 
as.double(dpath), psegmap = as.integer(Xsegmap), qsegmap = as.integer(Ysegmap), answer = as.double(crossdistmat), DUP=DUP) # PACKAGE="spatstat") crossdistmat <- matrix(zz$answer, nX, nY) } return(crossdistmat) } spatstat/R/multistrhard.R0000755000176000001440000002447212240721046015225 0ustar ripleyusers# # # multistrhard.S # # $Revision: 2.29 $ $Date: 2013/05/01 10:17:22 $ # # The multitype Strauss/hardcore process # # MultiStraussHard() # create an instance of the multitype Strauss/ harcore # point process # [an object of class 'interact'] # # ------------------------------------------------------------------- # MultiStraussHard <- local({ # ........ define potential ...................... MSHpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[i] type (mark) of point U[j] # # get matrices of interaction radii r <- par$iradii h <- par$hradii # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be checked # (the interaction must be symmetric in type, and scored as such) uptri <- (row(r) <= col(r)) & (!is.na(r) | !is.na(h)) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] # corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all 
ORDERED pairs of types to be checked # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create numeric array for result z <- array(0, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... if(length(z) > 0) { # apply the relevant interaction distance to each pair of points rxu <- r[ tx, tu ] str <- (d < rxu) str[is.na(str)] <- FALSE # and the relevant hard core distance hxu <- h[ tx, tu ] forbid <- (d < hxu) forbid[is.na(forbid)] <- FALSE # form the potential value <- str value[forbid] <- -Inf # assign value[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- value[Xsub, Qsub] } } return(z) } # ............... end of potential function ................... # .......... auxiliary functions ................. delMSH <- function(which, types, iradii, hradii, ihc) { iradii[which] <- NA if(any(!is.na(iradii))) { # some gamma interactions left # return modified MultiStraussHard with fewer gamma parameters return(MultiStraussHard(types, iradii, hradii)) } else if(any(!ihc)) { # no gamma interactions left, but some active hard cores return(MultiHard(types, hradii)) } else return(Poisson()) } # ........................................................... 
# Set up basic object except for family and parameters BlankMSHobject <- list( name = "Multitype Strauss Hardcore process", creator = "MultiStraussHard", family = "pairwise.family", # evaluated later pot = MSHpotential, par = list(types=NULL, iradii=NULL, hradii=NULL), # to be added parnames = c("possible types", "interaction distances", "hardcore distances"), selfstart = function(X, self) { if(!is.null(self$par$types)) return(self) types <- levels(marks(X)) MultiStraussHard(types=types,iradii=self$par$iradii, hradii=self$par$hradii) }, init = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii if(!is.null(types)) { if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(any(is.na(types))) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } nt <- length(types) MultiPair.checkmatrix(iradii, nt, sQuote("iradii")) MultiPair.checkmatrix(hradii, nt, sQuote("hradii")) } ina <- is.na(iradii) hna <- is.na(hradii) if(all(ina)) stop(paste("All entries of", sQuote("iradii"), "are NA")) both <- !ina & !hna if(any(iradii[both] <= hradii[both])) stop("iradii must be larger than hradii") }, update = NULL, # default OK print = function(self) { types <- self$par$types iradii <- self$par$iradii hradii <- self$par$hradii nt <- nrow(iradii) cat(paste(nt, "types of points\n")) if(!is.null(types)) { cat("Possible types: \n") print(types) } else cat("Possible types:\t not yet determined\n") cat("Interaction radii:\n") print(iradii) cat("Hardcore radii:\n") print(hradii) invisible() }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types ntypes <- length(typ) # get matrices of interaction radii r <- self$par$iradii h <- self$par$hradii # list all relevant unordered pairs of types uptri <- (row(r) <= col(r)) & (!is.na(r) | !is.na(h)) index1 <- (row(r))[uptri] index2 
<- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) expcoef <- exp(coeffs) gammas[ cbind(index1, index2) ] <- expcoef gammas[ cbind(index2, index1) ] <- expcoef # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=round(gammas,4))) }, valid = function(coeffs, self) { # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # Check that we managed to estimate all required parameters required <- !is.na(iradii) if(!all(is.finite(gamma[required]))) return(FALSE) # Check that the model is integrable # inactive hard cores ... ihc <- (is.na(hradii) | hradii == 0) # .. must have gamma <= 1 return(all(gamma[required & ihc] <= 1)) }, project = function(coeffs, self) { # types types <- self$par$types # interaction radii r[i,j] iradii <- self$par$iradii # hard core radii r[i,j] hradii <- self$par$hradii # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # required gamma parameters required <- !is.na(iradii) # inactive hard cores ihc <- is.na(hradii) | (hradii == 0) # problems okgamma <- is.finite(gamma) & (gamma <= 1) naughty <- ihc & required & !okgamma if(!any(naughty)) return(NULL) # if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delMSH(naughty, types, iradii, hradii, ihc)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) uptri <- (rn <= cn) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) } mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex) inters <- lapply(mats, delMSH, types=types, iradii=iradii, hradii=hradii, ihc=ihc) return(inters) } }, irange = function(self, 
coeffs=NA, epsilon=0, ...) { r <- self$par$iradii h <- self$par$hradii ractive <- !is.na(r) hactive <- !is.na(h) if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 ractive <- ractive & (abs(log(gamma)) > epsilon) } if(!any(c(ractive,hactive))) return(0) else return(max(c(r[ractive],h[hactive]))) }, version=NULL # to be added ) class(BlankMSHobject) <- "interact" # Finally define MultiStraussHard function MultiStraussHard <- function(types=NULL, iradii, hradii) { out <- instantiate.interact(BlankMSHobject, list(types=types, iradii = iradii, hradii = hradii)) if(!is.null(types)) dimnames(out$par$iradii) <- dimnames(out$par$hradii) <- list(types, types) return(out) } MultiStraussHard }) spatstat/R/rhohat.R0000755000176000001440000004303712241443111013761 0ustar ripleyusers# # rhohat.R # # $Revision: 1.48 $ $Date: 2013/04/25 06:37:43 $ # # Non-parametric estimation of a transformation rho(z) determining # the intensity function lambda(u) of a point process in terms of a # spatial covariate Z(u) through lambda(u) = rho(Z(u)). # More generally allows offsets etc. rhohat <- function(object, covariate, ...) { UseMethod("rhohat") } rhohat.ppp <- rhohat.quad <- rhohat.ppm <- function(object, covariate, ..., method=c("ratio", "reweight", "transform"), smoother=c("kernel", "local"), dimyx=NULL, eps=NULL, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 # trap superseded usage argh <- list(...) 
if(missing(method) && ("transform" %in% names(argh))) { warning(paste("Argument ", sQuote("transform"), " has been superseded by ", sQuote("method"), "; see help(rhohat)")) transform <- argh$transform method <- if(transform) "transform" else "ratio" } else method <- match.arg(method) # validate model if(is.ppp(object) || inherits(object, "quad")) { model <- ppm(object, ~1) reference <- "Lebesgue" modelcall <- NULL } else if(is.ppm(object)) { model <- object reference <- "model" modelcall <- model$call } else stop("object should be a point pattern or a point process model") if(is.character(covariate) && length(covariate) == 1) { covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(data.ppm(model)) } else { covunits <- NULL } area <- area.owin(as.owin(data.ppm(model))) rhohatEngine(model, covariate, reference, area, ..., method=method, smoother=smoother, resolution=list(dimyx=dimyx, eps=eps), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, modelcall=modelcall, callstring=callstring) } rhohat.lpp <- rhohat.lppm <- function(object, covariate, ..., method=c("ratio", "reweight", "transform"), smoother=c("kernel", "local"), nd=1000, eps=NULL, n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95) { callstring <- short.deparse(sys.call()) smoother <- match.arg(smoother) method <- match.arg(method) if(missing(covname)) covname <- sensiblevarname(short.deparse(substitute(covariate)), "X") if(is.null(adjust)) adjust <- 1 # validate model if(is.lpp(object)) { X <- object model <- lppm(object, ~1) reference <- "Lebesgue" modelcall <- NULL } else if(inherits(object, "lppm")) { model <- object X <- model$X reference <- "model" modelcall <- model$call } else stop("object should be of class lpp or lppm") if(is.character(covariate) && length(covariate) == 1) { 
covname <- covariate switch(covname, x={ covariate <- function(x,y) { x } }, y={ covariate <- function(x,y) { y } }, stop("Unrecognised covariate name") ) covunits <- unitname(X) } else { covunits <- NULL } totlen <- sum(lengths.psp(as.psp(as.linnet(X)))) rhohatEngine(model, covariate, reference, totlen, ..., method=method, smoother=smoother, resolution=list(nd=nd, eps=eps), n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, covunits=covunits, confidence=confidence, modelcall=modelcall, callstring=callstring) } rhohatEngine <- function(model, covariate, reference=c("Lebesgue", "model"), volume, ..., method=c("ratio", "reweight", "transform"), smoother=c("kernel", "local"), resolution=list(), n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, covunits=NULL, confidence=0.95, modelcall=NULL, callstring="rhohat") { reference <- match.arg(reference) # evaluate the covariate at data points and at pixels stuff <- do.call("evalCovar", append(list(model, covariate), resolution)) # unpack info <- stuff$info values <- stuff$values # values at each data point ZX <- values$ZX # values at each pixel Zimage <- values$Zimage Zvalues <- values$Zvalues lambda <- values$lambda # normalising constants baseline <- if(reference == "Lebesgue") volume else (mean(lambda) * volume) # info savestuff <- list(reference = reference, Zimage = Zimage) # calculate rho-hat result <- rhohatCalc(ZX, Zvalues, lambda, baseline, ..., method=method, smoother=smoother, n=n, bw=bw, adjust=adjust, from=from, to=to, bwref=bwref, covname=covname, confidence=confidence, covunits=covunits, modelcall=modelcall, callstring=callstring, savestuff=savestuff) return(result) } # basic calculation of rhohat from covariate values rhohatCalc <- function(ZX, Zvalues, lambda, baseline, ..., method=c("ratio", "reweight", "transform"), smoother=c("kernel", "local"), n=512, bw="nrd0", adjust=1, from=NULL, to=NULL, bwref=bw, covname, confidence=0.95, covunits = NULL, modelcall=NULL, 
callstring=NULL, savestuff=list()) { method <- match.arg(method) smoother <- match.arg(smoother) # check availability of locfit package if(smoother == "local" && !require(locfit, quietly=TRUE)) { warning(paste("In", paste(dQuote(callstring), ":", sep=""), "package", sQuote("locfit"), "is not available;", "unable to perform local likelihood smoothing;", "using kernel smoothing instead"), call.=FALSE) smoother <- "kernel" } # validate stopifnot(is.numeric(ZX)) stopifnot(is.numeric(Zvalues)) stopifnot(is.numeric(lambda)) stopifnot(length(lambda) == length(Zvalues)) stopifnot(all(is.finite(lambda))) check.1.real(baseline) # normalising constants nX <- length(ZX) kappahat <- nX/baseline # limits Zrange <- range(ZX, Zvalues) if(is.null(from)) from <- Zrange[1] if(is.null(to)) to <- Zrange[2] if(from > Zrange[1] || to < Zrange[2]) stop("Interval [from, to] = ", prange(c(from,to)), "does not contain the range of data values =", prange(Zrange)) # critical constant for CI's crit <- qnorm((1+confidence)/2) percentage <- paste(round(100 * confidence), "%%", sep="") CIblurb <- paste("pointwise", percentage, "confidence interval") # estimate densities if(smoother == "kernel") { # ............... kernel smoothing ...................... interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } # reference density ghat <- unnormdensity(Zvalues,weights=lambda/sum(lambda), bw=bwref,adjust=adjust,n=n,from=from,to=to, ...) xxx <- ghat$x ghatfun <- interpolate(ghat) # relative density switch(method, ratio={ # compute ratio of smoothed densities fhat <- unnormdensity(ZX,bw=bw,adjust=adjust, n=n,from=from, to=to, ...) fhatfun <- interpolate(fhat) yyy <- kappahat * fhatfun(xxx)/ghatfun(xxx) # compute variance approximation sigma <- fhat$bw fstar <- unnormdensity(ZX,bw=bw,adjust=adjust/sqrt(2), n=n,from=from, to=to, ...) 
fstarfun <- interpolate(fstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * nX * fstarfun(xxx)/(baseline * ghatfun(xxx))^2 }, reweight={ # weight Z values by reciprocal of reference wt <- 1/(baseline * ghatfun(ZX)) rhat <- unnormdensity(ZX, weights=wt, bw=bw,adjust=adjust, n=n,from=from, to=to, ...) rhatfun <- interpolate(rhat) yyy <- rhatfun(xxx) # compute variance approximation sigma <- rhat$bw rongstar <- unnormdensity(ZX, weights=wt^2, bw=bw,adjust=adjust/sqrt(2), n=n,from=from, to=to, ...) rongstarfun <- interpolate(rongstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * rongstarfun(xxx) }, transform={ # probability integral transform Gfun <- interpolate(ghat$x, cumsum(ghat$y)/sum(ghat$y)) GZX <- Gfun(ZX) # smooth density on [0,1] qhat <- unnormdensity(GZX,bw=bw,adjust=adjust, n=n, from=0, to=1, ...) qhatfun <- interpolate(qhat) # edge effect correction one <- unnormdensity(seq(from=0,to=1,length.out=512), bw=qhat$bw, adjust=1, n=n,from=0, to=1, ...) onefun <- interpolate(one) # apply to transformed values Gxxx <- Gfun(xxx) yyy <- kappahat * qhatfun(Gxxx)/onefun(Gxxx) # compute variance approximation sigma <- qhat$bw qstar <- unnormdensity(GZX,bw=bw,adjust=adjust/sqrt(2), n=n,from=0, to=1, ...) qstarfun <- interpolate(qstar) const <- 1/(2 * sigma * sqrt(pi)) vvv <- const * nX * qstarfun(Gxxx)/(baseline * onefun(Gxxx))^2 }) vvvname <- "Variance of estimator" vvvlabel <- paste("bold(Var)~hat(%s)", paren(covname), sep="") sd <- sqrt(vvv) hi <- yyy + crit * sd lo <- yyy - crit * sd } else { # .................. local likelihood smoothing ....................... LocfitRaw <- function(x, ...) 
{ do.call.matched("locfit.raw", append(list(x=x), list(...))) } varlog <- function(obj,xx) { # variance of log f-hat stopifnot(inherits(obj, "locfit")) if(!identical(obj$trans, exp)) stop("internal error: locfit object does not have log link") # the following call should have band="local" but that produces NaN's pred <- predict(obj, newdata=xx, se.fit=TRUE, what="coef") se <- pred$se.fit return(se^2) } xlim <- c(from, to) xxx <- seq(from, to, length=n) # reference density ghat <- LocfitRaw(Zvalues, weights=lambda/sum(lambda), xlim=xlim, ...) ggg <- predict(ghat, xxx) # relative density switch(method, ratio={ # compute ratio of smoothed densities fhat <- LocfitRaw(ZX, xlim=xlim, ...) fff <- predict(fhat, xxx) yyy <- kappahat * fff/ggg # compute approximation to variance of log rho-hat varlogN <- 1/nX vvv <- varlog(fhat, xxx) + varlogN }, reweight={ # weight Z values by reciprocal of reference wt <- 1/(baseline * predict(ghat,ZX)) sumwt <- sum(wt) rhat <- LocfitRaw(ZX, weights=(wt/sumwt) * nX, xlim=xlim, ...) rrr <- predict(rhat, xxx) yyy <- sumwt * rrr # compute approximation to variance of log rho-hat varsumwt <- mean(yyy /(baseline * ggg)) * diff(xlim) varlogsumwt <- varsumwt/sumwt^2 vvv <- varlog(rhat, xxx) + varlogsumwt }, transform={ # probability integral transform Gfun <- approxfun(xxx, cumsum(ggg)/sum(ggg), rule=2) GZX <- Gfun(ZX) # smooth density on [0,1], end effect corrected qhat <- LocfitRaw(GZX, xlim=c(0,1), ...) 
# apply to transformed values Gxxx <- Gfun(xxx) qqq <- predict(qhat, Gxxx) yyy <- kappahat * qqq # compute approximation to variance of log rho-hat varlogN <- 1/nX vvv <- varlog(qhat, Gxxx) + varlogN }) vvvname <- "Variance of log of estimator" vvvlabel <- paste("bold(Var)~log(hat(%s)", paren(covname), ")", sep="") sss <- exp(crit * sqrt(vvv)) hi <- yyy * sss lo <- yyy / sss } # pack into fv object df <- data.frame(xxx=xxx, rho=yyy, var=vvv, hi=hi, lo=lo) names(df)[1] <- covname desc <- c(paste("covariate", covname), "Estimated intensity", vvvname, paste("Upper limit of", CIblurb), paste("Lower limit of", CIblurb)) rslt <- fv(df, argu=covname, ylab=substitute(rho(X), list(X=as.name(covname))), valu="rho", fmla= as.formula(paste(". ~ ", covname)), alim=range(ZX), labl=c(covname, paste("hat(%s)", paren(covname), sep=""), vvvlabel, paste("%s[hi]", paren(covname), sep=""), paste("%s[lo]", paren(covname), sep="")), desc=desc, unitname=covunits, fname="rho", yexp=substitute(rho(X), list(X=as.name(covname)))) attr(rslt, "dotnames") <- c("rho", "hi", "lo") # pack up class(rslt) <- c("rhohat", class(rslt)) # add info stuff <- list(modelcall = modelcall, callstring = callstring, sigma = switch(smoother, kernel=sigma, local=NULL), covname = paste(covname, collapse=""), ZX = ZX, lambda = lambda, method = method, smoother = smoother) attr(rslt, "stuff") <- append(stuff, savestuff) return(rslt) } print.rhohat <- function(x, ...) 
{ s <- attr(x, "stuff") cat("Intensity function estimate (class rhohat)\n") cat(paste("for the covariate", s$covname, "\n")) switch(s$reference, area=cat("Function values are absolute intensities\n"), model={ cat("Function values are relative to fitted model\n") print(s$modelcall) }) cat("Estimation method: ") switch(s$method, ratio={ cat("ratio of fixed-bandwidth kernel smoothers\n") }, reweight={ cat("fixed-bandwidth kernel smoother of weighted data") }, transform={ cat(paste("probability integral transform,", "edge-corrected fixed bandwidth kernel smoothing", "on [0,1]\n")) }, cat("UNKNOWN\n")) cat("Smoother: ") switch(s$smoother, kernel={ cat("Kernel density estimator\n") cat(paste("Actual smoothing bandwidth sigma = ", signif(s$sigma,5), "\n")) }, local ={ cat("Local likelihood density estimator\n") } ) cat(paste("Call:", s$callstring, "\n")) NextMethod("print") } plot.rhohat <- function(x, ..., do.rug=TRUE) { xname <- short.deparse(substitute(x)) s <- attr(x, "stuff") covname <- s$covname asked.rug <- !missing(do.rug) && identical(rug, TRUE) do.call("plot.fv", resolve.defaults(list(x=x), list(...), list(main=xname, shade=c("hi", "lo")))) if(do.rug) { rugx <- ZX <- s$ZX # check whether it's the default plot argh <- list(...) 
isfo <- unlist(lapply(argh, inherits, what="formula")) if(any(isfo)) { # a plot formula was given; inspect RHS fmla <- argh[[min(which(isfo))]] rhs <- rhs.of.formula(fmla) vars <- variablesinformula(rhs) vars <- vars[vars %in% c(colnames(x), ".x", ".y")] if(length(vars) == 1 && vars %in% c(covname, ".x")) { # expression in terms of covariate rhstr <- as.character(rhs)[2] dat <- list(ZX) names(dat) <- vars[1] rugx <- as.numeric(eval(parse(text=rhstr), dat)) } else { warning("Unable to add rug plot") rugx <- NULL } } if(!is.null(rugx)) { # restrict to x limits, if given if(!is.null(xlim <- list(...)$xlim)) rugx <- rugx[rugx >= xlim[1] & rugx <= xlim[2]] # finally plot the rug if(length(rugx) > 0) rug(rugx) } } invisible(NULL) } predict.rhohat <- function(object, ..., relative=FALSE) { if(length(list(...)) > 0) warning("Additional arguments ignored in predict.rhohat") # extract info s <- attr(object, "stuff") reference <- s$reference # convert to (linearly interpolated) function x <- with(object, .x) y <- with(object, .y) rho <- approxfun(x, y, rule=2) # extract image of covariate Z <- s$Zimage # apply rho to Z Y <- eval.im(rho(Z)) # adjust to reference baseline if(reference == "model" && !relative) { lambda <- s$lambda Y <- eval.im(Y * lambda) } return(Y) } as.function.rhohat <- function(x, ..., value=".y", extrapolate=TRUE) { NextMethod("as.function") } spatstat/R/nnfun.R0000644000176000001440000000377312252030011013611 0ustar ripleyusers# # nnfun.R # # nearest neighbour function (returns a function of x,y) # # $Revision: 1.4 $ $Date: 2013/12/11 09:22:56 $ # nnfun <- function(X, ...) 
{ UseMethod("nnfun") } nnfun.ppp <- function(X, ..., k=1) { # this line forces X to be bound stopifnot(is.ppp(X)) if(length(k) != 1) stop("k should be a single integer") g <- function(x,y=NULL) { Y <- xy.coords(x, y)[c("x", "y")] nncross(Y, X, what="which", k=k) } attr(g, "Xclass") <- "ppp" g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("nnfun", class(g)) return(g) } nnfun.psp <- function(X, ...) { # this line forces X to be bound stopifnot(is.psp(X)) g <- function(x,y=NULL) { Y <- xy.coords(x, y)[c("x", "y")] nncross(Y, X, what="which") } attr(g, "Xclass") <- "psp" g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("nnfun", class(g)) return(g) } as.owin.nnfun <- function(W, ..., fatal=TRUE) { X <- get("X", envir=environment(W)) as.owin(X, ..., fatal=fatal) } as.im.nnfun <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) { if(is.null(W)) { env <- environment(X) Xdata <- get("X", envir=env) k <- mget("k", envir=env, inherits=FALSE, ifnotfound=list(1))[[1]] Z <- nnmap(Xdata, k=k, what="which", eps=eps, dimyx=dimyx, xy=xy) if(!is.null(na.replace)) Z$v[is.null(Z$v)] <- na.replace return(Z) } # use as.im.function NextMethod("as.im") } print.nnfun <- function(x, ...) 
{ env <- environment(x) X <- get("X", envir=env) k <- mget("k", envir=env, inherits=FALSE, ifnotfound=list(1))[[1]] xtype <- attr(x, "Xclass") typestring <- switch(xtype, ppp="point pattern", psp="line segment pattern", paste("object of class", sQuote(xtype))) Kth <- if(k == 1) "Nearest" else paste0(ordinal(k), "-Nearest") cat(paste(Kth, "Neighbour Index function for ", typestring, "\n")) print(X) return(invisible(NULL)) } spatstat/R/pixellate.R0000755000176000001440000001104312237642727014476 0ustar ripleyusers# # pixellate.R # # $Revision: 1.10 $ $Date: 2013/08/30 01:57:20 $ # # pixellate convert an object to a pixel image # # pixellate.ppp convert a point pattern to a pixel image # (pixel value = number of points in pixel) # # pixellate.owin convert a window to a pixel image # (pixel value = area of intersection with pixel) # pixellate <- function(x, ...) { UseMethod("pixellate") } pixellate.ppp <- function(x, W=NULL, ..., weights=NULL, padzero=FALSE) { verifyclass(x, "ppp") if(!is.null(W)) W <- as.mask(W) else { # determine W using as.mask W <- do.call.matched("as.mask", resolve.defaults(list(...), list(w=x$window))) } insideW <- W$m dimW <- W$dim xcolW <- W$xcol yrowW <- W$yrow xrangeW <- W$xrange yrangeW <- W$yrange unitsW <- unitname(W) # multiple columns of weights? 
if(is.data.frame(weights) || is.matrix(weights)) { k <- ncol(weights) stopifnot(nrow(weights) == npoints(x)) weights <- if(k == 1) as.vector(weights) else as.data.frame(weights) } else { k <- 1 if(!is.null(weights)) stopifnot(length(weights) == npoints(x) || length(weights) == 1) if(length(weights) == 1) weights <- rep(weights, npoints(x)) } # handle empty point pattern if(x$n == 0) { zeroimage <- as.im(as.double(0), W) if(padzero) # map NA to 0 zeroimage <- na.handle.im(zeroimage, 0) result <- zeroimage if(k > 1) { result <- as.listof(rep(list(zeroimage), k)) names(result) <- colnames(weights) } return(result) } # perform calculation pixels <- nearest.raster.point(x$x, x$y, W) nr <- dimW[1] nc <- dimW[2] rowfac <- factor(pixels$row, levels=1:nr) colfac <- factor(pixels$col, levels=1:nc) if(is.null(weights)) { ta <- table(row = rowfac, col = colfac) } else if(k == 1) { ta <- tapply(weights, list(row = rowfac, col=colfac), sum) ta[is.na(ta)] <- 0 } else { ta <- list() for(j in 1:k) { taj <- tapply(weights[,j], list(row = rowfac, col=colfac), sum) taj[is.na(taj)] <- 0 ta[[j]] <- taj } } # pack up as image(s) if(k == 1) { # single image # clip to window of data if(!padzero) ta[!insideW] <- NA out <- im(ta, xcol = xcolW, yrow = yrowW, xrange = xrangeW, yrange = yrangeW, unitname=unitsW) } else { # case k > 1 # create template image to reduce overhead template <- im(ta[[1]], xcol = xcolW, yrow = yrowW, xrange = xrangeW, yrange = yrangeW, unitname=unitsW) out <- list() for(j in 1:k) { taj <- ta[[j]] # clip to window of data if(!padzero) taj[!insideW] <- NA # copy template and reassign pixel values outj <- template outj$v <- taj # store out[[j]] <- outj } out <- as.listof(out) names(out) <- names(weights) } return(out) } pixellate.owin <- function(x, W=NULL, ...) { stopifnot(is.owin(x)) P <- as.polygonal(x) R <- as.rectangle(x) if(is.null(W)) W <- R else if(!is.subset.owin(R, as.rectangle(W))) stop("W does not cover the domain of x") W <- as.mask(W, ...) 
# x0 <- W$xrange[1] y0 <- W$yrange[1] dx <- W$xstep dy <- W$ystep nx <- W$dim[2] ny <- W$dim[1] # set up output image (real-valued) and initialise to zero Z <- as.im(W, value=pi, na.replace=pi) Z <- eval.im(Z * 0) # process each component polygon B <- P$bdry DUP <- spatstat.options("dupC") for(i in seq_along(B)) { PP <- B[[i]] # transform so that pixels become unit squares QQ <- affinexypolygon(PP, vec = c(-x0, -y0)) RR <- affinexypolygon(QQ, mat = diag(1/c(dx, dy))) # xx <- RR$x yy <- RR$y nn <- length(xx) # close polygon xx <- c(xx, xx[1]) yy <- c(yy, yy[1]) nn <- nn+1 # call C routine zz <- .C("poly2imA", ncol=as.integer(nx), nrow=as.integer(ny), xpoly=as.double(xx), ypoly=as.double(yy), npoly=as.integer(nn), out=as.double(numeric(nx * ny)), status=as.integer(integer(1)), DUP=DUP) # PACKAGE="spatstat") if(zz$status != 0) stop("Internal error") # increment output image Z$v <- Z$v + matrix(zz$out, ny, nx, byrow=TRUE) } # revert to original scale pixelarea <- dx * dy Z <- eval.im(Z * pixelarea) return(Z) } spatstat/R/allstats.R0000755000176000001440000000221012237642727014332 0ustar ripleyusers# # # allstats.R # # $Revision: 1.16 $ $Date: 2013/04/25 06:37:43 $ # # allstats <- function(pp, ..., dataname=NULL,verb=FALSE) { # # Function allstats --- to calculate the F, G, K, and J functions # for an unmarked point pattern. 
# verifyclass(pp,"ppp") if(is.marked(pp)) stop("This function is applicable only to unmarked patterns.\n") # estimate F, G and J if(verb) cat("Calculating F, G, J ...") Jout <- do.call.matched("Jest",list(X=pp, ...)) if(verb) cat("ok.\n") # extract F, G and J Fout <- attr(Jout, "F") Gout <- attr(Jout, "G") attr(Jout, "F") <- NULL attr(Jout, "G") <- NULL fns <- list("F function"=Fout, "G function"=Gout, "J function"=Jout) # compute second moment function K if(verb) cat("Calculating K function...") Kout <- do.call.matched("Kest", list(X=pp, ...)) fns <- append(fns, list("K function"=Kout)) if(verb) cat("done.\n") # add title if(is.null(dataname)) dataname <- short.deparse(substitute(pp)) title <- paste("Four summary functions for ", dataname,".",sep="") attr(fns, "title") <- title # fns <- as.listof(fns) return(fns) } spatstat/R/linnet.R0000755000176000001440000001750612237642727014012 0ustar ripleyusers# # linnet.R # # Linear networks # # $Revision: 1.22 $ $Date: 2013/05/14 06:04:02 $ # # An object of class 'linnet' defines a linear network. # It includes the following components # # vertices (ppp) vertices of network # # m (matrix) adjacency matrix # # lines (psp) edges of network # # dpath (matrix) matrix of shortest path distances # between each pair of vertices # # from, to (vectors) map from edges to vertices. 
# The endpoints of the i-th segment lines[i] # are vertices[from[i]] and vertices[to[i]] # # # FUNCTIONS PROVIDED: # linnet creates an object of class "linnet" from data # print.linnet print an object of class "linnet" # plot.linnet plot an object of class "linnet" # # Make an object of class "linnet" from the minimal data linnet <- function(vertices, m, edges) { if(missing(m) && missing(edges)) stop("specify either m or edges") if(!missing(m) && !missing(edges)) stop("do not specify both m and edges") # validate inputs stopifnot(is.ppp(vertices)) if(!missing(m)) { # check logical matrix stopifnot(is.matrix(m) && is.logical(m) && isSymmetric(m)) if(nrow(m) != vertices$n) stop("dimensions of matrix m do not match number of vertices") } else { # check (from, to) pairs stopifnot(is.matrix(edges) && ncol(edges) == 2) if(any((edges %% 1) != 0)) stop("Entries of edges list should be integers") np <- npoints(vertices) if(any(edges > np)) stop("index out-of-bounds in edges list") # convert to adjacency matrix m <- matrix(FALSE, np, np) m[edges] <- TRUE m <- m | t(m) } # create line segments rowm <- row(m) colm <- col(m) uptri <- m & (rowm < colm) from <- as.vector(rowm[uptri]) to <- as.vector(colm[uptri]) xx <- vertices$x yy <- vertices$y lines <- psp(xx[from], yy[from], xx[to], yy[to], window=vertices$window, check=FALSE) # compute matrix of distances between adjacent vertices n <- nrow(m) d <- matrix(Inf, n, n) diag(d) <- 0 d[m] <- pairdist(vertices)[m] # now compute shortest-path distances between each pair of vertices dpath <- dist2dpath(d) if(any(is.infinite(dpath))) warning("Network is not connected") # pack up out <- list(vertices=vertices, m=m, lines=lines, from=from, to=to, dpath=dpath, window=vertices$window) class(out) <- c("linnet", class(out)) # pre-compute circumradius out$circumradius <- circumradius(out) return(out) } print.linnet <- function(x, ...) 
{ cat(paste("Linear network with", x$vertices$n, "vertices,", x$lines$n, "lines and", sum(x$m/2), "edges\n")) return(invisible(NULL)) } summary.linnet <- function(object, ...) { print(object, ...) unitinfo <- summary(unitname(object)) cat(paste("Total length", sum(lengths.psp(object$lines)), unitinfo$plural, unitinfo$explain, "\n")) print(as.owin(object)) return(invisible(NULL)) } plot.linnet <- function(x, ..., main=NULL, add=FALSE, vertices=FALSE, window=FALSE) { if(is.null(main)) main <- short.deparse(substitute(x)) stopifnot(inherits(x, "linnet")) lines <- as.psp(x) if(!add) { # initialise new plot w <- as.owin(lines) if(window) plot(w, ..., main=main) else plot(w, ..., main=main, type="n") } # plot segments and (optionally) vertices plot(lines, ..., add=TRUE, main=main) if(vertices) plot(x$vertices, add=TRUE) return(invisible(NULL)) } as.psp.linnet <- function(x, ..., fatal=TRUE) { verifyclass(x, "linnet", fatal=fatal) return(x$lines) } as.owin.linnet <- function(W, ...) { return(as.owin(as.psp(W))) } as.linnet <- function(X, ...) { UseMethod("as.linnet") } as.linnet.linnet <- function(X, ...) 
{ X } unitname.linnet <- function(x) { unitname(x$window) } "unitname<-.linnet" <- function(x, value) { w <- x$window v <- x$vertices l <- x$lines unitname(w) <- unitname(v) <- unitname(l) <- value x$window <- w x$vertices <- v x$lines <- l return(x) } diameter.linnet <- function(x) { stopifnot(inherits(x, "linnet")) max(x$dpath) } circumradius <- function(x) { stopifnot(inherits(x, "linnet")) cr <- x$circumradius if(!is.null(cr)) return(cr) dpath <- x$dpath from <- x$from to <- x$to lines <- x$lines nseg <- lines$n leng <- lengths.psp(lines) sA <- sB <- matrix(Inf, nseg, nseg) for(i in 1:nseg) { # endpoints of segment i A <- from[i] B <- to[i] AB <- leng[i] sA[i,i] <- sB[i,i] <- AB/2 for(j in (1:nseg)[-i]) { # endpoints of segment j C <- from[j] D <- to[j] CD <- leng[j] AC <- dpath[A,C] AD <- dpath[A,D] BC <- dpath[B,C] BD <- dpath[B,D] # max dist from A to any point in segment j sA[i,j] <- if(AD > AC + CD) AC + CD else if(AC > AD + CD) AD + CD else (AC + AD + CD)/2 # max dist from B to any point in segment j sB[i,j] <- if(BD > BC + CD) BC + CD else if(BC > BD + CD) BD + CD else (BC + BD + CD)/2 } } # max dist from each A to any point in another segment mA <- apply(sA, 1, max) # max dist from each B to any point in another segment mB <- apply(sB, 1, max) # min of these min(mA, mB) } #################################################### # affine transformations #################################################### scalardilate.linnet <- function(X, f, ...) { trap.extra.arguments(..., .Context="In scalardilate(X,f)") check.1.real(f, "In scalardilate(X,f)") stopifnot(is.finite(f) && f > 0) Y <- X Y$vertices <- scalardilate(X$vertices, f=f) Y$lines <- scalardilate(X$lines, f=f) Y$window <- scalardilate(X$window, f=f) Y$dpath <- f * X$dpath Y$circumradius <- f * X$circumradius return(Y) } affine.linnet <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) 
{ verifyclass(X, "linnet") if(length(unique(eigen(mat)$values)) == 1) { # transformation is an isometry scal <- sqrt(abs(det(mat))) Y <- X Y$vertices <- affine(X$vertices, mat=mat, vec=vec, ...) Y$lines <- affine(X$lines, mat=mat, vec=vec, ...) Y$window <- affine(X$window, mat=mat, vec=vec, ...) Y$dpath <- scal * X$dpath Y$circumradius <- scal * X$circumradius } else { # general case vertices <- affine(X$vertices, mat=mat, vec=vec, ...) Y <- linnet(vertices, edges=cbind(X$from, X$to)) } return(Y) } shift.linnet <- function(X, ...) { verifyclass(X, "linnet") Y <- X Y$vertices <- shift(X$vertices, ...) Y$lines <- shift(X$lines, ...) Y$window <- shift(X$window, ...) # tack on shift vector attr(Y, "lastshift") <- attr(Y$vertices, "lastshift") return(Y) } rotate.linnet <- function(X, angle=pi/2, ...) { verifyclass(X, "linnet") Y <- X Y$vertices <- rotate(X$vertices, angle=angle, ...) Y$lines <- rotate(X$lines, angle=angle, ...) Y$window <- rotate(X$window, angle=angle, ...) return(Y) } rescale.linnet <- function(X, s) { if(missing(s)) s <- 1/unitname(X)$multiplier Y <- scalardilate(X, f=1/s) unitname(Y) <- rescale(unitname(X), s) return(Y) } "[.linnet" <- function(x, i, ...) { if(!is.owin(i)) stop("In [.linnet: the index i should be a window", call.=FALSE) # Find vertices that lie inside 'i' okvert <- inside.owin(x$vertices, w=i) # find segments whose endpoints both lie in 'upper' okedge <- okvert[x$from] & okvert[x$to] # assign new serial numbers to vertices, and recode newserial <- cumsum(okvert) newfrom <- newserial[x$from[okedge]] newto <- newserial[x$to[okedge]] # make new linear network xnew <- linnet(x$vertices[i], edges=cbind(newfrom, newto)) return(xnew) } spatstat/R/layered.R0000755000176000001440000001231612237642727014140 0ustar ripleyusers# # layered.R # # Simple mechanism for layered plotting # # $Revision: 1.17 $ $Date: 2013/04/25 06:37:43 $ # layered <- function(..., plotargs=NULL, LayerList=NULL) { argh <- list(...) 
if(length(argh) > 0 && !is.null(LayerList)) stop("LayerList is incompatible with other arguments") out <- if(!is.null(LayerList)) LayerList else argh n <- length(out) if(sum(nzchar(names(out))) != n) names(out) <- paste("Layer", seq_len(n)) if(!is.null(plotargs)) { if(!is.list(plotargs) || !all(unlist(lapply(plotargs, is.list)))) stop("plotargs should be a list of lists") if(length(plotargs) != length(out)) stop("plotargs should have one component for each element of the list") } else { plotargs <- rep.int(list(list()), length(out)) } names(plotargs) <- names(out) attr(out, "plotargs") <- plotargs class(out) <- c("layered", class(out)) return(out) } print.layered <- function(x, ...) { cat("Layered object\n") for(i in seq_along(x)) { cat(paste("\n", names(x)[i], ":\n", sep="")) print(x[[i]]) } pl <- layerplotargs(x) hasplot <- (unlist(lapply(pl, length)) > 0) if(any(hasplot)) cat(paste("\nIncludes plot arguments for", commasep(names(pl)[hasplot]), "\n")) invisible(NULL) } plot.layered <- function(x, ..., which=NULL, plotargs=NULL) { xname <- short.deparse(substitute(x)) main <- resolve.1.default("main", list(...), list(main=xname)) xp <- if(is.null(which)) x else x[which] if(length(xp) == 0) return(invisible(NULL)) # validate plotting arguments if(is.null(plotargs)) { plotargs <- attr(x, "plotargs") if(!is.null(plotargs) && !is.null(which)) plotargs <- plotargs[which] } else { if(!is.list(plotargs) || !all(unlist(lapply(plotargs, is.list)))) stop("plotargs should be a list of lists") if(length(plotargs) != length(xp)) stop("plotargs should have one component for each layer to be plotted") } # determine plot frame started <- FALSE add <- resolve.1.default("add", list(...), list(add=FALSE)) if(add) { started <- TRUE } else { # new plot notnul <- !unlist(lapply(x, is.null)) if(sum(notnul) > 1) { # more than one non-trivial layer. 
# Determine bounding frame boxes <- lapply(x[notnul], function(z) { try(as.rectangle(z), silent=TRUE) }) if(!any(unlist(lapply(boxes, inherits, what="try-error")))) { bb <- do.call("bounding.box", boxes) plot(bb, type="n", main=main) started <- TRUE } } } # plot the layers out <- list() for(i in seq_along(xp)) { xpi <- xp[[i]] if(length(xpi) == 0) { # null layer - no plotting out[[i]] <- NULL } else { # plot layer i on top of previous layers iargs <- if(!started) list(main=main) else list(add=TRUE) out[[i]] <- do.call("plot", resolve.defaults(list(x=xpi), list(...), plotargs[[i]], iargs)) started <- TRUE } } return(invisible(out)) } "[.layered" <- function(x, i, j, drop=FALSE, ...) { if(missing(i) && missing(j)) return(x) p <- attr(x, "plotargs") x <- unclass(x) nx <- length(x) if(!missing(i) && !is.null(i)) { x <- x[i] p <- p[i] nx <- length(x) } isnul <- (unlist(lapply(x, length)) == 0) if(!missing(j) && !is.null(j)) x[!isnul] <- lapply(x[!isnul], "[", i=j) if(drop && nx == 1) return(x[[1]]) y <- layered(LayerList=x, plotargs=p) return(y) } layerplotargs <- function(L) { stopifnot(inherits(L, "layered")) attr(L, "plotargs") } "layerplotargs<-" <- function(L, value) { stopifnot(inherits(L, "layered")) if(length(value) != length(L)) stop("Replacement value is wrong length") if(!identical(names(value), names(L))) stop("Mismatch in names of list elements") attr(L, "plotargs") <- value L } applytolayers <- function(L, FUN, ...) { # Apply FUN to each **non-null** layer, # preserving the plot arguments pla <- layerplotargs(L) ok <- !unlist(lapply(L, is.null)) L[ok] <- lapply(L[ok], FUN, ...) Z <- layered(LayerList=L, plotargs=pla) return(Z) } shift.layered <- function(X, ...) { applytolayers(X, shift, ...) } affine.layered <- function(X, ...) { applytolayers(X, affine, ...) } rotate.layered <- function(X, ...) { applytolayers(X, rotate, ...) 
} reflect.layered <- function(X) { applytolayers(X, reflect) } flipxy.layered <- function(X) { applytolayers(X, flipxy) } scalardilate.layered <- function(X, ...) { applytolayers(X, scalardilate, ...) } rescale.layered <- function(X, s) { if(!missing(s)) applytolayers(X, rescale, s=s) else applytolayers(X, rescale) } as.owin.layered <- function(W, ..., fatal=TRUE) { if(length(W) == 0) { if(fatal) stop("Layered object is empty: no window data") return(NULL) } # remove null layers isnul <- unlist(lapply(W, is.null)) W <- W[!isnul] Wlist <- lapply(unname(W), as.owin, ..., fatal=fatal) Wlist <- lapply(Wlist, rescue.rectangle) Z <- Wlist[[1]] if(length(Wlist) > 1) { same <- unlist(lapply(Wlist[-1], identical, y=Z)) if(!all(same)) Z <- do.call("union.owin", Wlist) } return(Z) } spatstat/R/vcov.ppm.R0000755000176000001440000015567112245335375014274 0ustar ripleyusers# # Asymptotic covariance & correlation matrices # and Fisher information matrix # for ppm objects # # $Revision: 1.98 $ $Date: 2013/11/27 09:27:15 $ # vcov.ppm <- local({ vcov.ppm <- function(object, ..., what="vcov", verbose=TRUE, gam.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), hessian=FALSE) { verifyclass(object, "ppm") argh <- list(...) 
gam.action <- match.arg(gam.action) matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) stopifnot(length(what) == 1 && is.character(what)) what.options <- c("vcov", "corr", "fisher", "Fisher", "internals", "all") what.map <- c("vcov", "corr", "fisher", "fisher", "internals", "all") if(is.na(m <- pmatch(what, what.options))) stop(paste("Unrecognised option: what=", sQuote(what))) what <- what.map[m] # nonstandard calculations (hack) generic.triggers <- c("A1", "A1dummy", "new.coef", "matwt", "saveterms") nonstandard <- any(generic.triggers %in% names(argh)) saveterms <- identical(resolve.1.default("saveterms", argh), TRUE) # Fisher information *may* be contained in object fisher <- object$fisher varcov <- object$varcov # Do we need to go into the guts? needguts <- nonstandard || (is.null(fisher) && what=="fisher") || (is.null(varcov) && what %in% c("vcov", "corr")) || (what %in% c("internals", "all")) # In general it is not true that varcov = solve(fisher) # because we might use different estimators, # or the parameters might be a subset of the canonical parameter if(needguts) { # warn if fitted model was obtained using GAM if(identical(object$fitter, "gam")) { switch(gam.action, fatal={ stop(paste("model was fitted by gam();", "execution halted because fatal=TRUE"), call.=FALSE) }, warn={ warning(paste("model was fitted by gam();", "asymptotic variance calculation ignores this"), call.=FALSE) }, silent={}) } # ++++ perform main calculation ++++ if(is.poisson(object) || hessian) { # Poisson model, or Hessian of Gibbs model results <- vcalcPois(object, ..., what=what, matrix.action=matrix.action, verbose=verbose, fisher=fisher) } else { # Gibbs model results <- vcalcGibbs(object, ..., what=what, matrix.action=matrix.action) } varcov <- results$varcov fisher <- results$fisher internals <- results$internals } if(what %in% c("vcov", "corr") && is.null(varcov)) { # Need variance-covariance matrix. 
if(!is.null(fisher) && is.poisson(object)) # Derive from Fisher information varcov <- checksolve(fisher, matrix.action, "Fisher information matrix", "variance") } out <- switch(what, fisher = fisher, vcov = varcov, corr = { if(is.null(varcov)) return(NULL) sd <- sqrt(diag(varcov)) varcov / outer(sd, sd, "*") }, internals = internals, all = results ) return(out) } # ................ variance calculation for Poisson models ............. vcalcPois <- function(object, ..., what = c("vcov", "corr", "fisher", "internals", "all"), matrix.action=c("warn", "fatal", "silent"), method=c("C", "interpreted"), verbose=TRUE, fisher=NULL, matwt=NULL, new.coef=NULL, saveterms=FALSE) { # variance-covariance matrix of Poisson model, # or Hessian of Gibbs model what <- match.arg(what) method <- match.arg(method) matrix.action <- match.arg(matrix.action) if(reweighting <- !is.null(matwt)) stopifnot(is.numeric(matwt) && is.vector(matwt)) internals <- NULL nonstandard <- reweighting || !is.null(new.coef) || saveterms # compute Fisher information if not known if(is.null(fisher) || nonstandard) { gf <- getglmfit(object) # we need a glm or gam if(is.null(gf)) { if(verbose) warning("Refitting the model using GLM/GAM") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) if(is.null(gf)) stop("Internal error - refitting did not yield a glm object") } # compute fitted intensity and sufficient statistic ltype <- if(is.poisson(object)) "trend" else "lambda" lambda <- fitted(object, type=ltype, new.coef=new.coef, check=FALSE) mom <- model.matrix(object) nmom <- nrow(mom) Q <- quad.ppm(object) wt <- w.quad(Q) ok <- getglmsubset(object) Z <- is.data(Q) # save them if(what == "internals") { internals <- if(!saveterms) list(suff=mom) else list(suff=mom, mom=mom, lambda=lambda, Z=Z, ok=ok) } # Now restrict all terms to the domain of the pseudolikelihood lambda <- lambda[ok] mom <- mom[ok, , drop=FALSE] wt <- wt[ok] Z <- Z[ok] # apply weights to rows of model matrix - temporary hack 
if(reweighting) { nwt <- length(matwt) if(nwt == nmom) { # matwt matches original quadrature scheme - trim it matwt <- matwt[ok] } else if(nwt != sum(ok)) stop("Hack argument matwt has incompatible length") mom.orig <- mom mom <- matwt * mom } # compute Fisher information switch(method, C = { fisher <- sumouter(mom, lambda * wt) if(reweighting) { gradient <- sumouter(mom.orig, matwt * lambda * wt) } }, interpreted = { if(!reweighting) { fisher <- 0 for(i in 1:nrow(mom)) { ro <- mom[i, ] v <- outer(ro, ro, "*") * lambda[i] * wt[i] if(!any(is.na(v))) fisher <- fisher + v } momnames <- dimnames(mom)[[2]] dimnames(fisher) <- list(momnames, momnames) } else { fisher <- gradient <- 0 for(i in 1:nrow(mom)) { ro <- mom[i, ] ro0 <- mom.orig[i,] ldu <- lambda[i] * wt[i] v <- outer(ro, ro, "*") * ldu v0 <- outer(ro0, ro0, "*") * matwt[i] * ldu if(!any(is.na(v))) fisher <- fisher + v if(!any(is.na(v0))) gradient <- gradient + v0 } momnames <- dimnames(mom)[[2]] dn <- list(momnames, momnames) dimnames(fisher) <- dimnames(gradient) <- dn } }) } if(what %in% c("all", "internals")) { # Internals needed if(is.null(internals)) internals <- list(suff = model.matrix(object)) internals$fisher <- fisher if(reweighting) internals$gradient <- gradient ilist <- list(internals=internals) } if(what %in% c("all", "vcov", "corr")) { # Variance-covariance matrix needed if(!reweighting) { # Derive variance-covariance from Fisher info varcov <- checksolve(fisher, matrix.action, "Fisher information matrix", "variance") vcovlist <- list(fisher=fisher, varcov=varcov) } else { invgrad <- checksolve(gradient, matrix.action, "gradient matrix", "variance") varcov <- if(is.null(invgrad)) NULL else invgrad %*% fisher %*% invgrad vcovlist <- list(fisher=fisher, varcov=varcov, invgrad=invgrad) } } result <- switch(what, fisher = list(fisher=fisher), vcov = vcovlist, corr = vcovlist, internals = ilist, all = append(ilist, vcovlist)) return(result) } # ...................... 
vcov calculation for Gibbs models .................... vcalcGibbs <- function(fit, ..., what = c("vcov", "corr", "fisher", "internals", "all"), generic=FALSE) { what <- match.arg(what) # decide whether to use the generic, slower algorithm generic.triggers <- c("A1", "A1dummy", "new.coef", "matwt", "saveterms") use.generic <- generic || !is.stationary(fit) || (fit$method == "logi" && ("marks" %in% variablesinformula(fit$trend))) || (fit$method != "logi" && has.offset(fit)) || (fit$method == "logi" && has.offset.term(fit)) || !(fit$correction == "border" && fit$rbord == reach(fit)) || any(generic.triggers %in% names(list(...))) || !identical(options("contrasts")[[1]], c(unordered="contr.treatment", ordered="contr.poly")) # compute spill <- (what %in% c("all", "internals", "fisher")) spill.vc <- (what == "all") out <- if(use.generic) vcalcGibbsGeneral(fit, ..., spill=spill, spill.vc=spill.vc) else vcalcGibbsSpecial(fit, ..., spill=spill, spill.vc=spill.vc) switch(what, vcov = , corr = { # out is the variance-covariance matrix; return it return(list(varcov=out)) }, fisher = { # out is a list of internal data: extract the Fisher info Fmat <- with(out, if(fit$method != "logi") Sigma else Sigma1log+Sigma2log) return(list(fisher=Fmat)) }, internals = { # out is a list of internal data: return it # (ensure model matrix is included) if(is.null(out$mom)) out$mom <- model.matrix(fit) return(list(internals=out)) }, all = { # out is a list(internals, vc): return it # (ensure model matrix is included) if(is.null(out$internals$mom)) out$internals$mom <- model.matrix(fit) return(out) }, ) return(NULL) } # ...................... general algorithm ........................... 
vcalcGibbsGeneral <- function(model, ..., spill = FALSE, spill.vc = FALSE, matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), algorithm=c("vectorclip", "vector", "basic"), A1 = NULL, A1dummy = FALSE, matwt = NULL, new.coef = NULL, saveterms = FALSE, parallel = TRUE ) { matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) algorithm <- match.arg(algorithm) if(reweighting <- !is.null(matwt)) stopifnot(is.numeric(matwt) && is.vector(matwt)) spill <- spill || spill.vc saveterms <- spill && saveterms logi <- model$method=="logi" asked.parallel <- !missing(parallel) old.coef <- coef(model) use.coef <- if(!is.null(new.coef)) new.coef else old.coef p <- length(old.coef) if(p == 0) { # this probably can't happen if(!spill) return(matrix(, 0, 0)) else return(list()) } pnames <- names(old.coef) dnames <- list(pnames, pnames) internals <- list() # sumobj <- summary(model, quick="entries") correction <- model$correction rbord <- model$rbord R <- reach(model, epsilon=1e-2) Q <- quad.ppm(model) D <- dummy.ppm(model) rho <- model$internal$logistic$rho ## If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area.owin(D)*markspace.integral(D)) X <- data.ppm(model) Z <- is.data(Q) W <- as.owin(model) areaW <- if(correction == "border") eroded.areas(W, rbord) else area.owin(W) # # determine which quadrature points contributed to the # sum/integral in the pseudolikelihood # (e.g. 
some points may be excluded by the border correction) okall <- getglmsubset(model) # data only: ok <- okall[Z] nX <- npoints(X) # conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) # data and dummy: lamall <- fitted(model, check = FALSE, new.coef = new.coef) # data only: lam <- lamall[Z] # sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) # data and dummy: mall <- model.matrix(model) # save if(saveterms) internals <- append(internals, list(mom=mall, lambda=lamall, Z=Z, ok=okall, matwt=matwt)) if(reweighting) { # each column of the model matrix is multiplied by 'matwt' check.nvector(matwt, nrow(mall), things="quadrature points") mall.orig <- mall mall <- mall * matwt } # subsets of model matrix mokall <- mall[okall, , drop=FALSE] # data only: m <- mall[Z, , drop=FALSE] mok <- m[ok, , drop=FALSE] # if(reweighting) { # save unweighted versions mokall.orig <- mall.orig[okall, , drop=FALSE] m.orig <- mall.orig[Z, , drop=FALSE] mok.orig <- m.orig[ok, , drop=FALSE] # matwtX <- matwt[Z] } # ^^^^^^^^^^^^^^^^ First order (sensitivity) matrices A1, S # logistic if(logi){ # Sensitivity matrix S for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) dimnames(Slog) <- dnames # A1 matrix for logistic case A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) dimnames(A1log) <- dnames } # Sensitivity matrix for MPLE case (= A1) if(is.null(A1) || reweighting) { if(A1dummy){ A1 <- sumouter(mokall, w = (lamall * w.quad(Q))[okall]) if(reweighting) gradient <- sumouter(mokall.orig, w=(matwt * lamall * w.quad(Q))[okall]) } else{ A1 <- sumouter(mok) if(reweighting) gradient <- sumouter(mok.orig, w=matwtX) } } else { stopifnot(is.matrix(A1)) if(!all(dim(A1) == p)) stop(paste("Matrix A1 has wrong dimensions:", prange(dim(A1)), "!=", prange(c(p, p)))) } dimnames(A1) <- dnames # ^^^^^^^^^^ Second order interaction effects A2, A3 # ^^^^^^^^^^^^^^^^^^^^ `parallel' evaluation need.loop <- TRUE if(parallel) { # compute second 
order difference # ddS[i,j,] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- deltasuffstat(model, restrict=TRUE, force=FALSE) if(is.null(ddS)) { if(asked.parallel) warning("parallel option not available - reverting to loop") } else { need.loop <- FALSE # rearrange so that # ddS[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) ddS <- aperm(ddS, c(3,2,1)) # now compute sum_{i,j} for i != j # outer(ddS[,i,j], ddS[,j,i]) ddSok <- ddS[ , ok, ok, drop=FALSE] A3 <- sumsymouter(ddSok) # mom.array[ ,i,j] = h(X[i] | X) mom.array <- array(t(m), dim=c(p, nX, nX)) # momdel[ ,i,j] = h(X[i] | X[-j]) momdel <- mom.array - ddS # lamdel[i,j] = lambda(X[i] | X[-j]) lamdel <- matrix(lam, nX, nX) * exp(tensor(-use.coef, ddS, 1, 1)) # pairweight[i,j] = lamdel[i,j]/lambda[i] - 1 pairweight <- lamdel / lam - 1 # now compute sum_{i,j} for i != j # pairweight[i,j] * outer(momdel[,i,j], momdel[,j,i]) # for data points that contributed to the pseudolikelihood momdelok <- momdel[ , ok, ok, drop=FALSE] A2 <- sumsymouter(momdelok, w=pairweight[ok, ok]) if(logi){ # lam.array[ ,i,j] = lambda(X[i] | X) lam.array <- array(lam, c(nX,nX,p)) lam.array <- aperm(lam.array, c(3,1,2)) # lamdel.array[,i,j] = lambda(X[i] | X[-j]) lamdel.array <- array(lamdel, c(nX,nX,p)) lamdel.array <- aperm(lamdel.array, c(3,1,2)) momdellogi <- rho/(lamdel.array+rho)*momdel momdellogiok <- momdellogi[ , ok, ok, drop=FALSE] A2log <- sumsymouter(momdellogiok, w=pairweight[ok, ok]) ddSlogi <- rho/(lam.array+rho)*mom.array - momdellogi ddSlogiok <- ddSlogi[ , ok, ok, drop=FALSE] A3log <- sumsymouter(ddSlogiok) } } } # ^^^^^^^^^^^^^^^^^^^^ loop evaluation if(need.loop) { A2 <- A3 <- matrix(0, p, p, dimnames=dnames) if(logi) A2log <- A3log <- matrix(0, p, p, dimnames=dnames) if(saveterms) { # *initialise* matrices # lamdel[i,j] = lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)]) lamdel <- matrix(lam, nX, nX) # momdel[ ,i,j] = h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)]) momdel <- array(t(m), dim=c(p, nX, nX)) } # identify close pairs if(is.finite(R)) { 
cl <- closepairs(X, R, what="indices") I <- cl$i J <- cl$j if(algorithm == "vectorclip") { cl2 <- closepairs(X, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } } else { # either infinite reach, or something wrong IJ <- expand.grid(I=1:nX, J=1:nX) IJ <- subset(IJ, I != J) I2 <- I <- IJ$I J2 <- J <- IJ$J } # filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] # if(length(I) > 0 && length(J) > 0) { # .............. loop over pairs ........................ # The following ensures that 'empty' and 'X' have compatible marks empty <- X[integer(0)] # make an empty 'equalpairs' matrix nonE <- matrix(, nrow=0, ncol=2) # Run through pairs switch(algorithm, basic={ for(i in unique(I)) { Xi <- X[i] Ji <- unique(J[I==i]) if((nJi <- length(Ji)) > 0) { for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] # compute conditional intensity # lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] plamj.i <- predict(model, type="cif", locations=X[j], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) # corresponding values of sufficient statistic # h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] pmj.i <- partialModelMatrix(X.ij, X[j], model)[nX-1, ] # conditional intensity and sufficient statistic # in reverse order # lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] plami.j <- predict(model, type="cif", locations=X[i], X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi.j <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] # if(reweighting) { pmj.i <- pmj.i * matwtX[j] pmi.j <- pmi.j * matwtX[i] } if(saveterms) { lamdel[i,j] <- plami.j momdel[ , i, j] <- pmi.j lamdel[j,i] <- plamj.i momdel[ , j, i] <- pmj.i } # increment A2, A3 wt <- plami.j / lam[i] - 1 A2 <- A2 + wt * outer(pmi.j, pmj.i) if(logi) A2log <- A2log + wt * rho/(plami.j+rho) * rho/(plamj.i+rho) * outer(pmi.j, pmj.i) # delta sufficient statistic # delta_i h(X[j] | X[-c(i,j)]) # = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) # = h(X[j] | X) - h(X[j] | X[-i]) # delta_j 
h(X[i] | X[-c(i,j)]) # = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) # = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj.i deltajSi <- m[i, ] - pmi.j A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/ (lam[j]+rho) - pmj.i/(plamj.i+rho)) deltajSilog <- rho*(m[i, ]/ (lam[i]+rho) - pmi.j/(plami.j+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vector={ # --------- faster algorithm using vector functions ------------ for(i in unique(I)) { Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] # neighbours of X[i] XJi <- X[Ji] # all points other than X[i] X.i <- X[-i] # index of XJi in X.i J.i <- Ji - (Ji > i) # equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) # compute conditional intensity # lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] # for all j plamj <- predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, sumobj=sumobj, E=E.i) # corresponding values of sufficient statistic # h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] # for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] # # conditional intensity & sufficient statistic in reverse order # lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] # for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] X.ij <- X[-c(i,j)] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX-1, ] } # if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } # increment A2, A3 wt <- plami / lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) # delta sufficient statistic # delta_i h(X[j] | X[-c(i,j)]) # = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) 
# = h(X[j] | X) - h(X[j] | X[-i]) # delta_j h(X[i] | X[-c(i,j)]) # = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) # = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }, vectorclip={ # --------- faster version of 'vector' algorithm # -------- by removing non-interacting points of X for(i in unique(I)) { # all points within 2R J2i <- unique(J2[I2==i]) # all points within R Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] # neighbours of X[i] XJi <- X[Ji] # replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) # index of XJi in X.i J.i <- match(Ji, J2i) if(any(is.na(J.i))) stop("Internal error: Ji not a subset of J2i") # equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) # compute conditional intensity # lambda(X[j] | X[-i]) = lambda(X[j] | X[-c(i,j)] # for all j plamj <- predict(model, type="cif", locations=XJi, X=X.i, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=E.i) # corresponding values of sufficient statistic # h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] # for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] # # conditional intensity & sufficient statistic in reverse order # lambda(X[i] | X[-j]) = lambda(X[i] | X[-c(i,j)] # for all j plami <- numeric(nJi) pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] # X.ij <- X[-c(i,j)] X.ij <- X.i[-J.i[k]] plami[k] <- predict(model, type="cif", locations=Xi, X=X.ij, check = FALSE, new.coef = new.coef, sumobj = sumobj, E=nonE) pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX.i, ] } # if(reweighting) { pmj <- pmj * matwtX[Ji] pmi <- pmi * matwtX[i] } if(saveterms) { lamdel[Ji, i] <- plamj momdel[ , Ji, i] <- t(pmj) lamdel[i,Ji] <- plami momdel[ , i, Ji] <- t(pmi) } # increment A2, A3 wt <- plami / 
lam[i] - 1 for(k in 1:nJi) { j <- Ji[k] A2 <- A2 + wt[k] * outer(pmi[k,], pmj[k,]) if(logi) A2log <- A2log + wt[k] * rho/(plami[k]+rho) * rho/(plamj[k]+rho) * outer(pmi[k,], pmj[k,]) # delta sufficient statistic # delta_i h(X[j] | X[-c(i,j)]) # = h(X[j] | X[-j]) - h(X[j] | X[-c(i,j)]) # = h(X[j] | X) - h(X[j] | X[-i]) # delta_j h(X[i] | X[-c(i,j)]) # = h(X[i] | X[-i]) - h(X[i] | X[-c(i,j)]) # = h(X[i] | X) - h(X[i] | X[-j]) deltaiSj <- m[j, ] - pmj[k,] deltajSi <- m[i, ] - pmi[k,] A3 <- A3 + outer(deltaiSj, deltajSi) if(logi){ deltaiSjlog <- rho*(m[j, ]/(lam[j]+rho) - pmj[k,]/(plamj[k]+rho)) deltajSilog <- rho*(m[i, ]/(lam[i]+rho) - pmi[k,]/(plami[k]+rho)) A3log <- A3log + outer(deltaiSjlog, deltajSilog) } } } } }) } } # ......... end of loop computation ............... ## Matrix Sigma Sigma <- A1+A2+A3 if(spill) { # save internal data (with matrices unnormalised) internals <- c(internals, list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW), if(logi) list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog) else NULL, if(reweighting) list(gradient=gradient) else NULL, if(saveterms) list(lamdel=lamdel, momdel=momdel) else NULL) # return internal data if no further calculation needed if(!spill.vc && !logi) return(internals) } # ........... calculate variance/covariance matrix for MPL ......... 
if(!reweighting) { # Normalise A1 <- A1/areaW Sigma <- Sigma/areaW # Enforce exact symmetry A1 <- (A1 + t(A1))/2 Sigma <- (Sigma + t(Sigma))/2 # calculate inverse negative Hessian U <- checksolve(A1, matrix.action, , "variance") } else { # Normalise gradient <- gradient/areaW Sigma <- Sigma/areaW # Enforce exact symmetry gradient <- (gradient + t(gradient))/2 Sigma <- (Sigma + t(Sigma))/2 # calculate inverse negative Hessian U <- checksolve(gradient, matrix.action, , "variance") } # compute variance-covariance vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW dimnames(vc.mpl) <- dnames # return variance-covariance matrix, if model was fitted by MPL if(!logi) { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ###### Everything below is only computed for logistic fits ####### ## Matrix Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- model$internal$logistic$how if(how %in% c("given", "grid", "transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ### Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## Changed by ER 2013/06/14 to use the new quadscheme.logi ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## 
Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = model$internal$logistic$nd) D2 <- Q2$dummy Q2$dummy$Dinfo <- D2$Dinfo Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=model$trend, interaction = model$interaction, method = model$method, correction = model$correction, rbord = model$rbord, covariates = model$covariates) arglist <- append(arglist, model$internal$logistic$extraargs) model2 <- do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(model2, check = FALSE, new.coef = new.coef) ## New model matrix mall2 <- model.matrix(model2) okall2 <- getglmsubset(model2) # index vectors of stratrand cell indices of dummy points inD <- model$internal$logistic$inD inD2 <- model2$internal$logistic$inD # Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- (bdist.points(D) >= R) ii2 <- (bdist.points(D2) >= R) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } # OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) # cif and suff. stat. 
for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,][ok1,] mdum2 <- mall2[!Z2,][ok2,] # finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) ## Attaching to Sigma2log calculated above dimnames(Sigma2log) <- dnames if(spill) { # return internal data only (with matrices unnormalised) internals <- c(internals, list(Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } ## .. Calculate variance-covariance matrix for logistic fit ........... # normalise Slog <- Slog/areaW Sigma1log <- Sigma1log/areaW Sigma2log <- Sigma2log/areaW # evaluate Ulog <- checksolve(Slog, matrix.action, , "variance") vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW dimnames(vc.logi) <- dnames # if(spill.vc) return(list(varcov=vc.logi, internals=internals)) return(vc.logi) } # vcalcGibbs from Ege Rubak and J-F Coeurjolly ## 2013/06/14, modified by Ege to handle logistic case as well vcalcGibbsSpecial <- function(fit, ..., spill=FALSE, spill.vc=FALSE, special.alg = TRUE, matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent")) { matrix.action <- match.arg(matrix.action) logi.action <- match.arg(logi.action) spill <- spill || spill.vc ## Interaction name: iname <- fit$interaction$name ## Does the model have marks which are in the trend? marx <- is.marked(fit) && ("marks" %in% variablesinformula(fit$trend)) ## The full data and window: Xplus <- data.ppm(fit) Wplus <- as.owin(Xplus) ## Fitted parameters and the parameter dimension p (later consiting of p1 trend param. 
and p2 interaction param.): theta <- coef(fit) p <- length(theta) ## Number of points: n <- npoints(Xplus) ## Using the faster algorithms for special cases if(special.alg && fit$method != "logi"){ param <- coef(fit) switch(iname, "Strauss process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, reach(fit$interaction), exp(coef(fit)[2]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Piecewise constant pairwise interaction process"={ ## Only implemented for non-marked case: if(!marx) return(vcovPairPiece(Xplus, fit$interaction$par$r, exp(coef(fit)[-1]), matrix.action, spill=spill, spill.vc=spill.vc)) }, "Multitype Strauss process"={ matR <- fit$interaction$par$radii R <- c(matR[1,1], matR[1,2], matR[2,2]) ## Only implemented for 2 types with equal interaction range: if(ncol(matR)==2 && marx){ n <- length(theta) res <- vcovMultiStrauss(Xplus, R, exp(theta[c(n-2,n-1,n)]), matrix.action,spill=spill,spill.vc=spill.vc) if(!spill) { res <- contrastmatrix(res, 2) dimnames(res) <- list(names(theta), names(theta)) } return(res) } } ) } ## Matrix specifying equal points in the two patterns in the call to eval below: E <- matrix(rep.int(1:n, 2), ncol = 2) ## Eval. 
the interaction potential difference at all points (internal spatstat function): # V1 <- fit$interaction$family$eval(Xplus, Xplus, E, fit$interaction$pot, fit$interaction$par, fit$correction) V1 <- evalInteraction(Xplus, Xplus, E, as.interact(fit), fit$correction) ## Calculate parameter dimensions and correct the contrast type parameters: p2 <- ncol(V1) p1 <- p-p2 if(p1>1) theta[2:p1] <- theta[2:p1] + theta[1] ## V1 <- evalInteraction(Q, Xplus, union.quad(Q), fit$interaction, fit$correction) POT <- attr(V1, "POT") attr(V1, "POT") <- NULL ## Adding the constant potential as first column (one column per type for multitype): if(!marx){ V1 <- cbind(1, V1) colnames(V1) <- names(theta) } else{ lev <- levels(marks(Xplus)) ## Indicator matrix for mark type attached to V1: tmp <- matrix(marks(Xplus), nrow(V1), p1)==matrix(lev, nrow(V1), p-ncol(V1), byrow=TRUE) colnames(tmp) <- lev V1 <- cbind(tmp,V1) } ## Matrices for differences of potentials: E <- matrix(rep.int(1:(n-1), 2), ncol = 2) dV <- V2 <- array(0,dim=c(n,n,p)) for(k in 1:p1){ V2[,,k] <- matrix(V1[,k], n, n, byrow = FALSE) } for(k in (p1+1):p){ diag(V2[,,k]) <- V1[,k] } for(j in 1:n){ ## Fast evaluation for pairwise interaction processes: if(fit$interaction$family$name=="pairwise"){ V2[-j,j,-(1:p1)] <- V1[-j,-(1:p1)]-POT[-j,j,] } else{ V2[-j,j,-(1:p1)] <- fit$interaction$family$eval(Xplus[-j], Xplus[-j], E, fit$interaction$pot, fit$interaction$par, fit$correction) ## Q <- quadscheme(Xplus[-j],emptyppp) ## V2[-j,j,-1] <- evalInteraction(Q, Xplus[-j], Xplus[-j], fit$interaction, fit$correction) } for(k in 1:p){ dV[,j,k] <- V1[,k] - V2[,j,k] } } ## Ratio of first and second order Papangelou - 1: frac <- 0*dV[,,1] for(k in (p1+1):p){ frac <- frac + dV[,,k]*theta[k] } frac <- exp(-frac)-1 ## In the rest we restrict attention to points in the interior: ## The interaction range: R <- reach(fit$interaction) ## The reduced window, area and point pattern: W<-erosion.owin(Wplus,R) areaW <- area.owin(W) ## Interior points 
determined by bdist.points: IntPoints <- bdist.points(Xplus)>=R X <- Xplus[IntPoints] ## Making a logical matrix, I, indicating R-close pairs which are in the interior: D <- pairdist(Xplus) diag(D) <- Inf I <- (D<=R) & outer(IntPoints,IntPoints, "&") ## Matrix A1: A1 <- t(V1[IntPoints,])%*%V1[IntPoints,] ## Matrix A2: A2 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2[k,l] <- A2[l,k] <- sum(I*V2[,,k]*frac*t(V2[,,l])) } } ## Matrix A3: A3 <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A3[k,l] <- A3[l,k] <- sum(I*dV[,,k]*t(dV[,,l])) } } ## Matrix Sigma (A1+A2+A3): Sigma<-A1+A2+A3 if(spill) { # save internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- list(names(theta), names(theta)) internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma, areaW=areaW) # return internal data, if model fitted by MPL if(!spill.vc && fit$method != "logi") return(internals) } # ......... Calculate variance-covariance matrix for MPL ........ # normalise A1 <- A1/areaW Sigma <- Sigma/areaW # evaluate U <- checksolve(A1, matrix.action, , "variance") vc.mpl <- if(is.null(U)) matrix(NA, p, p) else U %*% Sigma %*% U / areaW ## Convert to treatment contrasts if(marx) vc.mpl <- contrastmatrix(vc.mpl, p1) dimnames(vc.mpl) <- list(names(theta), names(theta)) # Return result for standard ppm method: if(fit$method!="logi") { if(spill.vc) return(list(varcov=vc.mpl, internals=internals)) return(vc.mpl) } ######################################################################## ###### The remainder is only executed when the method is logistic ###### ######################################################################## ### Most of this is copy/pasted from vcalcGibbsGeneral correction <- fit$correction Q <- quad.ppm(fit) D <- dummy.ppm(fit) rho <- fit$internal$logistic$rho ## If dummy intensity rho is unknown we estimate it if(is.null(rho)) rho <- npoints(D)/(area.owin(D)*markspace.integral(D)) X <- data.ppm(fit) Z <- is.data(Q) # determine which data points entered 
into the sum in the pseudolikelihood # (border correction, nonzero cif) # data and dummy: okall <- getglmsubset(fit) ## # data only: ## ok <- okall[Z] # conditional intensity lambda(X[i] | X) = lambda(X[i] | X[-i]) # data and dummy: lamall <- fitted(fit, check = FALSE) ## # data only: ## lam <- lamall[Z] # sufficient statistic h(X[i] | X) = h(X[i] | X[-i]) # data and dummy: mall <- model.matrix(fit) mokall <- mall[okall, , drop=FALSE] ## # data only: ## m <- mall[Z, , drop=FALSE] ## mok <- m[ok, , drop=FALSE] # Sensitivity matrix S and A1 matrix for logistic case Slog <- sumouter(mokall, w = lamall[okall]*rho/(lamall[okall]+rho)^2) A1log <- sumouter(mokall, w = lamall[okall]*rho*rho/(lamall[okall]+rho)^3) ## Define W1, W2 and dW for the logistic method based on V1, V2 and dV (frac is unchanged) lambda1 <- exp(rowSums(matrix(theta,n,p,byrow=TRUE)*V1)) W1 <- V1*rho/(lambda1+rho) lambda2 <- exp(apply(array(rep(theta,each=n*n),dim=c(n,n,p))*V2, c(1,2), sum)) W2 <- V2 dW <- dV for(k in 1:p){ W2[,,k] <- V2[,,k] * rho/(lambda2+rho) for(j in 1:n){ dW[,j,k] <- W1[,k] - W2[,j,k] } } ## Matrices A2log and A3log for the first component Sigma1log of the variance: A2log <- A3log <- matrix(0,p,p) for(k in 1:p){ for(l in k:p){ A2log[k,l] <- A2log[l,k] <- sum(I*W2[,,k]*frac*t(W2[,,l])) A3log[k,l] <- A3log[l,k] <- sum(I*dW[,,k]*t(dW[,,l])) } } A2log <- A2log A3log <- A3log ## First variance component Sigma1log (A1log+A2log+A3log): Sigma1log <- A1log+A2log+A3log ## Resolving the dummy process type how <- fit$internal$logistic$how if(how %in% c("given", "grid", "transgrid")){ whinge <- paste("vcov is not implemented for dummy type", sQuote(how)) if(logi.action=="fatal") stop(whinge) how <- if(how=="given") "poisson" else "stratrand" if(logi.action=="warn") warning(paste(whinge,"- using", sQuote(how), "formula"), call.=FALSE) } ## Matrix Sigma2log (depends on dummy process type) switch(how, poisson={ Sigma2log <- sumouter(mokall, w = 
lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) }, binomial={ Sigma2log <- sumouter(mokall, w = lamall[okall]*lamall[okall]*rho/(lamall[okall]+rho)^3) A1vec <- t(mokall) %*% (rho*lamall[okall]/(lamall[okall]+rho)^2) Sigma2log <- Sigma2log - A1vec%*%t(A1vec)/rho*1/sum(1/(lamall[okall]+rho)) }, stratrand={ ### Dirty way of refitting model with new dummy pattern (should probably be done using call, eval, envir, etc.): ## D2 <- logi.dummy(X = X, type = "stratrand", nd = model$internal$logistic$args) ## Q2 <- quad(data=X, dummy=D2) ## Q2$dummy$Dinfo <- D2$Dinfo Q2 <- quadscheme.logi(data=X, dummytype = "stratrand", nd = fit$internal$logistic$nd) D2 <- Q2$dummy Z2 <- is.data(Q2) arglist <- list(Q=Q2, trend=fit$trend, interaction = fit$interaction, method = fit$method, correction = fit$correction, rbord = fit$rbord, covariates = fit$covariates) arglist <- append(arglist, fit$internal$logistic$extraargs) fit2 <- do.call(ppm, args = arglist) ## New cif lamall2 <- fitted(fit2, check=FALSE) ## New model matrix mall2 <- model.matrix(fit2) okall2 <- getglmsubset(fit2) # index vectors of stratrand cell indices of dummy points inD <- fit$internal$logistic$inD inD2 <- fit2$internal$logistic$inD # Dummy points inside eroded window (for border correction) if(is.finite(R) && (correction == "border")){ ii <- inside.owin(D, w = W) ii2 <- inside.owin(D2, w = W) } else{ ii <- rep.int(TRUE, npoints(D)) ii2 <- rep.int(TRUE, npoints(D2)) } # OK points of dummy pattern 1 with a valid point of dummy pattern 2 in same stratrand cell (and vice versa) okdum <- okall[!Z] okdum2 <- okall2[!Z2] ok1 <- okdum & ii & is.element(inD, inD2[okdum2 & ii2]) ok2 <- okdum2 & ii2 & is.element(inD2, inD[okdum & ii]) ## ok1 <- okdum & okdum2 & ii & is.element(inD, inD2[ii2]) ## ok2 <- okdum2 & okdum1 & ii2 & is.element(inD2, inD[ii]) ## ok1 <- ii & is.element(inD, inD2[ii2]) ## ok2 <- ii2 & is.element(inD2, inD[ii]) # cif and suff. stat. 
for valid points in dummy patterns 1 and 2 lamdum <- lamall[!Z][ok1] lamdum2 <- lamall2[!Z2][ok2] mdum <- mall[!Z,][ok1,] mdum2 <- mall2[!Z2,][ok2,] # finally calculation of Sigma2 wlam <- mdum * rho*lamdum/(lamdum+rho) wlam2 <- mdum2 * rho*lamdum2/(lamdum2+rho) Sigma2log <- t(wlam-wlam2)%*%(wlam-wlam2)/(2*rho*rho) }, stop("sorry - unrecognized dummy process in logistic fit") ) if(spill) { ## Attach dimnames to all matrices dimnames(Sigma2log) <- dimnames(Slog) <- dimnames(Sigma1log) <- dimnames(A1log) <- dimnames(A2log) <- dimnames(A3log) <- list(names(theta),names(theta)) # return internal data (with matrices unnormalised) internals <- c(internals, list(A1log=A1log, A2log=A2log, A3log=A3log, Slog=Slog, Sigma1log=Sigma1log, Sigma2log=Sigma2log, mple=vc.mpl)) if(!spill.vc) return(internals) } # ....... Compute variance-covariance for logistic fit ............. # Normalise Slog <- Slog/areaW Sigma1log <- Sigma1log/areaW Sigma2log <- Sigma2log/areaW ## Finally the result is calculated: Ulog <- checksolve(Slog, matrix.action, , "variance") vc.logi <- if(is.null(Ulog)) matrix(NA, p, p) else Ulog %*% (Sigma1log+Sigma2log) %*% Ulog / areaW # dimnames(vc.logi) <- list(names(theta), names(theta)) if(spill.vc) return(list(varcov=vc.logi, internals=internals)) return(vc.logi) } vcovPairPiece <- function(Xplus, R, gam, matrix.action, spill=FALSE, spill.vc=FALSE){ ## R is the vector of breaks (R[length(R)]= range of the pp. 
## gam is the vector of weights Rmax <- R[length(R)] ## Xplus : point process observed in W+R ## Extracting the window and calculating area: Wplus<-as.owin(Xplus) W<-erosion.owin(Wplus,Rmax) areaW <- area.owin(W) ## Interior points determined by bdist.points: IntPoints <- bdist.points(Xplus)>=Rmax X <- Xplus[IntPoints] ## Matrix D with pairwise distances between points and infinite distance ## between a point and itself: Dplus<-pairdist(Xplus) D <- pairdist(X) diag(D) <- diag(Dplus) <- Inf ## logical matrix, I, indicating R-close pairs: p<-length(R) Tplus<-T<-matrix(0,X$n,p) I<-Iplus<-list() for (i in 1:p){ if (i==1){ Iplus[[1]]<- Dplus <=R[1] I[[1]] <- D<=R[1] } else { Iplus[[i]]<- ((Dplus>R[i-1]) & (Dplus <=R[i])) I[[i]] <- ((D>R[i-1]) & (D <=R[i])) } ## Vector T with the number of $R$-close neighbours to each point: Tplus[,i]<-colSums(Iplus[[i]])[IntPoints] T[,i] <- colSums(I[[i]]) } ## Matrices A1, A2 and A3 are initialized to zero: A1 <- A2 <- A3 <- matrix(0,p+1,p+1) ## A1 and A3: A1[1,1] <- npoints(X) for (j in (2:(p+1))){ A1[1,j]<-A1[j,1]<-sum(Tplus[,j-1]) A3[j,j]<-sum(T[,j-1]) for (k in (2:(p+1))){ A1[j,k]<-sum(Tplus[,j-1] * Tplus[,k-1]) } } ## A2: for (j in (2:(p+1))){ A2[1,1]<-A2[1,1]+(gam[j-1]^(-1)-1)*sum(T[,j-1]) for (l in (2:(p+1))){ if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1] A2[1,j]<-A2[1,j]+(gam[l-1]^(-1)-1)*sum(T[,l-1]*(vj) ) } A2[j,1]<-A2[1,j] for (k in (2:(p+1))){ for (l in (2:(p+1))){ if (l==j) vj<-Tplus[,j-1]-1 else vj<-Tplus[,j-1] if (l==k) vk<-Tplus[,k-1]-1 else vk<-Tplus[,k-1] A2[j,k]<-A2[j,k]+ (gam[l-1]^(-1)-1)*sum(I[[l-1]]*outer(vj,vk)) } } } Sigma<-A1+A2+A3 nam <- c("(Intercept)", names(gam)) dnam <- list(nam, nam) if(spill) { # return internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma) if(!spill.vc) return(internals) } ## Calculate variance-covariance # Normalise: A1 <- A1/areaW Sigma <- Sigma/areaW U <- checksolve(A1, 
matrix.action, , "variance") mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW dimnames(mat) <- dnam if(spill.vc) return(list(varcov=mat, internals=internals)) return(mat) } vcovMultiStrauss <- function(Xplus, vecR, vecg, matrix.action, spill=FALSE, spill.vc=FALSE){ ## Xplus : marked Strauss point process ## with two types ## observed in W+R (R=max(R11,R12,R22)) ## vecg = estimated parameters of interaction parameters ## ordered as the output of ppm, i.e. vecg=(g11,g12,g22) ## vecR = range for the diff. strauss ordered a vecg(R11,R12,R22) R <- max(vecR) R11<-vecR[1];R12<-vecR[2];R22<-vecR[3] ## Extracting the window and calculating area: Wplus<-as.owin(Xplus) W<-erosion.owin(Wplus,R) areaW <- area.owin(W) X1plus<-Xplus[Xplus$marks==levels(Xplus$marks)[1]] X2plus<-Xplus[Xplus$marks==levels(Xplus$marks)[2]] ## Interior points determined by bdist.points: IntPoints1 <- bdist.points(X1plus)>=R IntPoints2 <- bdist.points(X2plus)>=R X1 <- X1plus[IntPoints1] X2 <- X2plus[IntPoints2] ## Matrix D with pairwise distances between points and infinite distance ## between a point and itself: D1plus<-pairdist(X1plus) D1 <- pairdist(X1) diag(D1) <- diag(D1plus) <- Inf D2plus<-pairdist(X2plus) D2 <- pairdist(X2) diag(D2) <- diag(D2plus) <- Inf D12plus<-crossdist(X1,X2plus) T12plus<-rowSums(D12plus<=R12) D21plus<-crossdist(X2,X1plus) T21plus<-rowSums(D21plus<=R12) I12<-crossdist(X1,X2)<=R12 I21<-crossdist(X2,X1)<=R12 T12<-rowSums( I12) T21<-rowSums(I21) ## logical matrix, I, indicating R-close pairs: I1plus<- D1plus <=R11 I1 <- D1<=R11 I2plus<- D2plus <=R22 I2 <- D2<=R22 ## Vector T with the number of $R$-close neighbours to each point: T1plus<-colSums(I1plus)[IntPoints1] T1 <- colSums(I1) T2plus<-colSums(I2plus)[IntPoints2] T2 <- colSums(I2) ## Matrices A1, A2 and A3 are initialized to zero: A1 <- A2 <- A3 <- matrix(0,5,5) ## A1 is filled: A1[1,1]<-npoints(X1) A1[1,3]<-A1[3,1]<-sum(T1plus) A1[1,4]<-A1[4,1]<-sum(T12plus) A1[2,2]<-npoints(X2) 
A1[2,5]<-A1[5,2]<-sum(T2plus) A1[2,4]<-A1[4,2]<-sum(T21plus) A1[3,3]<-sum(T1plus*T1plus) A1[3,4]<-A1[4,3]<-sum(T1plus*T12plus) A1[5,5]<-sum(T2plus*T2plus) A1[4,5]<-A1[5,4]<-sum(T2plus*T21plus) A1[4,4]<-sum(T12plus*T12plus)+sum(T21plus*T21plus) ## A3 is filled: A3[3,3]<-sum(T1) A3[5,5]<-sum(T2) A3[4,4]<-sum(T12)+sum(T21) ## A2 is filled: gamInv<-vecg^(-1)-1 gi1<-gamInv[1];gi12<-gamInv[2];gi2<-gamInv[3] A2[1,1]<-sum(T1)*gi1 A2[1,2]<-A2[2,1]<-sum(T12)*gi12 A2[1,3]<-A2[3,1]<-sum(T1*(T1plus-1))*gi1 A2[1,5]<-A2[5,1]<-sum(T21*T2plus)*gi12 A2[1,4]<-A2[4,1]<-gi1*sum(T1*(T12plus))+gi12*sum(T21*(T21plus-1)) A2[2,2]<-sum(T2)*gi2 A2[2,3]<-A2[3,2]<-sum(T12*T1plus)*gi12 A2[2,5]<-A2[5,2]<-sum(T2*(T2plus-1))*gi2 A2[2,4]<-A2[4,2]<-gi2*sum(T2*(T21plus))+gi12*sum(T12*(T12plus-1)) A2[3,3]<-gi1*sum(I1*outer(T1plus-1,T1plus-1)) A2[3,5]<-A2[5,3]<- gi12*sum(I12*outer(T1plus,T2plus)) A2[3,4]<-A2[4,3]<-gi1*sum(I1*outer(T1plus-1,T12plus))+gi12*sum(I12*outer(T1plus,T21plus-1)) A2[5,5]<-gi2*sum(I2*outer(T2plus-1,T2plus-1)) A2[4,5]<-A2[5,4]<-gi2*sum(I2*outer(T2plus-1,T21plus))+gi12*sum(I21*outer(T2plus,T12plus-1)) A2[4,4]<-gi1*sum(I1*outer(T12plus,T12plus))+gi2*sum(I2*outer(T21plus,T21plus))+ gi12*sum(I12*outer(T12plus-1,T21plus-1))+gi12*sum(I21*outer(T21plus-1,T12plus-1)) Sigma<-A1+A2+A3 nam <- c(levels(marks(Xplus)), names(vecg)) dnam <- list(nam, nam) if(spill) { # return internal data (with matrices unnormalised) dimnames(A1) <- dimnames(A2) <- dimnames(A3) <- dimnames(Sigma) <- dnam internals <- list(A1=A1, A2=A2, A3=A3, Sigma=Sigma) if(!spill.vc) return(internals) } ## Calculate variance-covariance # Normalise: A1 <- A1/areaW Sigma <- Sigma/areaW U <- checksolve(A1, matrix.action, , "variance") mat <- if(is.null(U)) matrix(NA, length(nam), length(nam)) else U%*%Sigma%*%U / areaW dimnames(mat) <- dnam if(spill.vc) return(list(varcov=mat, internals=internals)) return(mat) } # Convert the first p rows & columns of variance matrix x # to variances of treatment contrasts contrastmatrix <- 
function(x,p){ mat <- x ## Correct column and row 1: for(i in 2:p){ mat[1,i] <- mat[i,1] <- x[1,i]-x[1,1] } ## Correct columns and rows 2,...,p: for(i in 2:p){ for(j in 2:p){ mat[i,j] <- x[1,1]-x[1,i]-x[1,j]+x[i,j] } for(j in (p+1):ncol(x)){ mat[i,j] <- mat[j,i] <- x[i,j]-x[1,j] } } mat } checksolve <- function(M, action, descrip, target="") { Mname <- short.deparse(substitute(M)) Minv <- try(solve(M), silent=(action=="silent")) if(!inherits(Minv, "try-error")) return(Minv) if(missing(descrip)) descrip <- paste("the matrix", sQuote(Mname)) whinge <- paste0("Cannot compute ", target, ": ", descrip, " is singular") switch(action, fatal=stop(whinge, call.=FALSE), warn= warning(whinge, call.=FALSE), silent={}) return(NULL) } vcov.ppm } ) suffloc <- function(object) { verifyclass(object, "ppm") if(!is.poisson(object)) stop("Internals not available for Gibbs models") return(vcov(object, what="internals")$suff) } spatstat/R/badgey.R0000755000176000001440000001627412237642727013755 0ustar ripleyusers# # # badgey.S # # $Revision: 1.12 $ $Date: 2013/04/25 06:37:43 $ # # Hybrid Geyer process # # BadGey() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # BadGey <- local({ # ........... auxiliary functions .............. delBG <- function(i, r, sat) { r <- r[-i] if(length(r) == length(sat)) { r <- r[-i] sat <- sat[-i] } else if(length(sat) == 1) { r <- r[-i] } else stop("Mismatch in dimensions of arguments r and sat") nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Geyer(r, sat)) return(BadGey(r, sat)) } # .............. template .................... 
BlankBG <- list( name = "hybrid Geyer process", creator = "BadGey", family = "pairsat.family", # will be evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) for(i in 1:nr) out[,,i] <- (d <= r[i]) out }, par = list(r = NULL, sat=NULL), # to fill in later parnames = c("interaction radii", "saturation parameters"), init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || !all(r > 0)) stop("interaction radii r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction radii r must be strictly increasing") if(!is.numeric(sat) || any(sat < 0)) stop("saturation parameters must be nonnegative numbers") if(any(ceiling(sat) != floor(sat))) warning("saturation parameter has a non-integer value") if(length(sat) != length(r) && length(sat) != 1) stop("vectors r and sat must have equal length") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[0,", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=round(gn,4))) }, valid = function(coeffs, self) { # interaction parameters gamma must be # non-NA # finite, if sat > 0 # less than 1, if sat = Inf gamma <- (self$interpret)(coeffs, self)$param$gammas sat <- self$par$sat if(any(is.na(gamma))) return(FALSE) return(all((is.finite(gamma) | sat == 0) & (gamma <= 1 | sat != Inf))) }, project = function(coeffs, self){ loggammas <- as.numeric(coeffs) sat <- self$par$sat r <- self$par$r good <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0) if(all(good)) return(NULL) if(!any(good)) return(Poisson()) bad <- !good if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delBG(firstbad, r, sat)) } else { 
# consider all candidate submodels subs <- lapply(which(bad), delBG, r=r, sat=sat) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL, # to be added later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ..., halfway=FALSE) { # fast evaluator for BadGey interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for BadGey") r <- potpars$r sat <- potpars$sat # ensure r and sat have equal length if(length(r) != length(sat)) { if(length(r) == 1) r <- rep.int(r, length(sat)) else if(length(sat) == 1) sat <- rep.int(sat, length(r)) else stop("lengths or r and sat do not match") } # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } nterms <- length(r) answer <- matrix(, nrow=nU, ncol=nterms) for(k in 1:nterms) { # first determine saturated pair counts counts <- strausscounts(U, X, r[k], EqualPairs) satcounts <- pmin.int(sat[k], counts) # trapdoor used by 
suffstat() if(halfway) answer[,k] <- satcounts else if(sat[k] == Inf) answer[,k] <- 2 * satcounts else { # extract counts for data points Uindex <- EqualPairs[,2] Xindex <- EqualPairs[,1] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in saturated counts of other data points change <- geyercounts(U, X, r[k], sat[k], Xcounts, EqualPairs) answer[,k] <- satcounts + change } } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) } ) class(BlankBG) <- "interact" BadGey <- function(r, sat) { instantiate.interact(BlankBG, list(r=r, sat=sat)) } BadGey }) spatstat/R/marktable.R0000755000176000001440000000172112237642727014453 0ustar ripleyusers# # marktable.R # # Tabulate mark frequencies in r-neighbourhood of each point # for multitype point patterns # # $Revision: 1.6 $ $Date: 2013/02/22 05:34:02 $ # # Requested by Ian Robertson "marktable" <- function(X, R, exclude=TRUE) { verifyclass(X, "ppp") if(!is.marked(X, dfok=FALSE)) stop("point pattern has no marks") stopifnot(is.numeric(R) && length(R) == 1 && R > 0) stopifnot(is.logical(exclude) && length(exclude) == 1) m <- marks(X) if(!is.factor(m)) stop("marks must be a factor") # identify close pairs p <- closepairs(X,R,what="indices") pi <- p$i pj <- p$j if(!exclude) { # add identical pairs n <- X$n pi <- c(pi, 1:n) pj <- c(pj, 1:n) } # tabulate i <- factor(pi, levels=seq_len(X$n)) mj <- m[pj] mat <- table(point=i, mark=mj) return(mat) } spatstat/R/smoothfv.R0000755000176000001440000000335212237642727014360 0ustar ripleyusers# # smoothfv.R # # $Revision: 1.12 $ $Date: 2013/08/29 05:04:21 $ # smooth.fv <- function(x, which="*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) { message("smooth.fv will soon be deprecated: use the generic Smooth with a capital S") # .Deprecated("Smooth.fv", package="spatstat", # msg="smooth.fv is deprecated: use the generic Smooth with a capital S") Smooth(x, which=which, ..., method=method, xinterval=xinterval) } Smooth.fv <- 
function(X, which="*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) { x <- X stopifnot(is.character(which)) method <- match.arg(method) if(!is.null(xinterval)) check.range(xinterval) if(length(which) == 1 && which %in% .Spatstat.FvAbbrev) { if(which == ".x") stop("Cannot smooth the function argument") which <- fvnames(x, which) } if(any(nbg <- !(which %in% names(x)))) stop(paste("Unrecognised column", ngettext(sum(nbg), "name", "names"), commasep(sQuote(which[nbg])), "in argument", sQuote("which"))) xx <- x[[fvnames(x, ".x")]] # process each column of function values for(ynam in which) { yy <- x[[ynam]] ok <- is.finite(yy) if(!is.null(xinterval)) ok <- ok & inside.range(xx, xinterval) switch(method, smooth.spline = { ss <- smooth.spline(xx[ok], yy[ok], ...) yhat <- predict(ss, xx[ok])$y }, loess = { df <- data.frame(x=xx[ok], y=yy[ok]) lo <- loess(y ~ x, df, ...) yhat <- predict(lo, df[,"x", drop=FALSE]) }) yy[ok] <- yhat x[[ynam]] <- yy } return(x) } spatstat/R/plot.im.R0000755000176000001440000006141012237642727014074 0ustar ripleyusers# # plot.im.R # # $Revision: 1.68 $ $Date: 2013/10/02 05:35:12 $ # # Plotting code for pixel images # # plot.im # image.im # contour.im # persp.im # ########################################################################### plot.im <- local({ # recognised additional arguments to image.default() and axis() plotparams <- imageparams <- c("main", "asp", "sub", "axes", "ann", "cex", "font", "cex.axis", "cex.lab", "cex.main", "cex.sub", "col.axis", "col.lab", "col.main", "col.sub", "font.axis", "font.lab", "font.main", "font.sub") axisparams <- c("cex", "cex.axis", "cex.lab", "col.axis", "col.lab", "font.axis", "font.lab") # auxiliary functions image.doit <- function(..., extrargs=imageparams) { do.call.matched("image.default", resolve.defaults(...), extrargs=extrargs) } clamp <- function(x, v, tol=0.02 * diff(v)) { ok <- (x >= v[1] - tol) & (x <= v[2] + tol) x[ok] } cellbreaks <- function(x, dx) { nx <- length(x) seq(x[1] 
- dx/2, x[nx] + dx/2, length.out=nx+1) } log10orNA <- function(x) { y <- rep(NA_real_, length(x)) ok <- (x > 0) y[ok] <- log10(x[ok]) return(y) } # main function PlotIm <- function(x, ..., col=NULL, valuesAreColours=NULL, log=FALSE, ribbon=TRUE, ribside=c("right", "left", "bottom", "top"), ribsep=0.15, ribwid=0.05, ribn=1024, ribscale=1, ribargs=list(), colargs=list()) { main <- short.deparse(substitute(x)) verifyclass(x, "im") dotargs <- list(...) stopifnot(is.list(ribargs)) ribside <- match.arg(ribside) zlim <- dotargs$zlim x <- repair.image.xycoords(x) xtype <- x$type do.log <- identical(log, TRUE) if(do.log && !(x$type %in% c("real", "integer"))) stop(paste("Log transform is undefined for an image of type", sQuote(xtype))) # determine whether pixel values are to be treated as colours if(!is.null(valuesAreColours)) { # argument given - validate stopifnot(is.logical(valuesAreColours)) if(valuesAreColours) { # pixel values must be factor or character if(!xtype %in% c("factor", "character")) { warning(paste("Pixel values of type", sQuote(xtype), "are not interpretable as colours")) valuesAreColours <- FALSE } else if(!is.null(col)) { # colour info provided: contradictory warning(paste("Pixel values are taken to be colour values,", "because valuesAreColours=TRUE;", "the colour map (argument col) is ignored"), call.=FALSE) col <- NULL } if(do.log) warning(paste("Pixel values are taken to be colour values,", "because valuesAreColours=TRUE;", "the argument log=TRUE is ignored"), call.=FALSE) } } else if(!is.null(col)) { # argument 'col' controls colours valuesAreColours <- FALSE } else { # default : determine whether pixel values are colours strings <- switch(xtype, character = { as.vector(x$v) }, factor = { levels(x) }, { NULL }) valuesAreColours <- is.character(strings) && !inherits(try(col2rgb(strings), silent=TRUE), "try-error") if(valuesAreColours) cat("Interpreting pixel values as colours\n") } # if(valuesAreColours) { # colour-valued images are plotted using the 
code for factor images # with the colour map equal to the levels of the factor switch(xtype, factor = { col <- levels(x) }, character = { x <- eval.im(factor(x)) xtype <- "factor" col <- levels(x) }, { warning(paste("Pixel values of type", sQuote(xtype), "are not interpretable as colours")) }) # colours not suitable for ribbon ribbon <- FALSE } # transform pixel values to log scale? if(do.log) { rx <- range(x, finite=TRUE) if(all(rx > 0)) { x <- eval.im(log10(x)) } else { if(any(rx < 0)) warning(paste("Negative pixel values", "omitted from logarithmic colour map;", "range of values =", prange(rx)), call.=FALSE) if(!all(rx < 0)) warning("Zero pixel values omitted from logarithmic colour map", call.=FALSE) x <- eval.im(log10orNA(x)) } xtype <- x$type Log <- log10 Exp <- function(x) { 10^x } } else { Log <- Exp <- function(x) { x } } imagebreaks <- NULL ribbonvalues <- ribbonbreaks <- NULL # predetermined colour map? # (i.e. mapping from values to colours) colmap <- if(inherits(col, "colourmap")) col else NULL # colour map determined by a function? 
if(is.null(colmap) && (is.null(col) || is.function(col))) { colfun <- if(is.function(col)) col else spatstat.options("image.colfun") colargnames <- names(formals(colfun)) if("range" %in% colargnames && xtype %in% c("real", "integer")) { # continuous vrange <- range(range(x, finite=TRUE), zlim) cvals <- try(do.call.matched(colfun, append(list(range=vrange), colargs)), silent=TRUE) if(!inherits(cvals, "try-error")) { colmap <- if(inherits(cvals, "colourmap")) cvals else if(is.character(cvals)) colourmap(cvals, range=vrange) else NULL } } else if("inputs" %in% colargnames && xtype != "real") { # discrete vpossible <- switch(xtype, logical = c(FALSE, TRUE), factor = levels(x), unique(as.matrix(x))) if(!is.null(vpossible) && length(vpossible) < 256) { cvals <- try(do.call.matched(colfun, append(list(inputs=vpossible), colargs)), silent=TRUE) if(!inherits(cvals, "try-error")) { colmap <- if(inherits(cvals, "colourmap")) cvals else if(is.character(cvals)) colourmap(cvals, inputs=vpossible) else NULL } } } } switch(xtype, real = { vrange <- range(x, finite=TRUE) vrange <- range(zlim, vrange) if(!is.null(colmap)) { # explicit colour map s <- summary(colmap) if(s$discrete) stop("Discrete colour map is not applicable to real values") imagebreaks <- s$breaks vrange <- range(imagebreaks) col <- s$outputs } trivial <- (diff(vrange) <= .Machine$double.eps) if(!trivial) { # ribbonvalues: domain of colour map (pixel values) # ribbonrange: (min, max) of pixel values in image # nominalrange: range of values shown on ribbon # nominalmarks: values shown on ribbon at tick marks # ribbonticks: pixel values of tick marks # ribbonlabels: text displayed at tick marks ribbonvalues <- seq(from=vrange[1], to=vrange[2], length.out=ribn) ribbonrange <- vrange nominalrange <- Log(ribscale * Exp(ribbonrange)) nominalmarks <- axisTicks(nominalrange, log=do.log) ribbonticks <- Log(nominalmarks/ribscale) ribbonlabels <- paste(nominalmarks) } }, integer = { values <- as.vector(x$v) values <- 
values[!is.na(values)] uv <- unique(values) vrange <- range(uv, finite=TRUE) vrange <- range(zlim, vrange) nvalues <- length(uv) trivial <- (nvalues < 2) if(!trivial){ nominalrange <- Log(ribscale * Exp(vrange)) nominalmarks <- axisTicks(nominalrange, log=do.log) nominalmarks <- nominalmarks[nominalmarks %% 1 == 0] ribbonticks <- Log(nominalmarks/ribscale) ribbonlabels <- paste(nominalmarks) if(!do.log && identical(all.equal(ribbonticks, vrange[1]:vrange[2]), TRUE)) { # each possible pixel value will appear in ribbon ribbonvalues <- vrange[1]:vrange[2] imagebreaks <- c(ribbonvalues - 0.5, vrange[2] + 0.5) ribbonrange <- range(imagebreaks) ribbonticks <- ribbonvalues ribbonlabels <- paste(ribbonticks * ribscale) } else { # not all possible values will appear in ribbon ribn <- min(ribn, diff(vrange)+1) ribbonvalues <- seq(from=vrange[1], to=vrange[2], length.out=ribn) ribbonrange <- vrange } } if(!is.null(colmap)) { # explicit colour map s <- summary(colmap) imagebreaks <- if(!s$discrete) s$breaks else c(s$inputs[1] - 0.5, s$inputs + 0.5) col <- s$outputs } }, logical = { values <- as.integer(as.vector(x$v)) values <- values[!is.na(values)] uv <- unique(values) trivial <- (length(uv) < 2) vrange <- c(0,1) imagebreaks <- c(-0.5, 0.5, 1.5) ribbonvalues <- c(0,1) ribbonrange <- range(imagebreaks) ribbonbreaks <- imagebreaks ribbonticks <- ribbonvalues ribbonlabels <- c("FALSE", "TRUE") if(!is.null(colmap)) col <- colmap(c(FALSE,TRUE)) }, factor = { lev <- levels(x) nvalues <- length(lev) trivial <- (nvalues < 2) # ensure all factor levels plotted separately fac <- factor(lev, levels=lev) intlev <- as.integer(fac) imagebreaks <- c(intlev - 0.5, max(intlev) + 0.5) ribbonvalues <- intlev ribbonrange <- range(imagebreaks) ribbonbreaks <- imagebreaks ribbonticks <- ribbonvalues ribbonlabels <- paste(lev) vrange <- range(intlev) if(!is.null(colmap)) col <- colmap(fac) }, character = { x <- eval.im(factor(x)) lev <- levels(x) nvalues <- length(lev) trivial <- (nvalues < 2) # 
ensure all factor levels plotted separately fac <- factor(lev, levels=lev) intlev <- as.integer(fac) imagebreaks <- c(intlev - 0.5, max(intlev) + 0.5) ribbonvalues <- intlev ribbonrange <- range(imagebreaks) ribbonbreaks <- imagebreaks ribbonticks <- ribbonvalues ribbonlabels <- paste(lev) vrange <- range(intlev) if(!is.null(colmap)) col <- colmap(fac) }, stop(paste("Do not know how to plot image of type", sQuote(xtype))) ) # determine colour map if(!is.null(colmap)) { # explicit colour map object colourinfo <- list(breaks=imagebreaks, col=col) } else { # compile colour information # start with default colour values colfun <- spatstat.options("image.colfun") # we already know that colfun(n) works colourinfo <- if(!is.null(imagebreaks)) { list(breaks=imagebreaks, col=colfun(length(imagebreaks)-1)) } else list(col=colfun(256)) if(!is.null(col) && is.character(col)) { # overwrite colour info with user-specified colour values colourinfo$col <- col if(!is.null(colourinfo$breaks)) { # check consistency nvalues <- length(colourinfo$breaks) - 1 if(length(col) != nvalues) stop(paste("Length of argument", dQuote("col"), paren(paste(length(col))), "does not match the number of distinct values", paren(paste(nvalues)))) } } } # colour map to be returned (invisibly) i.col <- colourinfo$col i.bks <- colourinfo$breaks output.colmap <- if(is.null(i.col)) NULL else if(inherits(i.col, "colourmap")) i.col else if(valuesAreColours) colourmap(col=i.col, inputs=i.col) else switch(xtype, integer=, real= { if(!is.null(i.bks)) { colourmap(col=i.col, breaks=i.bks) } else colourmap(col=i.col, range=vrange) }, logical={ colourmap(col=i.col, inputs=c(FALSE,TRUE)) }, character=, factor={ colourmap(col=i.col, inputs=lev) }, NULL) # ........ secret exit used by plot.listof preponly <- resolve.defaults(dotargs, list(preponly=FALSE))$preponly if(preponly) return(output.colmap) # ........ start plotting ................. 
add <- resolve.defaults(dotargs, list(add=FALSE))$add if(!identical(ribbon, TRUE) || identical(add, TRUE) || trivial) { # plot image without ribbon image.doit(list(x=cellbreaks(x$xcol, x$xstep), y=cellbreaks(x$yrow, x$ystep), z=t(x$v)), dotargs, list(useRaster=TRUE), colourinfo, list(xlab = "", ylab = ""), list(asp = 1, main = main, axes=FALSE) ) return(invisible(output.colmap)) } # determine plot region bb <- owin(x$xrange, x$yrange) Width <- diff(bb$xrange) Height <- diff(bb$yrange) Size <- max(Width, Height) switch(ribside, right={ # ribbon to right of image bb.rib <- owin(bb$xrange[2] + c(ribsep, ribsep+ribwid) * Size, bb$yrange) rib.iside <- 4 }, left={ # ribbon to left of image bb.rib <- owin(bb$xrange[1] - c(ribsep+ribwid, ribsep) * Size, bb$yrange) rib.iside <- 2 }, top={ # ribbon above image bb.rib <- owin(bb$xrange, bb$yrange[2] + c(ribsep, ribsep+ribwid) * Size) rib.iside <- 3 }, bottom={ # ribbon below image bb.rib <- owin(bb$xrange, bb$yrange[1] - c(ribsep+ribwid, ribsep) * Size) rib.iside <- 1 }) bb.all <- bounding.box(bb.rib, bb) # establish coordinate system do.call.matched("plot.default", resolve.defaults(list(x=0, y=0, type="n", axes=FALSE, asp=1, xlim=bb.all$xrange, ylim=bb.all$yrange), dotargs, list(main=main, xlab="", ylab="")), extrargs=plotparams) # plot image image.doit(list(x=cellbreaks(x$xcol, x$xstep), y=cellbreaks(x$yrow, x$ystep), z=t(x$v)), list(add=TRUE), dotargs, list(useRaster=TRUE), colourinfo, list(xlab = "", ylab = ""), list(asp = 1, main = main)) # axes for image imax <- identical(dotargs$axes, TRUE) imbox <- !identical(dotargs$box, FALSE) if(imbox) rect(x$xrange[1], x$yrange[1], x$xrange[2], x$yrange[2]) if(imax) { px <- pretty(bb$xrange) py <- pretty(bb$yrange) do.call.matched("axis", resolve.defaults( list(side=1, at=px), dotargs, list(pos=bb$yrange[1])), extrargs=axisparams) do.call.matched("axis", resolve.defaults( list(side=2, at=py), dotargs, list(pos=bb$xrange[1])), extrargs=axisparams) } # plot ribbon image containing 
the range of image values rib.npixel <- length(ribbonvalues) + 1 switch(ribside, left=, right={ # vertical ribbon rib.xcoords <- bb.rib$xrange rib.ycoords <- seq(from=bb.rib$yrange[1], to=bb.rib$yrange[2], length.out=rib.npixel) rib.z <- matrix(ribbonvalues, ncol=1) rib.useRaster <- TRUE }, top=, bottom={ # horizontal ribbon rib.ycoords <- bb.rib$yrange rib.xcoords <- seq(from=bb.rib$xrange[1], to=bb.rib$xrange[2], length.out=rib.npixel) rib.z <- matrix(ribbonvalues, nrow=1) # bug workaround rib.useRaster <- FALSE }) image.doit(list(x=rib.xcoords, y=rib.ycoords, z=t(rib.z), add=TRUE), ribargs, list(useRaster=rib.useRaster), list(main="", sub=""), dotargs, colourinfo) # box around ribbon? resol <- resolve.defaults(ribargs, dotargs) if(!identical(resol$box, FALSE)) plot(as.owin(bb.rib), add=TRUE) # scale axis for ribbon image ribaxis <- !(identical(resol$axes, FALSE) || identical(resol$ann, FALSE)) if(ribaxis) { axisargs <- list(side=rib.iside, labels=ribbonlabels) switch(ribside, right={ scal <- diff(bb.rib$yrange)/diff(ribbonrange) at <- bb.rib$yrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$xrange[2], yaxp=c(bb.rib$yrange, length(ribbonticks))) }, left={ scal <- diff(bb.rib$yrange)/diff(ribbonrange) at <- bb.rib$yrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$xrange[1], yaxp=c(bb.rib$yrange, length(ribbonticks))) }, top={ scal <- diff(bb.rib$xrange)/diff(ribbonrange) at <- bb.rib$xrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$yrange[2], xaxp=c(bb.rib$xrange, length(ribbonticks))) }, bottom={ scal <- diff(bb.rib$xrange)/diff(ribbonrange) at <- bb.rib$xrange[1] + scal * (ribbonticks - ribbonrange[1]) axisargs <- append(axisargs, list(at=at)) posargs <- list(pos=bb.rib$yrange[1], xaxp=c(bb.rib$xrange, length(ribbonticks))) }) do.call.matched("axis", 
resolve.defaults(axisargs, ribargs, dotargs, posargs), extrargs=axisparams) } # return(invisible(output.colmap)) } PlotIm }) ######################################################################## image.im <- plot.im ######################################################################## persp.im <- function(x, ..., colmap=NULL) { xname <- deparse(substitute(x)) xinfo <- summary(x) if(xinfo$type == "factor") stop("Perspective plot is inappropriate for factor-valued image") pop <- spatstat.options("par.persp") # check for common error if(!is.null(col <- list(...)$col) && !is.matrix(col)) warning("Argument col is not a matrix. Did you mean colmap?") # colour map? if(is.null(colmap)) { colinfo <- list(col=NULL) } else if(inherits(colmap, "colourmap")) { # colour map object # apply colour function to image data colval <- eval.im(colmap(x)) colval <- t(as.matrix(colval)) # strip one row and column for input to persp.default colval <- colval[-1, -1] # replace NA by arbitrary value isna <- is.na(colval) if(any(isna)) { stuff <- attr(colmap, "stuff") colvalues <- stuff$outputs colval[isna] <- colvalues[1] } # pass colour matrix (and suppress lines) colinfo <- list(col=colval, border=NA) } else { # interpret 'colmap' as colour map if(is.list(colmap) && all(c("breaks", "col") %in% names(colmap))) { breaks <- colmap$breaks colvalues <- colmap$col } else if(is.vector(colmap)) { colvalues <- colmap breaks <- quantile(x, seq(from=0,to=1,length.out=length(colvalues)+1)) if(!all(ok <- !duplicated(breaks))) { breaks <- breaks[ok] colvalues <- colvalues[ok[-1]] } } else warning("Unrecognised format for colour map") # apply colour map to image values colid <- cut.im(x, breaks=breaks, include.lowest=TRUE) colval <- eval.im(colvalues[unclass(colid)]) colval <- t(as.matrix(colval)) nr <- nrow(colval) nc <- ncol(colval) colval <- colval[-1, -1] colval[is.na(colval)] <- colvalues[1] # pass colour matrix (and suppress lines) colinfo <- list(col=colval, border=NA) } # get reasonable z 
scale while fixing x:y aspect ratio if(xinfo$type %in% c("integer", "real")) { zrange <- xinfo$range if(diff(zrange) > 0) { xbox <- as.rectangle(x) zscale <- 0.5 * mean(diff(xbox$xrange), diff(xbox$yrange))/diff(zrange) zlim <- zrange } else { zscale <- NULL zlim <- c(0,2) * xinfo$mean } } else zscale <- zlim <- NULL do.call.matched("persp", resolve.defaults(list(x=x$xcol, y=x$yrow, z=t(x$v)), list(...), pop, colinfo, list(xlab="x", ylab="y", zlab=xname), list(scale=FALSE, expand=zscale, zlim=zlim), list(main=xname), .StripNull=TRUE), funargs=.Spatstat.persp.args) } .Spatstat.persp.args <- list("x", "y", "z", "xlim", "ylim", "zlim", "xlab", "ylab", "zlab", "main", "sub", "theta", "phi", "r", "d", "scale", "expand", "col", "border", "ltheta", "lphi", "shade", "box", "axes", "nticks", "ticktype") ###################################################################### contour.im <- function (x, ..., main, axes=TRUE, add=FALSE) { defaultmain <- deparse(substitute(x)) sop <- spatstat.options("par.contour") if(missing(main)) main <- resolve.defaults(sop, list(main=defaultmain))$main if(missing(add)) add <- resolve.defaults(sop, list(add=FALSE))$add if(missing(axes)) axes <- resolve.defaults(sop, list(axes=TRUE))$axes if(!add) { # new plot if(axes) # with axes do.call.matched("plot.default", resolve.defaults( list(x = range(x$xcol), y = range(x$yrow), type = "n"), list(...), list(asp = 1, xlab = "x", ylab = "y", main = main))) else { # box without axes rec <- owin(x$xrange, x$yrange) do.call.matched("plot.owin", resolve.defaults(list(x=rec), list(...), list(main=main))) } } do.call.matched("contour.default", resolve.defaults(list(x=x$xcol, y=x$yrow, z=t(x$v)), list(add=TRUE), list(...))) return(invisible(NULL)) } spatstat/R/Gest.R0000755000176000001440000000715712237642727013424 0ustar ripleyusers# # Gest.S # # Compute estimates of nearest neighbour distance distribution function G # # $Revision: 4.28 $ $Date: 2013/04/25 06:37:43 $ # 
################################################################################ # "Gest" <- "nearest.neighbour" <- function(X, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han")) { verifyclass(X, "ppp") # W <- X$window npts <- npoints(X) lambda <- npts/area.owin(W) # determine r values rmaxdefault <- rmax.rule("G", W, lambda) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max zeroes <- numeric(length(rvals)) # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) { correction <- c("rs", "km", "han") } else correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", cs="han", ChiuStoyan="han", best="km"), multi=TRUE) # compute nearest neighbour distances nnd <- nndist(X$x, X$y) # distance to boundary bdry <- bdist.points(X) # observations o <- pmin.int(nnd,bdry) # censoring indicators d <- (nnd <= bdry) # initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2)) Z <- fv(df, "r", substitute(G(r), NULL), "theo", . ~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="G") if("none" %in% correction) { # UNCORRECTED e.d.f. 
of nearest neighbour distances: use with care if(npts <= 1) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("han" %in% correction) { if(npts <= 1) G <- zeroes else { # uncensored distances x <- nnd[d] # weights a <- eroded.areas(W, rvals) # calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } # add to fv object Z <- bind.fv(Z, data.frame(han=G), "hat(%s)[han](r)", "Hanisch estimate of %s", "han") # modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { # calculate Kaplan-Meier and border correction (Reduced Sample) estimators if(npts == 0) result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes) else { result <- km.rs(o, bdry, d, breaks) result <- as.data.frame(result[c("rs", "km", "hazard")]) } # add to fv object Z <- bind.fv(Z, result, c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hat(lambda)[km](r)"), c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)"), "km") # modify recommended plot range attr(Z, "alim") <- range(rvals[result$km <= 0.9]) } nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) unitname(Z) <- unitname(X) return(Z) } spatstat/R/nncorr.R0000755000176000001440000000661312237642727014017 0ustar ripleyusers# # nncorr.R # # $Revision: 1.8 $ $Date: 2013/04/25 06:37:43 $ # nnmean <- function(X) { stopifnot(is.ppp(X) && is.marked(X)) m <- numeric.columns(marks(X), logical=TRUE, others="na") nv <- ncol(m) nnid <- nnwhich(X) ok <- (nndist(X) <= bdist.points(X)) if(!any(ok)) stop("Insufficient data") numer <- unlist(lapply(as.data.frame(m[nnid[ok], ]), mean, na.rm=TRUE)) denom <- unlist(lapply(as.data.frame(m), mean, na.rm=TRUE)) ans <- rbind(unnormalised=numer, 
normalised =numer/denom) return(ans) } nnvario <- function(X) { stopifnot(is.ppp(X) && is.marked(X)) m <- numeric.columns(marks(X), logical=TRUE, others="na") f <- function(m1,m2) { ((m1-m2)^2)/2 } ans <- nncorr(X %mark% m, f, denominator=diag(var(m))) return(ans) } nncorr <- function(X, f = function(m1,m2) { m1 * m2}, ..., use = "all.obs", method = c("pearson", "kendall", "spearman"), denominator=NULL) { stopifnot(is.ppp(X) && is.marked(X)) m <- as.data.frame(marks(X)) nv <- ncol(m) if(nv == 1) colnames(m) <- "" # if(missing(method) || is.null(method)) method <- "pearson" # if(missing(f)) f <- NULL if(!is.null(f) && !is.function(f)) { if(nv == 1) stop("f should be a function") # could be a list of functions if(!(is.list(f) && all(unlist(lapply(f, is.function))))) stop("f should be a function or a list of functions") if(length(f) != nv) stop("Length of list f does not match number of mark variables") } # optional denominator(s) if(!is.null(denominator) && !(length(denominator) %in% c(1, nv))) stop("Denominator has incorrect length") # multi-dimensional case if(nv > 1) { # replicate things if(is.function(f)) f <- rep.int(list(f), nv) if(length(denominator) <= 1) denominator <- rep.int(list(denominator), nv) # result <- matrix(NA, nrow=3, ncol=nv) outnames <- c("unnormalised", "normalised", "correlation") dimnames(result) <- list(outnames, colnames(m)) for(j in 1:nv) { mj <- m[,j, drop=FALSE] denj <- denominator[[j]] nncj <- nncorr(X %mark% mj, f=f[[j]], use=use, method=method, denominator=denj) kj <- length(nncj) result[1:kj,j] <- nncj } if(all(is.na(result[3, ]))) result <- result[1:2, ] return(result) } # one-dimensional m <- m[,1,drop=TRUE] # select 'f' appropriately for X chk <- check.testfun(f, X=X) f <- chk$f ftype <- chk$ftype # denominator Efmm <- if(!is.null(denominator)) denominator else switch(ftype, mul={ mean(m)^2 }, equ={ sum(table(m)^2)/length(m)^2 }, general={ mean(outer(m, m, f, ...)) }) # border method nn <- nnwhich(X) ok <- (nndist(X) <= 
bdist.points(X)) if(!any(ok)) stop("Insufficient data") mY <- m[nn[ok]] mX <- m[ok] Efmk <- switch(ftype, mul = { mean(mX * mY, ...) }, equ = { mean(mX == mY, ...) }, general = { mean(f(mX, mY, ...)) }) # answer <- c(unnormalised=Efmk, normalised=Efmk/Efmm) if(ftype == "mul") { classic <- cor(mX, mY, use=use, method=method) answer <- c(answer, correlation=classic) } return(answer) } spatstat/R/reach.R0000755000176000001440000000162712237642727013600 0ustar ripleyusers# # reach.R # # $Revision: 1.8 $ $Date: 2007/10/24 09:41:15 $ # reach <- function(x, ...) { UseMethod("reach") } reach.interact <- function(x, ...) { verifyclass(x, "interact") irange <- x$irange if(is.null(irange)) return(Inf) if(!is.function(irange)) stop("Internal error - x$irange is not a function") ir <- irange(x) if(is.na(ir)) ir <- Inf return(ir) } reach.ppm <- function(x, ..., epsilon=0) { verifyclass(x, "ppm") # Poisson case if(is.poisson.ppm(x)) return(0) # extract info inte <- x$interaction coeffs <- coef(x) if(newstyle.coeff.handling(inte)) { # extract only interaction coefficients Vnames <- x$internal$Vnames coeffs <- coeffs[Vnames] } # apply 'irange' function irange <- inte$irange if(is.null(irange)) return(Inf) ir <- irange(inte, coeffs, epsilon=epsilon) if(is.na(ir)) ir <- Inf return(ir) } spatstat/R/split.ppx.R0000644000176000001440000000732112237642727014451 0ustar ripleyusers# # split.ppx.R # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # # split.ppx etc # ######################################### split.ppx <- function(x, f = marks(x), drop=FALSE, un=NULL, ...) 
{ stopifnot(inherits(x, "ppx")) mf <- markformat(x) if(is.null(un)) un <- missing(f) && !(mf %in% c("dataframe", "hyperframe")) if(missing(f)) { # f defaults to marks of x switch(mf, none={ stop("f is missing and there are no marks") }, vector={ if(!is.multitype(x)) stop("f is missing and the pattern is not multitype") f <- fsplit <- marks(x) }, hyperframe=, dataframe={ f <- fsplit <- firstfactor(marks(x)) if(is.null(f)) stop("Marks do not include a factor") }) splittype <- "factor" } else{ # f was given fsplit <- f if(is.factor(f)) { splittype <- "factor" } else if(is.character(f) && length(f) == 1) { # f is the name of a column of marks marx <- marks(x) if((is.data.frame(marx) || is.hyperframe(marx)) && (f %in% names(marx))) { fsplit <- f <- marx[[f]] } else stop(paste("The name", sQuote(f), "does not match any column of marks")) splittype <- "factor" } else stop(paste("f must be", "a factor,", "or the name of a column of marks")) if(length(f) != npoints(x)) stop("length(f) must equal the number of points in x") } # At this point # 'f' is a factor that can be used to separate the points # 'fsplit' is the object (either a factor or a tessellation) # that determines the split (and can be "un-split") lev <- levels(f) if(drop) { # remove components that don't contain points retain <- (table(f) > 0) lev <- lev[retain] switch(splittype, factor = { # delete levels that don't occur fsplit <- factor(fsplit, levels=lev) }, stop("Internal error: wrong format for fsplit")) } # split the data out <- list() for(l in lev) out[[paste(l)]] <- x[!is.na(f) & (f == l)] if(un) out <- lapply(out, unmark) class(out) <- c("splitppx", "listof", class(out)) attr(out, "fsplit") <- fsplit return(out) } print.splitppx <- function(x, ...) 
{ f <- attr(x, "fsplit") what <- if(is.factor(f)) "factor" else "unknown data" cat(paste("Multidimensional point pattern split by", what, "\n")) nam <- names(x) for(i in seq_along(x)) { cat(paste("\n", nam[i], ":\n", sep="")) print(x[[i]]) } return(invisible(NULL)) } summary.splitppx <- function(object, ...) { x <- lapply(object, summary, ...) class(x) <- "summary.splitppx" x } print.summary.splitppx <- function(x, ...) { class(x) <- "listof" print(x) invisible(NULL) } "[.splitppx" <- function(x, ...) { f <- attr(x, "fsplit") # invoke list method on x class(x) <- "list" y <- x[...] # then make it a 'splitppx' object too class(y) <- c("splitppx", class(y)) if(is.factor(f)) { lev <- levels(f) sublev <- lev[...] subf <- f[f %in% sublev] fsplit <- factor(subf, levels=lev) } else stop("Unknown splitting type") attr(y, "fsplit") <- fsplit y } "[<-.splitppx" <- function(x, ..., value) { if(!all(unlist(lapply(value, is.ppx)))) stop("replacement value must be a list of point patterns (ppx)") f <- attr(x, "fsplit") # invoke list method class(x) <- "list" x[...] 
<- value # then make it a 'splitppx' object too class(x) <- c("splitppx", class(x)) if(is.factor(f)) { lev <- levels(f) fsplit <- factor(rep.int(lev, unlist(lapply(x, npoints))), levels=lev) } attr(x, "fsplit") <- fsplit x } spatstat/R/clusterset.R0000644000176000001440000000415412237642727014706 0ustar ripleyusers# # clusterset.R # # Allard-Fraley estimator of cluster region # # $Revision: 1.6 $ $Date: 2013/10/06 04:35:24 $ # clusterset <- function(X, result=c("marks", "domain"), ..., verbose=TRUE, fast=FALSE, exact=!fast) { stopifnot(is.ppp(X)) result <- match.arg(result) if(!missing(exact)) stopifnot(is.logical(exact)) if(fast && exact) stop("fast=TRUE is incompatible with exact=TRUE") # compute duplication exactly as in deldir, or the universe will explode X <- unique(unmark(X), rule="deldir", warn=TRUE) n <- npoints(X) W <- as.owin(X) # discretised Dirichlet tessellation if(verbose) cat("Computing Dirichlet tessellation...") if(fast || !exact) cellid <- as.im(nnfun(X), ...) # compute tile areas if(fast) { a <- table(factor(as.vector(as.matrix(cellid)), levels=1:n)) if(verbose) cat("done.\n") a <- a + 0.5 A <- sum(a) } else { d <- dirichlet(X) if(verbose) cat("done.\n") D <- tiles(d) suppressWarnings(id <- as.integer(names(D))) if(any(is.na(id)) && result == "marks") stop("Unable to map Dirichlet tiles to data points") A <- area.owin(W) a <- unlist(lapply(D, area.owin)) } # determine optimal selection of tiles ntile <- length(a) o <- order(a) b <- cumsum(a[o]) m <- seq_len(ntile) logl <- -n * log(n) + m * log(m/b) + (n-m) * log((n-m)/(A-b)) mopt <- which.max(logl) picked <- o[seq_len(mopt)] # construct result switch(result, marks = { # map tiles to points if(!fast) picked <- id[picked] # label points is.picked <- rep.int("no", n) is.picked[picked] <- "yes" is.picked <- factor(is.picked, levels=c("no", "yes")) out <- X %mark% is.picked }, domain = { if(exact) { out <- do.call("union.owin", unname(D[picked])) } else { is.picked <- rep.int(FALSE, n) 
is.picked[picked] <- TRUE out <- eval.im(is.picked[cellid]) } }) return(out) } spatstat/R/plot.fasp.R0000755000176000001440000001277312237642727014430 0ustar ripleyusers# # plot.fasp.R # # $Revision: 1.27 $ $Date: 2013/04/25 06:37:43 $ # plot.fasp <- function(x, formule=NULL, ..., subset=NULL, title=NULL, banner=TRUE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) { # plot dimensions which <- x$which nrows <- nrow(which) ncols <- ncol(which) # Determine the overall title of the plot if(banner) { if(!is.null(title)) overall <- title else if(!is.null(x$title)) overall <- x$title else { if(prod(dim(x$which)) > 1) overall <- "Array of diagnostic functions" else overall <- "Diagnostic function" if(is.null(x$dataname)) overall <- paste(overall,".",sep="") else overall <- paste(overall," for ",x$dataname,".",sep="") } if(length(overall) > 1) overall <- paste(overall, collapse="\n") nlines <- if(!is.character(overall)) 1 else length(unlist(strsplit(overall, "\n"))) } # If no formula is given, look for a default formula in x: defaultplot <- is.null(formule) if(defaultplot && !is.null(x$default.formula)) formule <- x$default.formula if(!is.null(formule)) { # ensure formulae are given as character strings. formule <- FormatFaspFormulae(formule, "formule") # Number of formulae should match number of functions. nf <- length(formule) nfun <- length(x$fns) if(nf == 1 && nfun > 1) formule <- rep.int(formule, nfun) else if(nf != nfun) stop(paste("Wrong number of entries in", sQuote("formule"))) } # Check on the length of the subset argument. ns <- length(subset) if(ns > 1) { if(ns != length(x$fns)) stop("Wrong number of entries in subset argument.\n") msub <- TRUE } else msub <- FALSE # compute common x, y axis limits for all plots ? 
xlim <- ylim <- NULL if(samex || samey) { cat("Computing limits\n") # call plot.fv to determine plot limits of each panel for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(!is.na(k)) { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset lims <- plot(fun, fmla, subset=sub, limitsonly=TRUE) # update the limits if(samex) xlim <- range(xlim, lims$xlim) if(samey) ylim <- range(ylim, lims$ylim) } } } } ############################################################# # Set up the plot layout n <- nrows * ncols # panels 1..n = plot panels codes <- matrix(seq_len(n), byrow=TRUE, ncol=ncols, nrow=nrows) heights <- rep.int(1, nrows) widths <- rep.int(1, ncols) # annotation as chosen if(outerlabels) { # column headings colhead.codes <- max(codes) + (1:ncols) colhead.height <- 0.2 codes <- rbind(colhead.codes, codes) heights <- c(colhead.height, heights) # row headings rowhead.codes <- max(codes) + (1:nrows) rowhead.width <- 0.2 codes <- cbind(c(0,rowhead.codes), codes) widths <- c(rowhead.width, widths) } if(banner) { # overall banner top.code <- max(codes) + 1 top.height <- 0.1 * (1+nlines) codes <- rbind(top.code, codes) heights <- c(top.height, heights) } # declare layout layout(codes, widths=widths, heights=heights) ############################################################ # Plot the function panels # # determine annotation colNames <- colnames(which) rowNames <- rownames(which) nrc <- max(nrows, ncols) ann.def <- par("ann") && (nrc <= 3) # determine margin around each panel if(is.null(mar.panel)) mar.panel <- if(nrc > 3 && outerlabels) rep.int(1/nrc, 4) else par("mar") opa <- par(mar=mar.panel, xpd=TRUE) # # plot each function for(i in 1:nrows) { for(j in 1:ncols) { k <- which[i,j] if(is.na(k)) plot(0,0,type='n',xlim=c(0,1), ylim=c(0,1),axes=FALSE,xlab='',ylab='', ...) 
else { fun <- as.fv(x$fns[[k]]) fmla <- if(!defaultplot) formule[k] else NULL sub <- if(msub) subset[[k]] else subset main <- if(outerlabels) "" else paste("(", rowNames[i], ", ", colNames[j], ")", sep="") do.call("plot", resolve.defaults(list(x=fun, fmla=fmla, subset=sub), list(...), list(xlim=xlim, ylim=ylim, main=main, legend=legend), list(ann=ann.def, axes=ann.def, frame.plot=TRUE))) } } } ############################################################ # # Annotation as selected if(outerlabels) { par(mar=rep.int(0,4), xpd=TRUE) # Plot the column headers for(j in 1:ncols) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,colNames[j], cex=cex.outerlabels) } # Plot the row labels for(i in 1:nrows) { plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) text(0,0,rowNames[i], srt=90, cex=cex.outerlabels) } } if(banner) { par(mar=rep.int(0,4), xpd=TRUE) # plot the banner plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title text(0,0, overall, cex=cex) } # revert layout(1) par(opa) return(invisible(NULL)) } spatstat/R/pppmatch.R0000755000176000001440000006616712237642727014344 0ustar ripleyusers# # pppmatch.R # # $Revision: 1.13 $ $Date: 2013/04/25 07:24:43 $ # # Code by Dominic Schuhmacher # # # ----------------------------------------------------------------- # The standard functions for the new class pppmatching # # Objects of class pppmatching consist of two point patterns pp1 and pp2, # and either an adjacency matrix ((i,j)-th entry 1 if i-th point of pp1 and j-th # point of pp2 are matched, 0 otherwise) for "full point matchings" or # a "generalized adjacency matrix" (or flow matrix; positive values are # no longer limited to 1, (i,j)-th entry gives the "flow" between # the i-th point of pp1 and the j-th point of pp2) for "fractional matchings". 
# Optional elements are the type # of the matching, the cutoff value for distances in R^2, the order # of averages taken, and the resulting distance for the matching. # Currently recognized types are "spa" (subpattern assignment, # where dummy points at maximal dist are introduced if cardinalities differ), # "ace" (assignment if cardinalities equal, where dist is maximal if cards differ), # and "mat" (mass transfer, fractional matching that belongs to the # Wasserstein distance obtained if point patterns are normalized to probability measures). # ----------------------------------------------------------------- pppmatching <- function(X, Y, am, type = NULL, cutoff = NULL, q = NULL, mdist = NULL) { verifyclass(X, "ppp") verifyclass(Y, "ppp") n1 <- X$n n2 <- Y$n am <- as.matrix(am) am <- apply(am, c(1,2), as.numeric) if (length(am) == 0) { if (min(n1,n2) == 0) am <- matrix(am, nrow=n1, ncol=n2) else stop("Adjacency matrix does not have the right dimensions") } if (dim(am)[1] != n1 || dim(am)[2] != n2) stop("Adjacency matrix does not have the right dimensions") res <- list("pp1" = X, "pp2" = Y, "matrix" = am, "type" = type, "cutoff" = cutoff, "q" = q, "distance" = mdist) class(res) <- "pppmatching" res } # currently, for fractional matchings all the flows are plotted the same way # irrespective of their weights plot.pppmatching <- function(x, addmatch = NULL, main = NULL, ...) { if (is.null(main)) main <- short.deparse(substitute(x)) pp1 <- x$pp1 pp2 <- x$pp2 plot.owin(pp1$window, main = main, ...) here <- which((x$matrix > 0), arr.ind = TRUE) if (!is.null(addmatch)) { addhere <- which((addmatch > 0), arr.ind = TRUE) seg <- as.psp(from=pp1[addhere[,1]], to=pp2[addhere[,2]]) plot(seg, add=TRUE, lty = 2, col="gray70") } if (length(here) > 0) { seg <- as.psp(from=pp1[here[,1]], to=pp2[here[,2]]) plot(seg, add=TRUE, ...) } points(x$pp1, pch=20, col=2, ...) points(x$pp2, pch=20, col=4, ...) return(invisible(NULL)) } print.pppmatching <- function(x, ...) 
{ n1 <- x$pp1$n n2 <- x$pp2$n if (is.null(x$type) || is.null(x$q) || is.null(x$cutoff)) cat("Generic matching of two planar point patterns \n") else cat(x$type, "-", x$q, " matching of two planar point patterns (cutoff = ", x$cutoff, ") \n", sep = "") cat("pp1:", n1, ngettext(n1, "point", "points"), "\n") cat("pp2:", n2, ngettext(n2, "point", "points"), "\n") print.owin(x$pp1$window) npair <- sum(x$matrix > 0) if (npair == 0) cat("matching is empty \n") else { if (any(x$matrix != trunc(x$matrix))) cat("fractional matching,", npair, ngettext(npair, "flow", "flows"), "\n") else cat("point matching,", npair, ngettext(npair, "line", "lines"), "\n") } if (!is.null(x$distance)) cat("distance:", x$distance, "\n") return(invisible(NULL)) } summary.pppmatching <- function(object, ...) { X <- object$pp1 Y <- object$pp2 n1 <- X$n n2 <- Y$n if (is.null(object$type) || is.null(object$q) || is.null(object$cutoff)) cat("Generic matching of two planar point patterns \n") else cat(object$type, "-", object$q, " matching of two planar point patterns (cutoff = ", object$cutoff, ") \n", sep = "") cat("pp1:", n1, ngettext(n1, "point", "points"), "\n") cat("pp2:", n2, ngettext(n2, "point", "points"), "\n") print.owin(X$window) npair <- sum(object$matrix > 0) if (npair == 0) cat("matching is empty \n") else { if (any(object$matrix != trunc(object$matrix))) { cat("fractional matching,", npair, ngettext(npair, "flow", "flows"), "\n") } else { cat("point matching,", npair, ngettext(npair, "line", "lines"), "\n") rowsum <- apply(object$matrix, 1, "sum") colsum <- apply(object$matrix, 2, "sum") lt <- ifelse(min(rowsum) >= 1, TRUE, FALSE) ru <- ifelse(max(rowsum) <= 1, TRUE, FALSE) rt <- ifelse(min(colsum) >= 1, TRUE, FALSE) lu <- ifelse(max(colsum) <= 1, TRUE, FALSE) if (lt && ru && rt && lu) cat("matching is 1-1 \n") else if (any(lt, ru, rt, lu)) { cat("matching is", ifelse(lt, " left-total", ""), ifelse(lu, " left-unique", ""), ifelse(rt, " right-total", ""), ifelse(ru, " right-unique", ""), 
"\n", sep="") } } } if (!is.null(object$distance)) cat("distance:", object$distance, "\n") return(invisible(NULL)) } # ----------------------------------------------------------------- # matchingdist computes the distance associated with a certain kind of matching. # Any of the arguments type, cutoff and order (if supplied) override the # the corresponding arguments in the matching. # This function is useful for verifying the distance element of an # object of class pppmatching as well as for comparing different # (typically non-optimal) matchings. # ----------------------------------------------------------------- matchingdist <- function(matching, type = NULL, cutoff = NULL, q = NULL) { verifyclass(matching, "pppmatching") if (is.null(type)) if (is.null(matching$type)) stop("Type of matching unknown. Distance cannot be computed") else type <- matching$type if (is.null(cutoff)) if (is.null(matching$cutoff)) stop("Cutoff value unknown. Distance cannot be computed") else cutoff <- matching$cutoff if (is.null(q)) if (is.null(matching$q)) stop("Order unknown. 
Distance cannot be computed") else q <- matching$q X <- matching$pp1 Y <- matching$pp2 n1 <- X$n n2 <- Y$n Lpexpect <- function(x, w, p) { f <- max(x) return(ifelse(f==0, 0, f * sum((x/f)^p * w)^(1/p))) } if (type == "spa") { n <- max(n1,n2) # divisor for Lpexpect if (n == 0) return(0) else if (min(n1,n2) == 0) return(cutoff) shortdim <- which.min(c(n1,n2)) shortsum <- apply(matching$matrix, shortdim, sum) if (any(shortsum != 1)) warning("matching does not attribute mass 1 to each point of point pattern with smaller cardinality") dfix <- apply(crossdist(X,Y), c(1,2), function(x) { min(x,cutoff) }) if (is.finite(q)) resdist <- (Lpexpect(dfix, matching$matrix/n, q)^q + abs(n2-n1)/n * cutoff^q)^(1/q) else resdist <- ifelse(n1==n2, max(dfix[matching$matrix > 0]), cutoff) } else if (type == "ace") { n <- n1 # divisor for Lpexpect if (n1 != n2) return(cutoff) if (n == 0) return(0) rowsum <- apply(matching$matrix, 1, sum) colsum <- apply(matching$matrix, 2, sum) if (any(c(rowsum, colsum) != 1)) warning("matching is not 1-1") dfix <- apply(crossdist(X,Y), c(1,2), function(x) { min(x,cutoff) }) if (is.finite(q)) resdist <- Lpexpect(dfix, matching$matrix/n, q) else resdist <- max(dfix[matching$matrix > 0]) } else if (type == "mat") { n <- min(n1,n2) # divisor for Lpexpect if (min(n1,n2) == 0) return(NaN) shortdim <- which.min(c(n1,n2)) shortsum <- apply(matching$matrix, shortdim, sum) if (any(shortsum != 1)) warning("matching does not attribute mass 1 to each point of point pattern with smaller cardinality") dfix <- apply(crossdist(X,Y), c(1,2), function(x) { min(x,cutoff) }) if (is.finite(q)) resdist <- Lpexpect(dfix, matching$matrix/n, q) else resdist <- max(dfix[matching$matrix > 0]) } else stop(paste("Unrecognised type", sQuote(type))) return(resdist) } # ----------------------------------------------------------------- # The main function for computation of distances and finding optimal # matchings between point patterns: pppdist # 
----------------------------------------------------------------- # # pppdist uses several helper functions not normally called by the user # # The arguments of pppdist are # # x and y of class ppp (the two point patterns for which we want to compute # a distance) # The type of distance to be computed; any one of "spa" (default), "ace", "mat". # For details of this and the following two arguments see above (description # for class "pppmatching") # cutoff and order q of the distance # Set matching to TRUE if the full point matching (including distance) # should be returned; otherwise only the distance is returned # If ccode is FALSE R code is used where available. This may be useful if q # is high (say above 10) and severe warning messages pop up. R can # (on most machines) deal with a higher number of significant digits per # number than C (at least with the code used below) # precision should only be entered by advanced users. Empirically reasonable defaults # are used otherwise. As a rule of thumb, if ccode is TRUE, precision should # be the highest value that does not give an error (typically 9); if ccode # is FALSE, precision should be balanced (typically between 10 and 100) in # such a way that the sum of the number of zeroes and pseudo-zeroes given in the # warning messages is minimal # approximation: if q = Inf, by the distance of which order should # the true distance be approximated. If approximation is Inf, brute force # computation is used, which is only practicable for point patterns with # very few points (see also the remarks just before the pppdist.prohorov # function below). 
# show.rprimal=TRUE shows at each stage of the algorithm what the current restricted # primal problem and its solution are (algorithm jumps between restricted primal # and dual problem until the solution to the restricted primal (a partial # matching of the point patterns) is a full matching) # timelag gives the number of seconds of pause added each time a solution to # the current restricted primal is found (has only an effect if show.primal=TRUE) # ----------------------------------------------------------------- pppdist <- function(X, Y, type = "spa", cutoff = 1, q = 1, matching = TRUE, ccode = TRUE, precision = NULL, approximation = 10, show.rprimal = FALSE, timelag = 0) { verifyclass(X, "ppp") verifyclass(Y, "ppp") if (!ccode && type == "mat") { warning("R code is not available for type = ", dQuote("mat"), ". C code is used instead") ccode <- TRUE } if (!ccode && is.infinite(q) && is.infinite(approximation)) { warning("R code is not available for q = Inf and approximation = Inf. C code is used instead") ccode <- TRUE } if (ccode && is.infinite(q) && is.infinite(approximation) && type == "spa" && X$n != Y$n) { warning("approximation = Inf not available for type = ", dQuote("spa"), " and point patterns with differing cardinalities") approximation <- 10 } if (is.infinite(q) && is.infinite(approximation) && type == "mat") { warning("approximation = Inf not available for type = ", dQuote("mat")) approximation <- 10 } if (show.rprimal) { ccode <- FALSE if (type != "ace"){ warning("show.rprimal = TRUE not available for type = ", dQuote(type), ". 
Type is changed to ", dQuote("ace")) type <- "ace" } } if (is.null(precision)) { if (ccode) precision <- trunc(log10(.Machine$integer.max)) else { db <- .Machine$double.base minprec <- trunc(log10(.Machine$double.base^.Machine$double.digits)) if (is.finite(q)) precision <- min(max(minprec,2*q),(.Machine$double.max.exp-1)*log(db)/log(10)) else precision <- min(max(minprec,2*approximation),(.Machine$double.max.exp-1)*log(db)/log(10)) } } if (type == "spa") { if (X$n == 0 && Y$n == 0) { if (!matching) return(0) else { return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), type, cutoff, q, 0)) } } n1 <- X$n n2 <- Y$n n <- max(n1,n2) dfix <- matrix(cutoff,n,n) if (min(n1,n2) > 0) dfix[1:n1,1:n2] <- crossdist(X,Y) d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) }) if (is.infinite(q)) { if (n1 == n2 || matching) return(pppdist.prohorov(X, Y, n, d, type, cutoff, matching, ccode, precision, approximation)) else return(cutoff) # in the case n1 != n2 the distance is clear, and in a sense any # matching would be correct. We go here the extra mile and call # pppdist.prohorov in order to find (approximate) the matching # that is intuitively most interesting (i.e. 
the one that # pairs the points of the # smaller cardinality point pattern with the points of the larger # cardinality point pattern in such a way that the maximal pairing distance # is minimal (for q < Inf the q-th order pairing distance before the introduction # of dummy points is automatically minimal if it is minimal after the # introduction of dummy points) # which would be the case for the obtained pairing if q < Inf } } else if (type == "ace") { if (X$n != Y$n) { if (!matching) return(cutoff) else { return(pppmatching(X, Y, matrix(0, nrow=X$n, ncol=Y$n), type, cutoff, q, cutoff)) } } if (X$n == 0) { if (!matching) return(0) else { return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), type, cutoff, q, 0)) } } n <- n1 <- n2 <- X$n dfix <- crossdist(X,Y) d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) }) if (is.infinite(q)) return(pppdist.prohorov(X, Y, n, d, type, cutoff, matching, ccode, precision, approximation)) } else if (type == "mat") { if (!ccode) warning("R code is not available for type = ", dQuote("mat"), ". 
C code is used instead") return(pppdist.mat(X, Y, cutoff, q, matching, precision, approximation)) } else stop(paste("Unrecognised type", sQuote(type))) d <- d/max(d) d <- round((d^q)*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if(nzeroes > 0) warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding the q-th powers of distances")) if(ccode & any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") if(!ccode) { if (any(is.infinite(d))) stop("Inf obtained, while taking the q-th powers of distances") maxd <- max(d) npszeroes <- sum(maxd/d[d>0] >= .Machine$double.base^.Machine$double.digits) if (npszeroes > 0) warning(paste(npszeroes, ngettext(npszeroes, "pseudo-zero", "pseudo-zeroes"), "introduced, while taking the q-th powers of distances")) # a pseudo-zero is a value that is positive but contributes nothing to the # q-th order average because it is too small compared to the other values } Lpmean <- function(x, p) { f <- max(x) return(ifelse(f==0, 0, f * mean((x/f)^p)^(1/p))) } if (show.rprimal && type == "ace") { assig <- acedist.show(X, Y, n, d, timelag) am <- matrix(0, n, n) am[cbind(1:n, assig[1:n])] <- 1 } else if (ccode) { res <- .C("dwpure", as.integer(d), as.integer(rep.int(1,n)), as.integer(rep.int(1,n)), as.integer(n), as.integer(n), flowmatrix = as.integer(integer(n^2))) # PACKAGE="spatstat") am <- matrix(res$flowmatrix, n, n) } else { assig <- acedist.noshow(X, Y, n, d) am <- matrix(0, n, n) am[cbind(1:n, assig[1:n])] <- 1 } resdist <- Lpmean(dfix[am == 1], q) if (!matching) return(resdist) else { amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2)) # previous line solves various problems associated with min(n1,n2) = 0 or = 1 return(pppmatching(X, Y, amsmall, type, cutoff, q, resdist)) } } # # # =========================================================== # =========================================================== # Anything below: # Internal functions usually 
not to be called by user # =========================================================== # =========================================================== # # # Called if show.rprimal is true # acedist.show <- function(X, Y, n, d, timelag = 0) { plot(pppmatching(X, Y, matrix(0, n, n))) # initialization of dual variables u <- apply(d, 1, min) d <- d - u v <- apply(d, 2, min) d <- d - rep(v, each=n) # the main loop feasible <- FALSE while (!feasible) { rpsol <- maxflow(d) # rpsol = restricted primal, solution am <- matrix(0, n, n) for (i in 1:n) { if (rpsol$assignment[i] > -1) am[i, rpsol$assignment[i]] <- TRUE } Sys.sleep(timelag) channelmat <- (d == 0 & !am) plot(pppmatching(X, Y, am), addmatch = channelmat) # if the solution of the restricted primal is not feasible for # the original primal, update dual variables if (min(rpsol$assignment) == -1) { w1 <- which(rpsol$fi_rowlab > -1) w2 <- which(rpsol$fi_collab == -1) subtractor <- min(d[w1, w2]) d[w1,] <- d[w1,] - subtractor d[,-w2] <- d[,-w2] + subtractor } # otherwise break the loop else { feasible <- TRUE } } return(rpsol$assignment) } # # R-version of hungarian algo without the pictures # useful if q is large # acedist.noshow <- function(X, Y, n, d) { # initialization of dual variables u <- apply(d, 1, min) d <- d - u v <- apply(d, 2, min) d <- d - rep(v, each=n) # the main loop feasible <- FALSE while (!feasible) { rpsol <- maxflow(d) # rpsol = restricted primal, solution am <- matrix(0, n, n) for (i in 1:n) { if (rpsol$assignment[i] > -1) am[i, rpsol$assignment[i]] <- TRUE } channelmat <- (d == 0 & !am) # if the solution of the restricted primal is not feasible for # the original primal, update dual variables if (min(rpsol$assignment) == -1) { w1 <- which(rpsol$fi_rowlab > -1) w2 <- which(rpsol$fi_collab == -1) subtractor <- min(d[w1, w2]) d[w1,] <- d[w1,] - subtractor d[,-w2] <- d[,-w2] + subtractor } # otherwise break the loop else { feasible <- TRUE } } return(rpsol$assignment) } # # Solution of restricted primal 
# maxflow <- function(costm) { stopifnot(is.matrix(costm)) stopifnot(nrow(costm) == ncol(costm)) if(!all(apply(costm == 0, 1, any))) stop("Each row of the cost matrix must contain a zero") m <- dim(costm)[1] # cost matrix is square m * m assignment <- rep.int(-1, m) # -1 means no pp2-point assigned to i-th pp1-point # initial assignment or rowlabel <- source label (= 0) where not possible for (i in 1:m) { j <- match(0, costm[i,]) if (!(j %in% assignment)) assignment[i] <- j } newlabelfound <- TRUE while (newlabelfound) { rowlab <- rep.int(-1, m) # -1 means no label given, 0 stands for source label collab <- rep.int(-1, m) rowlab <- ifelse(assignment == -1, 0, rowlab) # column and row labeling procedure until either breakthrough occurs # (which means that there is a better point assignment, i.e. one that # creates more point pairs than the current one (flow can be increased)) # or no more labeling is possible breakthrough <- -1 while (newlabelfound && breakthrough == -1) { newlabelfound <- FALSE for (i in 1:m) { if (rowlab[i] != -1) { for (j in 1:m) { if (costm[i,j] == 0 && collab[j] == -1) { collab[j] <- i newlabelfound <- TRUE if (!(j %in% assignment) && breakthrough == -1) breakthrough <- j } } } } for (j in 1:m) { if (collab[j] != -1) { for (i in 1:m) { if (assignment[i] == j && rowlab[i] == -1) { rowlab[i] <- j newlabelfound <- TRUE } } } } } # if the while-loop was left due to breakthrough, # reassign points (i.e. 
redirect flow) and restart labeling procedure if (breakthrough != -1) { l <- breakthrough while (l != 0) { k <- collab[l] assignment[k] <- l l <- rowlab[k] } } } # the outermost while-loop is left, no more labels can be given; hence # the maximal number of points are paired given the current restriction # (flow is maximal given the current graph) return(list("assignment"=assignment, "fi_rowlab"=rowlab, "fi_collab"=collab)) } # # Prohorov distance computation/approximation (called if q = Inf in pppdist # and type = "spa" or "ace") # Exact brute force computation of distance if approximation = Inf, # scales very badly, should not be used for cardinality n larger than 10-12 # Approximation by order q distance gives often (if the warning messages # are not too extreme) the right matching and therefore the exact Prohorov distance, # but in very rare cases the result can be very wrong. However, it is always # an exact upper bound of the Prohorov distance (since based on *a* pairing # as opposed to optimal pairing. 
# pppdist.prohorov <- function(X, Y, n, dfix, type, cutoff = 1, matching = TRUE, ccode = TRUE, precision = 9, approximation = 10) { n1 <- X$n n2 <- Y$n d <- dfix/max(dfix) if (is.finite(approximation)) { warning(paste("distance with parameter q = Inf is approximated by distance with parameter q =", approximation)) d <- round((d^approximation)*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if (nzeroes > 0) warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding distances")) if (ccode) { if (any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") res <- .C("dwpure", as.integer(d), as.integer(rep.int(1,n)), as.integer(rep.int(1,n)), as.integer(n), as.integer(n), flowmatrix = as.integer(integer(n^2))) # PACKAGE="spatstat") am <- matrix(res$flowmatrix, n, n) } else { if (any(is.infinite(d))) stop("Inf obtained, while taking the q-th powers of distances") maxd <- max(d) npszeroes <- sum(maxd/d[d>0] >= .Machine$double.base^.Machine$double.digits) if (npszeroes > 0) warning(paste(npszeroes, ngettext(npszeroes, "pseudo-zero", "pseudo-zeroes"), "introduced, while taking the q-th powers of distances")) assig <- acedist.noshow(X, Y, n, d) am <- matrix(0, n, n) am[cbind(1:n, assig[1:n])] <- 1 } } else { d <- round(d*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if (nzeroes > 0) warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding distances")) if (any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") res <- .C("dinfty_R", as.integer(d), as.integer(n), assignment = as.integer(rep.int(-1,n))) # PACKAGE="spatstat") assig <- res$assignment am <- matrix(0, n, n) am[cbind(1:n, assig[1:n])] <- 1 } if (n1 == n2) resdist <- max(dfix[am == 1]) else resdist <- cutoff if (!matching) return(resdist) else { amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2)) # previous line solves various problems associated with 
min(n1,n2) = 0 or = 1 return(pppmatching(X, Y, amsmall, type, cutoff, Inf, resdist)) } } # # Computation of "pure Wasserstein distance" for any q (called if type="mat" # in pppdist, no matter if q finite or not). # If q = Inf, approximation using ccode is enforced # (approximation == Inf is not allowed here). # pppdist.mat <- function(X, Y, cutoff = 1, q = 1, matching = TRUE, precision = 9, approximation = 10) { n1 <- X$n n2 <- Y$n n <- min(n1,n2) if (n == 0) { if (!matching) return(NaN) else return(pppmatching(X, Y, matrix(0, nrow=0,ncol=0), "mat", cutoff, q, NaN)) } dfix <- crossdist(X,Y) d <- dfix <- apply(dfix, c(1,2), function(x) { min(x,cutoff) }) d <- d/max(d) if (is.infinite(q)) { if (is.infinite(approximation)) stop("approximation = Inf") warning(paste("distance with parameter q = Inf is approximated by distance with parameter q =", approximation)) d <- round((d^approximation)*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if (nzeroes > 0) warning(paste(nzeroes, "zeroes introduced, while rounding distances")) if (any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") gcd <- greatest.common.divisor(n1,n2) mass1 <- n2/gcd mass2 <- n1/gcd res <- .C("dwpure", as.integer(d), as.integer(rep.int(mass1,n1)), as.integer(rep.int(mass2,n2)), as.integer(n1), as.integer(n2), flowmatrix = as.integer(integer(n1*n2))) # PACKAGE="spatstat") am <- matrix(res$flowmatrix/(max(n1,n2)/gcd), n1, n2) resdist <- max(dfix[am > 0]) } else { d <- round((d^q)*(10^precision)) nzeroes <- sum(d == 0 & dfix > 0) if(nzeroes > 0) warning(paste(nzeroes, ngettext(nzeroes, "zero", "zeroes"), "introduced, while rounding the q-th powers of distances")) if(any(d > .Machine$integer.max)) stop("integer overflow, while rounding the q-th powers of distances") gcd <- greatest.common.divisor(n1,n2) mass1 <- n2/gcd mass2 <- n1/gcd Lpexpect <- function(x, w, p) { f <- max(x) return(ifelse(f==0, 0, f * sum((x/f)^p * w)^(1/p))) } res <- .C("dwpure", 
as.integer(d), as.integer(rep.int(mass1,n1)), as.integer(rep.int(mass2,n2)), as.integer(n1), as.integer(n2), flowmatrix = as.integer(integer(n1*n2))) # PACKAGE="spatstat") am <- matrix(res$flowmatrix/(max(n1,n2)/gcd), n1, n2) # our "adjacency matrix" in this case is standardized to have # rowsum 1 if n1 <= n2 and colsum 1 if n1 >= n2 resdist <- Lpexpect(dfix, am/n, q) } if (!matching) return(resdist) else { amsmall <- suppressWarnings(matrix(am[1:n1,1:n2], nrow=n1, ncol=n2)) # previous line solves various problems associated with min(n1,n2) = 0 or = 1 return(pppmatching(X, Y, amsmall, "mat", cutoff, q, resdist)) } } spatstat/R/ppx.R0000755000176000001440000002543612242562146013321 0ustar ripleyusers# # ppx.R # # class of general point patterns in any dimension # # $Revision: 1.41 $ $Date: 2013/04/25 06:37:43 $ # ppx <- local({ ctype.table <- c("spatial", "temporal", "local", "mark") ctype.real <- c(TRUE, TRUE, FALSE, FALSE) ppx <- function(data, domain=NULL, coord.type=NULL, simplify=FALSE) { data <- as.hyperframe(data) # columns suitable for spatial coordinates suitable <- with(unclass(data), vtype == "dfcolumn" & (vclass == "numeric" | vclass == "integer")) if(is.null(coord.type)) { # assume all suitable columns of data are spatial coordinates # and all other columns are marks. 
ctype <- ifelse(suitable, "spatial", "mark") } else { stopifnot(is.character(coord.type)) stopifnot(length(coord.type) == ncol(data)) ctypeid <- pmatch(coord.type, ctype.table, duplicates.ok=TRUE) # validate if(any(uhoh <- is.na(ctypeid))) stop(paste("Unrecognised coordinate", ngettext(sum(uhoh), "type", "types"), commasep(sQuote(coord.type[uhoh])))) if(any(uhoh <- (!suitable & ctype.real[ctypeid]))) { nuh <- sum(uhoh) stop(paste(ngettext(nuh, "Coordinate", "Coordinates"), commasep(sQuote(names(data)[uhoh])), ngettext(nuh, "does not", "do not"), "contain real numbers")) } ctype <- ctype.table[ctypeid] } ctype <- factor(ctype, levels=ctype.table) # if(simplify && all(ctype == "spatial")) { # attempt to reduce to ppp or pp3 d <- length(ctype) if(d == 2) { ow <- try(as.owin(domain), silent=TRUE) if(!inherits(ow, "try-error")) { X <- try(as.ppp(as.data.frame(data), W=ow)) if(!inherits(X, "try-error")) return(X) } } else if(d == 3) { bx <- try(as.box3(domain), silent=TRUE) if(!inherits(bx, "try-error")) { m <- as.matrix(as.data.frame(data)) X <- try(pp3(m[,1], m[,2], m[,3], bx)) if(!inherits(X, "try-error")) return(X) } } } out <- list(data=data, ctype=ctype, domain=domain) class(out) <- "ppx" return(out) } ppx }) is.ppx <- function(x) { inherits(x, "ppx") } nobjects.ppx <- npoints.ppx <- function(x) { nrow(x$data) } print.ppx <- function(x, ...) 
{ cat("Multidimensional point pattern\n") sd <- summary(x$data) np <- sd$ncases nama <- sd$col.names cat(paste(np, ngettext(np, "point", "points"), "\n")) if(any(iscoord <- (x$ctype == "spatial"))) cat(paste(sum(iscoord), "-dimensional space coordinates ", paren(paste(nama[iscoord], collapse=",")), "\n", sep="")) if(any(istime <- (x$ctype == "temporal"))) cat(paste(sum(istime), "-dimensional time coordinates ", paren(paste(nama[istime], collapse=",")), "\n", sep="")) if(any(islocal <- (x$ctype == "local"))) cat(paste(sum(islocal), ngettext(sum(islocal), "column", "columns"), "of local coordinates:", commasep(sQuote(nama[islocal])), "\n")) if(any(ismark <- (x$ctype == "mark"))) cat(paste(sum(ismark), ngettext(sum(ismark), "column", "columns"), "of marks:", commasep(sQuote(nama[ismark])), "\n")) if(!is.null(x$domain)) { cat("Domain:\n\t") print(x$domain) } invisible(NULL) } summary.ppx <- function(object, ...) { print(object, ...) } plot.ppx <- function(x, ...) { xname <- short.deparse(substitute(x)) coo <- coords(x, local=FALSE) dom <- x$domain m <- ncol(coo) if(m == 1) { coo <- coo[,1] ran <- diff(range(coo)) ylim <- c(-1,1) * ran/20 do.call("plot.default", resolve.defaults(list(coo, numeric(length(coo))), list(...), list(asp=1, ylim=ylim, axes=FALSE, xlab="", ylab=""))) axis(1, pos=ylim[1]) } else if(m == 2) { if(is.null(dom)) { # plot x, y coordinates only nama <- names(coo) do.call.matched("plot.default", resolve.defaults(list(x=coo[,1], y=coo[,2], asp=1), list(...), list(main=xname), list(xlab=nama[1], ylab=nama[2]))) } else { add <- resolve.defaults(list(...), list(add=FALSE))$add if(!add) { # plot domain, whatever it is do.call("plot", resolve.defaults(list(dom), list(...), list(main=xname))) } # convert to ppp x2 <- ppp(coo[,1], coo[,2], window=as.owin(dom), marks=as.data.frame(marks(x)), check=FALSE) # invoke plot.ppp return(do.call("plot", resolve.defaults(list(x2), list(add=TRUE), list(...)))) } } else if(m == 3) { # convert to pp3 if(is.null(dom)) dom <- 
box3(range(coo[,1]), range(coo[,2]), range(coo[,3])) x3 <- pp3(coo[,1], coo[,2], coo[,3], dom) # invoke plot.pp3 nama <- names(coo) do.call("plot", resolve.defaults(list(x3), list(...), list(main=xname), list(xlab=nama[1], ylab=nama[2], zlab=nama[3]))) } else stop(paste("Don't know how to plot a general point pattern in", ncol(coo), "dimensions")) return(invisible(NULL)) } "[.ppx" <- function (x, i, ...) { da <- x$data daij <- da[i, , drop=FALSE] out <- list(data=daij, ctype=x$ctype, domain=x$domain) class(out) <- "ppx" return(out) } coords <- function(x, ...) { UseMethod("coords") } coords.ppx <- function(x, ..., spatial=TRUE, temporal=TRUE, local=TRUE) { ctype <- x$ctype chosen <- (ctype == "spatial" & spatial) | (ctype == "temporal" & temporal) | (ctype == "local" & local) as.data.frame(x$data[, chosen]) } coords.ppp <- function(x, ...) { data.frame(x=x$x,y=x$y) } "coords<-" <- function(x, ..., value) { UseMethod("coords<-") } "coords<-.ppp" <- function(x, ..., value) { win <- x$window if(is.null(value)) { # empty pattern return(ppp(window=win)) } value <- as.data.frame(value) if(ncol(value) != 2) stop("Expecting a 2-column matrix or data frame, or two vectors") result <- as.ppp(value, win) marks(result) <- marks(x) return(result) } "coords<-.ppx" <- function(x, ..., spatial=TRUE, temporal=TRUE, local=TRUE, value) { ctype <- x$ctype chosen <- (ctype == "spatial" & spatial) | (ctype == "temporal" & temporal) | (ctype == "local" & local) x$data[, chosen] <- value return(x) } as.hyperframe.ppx <- function(x, ...) { x$data } as.data.frame.ppx <- function(x, ...) { as.data.frame(x$data, ...) } as.matrix.ppx <- function(x, ...) 
{ as.matrix(as.data.frame(x, ...)) } marks.ppx <- function(x, ..., drop=TRUE) { ctype <- x$ctype chosen <- (ctype == "mark") if(!any(chosen)) return(NULL) x$data[, chosen, drop=drop] } "marks<-.ppx" <- function(x, ..., value) { ctype <- x$ctype retain <- (ctype != "mark") coorddata <- x$data[, retain, drop=TRUE] if(is.null(value)) { newdata <- coorddata newctype <- ctype[retain] } else { if(is.matrix(value) && nrow(value) == nrow(x$data)) { # assume matrix is to be treated as data frame value <- as.data.frame(value) } if(!is.data.frame(value) && !is.hyperframe(value)) value <- hyperframe(marks=value) if(is.hyperframe(value) || is.hyperframe(coorddata)) { value <- as.hyperframe(value) coorddata <- as.hyperframe(coorddata) } if(ncol(value) == 0) { newdata <- coorddata newctype <- ctype[retain] } else { newdata <- cbind(coorddata, value) newctype <- factor(c(as.character(ctype[retain]), rep.int("mark", ncol(value))), levels=levels(ctype)) } } out <- list(data=newdata, ctype=newctype, domain=x$domain) class(out) <- "ppx" return(out) } unmark.ppx <- function(X) { marks(X) <- NULL return(X) } markformat.ppx <- function(x) { mf <- x$markformat if(is.null(mf)) mf <- markformat(marks(x)) return(mf) } boxx <- function(..., unitname=NULL) { if(length(list(...)) == 0) stop("No data") ranges <- data.frame(...) nama <- names(list(...)) if(is.null(nama) || !all(nzchar(nama))) names(ranges) <- paste("x", 1:ncol(ranges),sep="") if(nrow(ranges) != 2) stop("Data should be vectors of length 2") if(any(unlist(lapply(ranges, diff)) <= 0)) stop("Illegal range: Second element <= first element") out <- list(ranges=ranges, units=as.units(unitname)) class(out) <- "boxx" return(out) } print.boxx <- function(x, ...) 
{ m <- ncol(x$ranges) cat(paste(m, "-dimensional box:\n", sep="")) bracket <- function(z) paste("[", paste(signif(z, 5), collapse=", "), "]", sep="") v <- paste(unlist(lapply(x$ranges, bracket)), collapse=" x ") s <- summary(unitname(x)) cat(paste(v, s$plural, s$explain, "\n")) invisible(NULL) } unitname.boxx <- function(x) { x$units } "unitname<-.boxx" <- function(x, value) { x$units <- as.units(value) return(x) } unitname.ppx <- function(x) { unitname(x$domain) } "unitname<-.ppx" <- function(x, value) { d <- x$domain unitname(d) <- value x$domain <- d return(x) } sidelengths.boxx <- function(x) { stopifnot(inherits(x, "boxx")) y <- unlist(lapply(x$ranges, diff)) return(y) } volume.boxx <- function(x) { prod(sidelengths(x)) } diameter.boxx <- function(x) { d <- sqrt(sum(sidelengths(x)^2)) return(d) } shortside.boxx <- function(x) { return(min(sidelengths(x))) } eroded.volumes.boxx <- function(x, r) { len <- sidelengths(x) ero <- sapply(as.list(len), function(z, r) { pmax.int(0, z - 2 * r)}, r=r) apply(ero, 1, prod) } runifpointx <- function(n, domain) { stopifnot(inherits(domain, "boxx")) coo <- lapply(domain$ranges, function(ra, n) { runif(n, min=ra[1], max=ra[2]) }, n=n) df <- do.call("data.frame", coo) ppx(df, domain) } rpoisppx <- function(lambda, domain) { stopifnot(inherits(domain, "boxx")) vol <- volume.boxx(domain) stopifnot(is.numeric(lambda) && length(lambda) == 1 && lambda >= 0) n <- rpois(1, lambda * vol) runifpointx(n, domain) } unique.ppx <- function(x, ..., warn=FALSE) { dup <- duplicated(x, ...) if(!any(dup)) return(x) if(warn) warning(paste(sum(dup), "duplicated points were removed"), call.=FALSE) y <- x[!dup] return(y) } duplicated.ppx <- function(x, ...) { dup <- duplicated(as.data.frame(x), ...) 
return(dup) } multiplicity.ppx <- function(x) { mul <- multiplicity(as.data.frame(x)) return(mul) } spatstat/R/triplets.R0000644000176000001440000001227112237642727014356 0ustar ripleyusers# # # triplets.R # # $Revision: 1.12 $ $Date: 2013/04/25 06:37:43 $ # # The triplets interaction # # Triplets() create an instance of the triplets process # [an object of class 'interact'] # # ------------------------------------------------------------------- # Triplets <- local({ DebugTriplets <- FALSE # define triplet potential TripletPotential <- function(X,U,EqualPairs,pars,correction, ...) { if(!all(ok <- correction %in% c("border", "none"))) { nbad <- sum(bad <- !ok) warning(paste(ngettext(nbad, "Correction", "Corrections"), commasep(sQuote(correction[bad])), ngettext(nbad, "is unavailable and was ignored", "are unavailable and were ignored"))) } # check that all points of X are included in U nX <- npoints(X) nU <- npoints(U) XinU <- if(length(EqualPairs) == 0) integer(0) else EqualPairs[,1] missX <- which(table(factor(XinU, levels=1:nX)) == 0) if((nmiss <- length(missX)) > 0) { # add missing points to (the end of) U U <- superimpose(U, X[missX], W=as.owin(X), check=FALSE) EqualPairs <- rbind(EqualPairs, cbind(missX, nU + 1:nmiss)) nU <- nU + nmiss } iXX <- EqualPairs[,1] iXU <- EqualPairs[,2] # construct map from X index to U index mapXU <- integer(nX) mapXU[iXX] <- iXU # construct map from U index to X index mapUX <- rep.int(NA_integer_, nU) mapUX[iXU] <- iXX # logical vector identifying which quadrature points are in X isdata <- rep.int(FALSE, nU) isdata[iXU] <- TRUE # identify all close pairs u, x r <- pars$r cp <- crosspairs(U, X, r, what="indices") if(DebugTriplets) cat(paste("crosspairs at distance", r, "yields", length(cp$i), "pairs\n")) IU <- cp$i J <- cp$j # map X index to U index JU <- mapXU[J] # Each (Xi, Xj) pair will appear twice - eliminate duplicates dupX <- isdata[IU] & isdata[JU] & (IU > JU) retain <- !dupX IU <- IU[retain] JU <- JU[retain] 
if(DebugTriplets) cat(paste(sum(dupX), "duplicate pairs removed\n")) # find all triangles tri <- edges2triangles(IU, JU, nU, friendly=isdata) if(DebugTriplets) cat(paste(nrow(tri), "triangles identified\n")) if(nrow(tri) == 0) { # there are no triangles; return vector of zeroes return(rep.int(0, nU-nmiss)) } # count triangles containing a given quadrature point tcount <- apply(tri, 2, function(x, n) { table(factor(x, levels=1:n)) }, n=nU) tcount <- rowSums(tcount) # select triangles consisting only of data points triX <- matrix(mapUX[tri], nrow=nrow(tri)) isX <- apply(!is.na(triX), 1, all) triX <- triX[isX, , drop=FALSE] # if(nrow(triX) > 0) { # count triangles of data points containing each given data point tXcount <- apply(triX, 2, function(x, n) { table(factor(x, levels=1:n)) }, n=nX) tXcount <- rowSums(tXcount) } else { # there are no triangles of data points tXcount <- rep.int(0, nX) } # answer <- tcount answer[iXU] <- tXcount[iXX] if(DebugTriplets) cat(paste("Max suff stat: data ", max(tXcount), ", dummy ", max(tcount[isdata]), "\n", sep="")) # truncate to original size if(nmiss > 0) answer <- answer[-((nU-nmiss+1):nU)] return(answer) } # set up basic 'triplets' object except for family and parameters BlankTripletsObject <- list( name = "Triplets process", creator = "Triplets", family = "triplet.family", # evaluated later pot = TripletPotential, par = list(r=NULL), # filled in later parnames = "interaction distance", init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=round(gamma,4))) }, valid = function(coeffs, self) { gamma <- ((self$interpret)(coeffs, self))$param$gamma return(is.finite(gamma) && (gamma <= 1)) }, project = 
function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(any(is.na(coeffs))) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(0) else return(r) }, version=NULL # to be added ) class(BlankTripletsObject) <- "interact" # define Triplets function Triplets <- function(r) { instantiate.interact(BlankTripletsObject, list(r=r)) } Triplets }) spatstat/R/summary.quad.R0000755000176000001440000000523412237642727015142 0ustar ripleyusers# # summary.quad.R # # summary() method for class "quad" # # $Revision: 1.6 $ $Date: 2006/03/01 08:25:01 $ # summary.quad <- function(object, ..., checkdup=FALSE) { verifyclass(object, "quad") s <- list( data = summary.ppp(object$data, checkdup=checkdup), dummy = summary.ppp(object$dummy, checkdup=checkdup), param = object$param) doit <- function(ww) { if(length(ww) > 0) return(list(range=range(ww), sum=sum(ww))) else return(NULL) } w <- object$w Z <- is.data(object) s$w <- list(all=doit(w), data=doit(w[Z]), dummy=doit(w[!Z])) class(s) <- "summary.quad" return(s) } print.summary.quad <- function(x, ..., dp=3) { cat("Quadrature scheme = data + dummy + weights\n") pa <- x$param if(is.null(pa)) cat("created by an unknown function.\n") cat("Data pattern:\n") print(x$data, dp=dp) cat("\n\nDummy quadrature points:\n") # How they were computed if(!is.null(pa)) { dumpar <- pa$dummy if(is.null(dumpar)) cat("(provided manually)\n") else if(!is.null(dumpar$nd)) cat(paste("(", dumpar$nd[1], "x", dumpar$nd[2], "grid, plus 4 corner points)\n")) else cat("(rule for creating dummy points not understood)") } # Description of them print(x$dummy, dp=dp) cat("\n\nQuadrature weights:\n") # How they were computed if(!is.null(pa)) { wpar <- pa$weight if(is.null(wpar)) cat("(values provided manually)\n") else if(!is.null(wpar$method)) { if(wpar$method=="grid") { cat(paste("(counting weights based on", wpar$ntile[1], "x", wpar$ntile[2], 
"array of rectangular tiles)\n")) } else if(wpar$method=="dirichlet") { cat(paste("(Dirichlet tile areas, computed", if(wpar$exact) "exactly" else "by pixel approximation", ")\n")) } else cat("(rule for creating dummy points not understood)\n") } } # Description of them doit <- function(ww, blah) { cat(paste(blah, ":\n\t", sep="")) if(is.null(ww)) { cat("(None)\n") return() } cat(paste("range: ", "[", paste(signif(ww$range, digits=dp), collapse=", "), "]\t", "total: ", signif(ww$sum, digits=dp), "\n", sep="")) } doit(x$w$all, "All weights") doit(x$w$data, "Weights on data points") doit(x$w$dummy, "Weights on dummy points") return(invisible(NULL)) } print.quad <- function(x, ...) { cat("Quadrature scheme\n") cat(paste(x$data$n, "data points, ", x$dummy$n, "dummy points\n")) cat(paste("Total weight ", sum(x$w), "\n")) return(invisible(NULL)) } spatstat/R/linearpcfmulti.R0000644000176000001440000002324412237642727015530 0ustar ripleyusers# # linearpcfmulti.R # # $Revision: 1.3 $ $Date: 2013/01/18 07:56:49 $ # # pair correlation functions for multitype point pattern on linear network # # linearpcfdot <- function(X, i, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points result <- linearpcfmulti(X, I, J, r=r, correction=correction, ...) 
iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(linearpcf[i ~ dot](r), list(i=iname)), paste("linearpcf[", iname, "~ symbol(\"\\267\")]"), new.yexp=substitute(linearpcf[i ~ symbol("\267")](r), list(i=iname))) return(result) } linearpcfcross <- function(X, i, j, r=NULL, ..., correction="Ang") { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j) || is.null(j)) j <- lev[2] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { result <- linearpcf(X[marx == i], r=r, correction=correction, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearpcfmulti(X, I, J, r=r, correction=correction, ...) } # rebrand iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(linearpcfcross[i,j](r), list(i=iname,j=jname)), sprintf("linearpcf[list(%s,%s)]", iname, jname), new.yexp=substitute(linearpcf[list(i,j)](r), list(i=iname,j=jname))) return(result) } linearpcfmulti <- function(X, I, J, r=NULL, ..., correction="Ang") { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern sX <- summary(X) np <- sX$npoints lengthL <- sX$totlength # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # if(!any(J)) stop("no points satisfy J") nI <- sum(I) nJ <- sum(J) nIandJ <- sum(I & J) lambdaI <- nI/lengthL lambdaJ <- nJ/lengthL # compute pcf denom <- (nI * nJ - nIandJ)/lengthL g <- linearPCFmultiEngine(X, I, J, r=r, denom=denom, correction=correction, ...) 
# set appropriate y axis label switch(correction, Ang = { ylab <- quote(pcfmulti[L](r)) fname <- "pcfmulti[L]" }, none = { ylab <- quote(pcfmulti[net](r)) fname <- "pcfmulti[net]" }) g <- rebadge.fv(g, new.ylab=ylab, new.fname=fname) return(g) } # ................ inhomogeneous ............................ linearpcfdot.inhom <- function(X, i, lambdaI, lambdadot, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) I <- (marx == i) J <- rep(TRUE, npoints(X)) # i.e. all points # for better error messages lambdadot <- getlambda.lpp(lambdadot, X, ...) # compute result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdadot, r=r, correction=correction, normalise=normalise, ...) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(linearpcf[inhom, i ~ dot](r), list(i=iname)), paste("linearpcf[list(inhom,", iname, "~ symbol(\"\\267\"))]"), new.yexp=substitute(linearpcf[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) return(result) } linearpcfcross.inhom <- function(X, i, j, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i)) i <- lev[1] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j)) j <- lev[2] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) # if(i == j) { I <- (marx == i) result <- linearpcfinhom(X[I], lambda=lambdaI, r=r, correction=correction, normalise=normalise, ...) } else { I <- (marx == i) J <- (marx == j) result <- linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ, r=r, correction=correction, normalise=normalise, ...) 
} # rebrand iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(linearpcf[inhom,i,j](r), list(i=iname,j=jname)), sprintf("linearpcf[list(inhom,%s,%s)]", iname, jname), new.yexp=substitute(linearpcf[list(inhom,i,j)](r), list(i=iname,j=jname))) return(result) } linearpcfmulti.inhom <- function(X, I, J, lambdaI, lambdaJ, r=NULL, ..., correction="Ang", normalise=TRUE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern sX <- summary(X) np <- sX$npoints lengthL <- sX$totlength # validate I, J if(!is.logical(I) || !is.logical(J)) stop("I and J must be logical vectors") if(length(I) != np || length(J) != np) stop(paste("The length of I and J must equal", "the number of points in the pattern")) if(!any(I)) stop("no points satisfy I") # validate lambda vectors lambdaI <- getlambda.lpp(lambdaI, X[I], ...) lambdaJ <- getlambda.lpp(lambdaJ, X[J], ...) # compute pcf weightsIJ <- outer(1/lambdaI, 1/lambdaJ, "*") denom <- if(!normalise) lengthL else sum(1/lambdaI) g <- linearPCFmultiEngine(X, I, J, r=r, reweight=weightsIJ, denom=denom, correction=correction, ...) # set appropriate y axis label switch(correction, Ang = { ylab <- quote(pcfmulti[L](r)) fname <- "pcfmulti[L]" }, none = { ylab <- quote(pcfmulti[net](r)) fname <- "pcfmulti[net]" }) g <- rebadge.fv(g, new.ylab=ylab, new.fname=fname) return(g) } # .............. internal ............................... 
linearPCFmultiEngine <- function(X, I, J, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", showworking=FALSE) { # extract info about pattern sX <- summary(X) np <- sX$npoints lengthL <- sX$totlength # extract linear network L <- X$domain # extract points XP <- as.ppp(X) W <- as.owin(XP) # determine r values rmaxdefault <- 0.98 * circumradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # if(np < 2) { # no pairs to count: return zero function zeroes <- rep(0, length(r)) df <- data.frame(r = r, est = zeroes) g <- fv(df, "r", substitute(linearpcf(r), NULL), "est", . ~ r, c(0, rmax), c("r", "%s(r)"), c("distance argument r", "estimated %s"), fname = "linearpcf") return(g) } # nI <- sum(I) nJ <- sum(J) whichI <- which(I) whichJ <- which(J) clash <- I & J has.clash <- any(clash) # compute pairwise distances if(exists("crossdist.lpp")) { DIJ <- crossdist(X[I], X[J], check=FALSE) if(has.clash) { # exclude pairs of identical points from consideration Iclash <- which(clash[I]) Jclash <- which(clash[J]) DIJ[cbind(Iclash,Jclash)] <- Inf } } else { D <- pairdist(X) diag(D) <- Inf DIJ <- D[I, J] } #--- compile into pair correlation function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) g <- compilepcf(DIJ, r, denom=denom, check=FALSE) unitname(g) <- unitname(X) return(g) } if(correction == "none") edgewt <- 1 else { # inverse m weights (Ang's correction) # compute m[i,j] m <- matrix(1, nI, nJ) XPI <- XP[I] if(!has.clash) { for(k in seq_len(nJ)) { j <- whichJ[k] m[,k] <- countends(L, XPI, DIJ[, k]) } } else { # don't count identical pairs for(k in seq_len(nJ)) { j <- whichJ[k] inotj <- (whichI != j) m[inotj, k] <- countends(L, XPI[inotj], DIJ[inotj, k]) } } edgewt <- 1/m } # compute pcf wt <- if(!is.null(reweight)) edgewt * reweight else edgewt g <- compilepcf(DIJ, r, weights=wt, denom=denom, check=FALSE, ...) 
# tack on theoretical value g <- bind.fv(g, data.frame(theo=rep(1,length(r))), "%s[theo](r)", "theoretical Poisson %s") unitname(g) <- unitname(X) fvnames(g, ".") <- rev(fvnames(g, ".")) # show working if(showworking) attr(g, "working") <- list(DIJ=DIJ, wt=wt) return(g) } spatstat/R/rotate.R0000755000176000001440000000413512237642727014011 0ustar ripleyusers# # rotate.S # # $Revision: 1.18 $ $Date: 2012/10/10 01:20:23 $ # rotxy <- function(X, angle=pi/2) { co <- cos(angle) si <- sin(angle) list(x = co * X$x - si * X$y, y = si * X$x + co * X$y) } rotxypolygon <- function(p, angle=pi/2) { p[c("x","y")] <- rotxy(p, angle=angle) # area and hole status are invariant under rotation return(p) } rotate <- function(X, ...) { UseMethod("rotate") } rotate.owin <- function(X, angle=pi/2, ..., rescue=TRUE) { verifyclass(X, "owin") switch(X$type, rectangle={ # convert rectangle to polygon P <- owin(X$xrange, X$yrange, poly= list(x=X$xrange[c(1,2,2,1)], y=X$yrange[c(1,1,2,2)]), unitname=unitname(X)) # call polygonal case return(rotate.owin(P, angle, rescue=rescue)) }, polygonal={ # First rotate the polygonal boundaries bdry <- lapply(X$bdry, rotxypolygon, angle=angle) # wrap up W <- owin(poly=bdry, check=FALSE, unitname=unitname(X)) if(rescue) W <- rescue.rectangle(W) return(W) }, mask={ newframe <- bounding.box.xy(rotxy(corners(X), angle)) W <- if(length(list(...)) > 0) as.mask(newframe, ...) else as.mask(newframe, eps=with(X, min(xstep, ystep))) pixelxy <- raster.xy(W) xybefore <- rotxy(pixelxy, -angle) W$m[] <- with(xybefore, inside.owin(x, y, X)) W <- intersect.owin(W, bounding.box(W)) if(rescue) W <- rescue.rectangle(W) unitname(W) <- unitname(X) return(W) }, stop("Unrecognised window type") ) } rotate.ppp <- function(X, angle=pi/2, ...) { verifyclass(X, "ppp") r <- rotxy(X, angle) w <- rotate.owin(X$window, angle, ...) return(ppp(r$x, r$y, window=w, marks=marks(X, dfok=TRUE), check=FALSE)) } rotate.im <- function(X, angle=pi/2, ...) 
{ co <- cos(angle) si <- sin(angle) m <- matrix(c(co,si,-si,co), nrow=2, ncol=2) affine(X, mat=m) } spatstat/R/unnormdensity.R0000644000176000001440000000451512237642727015430 0ustar ripleyusers# # unnormdensity.R # # $Revision: 1.3 $ $Date: 2011/10/22 04:47:10 $ # unnormdensity <- function(x, ..., weights=NULL) { if(any(!nzchar(names(list(...))))) stop("All arguments must be named (tag=value)") if(is.null(weights)) { out <- do.call.matched("density.default", c(list(x=x), list(...))) } else if(all(weights == 0)) { # result is zero out <- do.call.matched("density.default", c(list(x=x), list(...))) out$y <- 0 * out$y } else if(all(weights >= 0)) { # all masses are nonnegative w <- weights totmass <- sum(w) out <- do.call.matched("density.default", c(list(x=x), list(...), list(weights=w/totmass))) out$y <- out$y * totmass } else if(all(weights <= 0)) { # all masses are nonpositive w <- (- weights) totmass <- sum(w) out <- do.call.matched("density.default", c(list(x=x), list(...), list(weights=w/totmass))) out$y <- out$y * (- totmass) } else { # mixture of positive and negative masses w <- weights wabs <- abs(w) wpos <- pmax.int(0, w) wneg <- - pmin.int(0, w) # determine bandwidth using absolute masses dabs <- do.call.matched("density.default", c(list(x=x), list(...), list(weights=wabs/sum(wabs)))) bw <- dabs$bw # compute densities for positive and negative masses separately outpos <- do.call.matched("density.default", resolve.defaults(list(x=x), list(bw=bw, adjust=1), list(weights=wpos/sum(wpos)), list(...), .StripNull=TRUE)) outneg <- do.call.matched("density.default", resolve.defaults(list(x=x), list(bw=bw, adjust=1), list(weights=wneg/sum(wneg)), list(...), .StripNull=TRUE)) # combine out <- outpos out$y <- sum(wpos) * outpos$y - sum(wneg) * outneg$y } out$call <- match.call() return(out) } spatstat/R/rknn.R0000755000176000001440000000203212237642727013455 0ustar ripleyusers# # rknn.R # # Distribution of distance to k-th nearest point in d dimensions # (Poisson 
process of intensity lambda) # # $Revision: 1.2 $ $Date: 2009/12/31 01:33:44 $ # dknn <- function(x, k=1, d=2, lambda=1) { validposint(k, "dknn") validposint(d, "dknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) y <- dgamma(x^d, shape=k, rate=lambda * alpha.d) y <- y * d * x^(d-1) return(y) } pknn <- function(q, k=1, d=2, lambda=1) { validposint(k, "pknn") validposint(d, "pknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) p <- pgamma(q^d, shape=k, rate=lambda * alpha.d) return(p) } qknn <- function(p, k=1, d=2, lambda=1) { validposint(k, "qknn") validposint(d, "qknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) y <- qgamma(p, shape=k, rate=lambda * alpha.d) z <- y^(1/d) return(z) } rknn <- function(n, k=1, d=2, lambda=1) { validposint(k, "rknn") validposint(d, "rknn") alpha.d <- (2 * pi^(d/2))/(d * gamma(d/2.)) y <- rgamma(n, shape=k, rate=lambda * alpha.d) x <- y^(1/d) return(x) } spatstat/R/Kmulti.R0000755000176000001440000003013612237642727013760 0ustar ripleyusers# # Kmulti.S # # Compute estimates of cross-type K functions # for multitype point patterns # # $Revision: 5.42 $ $Date: 2013/04/25 06:37:43 $ # # # -------- functions ---------------------------------------- # Kcross() cross-type K function K_{ij} # between types i and j # # Kdot() K_{i\bullet} # between type i and all points regardless of type # # Kmulti() (generic) # # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # including 'marks' vector # r distance values at which to compute K # # -------- standard output ------------------------------ # A data frame with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # using standard formula (denominator = count of points) # # bord.modif: K function estimated by border method # using modified formula # 
(denominator = area of eroded window # # ------------------------------------------------------------------------ "Lcross" <- function(X, i, j, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] if(missing(j)) j <- levels(marks(X))[2] K <- Kcross(X, i, j, ...) L <- eval.fv(sqrt(K/pi)) # relabel the fv object iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) L <- rebadge.fv(L, substitute(L[i,j](r), list(i=iname,j=jname)), sprintf("L[list(%s,%s)]", iname, jname), new.yexp=substitute(L[list(i,j)](r), list(i=iname,j=jname))) attr(L, "labl") <- attr(K, "labl") return(L) } "Ldot" <- function(X, i, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] K <- Kdot(X, i, ...) L <- eval.fv(sqrt(K/pi)) # relabel the fv object iname <- make.parseable(paste(i)) L <- rebadge.fv(L, substitute(L[i ~ dot](r), list(i=iname)), paste("L[", iname, "~ symbol(\"\\267\")]"), new.yexp=substitute(L[i ~ symbol("\267")](r), list(i=iname))) attr(L, "labl") <- attr(K, "labl") return(L) } "Kcross" <- function(X, i, j, r=NULL, breaks=NULL, correction =c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) if(!any(I)) stop(paste("No points have mark i =", i)) if(i == j) { result <- Kest(X[I], r=r, breaks=breaks, correction=correction, ..., ratio=ratio) } else { J <- (marx == j) if(!any(J)) stop(paste("No points have mark j =", j)) result <- Kmulti(X, I, J, r=r, breaks=breaks, correction=correction, ..., ratio=ratio) } iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(Kcross[i,j](r), list(i=iname,j=jname)), sprintf("K[list(%s,%s)]", iname, 
jname), new.yexp=substitute(K[list(i,j)](r), list(i=iname,j=jname))) return(result) } "Kdot" <- function(X, i, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. all points if(!any(I)) stop(paste("No points have mark i =", i)) result <- Kmulti(X, I, J, r=r, breaks=breaks, correction=correction, ..., ratio=ratio) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(K[i ~ dot](r), list(i=iname)), paste("K[", iname, "~ symbol(\"\\267\")]"), new.yexp=substitute(K[i ~ symbol("\267")](r), list(i=iname))) return(result) } "Kmulti"<- function(X, I, J, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate") , ..., ratio=FALSE) { verifyclass(X, "ppp") npts <- npoints(X) x <- X$x y <- X$y W <- X$window area <- area.owin(W) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") if(!any(I)) stop("no points belong to subset I") if(!any(J)) stop("no points belong to subset J") nI <- sum(I) nJ <- sum(J) lambdaI <- nI/area lambdaJ <- nJ/area # r values rmaxdefault <- rmax.rule("K", W, lambdaJ) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # recommended range of r values alim <- c(0, 
min(rmax, rmaxdefault)) # this will be the output data frame # It will be given more columns later K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", substitute(Kmulti(r), NULL), "theo", , alim, c("r","{%s^{pois}}(r)"), desc, fname="K[multi]") # save numerator and denominator? if(ratio) { denom <- lambdaI * lambdaJ * area numK <- eval.fv(denom * K) denK <- eval.fv(denom + K * 0) attributes(numK) <- attributes(denK) <- attributes(K) attr(numK, "desc")[2] <- "numerator for theoretical Poisson %s" attr(denK, "desc")[2] <- "denominator for theoretical Poisson %s" } # find close pairs of points XI <- X[I] XJ <- X[J] close <- crosspairs(XI, XJ, max(r)) # close$i and close$j are serial numbers in XI and XJ respectively; # map them to original serial numbers in X orig <- seq_len(npts) imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$xi <- close$xi[ok] close$yi <- close$yi[ok] close$xj <- close$xj[ok] close$yj <- close$yj[ok] close$dx <- close$dx[ok] close$dy <- close$dy[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dcloseIJ <- close$d icloseI <- close$i jcloseJ <- close$j # Compute estimates by each of the selected edge corrections. if(any(correction == "none")) { # uncorrected! 
wh <- whist(dcloseIJ, breaks$val) # no weights numKun <- cumsum(wh) denKun <- lambdaI * lambdaJ * area Kun <- numKun/denKun K <- bind.fv(K, data.frame(un=Kun), "hat(%s^{un})(r)", "uncorrected estimate of %s", "un") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(un=numKun), "hat(%s)[un](r)", "numerator of uncorrected estimate of %s", "un") denK <- bind.fv(denK, data.frame(un=denKun), "hat(%s)[un](r)", "denominator of uncorrected estimate of %s", "un") } } if(any(correction == "border" | correction == "bord.modif")) { # border method # distance to boundary from each point of type I bI <- bdist.points(XI) # distance to boundary from first element of each (i, j) pair bcloseI <- bI[icloseI] # apply reduced sample algorithm RS <- Kount(dcloseIJ, bcloseI, bI, breaks) if(any(correction == "bord.modif")) { denom.area <- eroded.areas(W, r) numKbm <- RS$numerator denKbm <- denom.area * nI * nJ Kbm <- numKbm/denKbm K <- bind.fv(K, data.frame(bord.modif=Kbm), "hat(%s^{bordm})(r)", "modified border-corrected estimate of %s", "bord.modif") if(ratio) { # save numerator and denominator numK <- bind.fv(numK, data.frame(bord.modif=numKbm), "hat(%s)[bordm](r)", "numerator of modified border-corrected estimate of %s", "bord.modif") denK <- bind.fv(denK, data.frame(bord.modif=denKbm), "hat(%s)[bordm](r)", "denominator of modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "border")) { numKb <- RS$numerator denKb <- lambdaJ * RS$denom.count Kb <- numKb/denKb K <- bind.fv(K, data.frame(border=Kb), "hat(%s^{bord})(r)", "border-corrected estimate of %s", "border") if(ratio) { numK <- bind.fv(numK, data.frame(border=numKb), "hat(%s)[bord](r)", "numerator of border-corrected estimate of %s", "border") denK <- bind.fv(denK, data.frame(border=denKb), "hat(%s)[bord](r)", "denominator of border-corrected estimate of %s", "border") } } } if(any(correction == "translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], 
XJ[jcloseJ], paired=TRUE) wh <- whist(dcloseIJ, breaks$val, edgewt) numKtrans <- cumsum(wh) denKtrans <- lambdaI * lambdaJ * area Ktrans <- numKtrans/denKtrans rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "hat(%s^{trans})(r)", "translation-corrected estimate of %s", "trans") if(ratio) { numK <- bind.fv(numK, data.frame(trans=numKtrans), "hat(%s)[trans](r)", "numerator of translation-corrected estimate of %s", "trans") denK <- bind.fv(denK, data.frame(trans=denKtrans), "hat(%s)[trans](r)", "denominator of translation-corrected estimate of %s", "trans") } } if(any(correction == "isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dcloseIJ, ncol=1)) wh <- whist(dcloseIJ, breaks$val, edgewt) numKiso <- cumsum(wh) denKiso <- lambdaI * lambdaJ * area Kiso <- numKiso/denKiso rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "hat(%s^{iso})(r)", "Ripley isotropic correction estimate of %s", "iso") if(ratio) { numK <- bind.fv(numK, data.frame(iso=numKiso), "hat(%s)[iso](r)", "numerator of Ripley isotropic correction estimate of %s", "iso") denK <- bind.fv(denK, data.frame(iso=denKiso), "hat(%s)[iso](r)", "denominator of Ripley isotropic correction estimate of %s", "iso") } } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) if(ratio) { # finish up numerator & denominator formula(numK) <- formula(denK) <- . 
~ r unitname(numK) <- unitname(denK) <- unitname(K) # tack on to result K <- rat(K, numK, denK, check=FALSE) } return(K) } spatstat/R/areainter.R0000755000176000001440000002341612237642727014470 0ustar ripleyusers# # # areainter.R # # $Revision: 1.29 $ $Date: 2013/09/23 01:19:58 $ # # The area interaction # # AreaInter() create an instance of the area-interaction process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # AreaInter <- local({ # area-interaction conditional intensity potential # corresponds to potential -C(x) = n(x) - A(x)/\pi r^2 areapot <- function(X,U,EqualPairs,pars,correction, ..., W=as.owin(X)) { uhoh <- !(correction %in% c("border", "none")) if(any(uhoh)) { nuh <- sum(uhoh) warning(paste(ngettext(nuh, "Correction", "Corrections"), commasep(sQuote(correction[uhoh])), ngettext(nuh, "is not supported and was ignored", "are not supported and were ignored"))) } r <- pars$r if(is.null(r)) stop("internal error: r parameter not found") n <- U$n areas <- numeric(n) dummies <- !(seq_len(n) %in% EqualPairs[,2]) if(sum(dummies) > 0) areas[dummies] <- areaGain(U[dummies], X, r, W=W) ii <- EqualPairs[,1] jj <- EqualPairs[,2] areas[jj] <- areaLoss(X, r, subset=ii, W=W) return(1 - areas/(pi * r^2)) } # template object without family, par, version BlankAI <- list( name = "Area-interaction process", creator = "AreaInter", family = "inforder.family", # evaluated later pot = areapot, par = list(r = NULL), # to be filled in parnames = "disc radius", init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("disc radius r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { logeta <- as.numeric(coeffs[1]) eta <- exp(logeta) return(list(param=list(eta=eta), inames="interaction parameter eta", printable=round(eta,4))) }, valid = function(coeffs, self) { eta <- ((self$interpret)(coeffs, self))$param$eta 
return(is.finite(eta)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(any(is.na(coeffs))) return(2 * r) logeta <- coeffs[1] if(abs(logeta) <= epsilon) return(0) else return(2 * r) }, delta2 = function(X, inte, correction) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(!(correction %in% c("border", "none"))) return(NULL) r <- inte$par$r areadelta2(X, r) }, version=NULL # to be added ) class(BlankAI) <- "interact" AreaInter <- function(r) { instantiate.interact(BlankAI, list(r=r)) } AreaInter }) areadelta2 <- local({ areadelta2 <- function(X, r, ...) { # Sufficient statistic for second order conditional intensity # Area-interaction model if(is.ppp(X)) return(areadelppp(X, r, ...)) else if(inherits(X, "quad")) return(areadelquad(X, r)) else stop("internal error: X should be a ppp or quad object") } areadelppp <- function(X, r, algorithm=c("C", "nncross", "nnmap")) { # Evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. 
h(X[i]|X) - h(X[i]|X[-j]) # where h is first order cif statistic algorithm <- match.arg(algorithm) nX <- npoints(X) result <- matrix(0, nX, nX) if(nX < 2) return(result) if(algorithm == "C") { # use special purpose C routine # called once for each interacting pair of points xx <- X$x yy <- X$y cl <- closepairs(X, 2 * r, what="indices", ordered=FALSE) I <- cl$i J <- cl$j eps <- r/spatstat.options("ngrid.disc") for(k in seq_along(I)) { i <- I[k] j <- J[k] # all neighbours of i Ki <- union(J[I==i], I[J==i]) # all neighbours of j Kj <- union(J[I==j], I[J==j]) # relevant neighbours K <- setdiff(union(Ki, Kj), c(i,j)) # call C code DUP <- spatstat.options("dupC") z <- .C("delta2area", xa = as.double(xx[i]), ya = as.double(yy[i]), xb = as.double(xx[j]), yb = as.double(yy[j]), nother = as.integer(length(K)), xother = as.double(xx[K]), yother = as.double(yy[K]), radius = as.double(r), epsilon = as.double(eps), pixcount = as.integer(integer(1)), DUP = DUP) result[i,j] <- result[j,i] <- z$pixcount } # normalise result <- result * (eps^2)/(pi * r^2) return(result) } # remove any non-interacting points relevant <- (nndist(X) <= 2 * r) if(!all(relevant)) { answer <- matrix(0, nX, nX) if(any(relevant)) { # call self on subset Dok <- areadelppp(X[relevant], r, algorithm) answer[relevant,relevant] <- Dok } return(answer) } # .............. algorithm using interpreted code ........... 
# sort pattern in increasing order of x sortX <- (algorithm == "nnmap") if(sortX) { oX <- fave.order(X$x) X <- X[oX] } # area calculation may be restricted to window W for efficiency W <- as.owin(X) U <- as.rectangle(W) # decide pixel resolution eps <- r/spatstat.options("ngrid.disc") npix <- prod(ceiling(sidelengths(U)/eps)) if(npix <= 2^20) { # do it all in one go tile <- list(NULL) } else { # divide into rectangular tiles B <- as.rectangle(W) ntile0 <- ceiling(npix/(2^20)) tile0area <- area.owin(B)/ntile0 tile0side <- sqrt(tile0area) nx <- ceiling(sidelengths(B)[1]/tile0side) ny <- ceiling(sidelengths(B)[2]/tile0side) tile <- tiles(quadrats(B, nx, ny)) } result <- matrix(0, nX, nX) for(i in seq_len(length(tile))) { # form pixel grid Ti <- tile[[i]] Wi <- if(is.null(Ti)) W else intersect.owin(W, Ti) if(algorithm == "nncross") { # Trusted, slow algorithm using nncross Z <- as.mask(Wi, eps=eps) G <- as.ppp(raster.xy(Z), U, check=FALSE) # compute 3 nearest neighbours in X of each grid point v <- nncross(G, X, k=1:3) # select pixels which have exactly 2 neighbours within distance r ok <- with(v, dist.3 > r & dist.2 <= r) if(any(ok)) { v <- v[ok, , drop=FALSE] # accumulate pixel counts -> areas counts <- with(v, table(i=factor(which.1, levels=1:nX), j=factor(which.2, levels=1:nX))) pixarea <- with(Z, xstep * ystep) result <- result + pixarea * (counts + t(counts)) } } else { # Faster algorithm using nnmap # compute 3 nearest neighbours in X of each grid point stuff <- nnmap(X, k=1:3, W=Wi, eps=eps, is.sorted.X=TRUE, sortby="x", outputarray=TRUE) dist.2 <- stuff$dist[2,,] dist.3 <- stuff$dist[3,,] which.1 <- stuff$which[1,,] which.2 <- stuff$which[2,,] ok <- (dist.3 > r & dist.2 <= r) if(any(ok)) { which.1 <- as.vector(which.1[ok]) which.2 <- as.vector(which.2[ok]) counts <- table(i=factor(which.1, levels=1:nX), j=factor(which.2, levels=1:nX)) pixarea <- attr(stuff, "pixarea") result <- result + pixarea * (counts + t(counts)) } } } if(sortX) { # map back to original 
ordering
      result[oX, oX] <- result
    }
    # normalise
    result <- result/(pi * r^2)
    return(result)
  }

  areadelquad <- function(Q, r) {
    # Sufficient statistic for second order conditional intensity
    # Area-interaction model, evaluated at quadrature points.
    #
    # Q: quadrature scheme (object of class 'quad')
    # r: disc radius of the area-interaction model
    #
    # Evaluate \Delta_{u_j} \Delta_{u_i} S(x) for quadrature points
    # answer is area(b(u[i],r) \cap b(u[j],r)\setminus \bigcup_k b(x[k],r))
    # where k ranges over all indices that are not equivalent to u[i,j]
    #
    # Note: signature was function(Q, D, r), which did not match the
    # call areadelta2 makes, areadelquad(X, r); the unused 'D' argument
    # has been removed so the positional call binds correctly.
    U <- union.quad(Q)
    Z <- is.data(Q)
    nU <- npoints(U)
    xx <- U$x
    yy <- U$y
    # identify all close pairs of quadrature points
    cl <- closepairs(U, 2 * r, what="indices")
    # bug fix: indices are in 'cl'; previous code referenced an
    # undefined object 'b', causing an unconditional error
    I <- cl$i
    J <- cl$j
    # find neighbours in X of each quadrature point
    zJ <- Z[J]
    neigh <- split(J[zJ], factor(I[zJ], levels=1:nU))
    #
    result <- matrix(0, nU, nU)
    eps <- r/spatstat.options("ngrid.disc")
    DUP <- spatstat.options("dupC")
    #
    for(k in seq_along(I)) {
      i <- I[k]
      j <- J[k]
      # all points of X close to U[i]
      Ki <- neigh[[i]]
      # all points of X close to U[j]
      Kj <- neigh[[j]]
      # relevant neighbours
      K <- setdiff(union(Ki, Kj), c(i,j))
      # call C code
      z <- .C("delta2area",
              xa = as.double(xx[i]),
              ya = as.double(yy[i]),
              xb = as.double(xx[j]),
              yb = as.double(yy[j]),
              nother = as.integer(length(K)),
              xother = as.double(xx[K]),
              yother = as.double(yy[K]),
              radius = as.double(r),
              epsilon = as.double(eps),
              pixcount = as.integer(integer(1)),
              DUP = DUP)
      result[i,j] <- z$pixcount
    }
    # normalise pixel counts to areas relative to disc area
    result <- result * (eps^2)/(pi * r^2)
    return(result)
  }

  areadelta2
})
spatstat/R/beginner.R0000644000176000001440000000054612237642727014303 0ustar ripleyusers#
# beginner.R
#
# Helpful information for beginners
#
# $Revision: 1.1 $ $Date: 2013/08/02 05:40:16 $
#

beginner <- function(package="spatstat") {
  package <- as.character(substitute(package))
  RShowDoc("BEGINNER.txt", type="txt", package=package)
  return(invisible(NULL))
}

class(beginner) <- "autoexec"

print.autoexec <- function(x, ...)
{ x() } spatstat/R/marks.R0000755000176000001440000002301612242543071013613 0ustar ripleyusers# # marks.R # # $Revision: 1.35 $ $Date: 2013/11/18 13:50:49 $ # # stuff for handling marks # # marks <- function(x, ...) { UseMethod("marks") } marks.default <- function(x, ...) { NULL } # The 'dfok' switch is temporary # while we convert the code to accept data frames of marks marks.ppp <- function(x, ..., dfok=TRUE) { ma <- x$marks if((is.data.frame(ma) || is.matrix(ma)) && !dfok) stop("Sorry, not implemented when the marks are a data frame") return(ma) } # ------------------------------------------------------------------ "marks<-" <- function(x, ..., value) { UseMethod("marks<-") } "marks<-.ppp" <- function(x, ..., dfok=TRUE, value) { np <- npoints(x) m <- value switch(markformat(m), none = { return(unmark(x)) }, vector = { # vector of marks if(length(m) == 1) m <- rep.int(m, np) else if(np == 0) m <- rep.int(m, 0) # ensures marked pattern obtained else if(length(m) != np) stop("number of points != number of marks") marx <- m }, dataframe = { if(!dfok) stop("Sorry, data frames of marks are not yet implemented") m <- as.data.frame(m) # data frame of marks if(ncol(m) == 0) { # no mark variables marx <- NULL } else { # marks to be attached if(nrow(m) == np) { marx <- m } else { # lengths do not match if(nrow(m) == 1 || np == 0) { # replicate data frame marx <- as.data.frame(lapply(as.list(m), function(x, k) { rep.int(x, k) }, k=np)) } else stop("number of rows of data frame != number of points") } } }, hyperframe = stop("Hyperframes of marks are not supported in ppp objects; use ppx"), stop("Format of marks is not understood") ) # attach/overwrite marks Y <- ppp(x$x,x$y,window=x$window,marks=marx, check=FALSE) return(Y) } "%mark%" <- setmarks <- function(x,value) { marks(x) <- value return(x) } # ------------------------------------------------- markformat <- function(x) { UseMethod("markformat") } markformat.ppp <- function(x) { mf <- x$markformat if(is.null(mf)) mf <- 
markformat(marks(x)) return(mf) } markformat.default <- function(x) { if(is.null(x)) return("none") if(is.vector(x) || is.factor(x)) return("vector") if(is.null(dim(x)) && is.atomic(x)) return("vector") if(is.data.frame(x) || is.matrix(x)) return("dataframe") if(is.hyperframe(x)) return("hyperframe") if(inherits(x, "listof")) return("listof") stop("Mark format not understood") } # ------------------------------------------------------------------ "is.marked" <- function(X, ...) { UseMethod("is.marked") } "is.marked.ppp" <- function(X, na.action="warn", ...) { marx <- marks(X, ...) if(is.null(marx)) return(FALSE) if((length(marx) > 0) && any(is.na(marx))) { gripe <- paste("some mark values are NA in the point pattern", short.deparse(substitute(X))) switch(na.action, warn = warning(gripe, call.=FALSE), fatal = stop(gripe, call.=FALSE), ignore = {} ) } return(TRUE) } "is.marked.default" <- function(...) { return(!is.null(marks(...))) } # ------------------------------------------------------------------ is.multitype <- function(X, ...) { UseMethod("is.multitype") } is.multitype.default <- function(X, ...) { m <- marks(X) if(is.null(m)) return(FALSE) if(!is.null(dim(m))) { # should have a single column if(dim(m)[2] != 1) return(FALSE) m <- m[,1,drop=TRUE] } return(is.factor(m)) } is.multitype.ppp <- function(X, na.action="warn", ...) 
{
  marx <- marks(X, dfok=TRUE)
  if(is.null(marx))
    return(FALSE)
  if((is.data.frame(marx) || is.hyperframe(marx)) && ncol(marx) > 1)
    return(FALSE)
  if(!is.factor(marx))
    return(FALSE)
  if((length(marx) > 0) && any(is.na(marx)))
    switch(na.action,
           warn = {
             warning(paste("some mark values are NA in the point pattern",
                           short.deparse(substitute(X))))
           },
           fatal = {
             return(FALSE)
           },
           ignore = {}
           )
  return(TRUE)
}

# ------------------------------------------------------------------

unmark <- function(X) {
  UseMethod("unmark")
}

unmark.ppp <- function(X) {
  X$marks <- NULL
  X$markformat <- "none"
  return(X)
}

unmark.splitppp <- function(X) {
  Y <- lapply(X, unmark.ppp)
  class(Y) <- c("splitppp", class(Y))
  return(Y)
}

##### utility functions for subsetting & combining marks #########

# extract the marks for a subset of points, respecting mark format
marksubset <- function(x, index, format=NULL) {
  if(is.null(format)) format <- markformat(x)
  switch(format,
         none={return(NULL)},
         listof=,
         vector={return(x[index])},
         hyperframe=,
         dataframe={return(x[index,,drop=FALSE])},
         stop("Internal error: unrecognised format of marks"))
}

"%msub%" <- marksubsetop <- function(x,i) { marksubset(x, i) }

# replicate each mark entry n times, respecting mark format
"%mrep%" <- markreplicateop <- function(x,n) {
  format <- markformat(x)
  switch(format,
         none={return(NULL)},
         listof=,
         vector={ return(rep.int(x,n))},
         dataframe={
           return(as.data.frame(lapply(x, rep, times=n)))
         },
         hyperframe={
           xcols <- as.list(x)
           repxcols <- lapply(xcols, rep, times=n)
           return(do.call("hyperframe", repxcols))
         },
         stop("Internal error: unrecognised format of marks"))
}

# concatenate two sets of marks of the same format
"%mapp%" <- markappendop <- function(x,y) {
  fx <- markformat(x)
  fy <- markformat(y)
  agree <- (fx == fy)
  if(all(c(fx,fy) %in% c("dataframe", "hyperframe")))
    agree <- agree && identical(names(x),names(y))
  if(!agree)
    stop("Attempted to concatenate marks that are not compatible")
  switch(fx,
         none = { return(NULL) },
         vector = {
           if(is.factor(x) || is.factor(y))
             return(cat.factor(x,y))
           else return(c(x,y))
         },
         # bug fix: case label was misspelt 'hypeframe', so hyperframe
         # marks fell through to the internal-error stop() instead of
         # being row-bound like data frames
         hyperframe=,
         dataframe = { return(rbind(x,y)) },
         listof = {
           z <- append(x,y)
           if(!inherits(z, "listof")) z
<- as.listof(z) return(z) }, stop("Internal error: unrecognised format of marks")) } markappend <- function(...) { # combine marks from any number of patterns marxlist <- list(...) # check on compatibility of marks mkfmt <- sapply(marxlist,markformat) if(length(unique(mkfmt))>1) stop(paste("Marks of some patterns are of different format", "from those of other patterns.")) mkfmt <- mkfmt[1] # combine the marks switch(mkfmt, none = { return(NULL) }, vector = { marxlist <- lapply(marxlist, function(x){as.data.frame.vector(x,nm="v1")}) marx <- do.call("rbind", marxlist)[,1] return(marx) }, hyperframe =, dataframe = { # check compatibility of data frames # (this is redundant but gives more helpful message) nama <- lapply(marxlist, names) dims <- unlist(lapply(nama, length)) if(length(unique(dims)) != 1) stop("Data frames of marks have different column dimensions.") samenames <- unlist(lapply(nama, function(x,y) { identical(x,y) }, y=nama[[1]])) if(!all(samenames)) stop("Data frames of marks have different names.\n") marx <- do.call("rbind", marxlist) return(marx) }, listof = { marx <- do.call(c, marxlist) if(!inherits(marx, "listof")) marx <- as.listof(marx) return(marx) }) stop("Unrecognised mark format") } markcbind <- function(...) { # cbind several columns of marks marxlist <- list(...) 
mkfmt <- unlist(lapply(marxlist, markformat)) if(any(vacuous <- (mkfmt == "none"))) { marxlist <- marxlist[!vacuous] mkfmt <- mkfmt[!vacuous] } if(all(mkfmt %in% c("vector", "dataframe"))) { # result is a data frame if(any(isvec <- (mkfmt == "vector"))) marxlist[isvec] <- lapply(marxlist[isvec], as.data.frame.vector) marx <- do.call(data.frame, marxlist) } else { # result is a hyperframe if(!all(ishyp <- (mkfmt == "hyperframe"))) marxlist[!ishyp] <- lapply(marxlist[!ishyp], as.hyperframe) marx <- do.call(hyperframe, marxlist) } return(marx) } # extract only the columns of (passably) numeric data from a data frame numeric.columns <- function(M, logical=TRUE, others=c("discard", "na")) { others <- match.arg(others) M <- as.data.frame(M) if(ncol(M) == 1) colnames(M) <- NULL process <- function(z, logi, other) { if(is.numeric(z)) return(z) if(logi && is.logical(z)) return(as.integer(z)) switch(other, na=rep.int(NA_real_, length(z)), discard=NULL, NULL) } Mprocessed <- lapply(M, process, logi=logical, other=others) isnul <- unlist(lapply(Mprocessed, is.null)) if(all(isnul)) { # all columns have been removed # return a data frame with no columns return(as.data.frame(matrix(, nrow=nrow(M), ncol=0))) } Mout <- do.call("data.frame", Mprocessed[!isnul]) if(ncol(M) == 1 && ncol(Mout) == 1) colnames(Mout) <- NULL return(Mout) } spatstat/R/disc.R0000755000176000001440000000134112237642727013431 0ustar ripleyusers# # disc.R # # $Revision: 1.5 $ $Date: 2013/05/01 05:46:37 $ # # disc <- function(radius=1, centre=c(0,0), ..., mask=FALSE, npoly=128) { stopifnot(length(centre) == 2) stopifnot(length(radius) == 1) stopifnot(radius > 0) stopifnot(length(npoly) == 1) stopifnot(npoly > 2) if(!mask) { theta <- seq(from=0, to=2*pi, length.out=npoly+1)[-(npoly+1)] x <- centre[1] + radius * cos(theta) y <- centre[2] + radius * sin(theta) W <- owin(poly=list(x=x, y=y)) } else { B <- owin(c(-1,1),c(-1,1)) B <- as.mask(B, ...) 
indic <- function(x,y,x0,y0,r) as.integer((x-x0)^2 + (y-y0)^2 < r^2) IW <- as.im(indic, B, x0=centre[1], y0=centre[2], r=radius) W <- levelset(IW, 1, "==") } return(W) } spatstat/R/quadclass.R0000755000176000001440000001600212237642727014467 0ustar ripleyusers# # quadclass.S # # Class 'quad' to define quadrature schemes # in (rectangular) windows in two dimensions. # # $Revision: 4.22 $ $Date: 2013/04/25 06:37:43 $ # # An object of class 'quad' contains the following entries: # # $data: an object of class 'ppp' # defining the OBSERVATION window, # giving the locations (& marks) of the data points. # # $dummy: object of class 'ppp' # defining the QUADRATURE window, # giving the locations (& marks) of the dummy points. # # $w: vector giving the nonnegative weights for the # data and dummy points (data first, followed by dummy) # # w may also have an attribute attr(w, "zeroes") # equivalent to (w == 0). If this is absent # then all points are known to have positive weights. # # $param: # parameters that were used to compute the weights # and possibly to create the dummy points (see below). # # The combined (data+dummy) vectors of x, y coordinates of the points, # and their weights, are extracted using standard functions # x.quad(), y.quad(), w.quad() etc. # # ---------------------------------------------------------------------- # Note about parameters: # # If the quadrature scheme was created by quadscheme(), # then $param contains # # $param$weight # list containing the values of all parameters # actually used to compute the weights. # # $param$dummy # list containing the values of all parameters # actually used to construct the dummy pattern # via default.dummy(); # or NULL if the dummy pattern was provided externally # # $param$sourceid # vector mapping the quadrature points to the # original data and dummy points. # # If you constructed the quadrature scheme manually, this # structure may not be present. 
#
#-------------------------------------------------------------

# Constructor for quadrature schemes (class 'quad').
# data:  point pattern (ppp) of data points
# dummy: point pattern (ppp) of dummy points
# w:     optional vector of quadrature weights, data points first
#        then dummy points; defaults to unit weights
# param: optional record of the parameters used to build the scheme
# Returns (invisibly) a list of class 'quad'.
quad <- function(data, dummy, w, param=NULL) {
  data <- as.ppp(data)
  dummy <- as.ppp(dummy)
  # total number of quadrature points (data + dummy)
  n <- data$n + dummy$n
  if(missing(w))
    w <- rep.int(1, n)
  else {
    w <- as.vector(w)
    if(length(w) != n)
      stop("length of weights vector w is not equal to total number of points")
  }
  # cache the locations of zero weights, if any, as an attribute
  # so later code can skip recomputing (w == 0)
  if(is.null(attr(w, "zeroes")) && any( w == 0))
    attr(w, "zeroes") <- (w == 0)
  Q <- list(data=data, dummy=dummy, w=w, param=param)
  class(Q) <- "quad"
  invisible(Q)
}

# ------------------ extractor functions ----------------------
# Each returns the combined (data first, then dummy) value.

# x coordinates of all quadrature points
x.quad <- function(Q) {
  verifyclass(Q, "quad")
  c(Q$data$x, Q$dummy$x)
}

# y coordinates of all quadrature points
y.quad <- function(Q) {
  verifyclass(Q, "quad")
  c(Q$data$y, Q$dummy$y)
}

# quadrature weights
w.quad <- function(Q) {
  verifyclass(Q, "quad")
  Q$w
}

# parameters used to construct the scheme (may be NULL)
param.quad <- function(Q) {
  verifyclass(Q, "quad")
  Q$param
}

# total number of quadrature points
n.quad <- function(Q) {
  verifyclass(Q, "quad")
  Q$data$n + Q$dummy$n
}

# Combined marks of data and dummy points.
# Missing marks on either component are filled with NA;
# factor marks are concatenated with cat.factor to preserve levels.
marks.quad <- function(x, dfok=FALSE, ...) {
  verifyclass(x, "quad")
  dat <- x$data
  dum <- x$dummy
  # data frames of marks are not supported here
  if(dfok)
    warning("ignored dfok = TRUE; not implemented")
  mdat <- marks(dat, dfok=FALSE, ...)
  mdum <- marks(dum, dfok=FALSE, ...)
  if(is.null(mdat) && is.null(mdum))
    return(NULL)
  if(is.null(mdat))
    mdat <- rep.int(NA_integer_, dat$n)
  if(is.null(mdum))
    mdum <- rep.int(NA_integer_, dum$n)
  if(is.factor(mdat) && is.factor(mdum)) {
    mall <- cat.factor(mdat, mdum)
  } else
  mall <- c(mdat, mdum)
  return(mall)
}

# Does the quadrature scheme carry marks?
# na.action controls handling of NA mark values:
# "warn" (default), "fatal" (treat as unmarked), or "ignore".
is.marked.quad <- function(X, na.action="warn", ...) {
  marx <- marks(X, ...)
  if(is.null(marx))
    return(FALSE)
  if(any(is.na(marx)))
    switch(na.action,
           warn = {
             warning(paste("some mark values are NA in the point pattern",
                           short.deparse(substitute(X))))
           },
           fatal = {
             return(FALSE)
           },
           ignore = {}
           )
  return(TRUE)
}

# Is the quadrature scheme multitype (factor-valued marks)?
is.multitype.quad <- function(X, na.action="warn", ...) {
  marx <- marks(X, ...)
if(is.null(marx)) return(FALSE) if(any(is.na(marx))) switch(na.action, warn = { warning(paste("some mark values are NA in the point pattern", short.deparse(substitute(X)))) }, fatal = { return(FALSE) }, ignore = {} ) return(!is.data.frame(marx) && is.factor(marx)) } is.data <- function(Q) { verifyclass(Q, "quad") return(c(rep.int(TRUE, Q$data$n), rep.int(FALSE, Q$dummy$n))) } equals.quad <- function(Q) { # return matrix E such that E[i,j] = (X[i] == U[j]) # where X = Q$data and U = union.quad(Q) n <- Q$data$n m <- Q$dummy$n E <- matrix(FALSE, nrow=n, ncol=n+m) diag(E) <- TRUE E } equalsfun.quad <- function(Q) { stopifnot(inherits(Q, "quad")) return(function(i,j) { i == j }) } equalpairs.quad <- function(Q) { # return two-column matrix E such that # X[E[i,1]] == U[E[i,2]] for all i # where X = Q$data and U = union.quad(Q) n <- Q$data$n return(matrix(rep.int(seq_len(n),2), ncol=2)) } union.quad <- function(Q) { verifyclass(Q, "quad") ppp(x= c(Q$data$x, Q$dummy$x), y= c(Q$data$y, Q$dummy$y), window=Q$dummy$window, marks=marks.quad(Q), check=FALSE) } # # Plot a quadrature scheme # # plot.quad <- function(x, ..., main, dum=list()) { if(missing(main) || is.null(main)) main <- short.deparse(substitute(x)) verifyclass(x, "quad") data <- x$data dummy <- x$dummy dummyplot <- function(x, ..., pch=".", add=TRUE) { plot(x, pch=pch, add=add, ...) } if(!is.marked(data)) { plot(data, main=main, ...) do.call("dummyplot", append(list( dummy, main=paste(main, "\n dummy points") ), dum)) } else if(is.multitype(data)) { oldpar <- par(ask = interactive() && (.Device %in% c("X11", "GTK", "windows", "Macintosh"))) on.exit(par(oldpar)) data.marks <- marks(data) dummy.marks <- marks(dummy) types <- levels(data.marks) for(k in types) { maink <- paste(main, "\n mark = ", k, sep="") plot(unmark(data[data.marks == k]), main=maink, ...) 
do.call("dummyplot", append(list(unmark(dummy[dummy.marks == k])), dum)) } } else { plot(data, ..., main=main) addplot <- function(x, ..., add=TRUE, main=short.deparse(substitute(x))) { plot(x, ..., main=main, add=add) } do.call("addplot", append(list(dummy), dum)) } invisible(NULL) } # subset operator "[.quad" <- function(x, ...) { U <- union.quad(x) Z <- is.data(x) w <- w.quad(x) # determine serial numbers of points to be included V <- U %mark% seq_len(U$n) i <- marks(V[...]) # extract corresponding subsets of vectors Z <- Z[i] w <- w[i] # take subset of points, using any type of subset index U <- U[...] # stick together quad(U[Z], U[!Z], w) } unitname.quad <- function(x) { return(unitname(x$data)) } "unitname<-.quad" <- function(x, value) { unitname(x$data) <- value unitname(x$dummy) <- value return(x) } spatstat/R/transect.R0000644000176000001440000000515612237642727014337 0ustar ripleyusers# # transect.R # # Line transects of pixel images # # $Revision: 1.6 $ $Date: 2013/03/15 01:28:06 $ # transect.im <- local({ specify.location <- function(loc, rect) { lname <- short.deparse(substitute(loc)) if(is.numeric(loc) && length(loc) == 2) return(list(x=loc[1], y=loc[2])) if(is.list(loc)) return(xy.coords(loc)) if(!(is.character(loc) && length(loc) == 1)) stop(paste("Unrecognised format for", sQuote(lname)), call.=FALSE) xr <- rect$xrange yr <- rect$yrange switch(loc, bottomleft = list(x=xr[1], y=yr[1]), bottom = list(x=mean(xr), y=yr[1]), bottomright = list(x=xr[2], y=yr[1]), right = list(x=xr[2], y=mean(yr)), topright = list(x=xr[2], y=yr[2]), top = list(x=mean(xr), y=yr[2]), topleft = list(x=xr[1], y=yr[2]), left = list(x=xr[1], y=mean(yr)), centre=, center = list(x=mean(xr), y=mean(yr)), stop(paste("Unrecognised location", sQuote(lname), "=", dQuote(loc)), call.=FALSE) ) } transect.im <- function(X, ..., from="bottomleft", to="topright", click=FALSE, add=FALSE) { Xname <- short.deparse(substitute(X)) Xname <- sensiblevarname(Xname, "X") stopifnot(is.im(X)) # 
determine transect position if(click) { # interactive if(!add) plot(X) from <- locator(1) points(from) to <- locator(1) points(to) segments(from$x, from$y, to$x, to$y) } else { # data defining a line segment R <- as.rectangle(X) from <- specify.location(from, R) to <- specify.location(to, R) } # create sample points along transect if(identical(from,to)) stop(paste(sQuote("from"), "and", sQuote("to"), "must be distinct points"), call.=FALSE) u <- seq(0,1,length=512) x <- from$x + u * (to$x - from$x) y <- from$y + u * (to$y - from$y) leng <- sqrt( (to$x - from$x)^2 + (to$y - from$y)^2) t <- u * leng # look up pixel values (may be NA) v <- X[list(x=x, y=y), drop=FALSE] # package into fv object df <- data.frame(t=t, v=v) colnames(df)[2] <- Xname fv(df, argu = "t", ylab = substitute(Xname(t), list(Xname=as.name(Xname))), valu=Xname, labl = c("t", "%s(t)"), desc = c("distance along transect", "pixel value of %s"), unitname = unitname(X), fname = Xname) } transect.im }) spatstat/R/pcf.R0000755000176000001440000001621512251745116013255 0ustar ripleyusers# # pcf.R # # $Revision: 1.47 $ $Date: 2013/12/11 02:12:43 $ # # # calculate pair correlation function # from point pattern (pcf.ppp) # or from estimate of K or Kcross (pcf.fv) # or from fasp object # # pcf <- function(X, ...) 
{ UseMethod("pcf") } pcf.ppp <- function(X, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r", "d")) { verifyclass(X, "ppp") r.override <- !is.null(r) win <- X$window area <- area.owin(win) lambda <- X$n/area lambda2area <- area * lambda^2 correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) divisor <- match.arg(divisor) # bandwidth if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(lambda) hmax <- h # conversion to standard deviation bw <- h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(lambda) } ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, lambda) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) # arguments for 'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise distances close <- closepairs(X, rmax + hmax) dIJ <- close$d XI <- ppp(close$xi, close$yi, window=win, check=FALSE) # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- fv(df, "r", substitute(g(r), NULL), "theo", , alim, c("r","%s[Pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="g") ###### compute ####### 
if(any(correction=="translate")) { # translation correction XJ <- ppp(close$xj, close$yj, window=win, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) gT <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor)$g out <- bind.fv(out, data.frame(trans=gT), "hat(%s)[Trans](r)", "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) gR <- sewpcf(dIJ, edgewt, denargs, lambda2area, divisor)$g out <- bind.fv(out, data.frame(iso=gR), "hat(%s)[Ripley](r)", "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # default is to display all corrections formula(out) <- . ~ r # unitname(out) <- unitname(X) return(out) } # Smoothing Estimate of Weighted Pair Correlation # d = vector of relevant distances # w = vector of edge correction weights (in normal use) # denargs = arguments to density.default # lambda2area = constant lambda^2 * area (in normal use) sewpcf <- function(d, w, denargs, lambda2area, divisor=c("r","d")) { divisor <- match.arg(divisor) if(divisor == "d") { w <- w/d if(!all(good <- is.finite(w))) { nbad <- sum(!good) warning(paste(nbad, "infinite or NA", ngettext(nbad, "contribution was", "contributions were"), "deleted from pcf estimate")) d <- d[good] w <- w[good] } } wtot <- sum(w) kden <- do.call.matched("density.default", append(list(x=d, weights=w/wtot), denargs)) r <- kden$x y <- kden$y * wtot if(divisor == "r") y <- y/r g <- y/(2 * pi * lambda2area) return(data.frame(r=r,g=g)) } # #---------- OTHER METHODS FOR pcf -------------------- # "pcf.fasp" <- function(X, ..., method="c") { verifyclass(X, "fasp") Y <- X Y$title <- paste("Array of pair correlation functions", if(!is.null(X$dataname)) "for", X$dataname) # go to work on each function for(i in seq_along(X$fns)) { Xi <- X$fns[[i]] PCFi <- pcf.fv(Xi, ..., method=method) Y$fns[[i]] <- PCFi 
if(is.fv(PCFi)) Y$default.formula[[i]] <- formula(PCFi) } return(Y) } pcf.fv <- local({ callmatched <- function(fun, argue) { formalnames <- names(formals(fun)) formalnames <- formalnames[formalnames != "..."] do.call("fun", argue[names(argue) %in% formalnames]) } pcf.fv <- function(X, ..., method="c") { verifyclass(X, "fv") # extract r and the recommended estimate of K r <- with(X, .x) K <- with(X, .y) alim <- attr(X, "alim") # remove NA's ok <- !is.na(K) K <- K[ok] r <- r[ok] switch(method, a = { ss <- callmatched(smooth.spline, list(x=r, y=K, ...)) dK <- predict(ss, r, deriv=1)$y g <- dK/(2 * pi * r) }, b = { y <- K/(2 * pi * r) y[!is.finite(y)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=y, ...)) dy <- predict(ss, r, deriv=1)$y g <- dy + y/r }, c = { z <- K/(pi * r^2) z[!is.finite(z)] <- 1 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- (r/2) * dz + z }, d = { z <- sqrt(K) z[!is.finite(z)] <- 0 ss <- callmatched(smooth.spline, list(x=r, y=z, ...)) dz <- predict(ss, r, deriv=1)$y g <- z * dz/(pi * r) }, stop(paste("unrecognised method", sQuote(method))) ) # pack result into "fv" data frame Z <- fv(data.frame(r=r, theo=rep.int(1, length(r)), pcf=g), "r", substitute(g(r), NULL), "pcf", . 
~ r, alim, c("r", "%s[pois](r)", "%s(r)"), c("distance argument r", "theoretical Poisson value of %s", "estimate of %s by numerical differentiation"), fname="g") unitname(Z) <- unitname(X) return(Z) } pcf.fv }) spatstat/R/randomNS.R0000644000176000001440000001735412251541120014215 0ustar ripleyusers# # randomNS.R # # simulating from Neyman-Scott processes # # $Revision: 1.11 $ $Date: 2013/12/10 06:11:43 $ # # Original code for rCauchy and rVarGamma by Abdollah Jalilian # Other code and modifications by Adrian Baddeley # Bug fixes by Abdollah, Adrian, and Rolf Turner "rNeymanScott" <- function(kappa, rmax, rcluster, win = owin(c(0,1),c(0,1)), ..., lmax=NULL) { # Generic Neyman-Scott process # Implementation for bounded cluster radius # # 'rcluster' may be # # (1) a function(x,y, ...) that takes the coordinates # (x,y) of the parent point and generates a list(x,y) of offspring # if(is.function(rcluster)) return(rPoissonCluster(kappa, rmax, rcluster, win, ..., lmax=lmax)) # (2) a list(mu, f) where mu is a numeric value, function, or pixel image # and f is a function(n, ...) generating n i.i.d. 
offspring at 0,0 if(!(is.list(rcluster) && length(rcluster) == 2)) stop("rcluster should be either a function, or a list of two elements") win <- as.owin(win) mu <- rcluster[[1]] rdisplace <- rcluster[[2]] if(is.numeric(mu)) { # homogeneous if(!(length(mu) == 1 && mu >= 0)) stop("rcluster[[1]] should be a single nonnegative number") mumax <- mu } else if (is.im(mu) || is.function(mu)) { # inhomogeneous if(is.function(mu)) mu <- as.im(mu, W=win) mumax <- max(mu) } else stop("rcluster[[1]] should be a number, a function or a pixel image") if(!is.function(rdisplace)) stop("rcluster[[2]] should be a function") # Generate parents in dilated window frame <- bounding.box(win) dilated <- grow.rectangle(frame, rmax) if(is.im(kappa) && !is.subset.owin(dilated, as.owin(kappa))) stop(paste("The window in which the image", sQuote("kappa"), "is defined\n", "is not large enough to contain the dilation of the window", sQuote("win"))) parents <- rpoispp(kappa, lmax=lmax, win=dilated) np <- npoints(parents) # generate cluster sizes if(np == 0) { # no parents - empty pattern result <- ppp(numeric(0), numeric(0), window=win) parentid <- integer(0) } else { csize <- rpois(np, mumax) noff <- sum(csize) xparent <- parents$x yparent <- parents$y x0 <- rep.int(xparent, csize) y0 <- rep.int(yparent, csize) # invoke random generator dd <- rdisplace(noff, ...) 
mm <- if(is.ppp(dd)) marks(dd) else NULL # validate xy <- xy.coords(dd) dx <- xy$x dy <- xy$y if(!(length(dx) == noff)) stop("rcluster returned the wrong number of points") # create offspring and offspring-to-parent map xoff <- x0 + dx yoff <- y0 + dy parentid <- rep.int(1:np, csize) # trim to window retain <- inside.owin(xoff, yoff, win) xoff <- xoff[retain] yoff <- yoff[retain] parentid <- parentid[retain] if(!is.null(mm)) mm <- marksubset(mm, retain) # done result <- ppp(xoff, yoff, window=win, check=FALSE, marks=mm) } attr(result, "parents") <- parents attr(result, "parentid") <- parentid if(is.im(mu)) { # inhomogeneously modulated clusters a la Waagepetersen P <- eval.im(mu/mumax) result <- rthin(result, P) } return(result) } "rMatClust" <- local({ # like runifdisc but returns only the coordinates rundisk <- function(n, radius) { R <- radius * sqrt(runif(n, min=0, max=1)) Theta <- runif(n, min=0, max=2*pi) cbind(R * cos(Theta), R * sin(Theta)) } rMatClust <- function(kappa, r, mu, win = owin(c(0,1),c(0,1))) { # Matern Cluster Process with Poisson (mu) offspring distribution stopifnot(is.numeric(r) && length(r) == 1 && r > 0) result <- rNeymanScott(kappa, r, list(mu, rundisk), win, radius=r) return(result) } rMatClust }) "rThomas" <- local({ # random displacements gaus <- function(n, sigma) { matrix(rnorm(2 * n, mean=0, sd=sigma), ncol=2) } # main function rThomas <- function(kappa, sigma, mu, win = owin(c(0,1),c(0,1))) { # Thomas process with Poisson(mu) number of offspring # at isotropic Normal(0,sigma^2) displacements from parent # stopifnot(is.numeric(sigma) && length(sigma) == 1 && sigma > 0) result <- rNeymanScott(kappa, 4 * sigma, list(mu, gaus), win, sigma=sigma) return(result) } rThomas }) # ================================================ # Neyman-Scott process with Cauchy kernel function # ================================================ # omega: scale parameter of Cauchy kernel function # eta: scale parameter of Cauchy pair correlation function # 
eta = 2 * omega rCauchy <- local({ # simulate mixture of normals with inverse-gamma distributed variance rnmix.invgam <- function(n = 1, rate) { V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2) s <- 1/rgamma(n, shape=1/2, rate=rate) return(sqrt(s) * V) } # threshold the kernel function in polar coordinate kernthresh <- function(r, eta, eps) { 4 * (r/eta^2)/((1 + (2 * r/eta)^2)^(3/2)) - eps } # main function rCauchy <- function (kappa, omega, mu, win = owin(), eps = 0.001) { # omega: scale parameter of Cauchy kernel function # eta: scale parameter of Cauchy pair correlation function eta <- 2 * omega # determine the maximum radius of clusters rmax <- uniroot(kernthresh, lower = eta/2, upper = 5 * diameter(as.rectangle(win)), eta = eta, eps = eps)$root # simulate result <- rNeymanScott(kappa, rmax, list(mu, rnmix.invgam), win, rate = eta^2/8) # correction from Abdollah: the rate is beta = omega^2 / 2 = eta^2 / 8. return(result) } rCauchy }) # # ================================================================= # Neyman-Scott process with Variance Gamma (Bessel) kernel function # ================================================================= # nu.ker: smoothness parameter of Variance Gamma kernel function # omega: scale parameter of kernel function # nu.pcf: smoothness parameter of Variance Gamma pair correlation function # eta: scale parameter of Variance Gamma pair correlation function # nu.pcf = 2 * nu.ker + 1 and eta = omega rVarGamma <- local({ # simulates mixture of isotropic Normal points in 2D with gamma variances rnmix.gamma <- function(n = 1, shape, rate) { V <- matrix(rnorm(2 * n, 0, 1), nrow = n, ncol = 2) s <- rgamma(n, shape=shape, rate=rate) return(sqrt(s) * V) } # kernel function in polar coordinates kernfun.old <- function(r, nu.ker, omega, eps) { numer <- ((r/omega)^(nu.ker+1)) * besselK(r/omega, nu.ker) denom <- (2^nu.ker) * omega * gamma(nu.ker + 1) numer/denom - eps } kernfun <- function(r, nu.ker, omega, eps) { numer <- ((r/omega)^(nu.ker + 
1)) * besselK(r/omega, nu.ker) denom <- pi * (2^(nu.ker+1)) * omega^2 * gamma(nu.ker + 1) numer/denom - eps } # main function rVarGamma <- function (kappa, nu.ker=NULL, omega, mu, win = owin(), eps = 0.001, nu.pcf=NULL) { # nu.ker: smoothness parameter of Variance Gamma kernel function # omega: scale parameter of kernel function nu.ker <- resolve.vargamma.shape(nu.ker=nu.ker, nu.pcf=nu.pcf)$nu.ker # determine the maximum radius of clusters rmax <- uniroot(kernfun, lower = omega, upper = 5 * diameter(as.rectangle(win)), nu.ker = nu.ker, omega=omega, eps=eps)$root # simulate result <- rNeymanScott(kappa, rmax, list(mu, rnmix.gamma), win, # WAS: shape = 2 * (nu.ker + 1) shape = nu.ker + 1, rate = 1/(2 * omega^2)) return(result) } rVarGamma }) spatstat/R/pp3.R0000755000176000001440000001332312242557163013207 0ustar ripleyusers# # pp3.R # # class of three-dimensional point patterns in rectangular boxes # # $Revision: 1.14 $ $Date: 2013/10/30 02:15:55 $ # box3 <- function(xrange=c(0,1), yrange=xrange, zrange=yrange, unitname=NULL) { stopifnot(is.numeric(xrange) && length(xrange) == 2 && diff(xrange) > 0) stopifnot(is.numeric(yrange) && length(yrange) == 2 && diff(yrange) > 0) stopifnot(is.numeric(zrange) && length(zrange) == 2 && diff(zrange) > 0) out <- list(xrange=xrange, yrange=yrange, zrange=zrange, units=as.units(unitname)) class(out) <- "box3" return(out) } as.box3 <- function(...) { a <- list(...) 
n <- length(a) if(n == 0) stop("No arguments given") if(n == 1) { a <- a[[1]] if(inherits(a, "box3")) return(a) if(inherits(a, "pp3")) return(a$domain) if(inherits(a, "boxx")){ if(ncol(a$ranges)==3) return(box3(a$ranges[,1], a$ranges[,2], a$ranges[,3])) stop("Supplied boxx object does not have dimension three") } if(inherits(a, "ppx")) return(as.box3(a$domain)) if(is.numeric(a)) { if(length(a) == 6) return(box3(a[1:2], a[3:4], a[5:6])) stop(paste("Don't know how to interpret", length(a), "numbers as a box")) } if(!is.list(a)) stop("Don't know how to interpret data as a box") } return(do.call("box3", a)) } print.box3 <- function(x, ...) { bracket <- function(z) paste("[", paste(signif(z, 5), collapse=", "), "]", sep="") v <- paste(unlist(lapply(x[1:3], bracket)), collapse=" x ") s <- summary(unitname(x)) cat(paste("Box:", v, s$plural, s$explain, "\n")) invisible(NULL) } unitname.box3 <- function(x) { x$units } "unitname<-.box3" <- function(x, value) { x$units <- as.units(value) return(x) } eroded.volumes <- function(x, r) { UseMethod("eroded.volumes") } eroded.volumes.box3 <- function(x, r) { b <- as.box3(x) ax <- pmax.int(0, diff(b$xrange) - 2 * r) ay <- pmax.int(0, diff(b$yrange) - 2 * r) az <- pmax.int(0, diff(b$zrange) - 2 * r) ax * ay * az } shortside <- function(x) { UseMethod("shortside") } shortside.box3 <- function(x) { min(sidelengths(x)) } sidelengths <- function(x) { UseMethod("sidelengths") } sidelengths.box3 <- function(x) { with(x, c(diff(xrange), diff(yrange), diff(zrange))) } bounding.box3 <- function(...) { wins <- list(...) boxes <- lapply(wins, as.box3) xr <- range(unlist(lapply(boxes, getElement, name="xrange"))) yr <- range(unlist(lapply(boxes, getElement, name="yrange"))) zr <- range(unlist(lapply(boxes, getElement, name="zrange"))) box3(xr, yr, zr) } pp3 <- function(x, y, z, ...) { stopifnot(is.numeric(x)) stopifnot(is.numeric(y)) stopifnot(is.numeric(z)) b <- as.box3(...) 
out <- ppx(data=data.frame(x=x,y=y,z=z), domain=b) class(out) <- c("pp3", class(out)) return(out) } is.pp3 <- function(x) { inherits(x, "pp3") } npoints.pp3 <- function(x) { nrow(x$data) } print.pp3 <- function(x, ...) { cat("Three-dimensional point pattern\n") sd <- summary(x$data) np <- sd$ncases cat(paste(np, ngettext(np, "point", "points"), "\n")) print(x$domain) invisible(NULL) } summary.pp3 <- function(object, ...) { sd <- summary(object$data) np <- sd$ncases dom <- object$domain v <- volume.box3(dom) u <- summary(unitname(dom)) intens <- np/v out <- list(np=np, sumdat=sd, dom=dom, v=v, u=u, intensity=intens) class(out) <- "summary.pp3" return(out) } print.summary.pp3 <- function(x, ...) { cat("Three-dimensional point pattern\n") cat(paste(x$np, ngettext(x$np, "point", "points"), "\n")) print(x$dom) u <- x$u v <- x$v cat(paste("Volume", v, "cubic", if(v == 1) u$singular else u$plural, u$explain, "\n")) cat(paste("Average intensity", x$intensity, "points per cubic", u$singular, u$explain, "\n")) invisible(NULL) } plot.pp3 <- function(x, ...) { xname <- short.deparse(substitute(x)) if(!require("scatterplot3d")) stop("Package scatterplot3d is needed to plot 3D point patterns\n") coo <- coords(x) cnam <- names(coo) do.call("scatterplot3d", resolve.defaults(list(x=coo[,1], y=coo[,2], z=coo[,3]), list(...), list(main=xname), list(xlab=cnam[1], ylab=cnam[2], zlab=cnam[3]), list(xlim=x$domain$xrange, ylim=x$domain$yrange, zlim=x$domain$zrange))) } "[.pp3" <- function(x, i, ...) 
{ answer <- NextMethod("[") if(is.ppx(answer)) class(answer) <- c("pp3", class(answer)) return(answer) } unitname.pp3 <- function(x) { unitname(x$domain) } "unitname<-.pp3" <- function(x, value) { d <- x$domain unitname(d) <- value x$domain <- d return(x) } diameter.box3 <- function(x) { stopifnot(inherits(x, "box3")) with(x, sqrt(diff(xrange)^2+diff(yrange)^2+diff(zrange)^2)) } volume <- function(x) { UseMethod("volume") } volume.box3 <- function(x) { stopifnot(inherits(x, "box3")) with(x, prod(diff(xrange), diff(yrange), diff(zrange))) } runifpoint3 <- function(n, domain=box3()) { domain <- as.box3(domain) x <- with(domain, runif(n, min=xrange[1], max=xrange[2])) y <- with(domain, runif(n, min=yrange[1], max=yrange[2])) z <- with(domain, runif(n, min=zrange[1], max=zrange[2])) pp3(x,y,z,domain) } rpoispp3 <- function(lambda, domain=box3()) { domain <- as.box3(domain) v <- volume.box3(domain) if(!(is.numeric(lambda) && length(lambda) == 1)) stop("lambda must be a single numeric value") n <- rpois(1, lambda * v) runifpoint3(n, domain=domain) } spatstat/R/objsurf.R0000644000176000001440000000745312240447357014164 0ustar ripleyusers# # objsurf.R # # surface of the objective function for an M-estimator # # $Revision: 1.3 $ $Date: 2013/11/12 15:53:11 $ # objsurf <- function(x, ...) 
{ UseMethod("objsurf") } objsurf.kppm <- function(x, ..., ngrid=32, ratio=1.5, verbose=TRUE) { Fit <- x$Fit switch(Fit$method, mincon = { result <- objsurf(Fit$mcfit, ..., ngrid=ngrid, ratio=ratio, verbose=verbose) }, clik = { optpar <- x$par objfun <- Fit$objfun objargs <- Fit$objargs result <- objsurfEngine(objfun, optpar, objargs, ..., ngrid=ngrid, ratio=ratio, verbose=verbose) }) return(result) } objsurf.minconfit <- function(x, ..., ngrid=32, ratio=1.5, verbose=TRUE) { optpar <- x$par objfun <- x$objfun objargs <- x$objargs dotargs <- x$dotargs objsurfEngine(objfun, optpar, objargs, ..., dotargs=dotargs, ngrid=ngrid, ratio=ratio, verbose=verbose) } objsurfEngine <- function(objfun, optpar, objargs, ..., dotargs=list(), objname="objective", ngrid=32, ratio=1.5, verbose=TRUE) { trap.extra.arguments(...) if(!is.function(objfun)) stop("Object is in an outdated format and needs to be re-fitted") npar <- length(optpar) if(npar != 2) stop("Only implemented for functions of 2 arguments") # create grid of parameter values ratio <- ensure2vector(ratio) ngrid <- ensure2vector(ngrid) stopifnot(all(ratio > 1)) xgrid <- seq(optpar[1]/ratio[1], optpar[1] * ratio[1], length=ngrid[1]) ygrid <- seq(optpar[2]/ratio[2], optpar[2] * ratio[2], length=ngrid[2]) pargrid <- expand.grid(xgrid, ygrid) colnames(pargrid) <- names(optpar) # evaluate if(verbose) cat(paste("Evaluating", nrow(pargrid), "function values...")) values <- do.call("apply", append(list(pargrid, 1, objfun, objargs=objargs), dotargs)) if(verbose) cat("Done.\n") result <- list(x=xgrid, y=ygrid, z=matrix(values, ngrid[1], ngrid[2])) attr(result, "optpar") <- optpar attr(result, "objname") <- "contrast" class(result) <- "objsurf" return(result) } print.objsurf <- function(x, ...) 
{ cat("Objective function surface\n") optpar <- attr(x, "optpar") objname <- attr(x, "objname") nama <- names(optpar) cat("Parameter ranges:\n") cat(paste(paste0(nama[1], ":"), prange(range(x$x)), "\n")) cat(paste(paste0(nama[2], ":"), prange(range(x$y)), "\n")) cat(paste("Function value:", objname, "\n")) invisible(NULL) } image.objsurf <- plot.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") nama <- names(optpar) do.call("image", resolve.defaults(list(x=unclass(x)), list(...), list(xlab=nama[1], ylab=nama[2], main=xname))) abline(v=optpar[1], lty=3) abline(h=optpar[2], lty=3) invisible(NULL) } contour.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") nama <- names(optpar) do.call("contour", resolve.defaults(list(x=unclass(x)), list(...), list(xlab=nama[1], ylab=nama[2], main=xname))) abline(v=optpar[1], lty=3) abline(h=optpar[2], lty=3) invisible(NULL) } persp.objsurf <- function(x, ...) { xname <- short.deparse(substitute(x)) optpar <- attr(x, "optpar") objname <- attr(x, "objname") nama <- names(optpar) r <- do.call("persp", resolve.defaults(list(x=x$x, y=x$y, z=x$z), list(...), list(xlab=nama[1], ylab=nama[2], zlab=objname, main=xname))) invisible(r) } spatstat/R/linearmrkcon.R0000644000176000001440000000335712237642727015201 0ustar ripleyusers# # linearmrkcon.R # # mark connection function & mark equality function for linear networks # # $Revision$ $Date$ # linearmarkconnect <- function(X, i, j, r=NULL, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") marx <- marks(X) lev <- levels(marx) if(missing(i) || is.null(i)) i <- lev[1] else if(!(i %in% lev)) stop(paste("i = ", i , "is not a valid mark")) if(missing(j) || is.null(j)) j <- lev[2] else if(!(j %in% lev)) stop(paste("j = ", j , "is not a valid mark")) pcfij <- linearpcfcross(X, i, j, r=r, ...) pcfall <- linearpcf(X, r=r, ...) 
qi <- mean(marx == i) qj <- mean(marx == j) result <- eval.fv(qi * qj * pcfij/pcfall) # rebrand iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(p[lin,i,j](r), list(i=iname,j=jname)), sprintf("p[list(lin,%s,%s)]", iname, jname), new.yexp=substitute(p[list(lin,i,j)](r), list(i=iname,j=jname))) result <- rebadge.fv(result, tags=c("est","theo"), new.labl=c("hat(%s)(r)", "%s[theo](r)")) return(result) } linearmarkequal <- function(X, r=NULL, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") lev <- levels(marks(X)) v <- list() for(l in lev) v[[l]] <- linearmarkconnect(X, l, l, r=r, ...) result <- Reduce(function(A,B){eval.fv(A+B)}, v) result <-rebadge.fv(result, quote(p[lin](r)), new.fname="p[lin]") result <- rebadge.fv(result, tags=c("est","theo"), new.labl=c("hat(%s)(r)", "%s[theo](r)")) return(result) } spatstat/R/covariates.R0000755000176000001440000000306512237642727014654 0ustar ripleyusers# # covariates.R # # evaluate covariates # # $Revision: 1.2 $ $Date: 2013/04/25 06:37:43 $ # evalCovariate <- function(covariate, locations) { # evaluate covariate of any kind at specified locations covvalues <- if(is.im(covariate)) safelookup(covariate, locations) else if(is.function(covariate)) covariate(locations$x, locations$y) else if(is.numeric(covariate) || is.factor(covariate)) { if(length(covariate) == 1) rep.int(covariate, length(locations$x)) else if(length(covariate) == length(locations$x)) covariate else stop("Inappropriate length for covariate vector") } else stop("Covariate should be an image, a function or a factor/numeric vector") return(covvalues) } ppmCovariates <- function(model) { # generate list of all covariates in ppm (excluding marks) stopifnot(is.ppm(model)) co <- as.list(model$covariates) xy <- list(x=function(x,y){x}, y=function(x,y){y}) coplus <- append(co, xy) return(as.listof(coplus)) } findCovariate <- function(covname, scope, scopename=NULL) { # find the 
named covariate in the given ppm object or list if(is.ppm(scope)) { covlist <- ppmCovariates(scope) if(missing(scopename)) scopename <- "covariates in model" } else if(is.list(scope)) { covlist <- scope } else stop("scope should be a named list of covariates, or a ppm object") if(!(covname %in% names(covlist))) stop(paste("covariate", dQuote(covname), "not found", if(!is.null(scopename)) paste("amongst", scopename) else NULL)) covlist[[covname]] } spatstat/R/progress.R0000644000176000001440000000635612237642727014363 0ustar ripleyusers# # progress.R # # $Revision: 1.5 $ $Date: 2013/08/01 10:38:56 $ # # progress plots (envelope representations) # dclf.progress <- function(X, ..., nrank=1) mctest.progress(X, ..., expo=2, nrank=nrank) mad.progress <- function(X, ..., nrank=1) mctest.progress(X, ..., expo=Inf, nrank=nrank) mctest.progress <- local({ ordstat <- function(z, k) { sort(z, decreasing=TRUE, na.last=TRUE)[k] } silentmax <- function(z) { if(all(is.nan(z))) NaN else max(z[is.finite(z)]) } mctest.progress <- function(X, fun=Lest, ..., expo=1, nrank=1) { check.1.real(expo) explain.ifnot(expo >= 0) if((nrank %% 1) != 0) stop("nrank must be an integer") if(missing(fun) && inherits(X, "envelope")) fun <- NULL Z <- envelopeProgressData(X, fun=fun, ..., expo=expo) R <- Z$R devdata <- Z$devdata devsim <- Z$devsim nsim <- ncol(devsim) critval <- if(nrank == 1) apply(devsim, 1, silentmax) else apply(devsim, 1, ordstat, k=nrank) alpha <- nrank/(nsim + 1) alphastring <- paste(100 * alpha, "%%", sep="") # create fv object fname <- if(is.infinite(expo)) "mad" else if(expo == 2) "T" else paste("D[",expo,"]", sep="") ylab <- if(is.infinite(expo)) quote(mad(R)) else if(expo == 2) quote(T(R)) else eval(substitute(quote(D[p](R)), list(p=expo))) df <- data.frame(R=R, obs=devdata, crit=critval, zero=0) p <- fv(df, argu="R", ylab=ylab, valu="obs", fmla = . 
~ R, desc = c("Interval endpoint R", "observed value of test statistic %s", paste("Monte Carlo", alphastring, "critical value for %s"), "zero"), labl=c("R", "%s(R)", "%s[crit](R)", "0"), unitname = unitname(X), fname = fname) fvnames(p, ".") <- c("obs", "crit") fvnames(p, ".s") <- c("zero", "crit") return(p) } mctest.progress }) # Do not call this function. # Performs underlying computations envelopeProgressData <- function(X, fun=Lest, ..., expo=1, normalize=FALSE, deflate=FALSE) { # compute or extract simulated functions X <- envelope(X, fun=fun, ..., savefuns=TRUE) Y <- attr(X, "simfuns") # extract values R <- with(X, .x) obs <- with(X, .y) reference <- if("theo" %in% names(X)) with(X, theo) else with(X, mmean) sim <- as.matrix(as.data.frame(Y))[, -1] nsim <- ncol(sim) if(is.infinite(expo)) { # MAD devdata <- cummax(abs(obs-reference)) devsim <- apply(abs(sim-reference), 2, cummax) testname <- "Maximum absolute deviation test" } else { dR <- c(0, diff(R)) a <- (nsim/(nsim - 1))^expo devdata <- a * cumsum(dR * abs(obs - reference)^expo) devsim <- a * apply(dR * abs(sim - reference)^expo, 2, cumsum) if(normalize) { devdata <- devdata/R devsim <- sweep(devsim, 1, R, "/") } if(deflate) { devdata <- devdata^(1/expo) devsim <- devsim^(1/expo) } testname <- if(expo == 2) "Diggle-Cressie-Loosmore-Ford test" else if(expo == 1) "Integral absolute deviation test" else paste("Integrated", ordinal(expo), "Power Deviation test") } result <- list(R=R, devdata=devdata, devsim=devsim, testname=testname) return(result) } spatstat/R/adaptive.density.R0000755000176000001440000000205212237642727015762 0ustar ripleyusers# # adaptive.density.R # # $Revision: 1.4 $ $Date: 2011/05/18 01:24:50 $ # # adaptive.density <- function(X, f=0.1, ..., nrep=1) { stopifnot(is.ppp(X)) npts <- npoints(X) stopifnot(is.numeric(f) && length(f) == 1 && f > 0 & f < 1) ntess <- floor(f * npts) if(ntess == 0) { # naive estimate of intensity W <- X$window lam <- npts/area.owin(W) return(as.im(lam, W, ...)) } 
if(nrep > 1) { # estimate is the average of nrep randomised estimates total <- 0 for(i in seq_len(nrep)) { estimate <- adaptive.density(X, f, ..., nrep=1) total <- eval.im(total + estimate) } average <- eval.im(total/nrep) return(average) } ncount <- npts - ntess fcount <- ncount/npts itess <- sample(seq_len(npts), ntess, replace=FALSE) Xtess <- X[itess] Xcount <- X[-itess] tes <- dirichlet(Xtess) meanintensity <- function(x) { x$n/area.owin(x$window) } lam <- unlist(lapply(split(Xcount, tes), meanintensity)) tesim <- as.im(tes, ...) out <- eval.im(lam[as.integer(tesim)]/fcount) return(out) } spatstat/R/lurking.R0000755000176000001440000003250612237642727014171 0ustar ripleyusers# Lurking variable plot for arbitrary covariate. # # # $Revision: 1.36 $ $Date: 2013/05/01 07:23:21 $ # lurking <- function(object, covariate, type="eem", cumulative=TRUE, clipwindow=default.clipwindow(object), rv = NULL, plot.sd=is.poisson.ppm(object), plot.it=TRUE, typename, covname, oldstyle=FALSE, check=TRUE, ..., splineargs=list(spar=0.5)) { # validate object if(is.ppp(object)) { X <- object object <- ppm(X, ~1, forcefit=TRUE) } else verifyclass(object, "ppm") # match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson residuals") # may need to refit the model if(plot.sd && is.null(getglmfit(object))) object <- update(object, forcefit=TRUE, use.internal=TRUE) # extract spatial locations Q <- quad.ppm(object) datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) # subset of quadrature points used to fit model subQset <- getglmsubset(object) if(is.null(subQset)) subQset <- rep.int(TRUE, n.quad(Q)) ################################################################# # compute the covariate if(is.im(covariate)) { covvalues <- 
covariate[quadpoints, drop=FALSE] } else if(is.vector(covariate) && is.numeric(covariate)) { covvalues <- covariate if(length(covvalues) != quadpoints$n) stop("Length of covariate vector,", length(covvalues), "!=", quadpoints$n, ", number of quadrature points") } else if(is.expression(covariate)) { # Expression involving covariates in the model # Set up environment for evaluating expression if(!is.null(object$covariates)) { # Expression may involve an external covariate # Recompute model, extracting all covariates object <- update(object, allcovar=TRUE) # harmonise, just in case Q <- quad.ppm(object) datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) subQset <- getglmsubset(object) if(is.null(subQset)) subQset <- rep.int(TRUE, n.quad(Q)) # } glmdata <- getglmdata(object) # Fix special cases if(is.null(glmdata)) { # default glmdata <- data.frame(x=quadpoints$x, y=quadpoints$y) if(is.marked(quadpoints)) glmdata$marks <- marks(quadpoints) } # ensure x and y are in data frame if(!all(c("x","y") %in% names(glmdata))) { glmdata$x <- quadpoints$x glmdata$y <- quadpoints$y } # Evaluate expression sp <- parent.frame() covvalues <- eval(covariate, envir= glmdata, enclos=sp) if(!is.numeric(covvalues)) stop("The evaluated covariate is not numeric") } else stop(paste("The", sQuote("covariate"), "should be either", "a pixel image, an expression or a numeric vector")) ################################################################# # Validate covariate values nbg <- is.na(covvalues) if(any(offending <- nbg && subQset)) { if(is.im(covariate)) warning(paste(sum(offending), "out of", length(offending), "quadrature points discarded because", ngettext(sum(offending), "it lies", "they lie"), "outside the domain of the covariate image")) else warning(paste(sum(offending), "out of", length(offending), "covariate values discarded because", ngettext(sum(offending), "it is NA", "they are NA"))) } # remove points ok <- !nbg & subQset Q <- Q[ok] covvalues <- 
covvalues[ok] quadpoints <- quadpoints[ok] # adjust Z <- is.data(Q) wts <- w.quad(Q) if(any(is.infinite(covvalues) | is.nan(covvalues))) stop("covariate contains Inf or NaN values") # Quadrature points marked by covariate value covq <- quadpoints %mark% as.numeric(covvalues) ################################################################ # Residuals/marks attached to appropriate locations. # Stoyan-Grabarnik weights are attached to the data points only. # Others (residuals) are attached to all quadrature points. resvalues <- if(!is.null(rv)) rv else if(type=="eem") eem(object, check=check) else residuals.ppm(object, type=type, check=check) if(inherits(resvalues, "msr")) { # signed or vector-valued measure resvalues <- resvalues$val if(ncol(as.matrix(resvalues)) > 1) stop("Not implemented for vector measures; use [.msr to split into separate components") } if(type != "eem") resvalues <- resvalues[ok] res <- (if(type == "eem") datapoints else quadpoints) %mark% as.numeric(resvalues) # ... 
and the same locations marked by the covariate covres <- if(type == "eem") covq[Z] else covq # NAMES OF THINGS # name of the covariate if(missing(covname)) covname <- if(is.expression(covariate)) covariate else "covariate" # type of residual/mark if(missing(typename)) typename <- if(!is.null(rv)) "rv" else "" ####################################################################### # START ANALYSIS # Clip to subwindow if needed clip <- !is.poisson.ppm(object) || (!missing(clipwindow) && !is.null(clipwindow)) if(clip) { covq <- covq[clipwindow] res <- res[clipwindow] covres <- covres[clipwindow] clipquad <- inside.owin(quadpoints$x, quadpoints$y, clipwindow) wts <- wts[ clipquad ] } # ----------------------------------------------------------------------- # (A) EMPIRICAL CUMULATIVE FUNCTION # based on data points if type="eem", otherwise on quadrature points # cumulative sums which ignore NA's cumsumna <- function(x) { x[is.na(x)] <- 0 return(cumsum(x)) } # Reorder the data/quad points in order of increasing covariate value # and then compute the cumulative sum of their residuals/marks markscovres <- marks(covres) o <- fave.order(markscovres) covsort <- markscovres[o] cummark <- cumsumna(marks(res)[o]) # we'll plot(covsort, cummark) in the cumulative case # (B) THEORETICAL MEAN CUMULATIVE FUNCTION # based on all quadrature points # Range of covariate values covqmarks <- marks(covq) covrange <- range(covqmarks, na.rm=TRUE) # Suitable breakpoints cvalues <- seq(from=covrange[1], to=covrange[2], length.out=100) csmall <- cvalues[1] - diff(cvalues[1:2]) cbreaks <- c(csmall, cvalues) # cumulative area as function of covariate values covclass <- cut(covqmarks, breaks=cbreaks) increm <- tapply(wts, covclass, sum) cumarea <- cumsumna(increm) # compute theoretical mean (when model is true) mean0 <- if(type == "eem") cumarea else numeric(length(cumarea)) # we'll plot(cvalues, mean0) in the cumulative case # (A'),(B') DERIVATIVES OF (A) AND (B) # Required if cumulative=FALSE # 
Estimated by spline smoothing (with x values jittered) if(!cumulative) { # fit smoothing spline to (A) ss <- do.call("smooth.spline", append(list(covsort, cummark), splineargs) ) # estimate derivative of (A) derivmark <- predict(ss, covsort, deriv=1)$y # similarly for (B) ss <- do.call("smooth.spline", append(list(cvalues, mean0), splineargs) ) derivmean <- predict(ss, cvalues, deriv=1)$y } # ----------------------------------------------------------------------- # Store what will be plotted if(cumulative) { empirical <- data.frame(covariate=covsort, value=cummark) theoretical <- data.frame(covariate=cvalues, mean=mean0) } else { empirical <- data.frame(covariate=covsort, value=derivmark) theoretical <- data.frame(covariate=cvalues, mean=derivmean) } # ------------------------------------------------------------------------ # (C) STANDARD DEVIATION if desired # (currently implemented only for Poisson) # (currently implemented only for cumulative case) if(plot.sd && !is.poisson.ppm(object)) warning(paste("standard deviation is calculated for Poisson model;", "not valid for this model")) if(plot.sd && cumulative) { # Fitted intensity at quadrature points lambda <- fitted.ppm(object, type="trend", check=check) lambda <- lambda[ok] # Fisher information for coefficients asymp <- vcov(object,what="internals") Fisher <- asymp$fisher # Local sufficient statistic at quadrature points suff <- asymp$suff suff <- suff[ok, ,drop=FALSE] # Clip if required if(clip) { lambda <- lambda[clipquad] suff <- suff[clipquad, , drop=FALSE] # suff is a matrix } # First term: integral of lambda^(2p+1) switch(type, pearson={ varI <- cumarea }, raw={ # Compute sum of w*lambda for quadrature points in each interval dvar <- tapply(wts * lambda, covclass, sum) # tapply() returns NA when the table is empty dvar[is.na(dvar)] <- 0 # Cumulate varI <- cumsum(dvar) }, inverse=, # same as eem eem={ # Compute sum of w/lambda for quadrature points in each interval dvar <- tapply(wts / lambda, covclass, 
sum) # tapply() returns NA when the table is empty dvar[is.na(dvar)] <- 0 # Cumulate varI <- cumsum(dvar) }) # variance-covariance matrix of coefficients V <- try(solve(Fisher), silent=TRUE) if(inherits(V, "try-error")) { warning("Fisher information is singular; reverting to oldstyle=TRUE") oldstyle <- TRUE } # Second term: B' V B if(oldstyle) { varII <- 0 } else { # lamp = lambda^(p + 1) lamp <- switch(type, raw = lambda, pearson = sqrt(lambda), inverse =, eem = as.integer(lambda > 0)) # Compute sum of w * lamp * suff for quad points in intervals Bcontrib <- as.vector(wts * lamp) * suff dB <- matrix(, nrow=length(cumarea), ncol=ncol(Bcontrib)) for(j in seq_len(ncol(dB))) dB[,j] <- tapply(Bcontrib[,j], covclass, sum, na.rm=TRUE) # tapply() returns NA when the table is empty dB[is.na(dB)] <- 0 # Cumulate columns B <- apply(dB, 2, cumsum) # compute B' V B for each i varII <- diag(B %*% V %*% t(B)) } # # variance of residuals varR <- varI - varII # trap numerical errors nbg <- (varR < 0) if(any(nbg)) { ran <- range(varR) varR[nbg] <- 0 relerr <- abs(ran[1]/ran[2]) nerr <- sum(nbg) if(relerr > 1e-6) { warning(paste(nerr, "negative", ngettext(nerr, "value (", "values (min="), signif(ran[1], 4), ")", "of residual variance reset to zero", "(out of", length(varR), "values)")) } } theoretical$sd <- sqrt(varR) } # --------------- PLOT THEM ---------------------------------- if(plot.it) { # work out plot range mr <- range(c(0, empirical$value, theoretical$mean), na.rm=TRUE) if(!is.null(theoretical$sd)) mr <- range(c(mr, theoretical$mean + 2 * theoretical$sd, theoretical$mean - 2 * theoretical$sd), na.rm=TRUE) # start plot vname <- paste(if(cumulative)"cumulative" else "marginal", typename) do.call("plot", resolve.defaults( list(covrange, mr), list(type="n"), list(...), list(xlab=covname, ylab=vname))) # (A)/(A') Empirical lines(value ~ covariate, empirical, ...) 
# (B)/(B') Theoretical mean do.call("lines", resolve.defaults( list(mean ~ covariate, theoretical), list(...), list(lty=2))) # (C) Standard deviation if(!is.null(theoretical$sd)) { do.call("lines", resolve.defaults( list(mean + 2 * sd ~ covariate, theoretical), list(...), list(lty=3))) do.call("lines", resolve.defaults( list(mean - 2 * sd ~ covariate, theoretical), list(...), list(lty=3))) } } # ---------------- RETURN COORDINATES ---------------------------- stuff <- list(empirical=empirical, theoretical=theoretical) return(invisible(stuff)) } spatstat/R/Kmulti.inhom.R0000755000176000001440000002527712237642727015103 0ustar ripleyusers# # Kmulti.inhom.S # # $Revision: 1.39 $ $Date: 2013/04/25 06:37:43 $ # # # ------------------------------------------------------------------------ Lcross.inhom <- function(X, i, j, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] if(missing(j)) j <- levels(marks(X))[2] K <- Kcross.inhom(X, i, j, ...) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) # relabel the fv object L <- rebadge.fv(L, substitute(L[inhom,i,j](r), list(i=iname,j=jname)), sprintf("L[list(inhom,%s,%s)]", i, j), new.yexp=substitute(L[list(inhom,i,j)](r), list(i=iname,j=jname))) return(L) } Ldot.inhom <- function(X, i, ...) { if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(i)) i <- levels(marks(X))[1] K <- Kdot.inhom(X, i, ...) 
L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object iname <- make.parseable(paste(i)) L <- rebadge.fv(L, substitute(L[inhom, i ~ dot](r), list(i=iname)), paste("L[list(inhom,", iname, "~symbol(\"\\267\"))]"), new.yexp=substitute(L[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) return(L) } "Kcross.inhom" <- function(X, i, j, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIJ=NULL) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] I <- (marx == i) J <- (marx == j) Iname <- paste("points with mark i =", i) Jname <- paste("points with mark j =", j) result <- Kmulti.inhom(X, I, J, lambdaI, lambdaJ, ..., r=r,breaks=breaks,correction=correction, sigma=sigma, varcov=varcov, lambdaIJ=lambdaIJ, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(K[inhom,i,j](r), list(i=iname,j=jname)), sprintf("K[list(inhom,%s,%s)]", i, j), new.yexp=substitute(K[list(inhom,i,j)](r), list(i=iname,j=jname))) return(result) } "Kdot.inhom" <- function(X, i, lambdaI=NULL, lambdadot=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIdot=NULL) { verifyclass(X, "ppp") if(!is.multitype(X, dfok=FALSE)) stop("Point pattern must be multitype") if(missing(correction)) correction <- NULL marx <- marks(X) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) J <- rep.int(TRUE, X$n) # i.e. 
all points Iname <- paste("points with mark i =", i) Jname <- paste("points") result <- Kmulti.inhom(X, I, J, lambdaI, lambdadot, ..., r=r,breaks=breaks,correction=correction, sigma=sigma, varcov=varcov, lambdaIJ=lambdaIdot, Iname=Iname, Jname=Jname) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(K[inhom, i ~ dot](r), list(i=iname)), paste("K[list(inhom,", iname, "~symbol(\"\\267\"))]"), new.yexp=substitute(K[list(inhom, i ~ symbol("\267"))](r), list(i=iname))) return(result) } "Kmulti.inhom"<- function(X, I, J, lambdaI=NULL, lambdaJ=NULL, ..., r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") extrargs <- resolve.defaults(list(...), list(Iname="points satisfying condition I", Jname="points satisfying condition J")) if(length(extrargs) > 2) warning("Additional arguments unrecognised") Iname <- extrargs$Iname Jname <- extrargs$Jname npts <- npoints(X) x <- X$x y <- X$y W <- as.owin(X) area <- area.owin(W) # validate edge correction correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("border", "isotropic", "Ripley", "translate") correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, W$type, correction.given) # validate I, J I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop(paste("There are no", Iname)) if(nJ == 0) stop(paste("There are no", Jname)) # r values rmaxdefault <- rmax.rule("K", W, nJ/area) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # intensity data if(is.null(lambdaI)) { # estimate intensity 
lambdaI <- density(X[I], ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else if(is.im(lambdaI)) { # look up intensity values lambdaI <- safelookup(lambdaI, X[I]) } else if(is.function(lambdaI)) { # evaluate function at locations XI <- X[I] lambdaI <- lambdaI(XI$x, XI$y) } else if(is.numeric(lambdaI) && is.vector(as.numeric(lambdaI))) { # validate intensity vector if(length(lambdaI) != nI) stop(paste("The length of", sQuote("lambdaI"), "should equal the number of", Iname)) } else stop(paste(sQuote("lambdaI"), "should be a vector or an image")) if(is.null(lambdaJ)) { # estimate intensity lambdaJ <- density(X[J], ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) } else if(is.im(lambdaJ)) { # look up intensity values lambdaJ <- safelookup(lambdaJ, X[J]) } else if(is.function(lambdaJ)) { # evaluate function at locations XJ <- X[J] lambdaJ <- lambdaJ(XJ$x, XJ$y) } else if(is.numeric(lambdaJ) && is.vector(as.numeric(lambdaJ))) { # validate intensity vector if(length(lambdaJ) != nJ) stop(paste("The length of", sQuote("lambdaJ"), "should equal the number of", Jname)) } else stop(paste(sQuote("lambdaJ"), "should be a vector or an image")) # Weight for each pair if(!is.null(lambdaIJ)) { if(!is.matrix(lambdaIJ)) stop("lambdaIJ should be a matrix") if(nrow(lambdaIJ) != nI) stop(paste("nrow(lambdaIJ) should equal the number of", Iname)) if(ncol(lambdaIJ) != nJ) stop(paste("ncol(lambdaIJ) should equal the number of", Jname)) } # Recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame # It will be given more columns later K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", substitute(K[inhom, multi](r), NULL), "theo", , alim, c("r","{%s^{pois}}(r)"), desc, fname="K[list(inhom, multi)]") # identify close pairs of points XI <- X[I] XJ <- X[J] close <- crosspairs(XI, XJ, max(r)) # map (i,j) to original serial numbers in X orig <- seq_len(npts) 
imap <- orig[I] jmap <- orig[J] iX <- imap[close$i] jX <- jmap[close$j] # eliminate any identical pairs if(any(I & J)) { ok <- (iX != jX) if(!all(ok)) { close$i <- close$i[ok] close$j <- close$j[ok] close$xi <- close$xi[ok] close$yi <- close$yi[ok] close$xj <- close$xj[ok] close$yj <- close$yj[ok] close$dx <- close$dx[ok] close$dy <- close$dy[ok] close$d <- close$d[ok] } } # extract information for these pairs (relative to orderings of XI, XJ) dclose <- close$d icloseI <- close$i jcloseJ <- close$j # Form weight for each pair if(is.null(lambdaIJ)) weight <- 1/(lambdaI[icloseI] * lambdaJ[jcloseJ]) else weight <- 1/lambdaIJ[cbind(icloseI, jcloseJ)] # Compute estimates by each of the selected edge corrections. if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute distances to boundary b <- bdist.points(XI) bI <- b[icloseI] # apply reduced sample algorithm RS <- Kwtsum(dclose, bI, weight, b, 1/lambdaI, breaks) if(any(correction == "border")) { Kb <- RS$ratio K <- bind.fv(K, data.frame(border=Kb), "hat(%s^{bord})(r)", "border-corrected estimate of %s", "border") } if(any(correction == "bord.modif")) { Kbm <- RS$numerator/eroded.areas(W, r) K <- bind.fv(K, data.frame(bord.modif=Kbm), "hat(%s^{bordm})(r)", "modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "translate")) { # translation correction edgewt <- edge.Trans(XI[icloseI], XJ[jcloseJ], paired=TRUE) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Ktrans <- cumsum(wh)/area rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "hat(%s^{trans})(r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI[icloseI], matrix(dclose, ncol=1)) allweight <- edgewt * weight wh <- whist(dclose, breaks$val, allweight) Kiso <- cumsum(wh)/area rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- bind.fv(K, 
data.frame(iso=Kiso), "hat(%s^{iso})(r)", "Ripley isotropic correction estimate of %s", "iso") } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) return(K) } spatstat/R/First.R0000755000176000001440000000114712240732216013565 0ustar ripleyusers# First.R # # $Revision: 1.37 $ $Date: 2013/11/13 17:18:17 $ # .onLoad <- function(...) reset.spatstat.options() .onAttach <- function(libname, pkgname) { store.versionstring.spatstat() ver <- versionstring.spatstat() ni <- read.dcf(file=system.file("DESCRIPTION", package="spatstat"), fields="Nickname") ni <- as.character(ni) msg <- paste("\nspatstat", ver, " ", paren(paste("nickname:", sQuote(ni))), "\nFor an introduction to spatstat, type", sQuote("beginner")) packageStartupMessage(msg) invisible(NULL) } spatstat/R/plot.owin.R0000755000176000001440000002167212247607350014443 0ustar ripleyusers# # plot.owin.S # # The 'plot' method for observation windows (class "owin") # # $Revision: 1.41 $ $Date: 2013/10/09 00:50:18 $ # # # plot.owin <- function(x, main, add=FALSE, ..., box, edge=0.04, type = c("w", "n"), hatch=FALSE, angle=45, spacing=diameter(x)/50, invert=FALSE) { # # Function plot.owin. A method for plot. 
# if(missing(main)) main <- short.deparse(substitute(x)) W <- x verifyclass(W, "owin") type <- match.arg(type) if(missing(box) || is.null(box)) box <- is.mask(W) else stopifnot(is.logical(box) && length(box) == 1) #### if(is.expression(main)) nlines <- 1 else { # convert to string and count number of lines main <- paste(main) if(length(main) > 1) main <- paste(main, collapse="\n") if(nchar(main) == 0) nlines <- 0 else nlines <- length(strsplit(main, "\n")[[1]]) } ######### xlim <- xr <- W$xrange ylim <- yr <- W$yrange #################################################### if(!add) { # new plot # allow space for main title guesslinespace <- 0.08 * diff(yr) ylim[2] <- ylim[2] + nlines * guesslinespace # set up plot with equal scales do.call.matched("plot.default", resolve.defaults(list(x=numeric(0), y=numeric(0), type="n"), list(...), list(xlim=xlim, ylim=ylim, ann=FALSE, axes=FALSE, asp=1.0))) # add title in a reasonable place! if(nlines > 0) { parval <- resolve.defaults(list(...), par()) mainheight <- strheight(main, units="user", cex=parval$cex.main) gapheight <- (strheight("b\nb", units="user", cex=parval$cex.main) - 2 * strheight("b", units="user", cex=parval$cex.main)) text(x=mean(xr), y=yr[2] + mainheight + 0.5 * gapheight, labels=main, cex=parval$cex.main, col=parval$col.main, font=parval$font.main) } } # Draw surrounding box if(box) do.call.matched("segments", resolve.defaults( list(x0=xr[c(1,2,2,1)], y0=yr[c(1,1,2,2)], x1=xr[c(2,2,1,1)], y1=yr[c(1,2,2,1)]), list(...))) # If type = "n", do not plot the window. 
if(type == "n") return(invisible(NULL)) # Draw window switch(W$type, rectangle = { Wpoly <- as.polygonal(W) po <- Wpoly$bdry[[1]] do.call.matched("polygon", resolve.defaults(list(x=po), list(...)), extrargs="lwd") if(hatch) { L <- rlinegrid(angle, spacing, W) L <- L[W] do.call.matched("plot.psp", resolve.defaults(list(x=L, add=TRUE), list(...)), extrargs=c("lwd","lty","col")) } }, polygonal = { p <- W$bdry # Determine whether user wants to fill the interior col.poly <- resolve.defaults(list(...), list(col=NA))$col den.poly <- resolve.defaults(list(...), list(density=NULL))$density no.fill <- is.null(den.poly) && (is.null(col.poly) || is.na(col.poly)) # Determine whether we need to triangulate the interior. # If it is required to fill the interior, # this can be done directly using polygon() provided # there are no holes. Otherwise we must triangulate the interior. if(no.fill) triangulate <- FALSE else { # Determine whether there are any holes holes <- unlist(lapply(p, is.hole.xypolygon)) triangulate <- any(holes) } if(!triangulate) { # No triangulation required; # simply plot the polygons for(i in seq_along(p)) do.call.matched("polygon", resolve.defaults( list(x=p[[i]]), list(...)), extrargs="lwd") } else { # Try using polypath(): lucy <- names(dev.cur()) if(!(lucy %in% c("xfig","pictex","X11"))) { xx <- unlist(lapply(p, function(a) {c(NA, a$x)}))[-1] yy <- unlist(lapply(p, function(a) {c(NA, a$y)}))[-1] do.call.matched("polypath", resolve.defaults(list(x=xx,y=yy), list(border=col.poly), list(...))) } else { # decompose window into simply-connected pieces broken <- try(break.holes(W)) if(inherits(broken, "try-error")) { warning("Unable to plot filled polygons") } else { # Fill pieces with colour (and draw border in same colour) pp <- broken$bdry for(i in seq_len(length(pp))) do.call.matched("polygon", resolve.defaults(list(x=pp[[i]], border=col.poly), list(...))) } } # Now draw polygon boundaries for(i in seq_along(p)) do.call.matched("polygon", resolve.defaults( 
list(x=p[[i]]), list(density=0, col=NA), list(...)), extrargs="lwd") } if(hatch) { L <- rlinegrid(angle, spacing, W) L <- L[W] do.call.matched("plot.psp", resolve.defaults(list(x=L, add=TRUE), list(...)), extrargs=c("lwd","lty","col")) } }, mask = { # capture 'col' argument and ensure it's at least 2 values coldefault <- c(par("bg"), par("fg")) col <- resolve.defaults( list(...), spatstat.options("par.binary"), list(col=coldefault) )$col if(length(col) == 1) { col <- unique(c(par("bg"), col)) if(length(col) == 1) col <- c(par("fg"), col) } # invert colours? if(invert) col <- rev(col) do.call.matched("image.default", resolve.defaults( list(x=W$xcol, y=W$yrow, z=t(W$m), add=TRUE), list(col=col), list(...), spatstat.options("par.binary"), list(zlim=c(FALSE, TRUE)))) if(hatch) warning("Hatching is not implemented for mask windows") }, stop(paste("Don't know how to plot window of type", sQuote(W$type))) ) invisible() } break.holes <- function(x, splitby=NULL, depth=0, maxdepth=100) { if(is.null(splitby)) { # first call: validate x stopifnot(is.owin(x)) splitby <- "x" } if(depth > maxdepth) stop("Unable to divide window into simply-connected pieces") p <- x$bdry holes <- unlist(lapply(p, is.hole.xypolygon)) if(!any(holes)) return(x) nholes <- sum(holes) i <- min(which(holes)) p.i <- p[[i]] b <- as.rectangle(x) xr <- b$xrange yr <- b$yrange switch(splitby, x = { xsplit <- mean(range(p.i$x)) left <- c(xr[1], xsplit) right <- c(xsplit, xr[2]) pleft <- intersect.owin(x, owin(left, yr))$bdry pright <- intersect.owin(x, owin(right, yr))$bdry xnew <- owin(poly=c(pleft, pright), check=FALSE) nextsplit <- "y" }, y = { ysplit <- mean(range(p.i$y)) lower <- c(yr[1], ysplit) upper <- c(ysplit, yr[2]) plower <- intersect.owin(x, owin(xr, lower))$bdry pupper <- intersect.owin(x, owin(xr, upper))$bdry xnew <- owin(poly=c(plower, pupper), check=FALSE) nextsplit <- "x" }) # recurse xnew <- break.holes(xnew, splitby=nextsplit, depth=depth+1, maxdepth=max(maxdepth, 4*nholes)) return(xnew) 
} spatstat/R/colourtables.R0000755000176000001440000003466712252243435015214 0ustar ripleyusers# # colourtables.R # # support for colour maps and other lookup tables # # $Revision: 1.28 $ $Date: 2013/12/12 05:17:44 $ # colourmap <- function(col, ..., range=NULL, breaks=NULL, inputs=NULL) { # validate colour data h <- col2hex(col) # store without conversion f <- lut(col, ..., range=range, breaks=breaks, inputs=inputs) class(f) <- c("colourmap", class(f)) f } lut <- function(outputs, ..., range=NULL, breaks=NULL, inputs=NULL) { n <- length(outputs) given <- c(!is.null(range), !is.null(breaks), !is.null(inputs)) names(given) <- c("range", "breaks", "inputs") ngiven <- sum(given) if(ngiven == 0) stop(paste("One of the arguments", sQuote("range"), ",", sQuote("breaks"), "or", sQuote("inputs"), "should be given")) if(ngiven > 1) { offending <- names(breaks)[given] stop(paste("The arguments", commasep(sQuote(offending)), "are incompatible")) } if(!is.null(inputs)) { # discrete set of input values mapped to output values stopifnot(length(inputs) == length(outputs)) stuff <- list(n=n, discrete=TRUE, inputs=inputs, outputs=outputs) f <- function(x, what="value") { m <- match(x, stuff$inputs) if(what == "index") return(m) cout <- stuff$outputs[m] return(cout) } } else { # interval of real line mapped to colours if(is.null(breaks)) { breaks <- seq(from=range[1], to=range[2], length.out=length(outputs)+1) } else { stopifnot(is.numeric(breaks) && length(breaks) >= 2) stopifnot(length(breaks) == length(outputs) + 1) if(!all(diff(breaks) > 0)) stop("breaks must be increasing") } stuff <- list(n=n, discrete=FALSE, breaks=breaks, outputs=outputs) f <- function(x, what="value") { stopifnot(is.numeric(x)) x <- as.vector(x) z <- findInterval(x, stuff$breaks, rightmost.closed=TRUE) if(what == "index") return(z) cout <- stuff$outputs[z] return(cout) } } attr(f, "stuff") <- stuff class(f) <- c("lut", class(f)) f } print.lut <- function(x, ...) 
{ stuff <- attr(x, "stuff") n <- stuff$n if(inherits(x, "colourmap")) { tablename <- "Colour map" outputname <- "colour" } else { tablename <- "Lookup table" outputname <- "output" } if(stuff$discrete) { cat(paste(tablename, "for discrete set of input values\n")) out <- data.frame(input=stuff$inputs, output=stuff$outputs) } else { b <- stuff$breaks cat(paste(tablename, "for the range", prange(b[c(1,n+1)]), "\n")) leftend <- rep("[", n) rightend <- c(rep(")", n-1), "]") inames <- paste(leftend, b[-(n+1)], ", ", b[-1], rightend, sep="") out <- data.frame(interval=inames, output=stuff$outputs) } colnames(out)[2] <- outputname print(out) invisible(NULL) } print.colourmap <- function(x, ...) { NextMethod("print") } summary.lut <- function(object, ...) { s <- attr(object, "stuff") if(inherits(object, "colourmap")) { s$tablename <- "Colour map" s$outputname <- "colour" } else { s$tablename <- "Lookup table" s$outputname <- "output" } class(s) <- "summary.lut" return(s) } print.summary.lut <- function(x, ...) 
{ n <- x$n if(x$discrete) { cat(paste(x$tablename, "for discrete set of input values\n")) out <- data.frame(input=x$inputs, output=x$outputs) } else { b <- x$breaks cat(paste(x$tablename, "for the range", prange(b[c(1,n+1)]), "\n")) leftend <- rep("[", n) rightend <- c(rep(")", n-1), "]") inames <- paste(leftend, b[-(n+1)], ", ", b[-1], rightend, sep="") out <- data.frame(interval=inames, output=x$outputs) } colnames(out)[2] <- x$outputname print(out) } plot.colourmap <- local({ # recognised additional arguments to image.default() and axis() imageparams <- c("main", "asp", "sub", "axes", "ann", "cex", "font", "cex.axis", "cex.lab", "cex.main", "cex.sub", "col.axis", "col.lab", "col.main", "col.sub", "font.axis", "font.lab", "font.main", "font.sub") axisparams <- c("cex", "cex.axis", "cex.lab", "col.axis", "col.lab", "font.axis", "font.lab", "las", "mgp", "xaxp", "yaxp", "tck", "tcl", "xpd") linmap <- function(x, from, to) { to[1] + diff(to) * (x - from[1])/diff(from) } # rules to determine the ribbon dimensions when one dimension is given widthrule <- function(heightrange, separate, n, gap) { if(separate) 1 else diff(heightrange)/10 } heightrule <- function(widthrange, separate, n, gap) { (if(separate) (n + (n-1)*gap) else 10) * diff(widthrange) } plot.colourmap <- function(x, ..., main, xlim=NULL, ylim=NULL, vertical=FALSE, axis=TRUE, labelmap=NULL, gap=0.25, add=FALSE) { if(missing(main)) main <- short.deparse(substitute(x)) stuff <- attr(x, "stuff") col <- stuff$outputs n <- stuff$n discrete <- stuff$discrete if(discrete) { check.1.real(gap, "In plot.colourmap") explain.ifnot(gap >= 0, "In plot.colourmap") } separate <- discrete && (gap > 0) if(is.null(labelmap)) { labelmap <- function(x) x } else if(is.numeric(labelmap) && length(labelmap) == 1 && !discrete) { labscal <- labelmap labelmap <- function(x) { x * labscal } } else stopifnot(is.function(labelmap)) # determine pixel entries 'v' and colour map breakpoints 'bks' # to be passed to 'image.default' 
if(!discrete) { # real numbers: continuous ribbon bks <- stuff$breaks rr <- range(bks) v <- seq(from=rr[1], to=rr[2], length.out=max(n+1, 1024)) } else if(!separate) { # discrete values: blocks of colour, run together v <- (1:n) - 0.5 bks <- 0:n rr <- c(0,n) } else { # discrete values: separate blocks of colour vleft <- (1+gap) * (0:(n-1)) vright <- vleft + 1 v <- vleft + 0.5 rr <- c(0, n + (n-1)*gap) } # determine position of ribbon or blocks of colour if(is.null(xlim) && is.null(ylim)) { u <- widthrule(rr, separate, n, gap) if(!vertical) { xlim <- rr ylim <- c(0,u) } else { xlim <- c(0,u) ylim <- rr } } else if(is.null(ylim)) { if(!vertical) ylim <- c(0, widthrule(xlim, separate, n, gap)) else ylim <- c(0, heightrule(xlim, separate, n, gap)) } else if(is.null(xlim)) { if(!vertical) xlim <- c(0, heightrule(ylim, separate, n, gap)) else xlim <- c(0, widthrule(ylim, separate, n, gap)) } # .......... initialise plot ............................... if(!add) do.call.matched("plot.default", resolve.defaults(list(x=xlim, y=ylim, type="n", main=main, axes=FALSE, xlab="", ylab="", asp=1.0), list(...))) if(separate) { # ................ plot separate blocks of colour ................. if(!vertical) { # horizontal arrangement of blocks xleft <- linmap(vleft, rr, xlim) xright <- linmap(vright, rr, xlim) y <- ylim z <- matrix(1, 1, 1) for(i in 1:n) { x <- c(xleft[i], xright[i]) do.call.matched("image.default", resolve.defaults(list(x=x, y=y, z=z, add=TRUE), list(...), list(col=col[i])), extrargs=imageparams) } } else { # vertical arrangement of blocks x <- xlim ylow <- linmap(vleft, rr, ylim) yupp <- linmap(vright, rr, ylim) z <- matrix(1, 1, 1) for(i in 1:n) { y <- c(ylow[i], yupp[i]) do.call.matched("image.default", resolve.defaults(list(x=x, y=y, z=z, add=TRUE), list(...), list(col=col[i])), extrargs=imageparams) } } } else { # ................... plot ribbon image ............................. 
if(!vertical) { # horizontal colour ribbon x <- linmap(v, rr, xlim) y <- ylim z <- matrix(v, ncol=1) } else { # vertical colour ribbon y <- linmap(v, rr, ylim) z <- matrix(v, nrow=1) x <- xlim } do.call.matched("image.default", resolve.defaults(list(x=x, y=y, z=z, add=TRUE), list(...), list(breaks=bks, col=col)), extrargs=imageparams) } if(axis) { # ................. draw annotation .................. if(!vertical) { # add horizontal axis/annotation if(discrete) { la <- paste(labelmap(stuff$inputs)) at <- linmap(v, rr, xlim) } else { la <- prettyinside(rr) at <- linmap(la, rr, xlim) la <- labelmap(la) } # default axis position is below the ribbon (side=1) sidecode <- resolve.1.default("side", list(...), list(side=1)) if(!(sidecode %in% c(1,3))) warning(paste("side =", sidecode, "is not consistent with horizontal orientation")) pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw axis do.call.matched("axis", resolve.defaults(list(...), list(side = 1, pos = pos, at = at), list(labels=la, lwd=lwd0)), extrargs=axisparams) } else { # add vertical axis if(discrete) { la <- paste(labelmap(stuff$inputs)) at <- linmap(v, rr, ylim) } else { la <- prettyinside(rr) at <- linmap(la, rr, ylim) la <- labelmap(la) } # default axis position is to the right of ribbon (side=4) sidecode <- resolve.1.default("side", list(...), list(side=4)) if(!(sidecode %in% c(2,4))) warning(paste("side =", sidecode, "is not consistent with vertical orientation")) pos <- c(ylim[1], xlim[1], ylim[2], xlim[2])[sidecode] # don't draw axis lines if plotting separate blocks lwd0 <- if(separate) 0 else 1 # draw labels horizontally if plotting separate blocks las0 <- if(separate) 1 else 0 # draw axis do.call.matched("axis", resolve.defaults(list(...), list(side=4, pos=pos, at=at), list(labels=la, lwd=lwd0, las=las0)), extrargs=axisparams) } } invisible(NULL) } plot.colourmap }) # Interpolate a colourmap or lookup table 
defined on real numbers interp.colourmap <- function(m, n=512) { if(!inherits(m, "colourmap")) stop("m should be a colourmap") st <- attr(m, "stuff") if(st$discrete) { # discrete set of input values mapped to colours xknots <- st$inputs # Ensure the inputs are real numbers if(!is.numeric(xknots)) stop("Cannot interpolate: inputs are not numerical values") } else { # interval of real line, chopped into intervals, mapped to colours # Find midpoints of intervals bks <- st$breaks nb <- length(bks) xknots <- (bks[-1] + bks[-nb])/2 } # corresponding colours in hsv coordinates yknots.hsv <- rgb2hsv(col2rgb(st$outputs)) # transform 'hue' from polar to cartesian coordinate # divide domain into n equal intervals xrange <- range(xknots) xbreaks <- seq(xrange[1], xrange[2], length=n+1) xx <- (xbreaks[-1] + xbreaks[-(n+1)])/2 # interpolate saturation and value in hsv coordinates yy.sat <- approx(x=xknots, y=yknots.hsv["s", ], xout=xx)$y yy.val <- approx(x=xknots, y=yknots.hsv["v", ], xout=xx)$y # interpolate hue by first transforming polar to cartesian coordinate yknots.hue <- 2 * pi * yknots.hsv["h", ] yy.huex <- approx(x=xknots, y=cos(yknots.hue), xout=xx)$y yy.huey <- approx(x=xknots, y=sin(yknots.hue), xout=xx)$y yy.hue <- (atan2(yy.huey, yy.huex)/(2 * pi)) %% 1 # form colours using hue, sat, val yy <- hsv(yy.hue, yy.sat, yy.val) # done f <- colourmap(yy, breaks=xbreaks) return(f) } tweak.colourmap <- function(m, col, ..., inputs=NULL, range=NULL) { if(!inherits(m, "colourmap")) stop("m should be a colourmap") if(is.null(inputs) && is.null(range)) stop("Specify either inputs or range") if(!is.null(inputs) && !is.null(range)) stop("Do not specify both inputs and range") # determine indices of colours to be changed if(!is.null(inputs)) { ix <- m(inputs, what="index") } else { if(!(is.numeric(range) && length(range) == 2 && diff(range) > 0)) stop("range should be a numeric vector of length 2 giving (min, max)") if(length(col2hex(col)) != 1) stop("When range is given, col 
should be a single colour value") ixr <- m(range, what="index") ix <- (ixr[1]):(ixr[2]) } # reassign colours st <- attr(m, "stuff") outputs <- st$outputs is.hex <- function(z) identical(substr(z, 1, 7), substr(col2hex(z), 1, 7)) result.hex <- FALSE if(is.hex(outputs)) { # convert replacement data to hex col <- col2hex(col) result.hex <- TRUE } else if(is.hex(col)) { # convert existing data to hex outputs <- col2hex(outputs) result.hex <- TRUE } else if(!(is.character(outputs) && is.character(col))) { # unrecognised format - convert both to hex outputs <- col2hex(outputs) col <- col2hex(col) result.hex <- TRUE } if(result.hex) { # hex codes may be 7 or 9 characters outlen <- nchar(outputs) collen <- nchar(col) if(length(unique(c(outlen, collen))) > 1) { # convert all to 9 characters if(any(bad <- (outlen == 7))) outputs[bad] <- paste0(outputs[bad], "FF") if(any(bad <- (collen == 7))) col[bad] <- paste0(col[bad], "FF") } } # Finally, replace outputs[ix] <- col st$outputs <- outputs attr(m, "stuff") <- st assign("stuff", st, envir=environment(m)) return(m) } spatstat/R/exactMPLEstrauss.R0000644000176000001440000000377612237642727015731 0ustar ripleyusers# # exactMPLEstrauss.R # # 'exact' MPLE for stationary Strauss process # # $Revision: 1.5 $ $Date: 2012/07/13 09:12:41 $ # exactMPLEstrauss <- local({ # main function exactMPLEstrauss <- function(X, R, ngrid=2048, plotit=FALSE, project=TRUE) { n <- npoints(X) W <- as.owin(X) # border correction WminR <- erosion(W, R) bR <- (bdist.points(X) >= R) nR <- sum(bR) # evaluate neighbour counts for data points Tcounts <- crosspaircounts(X, X, R) - 1 sumT <- sum(Tcounts[bR]) # determine the coefficients a_k for k = 0, 1, ... 
Z <- scanmeasure(X, R, dimyx=ngrid) Z <- Z[WminR, drop=FALSE] kcounts <- tabulate(as.vector(Z$v) + 1L) pixarea <- with(Z, xstep * ystep) A <- kcounts * pixarea # find optimal log(gamma) op <- optim(log(0.5), lpl, sco, method="L-BFGS-B", control=list(fnscale=-1), lower=-Inf, upper=if(project) 0 else Inf, A=A, sumT=sumT, nR=nR) loggamma <- op$par # plot? if(plotit) { x <- seq(log(1e-4), if(project) 0 else log(1e4), length=512) plot(x, lpl(x, A, sumT, nR), type="l", xlab=expression(log(gamma)), ylab=expression(log(PL(gamma)))) abline(v=loggamma, lty=3) } # derive optimal beta kmax <-length(A) - 1 polypart <- A %*% exp(outer(0:kmax, loggamma)) beta <- nR/polypart logbeta <- log(beta) result <- c(logbeta, loggamma) names(result) <- c("(Intercept)", "Interaction") return(result) } # helper functions (vectorised) # log pseudolikelihood lpl <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <-length(A) - 1 polypart <- A %*% exp(outer(0:kmax, theta)) nR * (log(nR) - log(polypart) - 1) + theta * sumT } # pseudoscore sco <- function(theta, A=A, sumT=sumT, nR=nR) { kmax <- length(A) - 1 kseq <- 0:kmax mat <- exp(outer(kseq, theta)) polypart <- A %*% mat Dpolypart <- (A * kseq) %*% mat sumT - nR * Dpolypart/polypart } exactMPLEstrauss }) spatstat/R/summary.im.R0000755000176000001440000001023412237642727014611 0ustar ripleyusers# # summary.im.R # # summary() method for class "im" # # $Revision: 1.15 $ $Date: 2011/05/18 09:15:30 $ # # summary.im() # print.summary.im() # print.im() # summary.im <- function(object, ...) { verifyclass(object, "im") x <- object y <- unclass(x)[c("dim", "xstep", "ystep")] pixelarea <- y$xstep * y$ystep # extract image values v <- x$v inside <- !is.na(v) v <- v[inside] # type of values? y$type <- x$type # factor-valued? 
lev <- levels(x) if(fak <- !is.null(lev)) v <- factor(v, levels=seq_along(lev), labels=lev) switch(x$type, integer=, real={ y$integral <- sum(v) * pixelarea y$mean <- mean(v) y$range <- range(v) y$min <- y$range[1] y$max <- y$range[2] }, factor={ y$levels <- lev y$table <- table(v, dnn="") }, complex={ y$integral <- sum(v) * pixelarea y$mean <- mean(v) rr <- range(Re(v)) y$Re <- list(range=rr, min=rr[1], max=rr[2]) ri <- range(Im(v)) y$Im <- list(range=ri, min=ri[1], max=ri[2]) }, { # another unknown type pixelvalues <- v y$summary <- summary(pixelvalues) }) # summarise pixel raster win <- as.owin(x) y$window <- summary.owin(win) y$fullgrid <- (rescue.rectangle(win)$type == "rectangle") y$units <- unitname(x) class(y) <- "summary.im" return(y) } print.summary.im <- function(x, ...) { verifyclass(x, "summary.im") cat(paste(x$type, "-valued pixel image\n", sep="")) unitinfo <- summary(x$units) pluralunits <- unitinfo$plural di <- x$dim win <- x$window cat(paste(di[1], "x", di[2], "pixel array (ny, nx)\n")) cat("enclosing rectangle: ") cat(paste("[", paste(win$xrange, collapse=", "), "] x [", paste(win$yrange, collapse=", "), "] ", pluralunits, "\n", sep="")) cat(paste("dimensions of each pixel:", signif(x$xstep, 3), "x", signif(x$ystep, 3), pluralunits, "\n")) if(!is.null(explain <- unitinfo$explain)) cat(paste(explain, "\n")) if(x$fullgrid) { cat("Image is defined on the full rectangular grid\n") whatpart <- "Frame" } else { cat("Image is defined on a subset of the rectangular grid\n") whatpart <- "Subset" } cat(paste(whatpart, "area = ", win$area, "square", pluralunits, "\n")) cat(paste("Pixel values ", if(x$fullgrid) "" else "(inside window)", ":\n", sep="")) switch(x$type, integer=, real={ cat(paste( "\trange = [", paste(x$range, collapse=","), "]\n", "\tintegral = ", x$integral, "\n", "\tmean = ", x$mean, "\n", sep="")) }, factor={ print(x$table) }, complex={ cat(paste( "\trange: Real [", paste(x$Re$range, collapse=","), "], Imaginary [", paste(x$Im$range, 
collapse=","), "]\n", "\tintegral = ", x$integral, "\n", "\tmean = ", x$mean, "\n", sep="")) }, { print(x$summary) }) return(invisible(NULL)) } print.im <- function(x, ...) { cat(paste(x$type, "-valued pixel image\n", sep="")) if(x$type == "factor") { cat("factor levels:\n") print(levels(x)) } unitinfo <- summary(unitname(x)) di <- x$dim cat(paste(di[1], "x", di[2], "pixel array (ny, nx)\n")) cat("enclosing rectangle: ") cat(paste("[", paste(signif(x$xrange, 5), collapse=", "), "] x [", paste(signif(x$yrange, 5), collapse=", "), "] ", unitinfo$plural, " ", unitinfo$explain, "\n", sep="")) return(invisible(NULL)) } spatstat/R/rmh.R0000755000176000001440000000010712237642727013274 0ustar ripleyusers# # generic rmh rmh <- function(model, ...){ UseMethod("rmh") } spatstat/R/weights.R0000755000176000001440000001762312237642727014173 0ustar ripleyusers# # weights.S # # Utilities for computing quadrature weights # # $Revision: 4.29 $ $Date: 2013/04/25 06:37:43 $ # # # Main functions: # gridweights() Divide the window frame into a regular nx * ny # grid of rectangular tiles. Given an arbitrary # pattern of (data + dummy) points derive the # 'counting weights'. # # dirichlet.weights() Compute the areas of the tiles of the # Dirichlet tessellation generated by the # given pattern of (data+dummy) points, # restricted to the window. # # Auxiliary functions: # # countingweights() compute the counting weights # for a GENERIC tiling scheme and an arbitrary # pattern of (data + dummy) points, # given the tile areas and the information # that point number k belongs to tile number id[k]. # # # gridindex() Divide the window frame into a regular nx * ny # grid of rectangular tiles. # Compute tile membership for arbitrary x,y. 
#
# grid1index()       1-dimensional analogue of gridindex()
#
#
#-------------------------------------------------------------------

# Compute the 'counting weights' for a generic tiling scheme:
# each point receives (area of its tile) / (number of points in its tile).
countingweights <- function(id, areas, check=TRUE) {
  #
  # id:     cell indices of n points
  #         (length n, values in 1:k)
  #
  # areas:  areas of k cells
  #         (length k)
  #
  id <- as.integer(id)
  fid <- factor(id, levels=seq_along(areas))
  counts <- table(fid)
  w <- areas[id] / counts[id]   # ensures denominator > 0
  w <- as.vector(w)
  #
  # that's it; but check for funny business
  #
  if(check) {
    zerocount <- (counts == 0)
    zeroarea <- (areas == 0)
    if(any(uhoh <- !zeroarea & zerocount)) {
      # tiles with positive area but no points: their area is 'lost'
      lostfrac <- sum(areas[uhoh])/sum(areas)
      warning(paste("some tiles with positive area do not contain any points:",
                    "relative error =", signif(lostfrac, 4)))
    }
    if(any(!zerocount & zeroarea)) {
      warning("Some tiles with zero area contain points")
      warning("Some weights are zero")
      attr(w, "zeroes") <- zeroarea[id]
    }
  }
  #
  names(w) <- NULL
  return(w)
}

gridindex <- function(x, y, xrange, yrange, nx, ny) {
  #
  # The box with dimensions xrange, yrange is divided
  # into nx * ny cells.
  #
  # For each point (x[i], y[i]) compute the index (ix, iy)
  # of the cell containing the point.
  #
  ix <- grid1index(x, xrange, nx)
  iy <- grid1index(y, yrange, ny)
  #
  return(list(ix=ix, iy=iy, index=as.integer((iy-1) * nx + ix)))
}

# 1-dimensional analogue of gridindex(): cell index in 1..nx, clamped.
grid1index <- function(x, xrange, nx) {
  i <- ceiling( nx * (x - xrange[1])/diff(xrange))
  i <- pmax.int(1, i)
  i <- pmin.int(i, nx)
  i
}

gridweights <- function(X, ntile=NULL, ..., window=NULL, verbose=FALSE,
                        npix=NULL, areas=NULL) {
  #
  # Compute counting weights based on a regular tessellation of the
  # window frame into ntile[1] * ntile[2] rectangular tiles.
  #
  # Arguments X and (optionally) 'window' are interpreted as a
  # point pattern.
  #
  # The window frame is divided into a regular ntile[1] * ntile[2] grid
  # of rectangular tiles. The counting weights based on this tessellation
  # are computed for the points (x, y) of the pattern.
  #
  # 'npix' determines the dimensions of the pixel raster used to
  # approximate tile areas.

  X <- as.ppp(X, window)
  x <- X$x
  y <- X$y
  win <- X$window

  # determine number of tiles
  if(is.null(ntile))
    ntile <- default.ntile(X)
  if(length(ntile) == 1)
    ntile <- rep.int(ntile, 2)
  nx <- ntile[1]
  ny <- ntile[2]

  if(verbose)
    cat(paste("grid weights for a", nx, "x", ny, "grid of tiles\n"))

  if(is.null(areas)) {
    # compute tile areas
    if(win$type == "rectangle") {
      # all tiles have equal area in a rectangular window
      tilearea <- area.owin(win)/(nx * ny)
      areas <- rep.int(tilearea, nx * ny)
    } else {
      # convert window to mask
      win <- as.mask(win, dimyx=rev(npix))
      if(verbose) {
        if(!is.null(npix))
          np <- npix
        else {
          np <- rev(spatstat.options("npixel"))
          if(length(np) == 1)
            np <- rep.int(np, 2)
        }
        cat(paste("Approximating window by mask (",
                  np[1], " x ", np[2], " pixels)\n", sep=""))
      }
      # extract pixel coordinates inside window
      xx <- as.vector(raster.x(win)[win$m])
      yy <- as.vector(raster.y(win)[win$m])
      # classify all pixels into tiles
      pixelid <- gridindex(xx, yy, win$xrange, win$yrange, nx, ny)$index
      pixelid <- factor(pixelid, levels=seq_len(nx * ny))
      # compute digital areas of tiles
      tilepixels <- as.vector(table(pixelid))
      pixelarea <- win$xstep * win$ystep
      areas <- tilepixels * pixelarea
    }
  }

  # classify each point according to its tile
  if(win$type == "mask") {
    # first move each data point to nearest pixel
    Xapprox <- nearest.raster.point(X$x, X$y, win, indices=FALSE)
    x <- Xapprox$x
    y <- Xapprox$y
  }
  id <- gridindex(x, y, win$xrange, win$yrange, nx, ny)$index
  # compute counting weights
  w <- countingweights(id, areas)
  # attach information about weight construction parameters
  attr(w, "weight.parameters") <-
    list(method="grid", ntile=ntile, npix=npix, areas=areas)
  return(w)
}

dirichlet.weights <- function(X, window = NULL, exact=TRUE, ...) {
  #
  # Compute weights based on Dirichlet tessellation of the window
  # induced by the point pattern X.
  # The weights are just the tile areas.
  #
  # NOTE:  X should contain both data and dummy points,
  # if you need these weights for the B-T-B method.
  #
  # Arguments X and (optionally) 'window' are interpreted as a
  # point pattern.
  #
  # If the window is a rectangle, we invoke Rolf Turner's "deldir"
  # package to compute the areas of the tiles of the Dirichlet
  # tessellation of the window frame induced by the points.
  # [NOTE: the functionality of deldir to create dummy points
  #  is NOT used. ]
  #     if exact=TRUE  compute the exact areas, using "deldir"
  #     if exact=FALSE compute the digital areas using exactdt()
  #
  # If the window is a mask, we compute the digital area of
  # each tile of the Dirichlet tessellation by counting pixels.
  #
  #
  #
  X <- as.ppp(X, window)
  x <- X$x
  y <- X$y
  win <- X$window

  if(exact && (win$type == "rectangle")) {
    rw <- c(win$xrange, win$yrange)
    # invoke deldir() with NO DUMMY POINTS
    tessellation <- deldir(x, y, dpl=NULL, rw=rw)
    # extract tile areas
    w <- tessellation$summary[, 'dir.area']
  } else {
    # Compute digital areas of Dirichlet tiles.
    win <- as.mask(win)
    X$window <- win
    #
    # Nearest data point to each pixel:
    tileid <- exactdt(X)$i
    #
    if(win$type == "mask")
      # Restrict to window (result is a vector - OK)
      tileid <- tileid[win$m]
    # Count pixels in each tile
    id <- factor(tileid, levels=seq_len(X$n))
    counts <- table(id)
    # turn off the christmas lights
    class(counts) <- NULL
    names(counts) <- NULL
    dimnames(counts) <- NULL
    # Convert to digital area
    pixelarea <- win$xstep * win$ystep
    w <- pixelarea * counts
    # Check for zero pixel counts
    zeroes <- (counts == 0)
    if(any(zeroes)) {
      warning("some Dirichlet tiles have zero digital area")
      attr(w, "zeroes") <- zeroes
    }
  }
  # attach information about weight construction parameters
  attr(w, "weight.parameters") <- list(method="dirichlet", exact=exact)
  return(w)
}

default.ntile <- function(X) {
  # default number of tiles (n x n) for tile weights
  # when data and dummy points are X
  X <- as.ppp(X)
  guess.ngrid <- 10 * floor(sqrt(X$n)/10)
  max(5, guess.ngrid/2)
}
spatstat/R/bw.ppl.R0000644000176000001440000000137012237642727013710 0ustar ripleyusers
#
# bw.ppl.R
#
# Likelihood cross-validation for kernel smoother of point pattern
#
# $Revision: 1.2 $ $Date: 2013/08/26 02:34:00 $
#

# Select a kernel bandwidth for density.ppp by maximising the
# (leave-one-out) likelihood cross-validation criterion over a
# logarithmically-spaced grid of 'ns' candidate values in 'srange'.
bw.ppl <- function(X, ..., srange=NULL, ns=32) {
  stopifnot(is.ppp(X))
  if(!is.null(srange))
    check.range(srange)
  else
    srange <- c(min(nndist(X)), diameter(as.owin(X))/2)
  sigma <- exp(seq(log(srange[1]), log(srange[2]), length=ns))
  cv <- numeric(ns)
  for(i in 1:ns) {
    si <- sigma[i]
    lamx <- density(X, sigma=si, at="points", leaveoneout=TRUE)
    lam <- density(X, sigma=si)
    # log composite likelihood (up to a constant)
    cv[i] <- sum(log(lamx)) - integral.im(lam)
  }
  result <- bw.optim(cv, sigma, iopt=which.max(cv),
                     creator="bw.ppl",
                     criterion="Likelihood Cross-Validation")
  return(result)
}
spatstat/R/kstest.R0000755000176000001440000002666612237642727014035 0ustar ripleyusers
#
# kstest.R
#
# $Revision: 1.58 $  $Date: 2013/01/30 02:12:56 $
#
#
# --------- old -------------

ks.test.ppm <- function(...)
{
  .Deprecated("kstest.ppm", package="spatstat")
  kstest.ppm(...)
}

# ---------------------------

# Generic: Kolmogorov-Smirnov test of goodness-of-fit based on a
# spatial covariate. Methods dispatch on the class of the first argument.
kstest <- function(...) {
  UseMethod("kstest")
}

kstest.ppp <- function(X, covariate, ..., jitter=TRUE) {
  Xname <- short.deparse(substitute(X))
  covname <- singlestring(short.deparse(substitute(covariate)))
  if(is.character(covariate)) covname <- covariate
  if(!is.marked(X, dfok=TRUE)) {
    # unmarked
    model <- ppm(X)
    modelname <- "CSR"
  } else if(is.multitype(X)) {
    # multitype
    mf <- summary(X)$marks$frequency
    if(all(mf > 0)) {
      model <- ppm(X, ~marks)
      modelname <- "CSRI"
    } else {
      warning("Ignoring marks, because some mark values have zero frequency")
      X <- unmark(X)
      model <- ppm(X)
      modelname <- "CSR"
    }
  } else {
    # marked - general case
    X <- unmark(X)
    warning("marks ignored")
    model <- ppm(X)
    modelname <- "CSR"
  }
  do.call("spatialCDFtest",
          resolve.defaults(list(model, covariate, test="ks"),
                           list(jitter=jitter),
                           list(...),
                           list(modelname=modelname,
                                covname=covname, dataname=Xname)))
}

kstest.ppm <- function(model, covariate, ..., jitter=TRUE) {
  modelname <- short.deparse(substitute(model))
  covname <- singlestring(short.deparse(substitute(covariate)))
  verifyclass(model, "ppm")
  if(is.character(covariate)) covname <- covariate
  if(is.poisson(model) && is.stationary(model))
    modelname <- "CSR"
  do.call("spatialCDFtest",
          resolve.defaults(list(model, covariate, test="ks"),
                           list(jitter=jitter),
                           list(...),
                           list(modelname=modelname,
                                covname=covname)))
}

kstest.lpp <- function(X, covariate, ..., jitter=TRUE) {
  Xname <- short.deparse(substitute(X))
  covname <- singlestring(short.deparse(substitute(covariate)))
  if(is.character(covariate)) covname <- covariate
  if(!is.marked(X, dfok=TRUE)) {
    # unmarked
    model <- lppm(X)
    modelname <- "CSR"
  } else if(is.multitype(X)) {
    # multitype
    mf <- table(marks(X))
    if(all(mf > 0)) {
      model <- lppm(X, ~marks)
      modelname <- "CSRI"
    } else {
      warning("Ignoring marks, because some mark values have zero frequency")
      X <- unmark(X)
      # BUG FIX: X is an "lpp" (point pattern on a linear network);
      # the original called ppm(X) here, inconsistently with the
      # other branches of this method. Fit a linear-network model.
      model <- lppm(X)
      modelname <- "CSR"
    }
  } else {
    # marked - general case
    X <- unmark(X)
    warning("marks ignored")
    # BUG FIX: as above, use lppm() rather than ppm() for "lpp" data
    model <- lppm(X)
    modelname <- "CSR"
  }
  do.call("spatialCDFtest",
          resolve.defaults(list(model, covariate, test="ks"),
                           list(jitter=jitter),
                           list(...),
                           list(modelname=modelname,
                                covname=covname, dataname=Xname)))
}

kstest.lppm <- function(model, covariate, ..., jitter=TRUE) {
  modelname <- short.deparse(substitute(model))
  covname <- singlestring(short.deparse(substitute(covariate)))
  verifyclass(model, "lppm")
  if(is.character(covariate)) covname <- covariate
  if(is.poisson(model) && is.stationary(model))
    modelname <- "CSR"
  do.call("spatialCDFtest",
          resolve.defaults(list(model, covariate, test="ks"),
                           list(jitter=jitter),
                           list(...),
                           list(modelname=modelname,
                                covname=covname)))
}

kstest.slrm <- function(model, covariate, ...,
                        modelname=NULL, covname=NULL) {
  # get names
  if(is.null(modelname))
    modelname <- short.deparse(substitute(model))
  if(is.null(covname))
    covname <- short.deparse(substitute(covariate))
  dataname <- model$CallInfo$responsename
  #
  stopifnot(is.slrm(model))
  stopifnot(is.im(covariate))
  # extract data
  prob <- fitted(model)
  covim <- as.im(covariate, W=as.owin(prob))
  probvalu <- as.matrix(prob)
  covvalu <- as.matrix(covim)
  ok <- !is.na(probvalu) & !is.na(covvalu)
  probvalu <- as.vector(probvalu[ok])
  covvalu <- as.vector(covvalu[ok])
  # compile weighted cdf's
  FZ <- ewcdf(covvalu, probvalu/sum(probvalu))
  X <- model$Data$response
  ZX <- safelookup(covim, X)
  FZX <- ewcdf(ZX)
  # Ensure support of cdf includes the range of the data
  xxx <- knots(FZ)
  yyy <- FZ(xxx)
  if(min(xxx) > min(ZX)) {
    xxx <- c(min(ZX), xxx)
    yyy <- c(0, yyy)
  }
  if(max(xxx) < max(ZX)) {
    xxx <- c(xxx, max(ZX))
    yyy <- c(yyy, 1)
  }
  # make piecewise linear approximation of cdf
  FZ <- approxfun(xxx, yyy, rule=2)
  # now apply cdf
  U <- FZ(ZX)
  # Test uniformity of transformed values
  result <- ks.test(U, "punif", ...)
  # modify the 'htest' entries
  result$method <- paste("Spatial Kolmogorov-Smirnov test of",
                         "inhomogeneous Poisson process",
                         "in two dimensions")
  result$data.name <-
    paste("covariate", sQuote(paste(covname, collapse="")),
          "evaluated at points of", sQuote(dataname), "\n\t",
          "and transformed to uniform distribution under model",
          sQuote(modelname))
  # additional class 'kstest'
  class(result) <- c("kstest", class(result))
  attr(result, "prep") <-
    list(Zvalues=covvalu, ZX=ZX, FZ=FZ, FZX=ecdf(ZX), U=U)
  attr(result, "info") <- list(modelname=modelname, covname=covname,
                               dataname=dataname, csr=FALSE)
  return(result)
}

#............. helper functions ........................#

spatialCDFtest <- function(model, covariate, test, ...,
                           dimyx=NULL, eps=NULL,
                           jitter=TRUE,
                           modelname=NULL, covname=NULL,
                           dataname=NULL) {
  if(!is.poisson(model))
    stop("Only implemented for Poisson point process models")
  # conduct test based on comparison of CDF's of covariate values
  test <- pickoption("test", test, c(ks="ks"))
  # compute the essential data
  fra <- spatialCDFframe(model, covariate, dimyx=dimyx, eps=eps,
                         jitter=jitter,
                         modelname=modelname, covname=covname,
                         dataname=dataname)
  values <- fra$values
  info <- fra$info
  # Test uniformity of transformed values
  U <- values$U
  switch(test,
         ks={
           result <- ks.test(U, "punif", ...)
         },
         stop("Unrecognised test option"))
  # modify the 'htest' entries
  csr <- info$csr
  result$method <- paste("Spatial Kolmogorov-Smirnov test of",
                         if(csr) "CSR" else "inhomogeneous Poisson process",
                         "in", info$spacename)
  result$data.name <-
    paste("covariate", sQuote(singlestring(info$covname)),
          "evaluated at points of", sQuote(info$dataname), "\n\t",
          "and transformed to uniform distribution under",
          if(csr) info$modelname else sQuote(info$modelname))
  # additional class 'kstest'
  class(result) <- c("kstest", class(result))
  attr(result, "frame") <- fra
  return(result)
}

spatialCDFframe <- function(model, covariate, ...) {
  # evaluate CDF of covariate values at data points and at pixels
  stuff <- evalCovar(model, covariate, ...)
  # extract
  values <- stuff$values
  info <- stuff$info
  Zvalues <- values$Zvalues
  lambda <- values$lambda
  weights <- values$weights
  ZX <- values$ZX
  # compute empirical cdf of Z values at points of X
  FZX <- ecdf(ZX)
  # form weighted cdf of Z values in window
  wts <- lambda * weights
  FZ <- ewcdf(Zvalues, wts/sum(wts))
  # Ensure support of cdf includes the range of the data
  xxx <- knots(FZ)
  yyy <- FZ(xxx)
  minZX <- min(ZX, na.rm=TRUE)
  minxxx <- min(xxx, na.rm=TRUE)
  if(minxxx > minZX) {
    xxx <- c(minZX, xxx)
    yyy <- c(0, yyy)
  }
  maxZX <- max(ZX, na.rm=TRUE)
  maxxxx <- max(xxx, na.rm=TRUE)
  if(maxxxx < maxZX) {
    xxx <- c(xxx, maxZX)
    yyy <- c(yyy, 1)
  }
  # make piecewise linear approximation of cdf
  FZ <- approxfun(xxx, yyy, rule=2)
  # now apply cdf
  U <- FZ(ZX)
  # pack up
  stuff$values$FZ <- FZ
  stuff$values$FZX <- FZX
  stuff$values$U <- U
  class(stuff) <- "spatialCDFframe"
  return(stuff)
}

plot.kstest <- function(x, ..., style=c("cdf", "PP", "QQ"),
                        lwd=par("lwd"), col=par("col"), lty=par("lty"),
                        lwd0=lwd, col0=col, lty0=lty) {
  style <- match.arg(style)
  fram <- attr(x, "frame")
  if(!is.null(fram)) {
    values <- fram$values
    info <- fram$info
  } else {
    # old style
    values <- attr(x, "prep")
    info <- attr(x, "info")
  }
  # cdf of covariate Z over window
  FZ <- values$FZ
  # cdf of covariate values at data points
  FZX <- values$FZX
  # blurb
  covname <- info$covname
  covdescrip <- switch(covname,
                       x="x coordinate",
                       y="y coordinate",
                       paste("covariate", dQuote(covname)))
  # plot it
  switch(style,
         cdf={
           # plot both cdf's superimposed
           qZ <- get("x", environment(FZ))
           pZ <- get("y", environment(FZ))
           main <- c(x$method,
                     paste("based on distribution of", covdescrip),
                     paste("p-value=", signif(x$p.value, 4)))
           do.call("plot.default",
                   resolve.defaults(
                                    list(x=qZ, y=pZ, type="l"),
                                    list(...),
                                    list(lwd=lwd0, col=col0, lty=lty0),
                                    list(xlab=info$covname,
                                         ylab="probability",
                                         main=main)))
           plot(FZX, add=TRUE, do.points=FALSE,
                lwd=lwd, col=col, lty=lty)
         },
         PP={
           # plot FZX o (FZ)^{-1}
           pX <- get("y", environment(FZX))
           qX <- get("x", environment(FZX))
           p0 <- FZ(qX)
           do.call("plot.default",
                   resolve.defaults(
                                    list(x=p0, y=pX),
                                    list(...),
                                    list(col=col),
                                    list(xlim=c(0,1), ylim=c(0,1),
                                         xlab="Theoretical probability",
                                         ylab="Observed probability",
                                         main="")))
           abline(0,1, lwd=lwd0, col=col0, lty=lty0)
         },
         QQ={
           # plot (FZX)^{-1} o FZ
           pZ <- get("y", environment(FZ))
           qZ <- get("x", environment(FZ))
           FZinverse <- approxfun(pZ, qZ, rule=2)
           pX <- get("y", environment(FZX))
           qX <- get("x", environment(FZX))
           qZX <- FZinverse(pX)
           Zrange <- range(qZ, qX, qZX)
           xlab <- paste("Theoretical quantile of", covname)
           ylab <- paste("Observed quantile of", covname)
           do.call("plot.default",
                   resolve.defaults(
                                    list(x=qZX, y=qX),
                                    list(...),
                                    list(col=col),
                                    list(xlim=Zrange, ylim=Zrange,
                                         xlab=xlab, ylab=ylab,
                                         main="")))
           abline(0,1, lwd=lwd0, col=col0, lty=lty0)
         })
  return(invisible(NULL))
}
spatstat/R/fitted.mppm.R0000644000176000001440000000344312237642727014740 0ustar ripleyusers
#
# fitted.mppm.R
#
# method for 'fitted' for mppm objects
#
#   $Revision: 1.5 $   $Date: 2006/10/09 02:25:33 $
#

fitted.mppm <- function(object, ..., type="lambda", dataonly=FALSE) {
  # NOTE(review): result of summary() is unused; retained in case it
  # performs validation of 'object' - confirm before removing.
  sumry <- summary(object)
  type <- pickoption("type", type, c(lambda="lambda",
                                     cif   ="lambda",
                                     trend ="trend"),
                     multi=FALSE, exact=FALSE)
  # extract fitted model object and data frame
  glmfit  <- object$Fit$FIT
  glmdata <- object$Fit$moadf
  # interaction names
  Vnames <- unlist(object$Fit$Vnamelist)
  interacting <- (length(Vnames) > 0)
  # Modification of `glmdata' may be required
  if(interacting)
    switch(type,
           trend={
             # zero the interaction statistics
             glmdata[ , Vnames] <- 0
           },
           lambda={
             # Find any dummy points with zero conditional intensity
             forbid <- matrowany(as.matrix(glmdata[, Vnames]) == -Inf)
             # exclude from predict.glm
             glmdata <- glmdata[!forbid, ]
           })
  # Compute predicted [conditional] intensity values
  values <- predict(glmfit, newdata=glmdata, type="response")
  # Note: the `newdata' argument is necessary in order to obtain
  # predictions at all quadrature points. If it is omitted then
  # we would only get predictions at the quadrature points j
  # where glmdata$SUBSET[j]=TRUE.
  if(interacting && type=="lambda") {
    # reinsert zeroes
    vals <- numeric(length(forbid))
    vals[forbid] <- 0
    vals[!forbid] <- values
    values <- vals
  }
  names(values) <- NULL
  id <- glmdata$id
  if(dataonly) {
    # extract only data values
    # BUG FIX: the original subscripted with `is.data` - the name of a
    # spatstat *function*, not the local logical vector `isdata` defined
    # on the previous line - causing "invalid subscript type 'closure'"
    # whenever dataonly=TRUE.
    isdata <- (glmdata$.mpl.Y != 0)
    values <- values[isdata]
    id <- id[isdata]
  }
  return(split(values, id))
}
spatstat/R/pointsonlines.R0000755000176000001440000000273512237642727015413 0ustar ripleyusers
#
# pointsonlines.R
#
# place points at regular intervals along line segments
#
# $Revision: 1.6 $ $Date: 2011/09/23 01:55:58 $
#

pointsOnLines <- function(X, eps=NULL, np=1000, shortok=TRUE) {
  stopifnot(is.psp(X))
  len <- lengths.psp(X)
  nseg <- length(len)
  if(is.null(eps)) {
    # choose spacing so that about 'np' points are laid in total
    stopifnot(is.numeric(np) && length(np) == 1)
    stopifnot(is.finite(np) && np > 0)
    eps <- sum(len)/np
  } else {
    stopifnot(is.numeric(eps) && length(eps) == 1)
    stopifnot(is.finite(eps) && eps > 0)
  }
  # initialise
  Xdf  <- as.data.frame(X)
  xmid <- with(Xdf, (x0+x1)/2)
  ymid <- with(Xdf, (y0+y1)/2)
  # handle very short segments
  # (removed unused local variable 'allsegs' from original)
  if(any(short <- (len <= eps)) && shortok) {
    # very short segments: use midpoints
    Z <- data.frame(x = xmid[short], y = ymid[short])
  } else Z <- data.frame(x=numeric(0), y=numeric(0))
  # handle other segments
  for(i in (1:nseg)[!short]) {
    # divide segment into pieces of length eps
    # with shorter bits at each end
    leni <- len[i]
    nwhole <- floor(leni/eps)
    if(leni/eps - nwhole < 0.5 && nwhole > 2)
      nwhole <- nwhole - 1
    rump <- (leni - nwhole * eps)/2
    brks <- c(0, rump + (0:nwhole) * eps, leni)
    nbrks <- length(brks)
    # points at middle of each piece
    ss <- (brks[-1] + brks[-nbrks])/2
    x <- with(Xdf, x0[i] + (ss/leni) * (x1[i]-x0[i]))
    y <- with(Xdf, y0[i] + (ss/leni) * (y1[i]-y0[i]))
    Z <- rbind(Z, data.frame(x=x, y=y))
  }
  Z <- as.ppp(Z, W=X$window)
  return(Z)
}
spatstat/R/mpl.R0000755000176000001440000012415112237642727013304 0ustar ripleyusers# mpl.R # # $Revision: 5.172 $ $Date: 2013/10/16 07:35:44 $ # # mpl.engine() # Fit a point process model to a two-dimensional point pattern # by maximum pseudolikelihood # # mpl.prepare() # set up data for glm procedure # # ------------------------------------------------------------------- # "mpl" <- function(Q, trend = ~1, interaction = NULL, data = NULL, correction="border", rbord = 0, use.gam=FALSE) { .Deprecated("ppm", package="spatstat") ppm(Q=Q, trend=trend, interaction=interaction, covariates=data, correction=correction, rbord=rbord, use.gam=use.gam, method="mpl") } "mpl.engine" <- function(Q, trend = ~1, interaction = NULL, ..., covariates = NULL, covfunargs = list(), correction="border", rbord = 0, use.gam=FALSE, gcontrol=list(), famille=NULL, forcefit=FALSE, nd = NULL, eps = eps, allcovar=FALSE, callstring="", precomputed=NULL, savecomputed=FALSE, preponly=FALSE, rename.intercept=TRUE, justQ = FALSE, weightfactor = NULL ) { # # Extract precomputed data if available # if(!is.null(precomputed$Q)) { Q <- precomputed$Q X <- precomputed$X P <- precomputed$U } else { # # Determine quadrature scheme from argument Q # if(verifyclass(Q, "quad", fatal=FALSE)) { # user-supplied quadrature scheme - validate it validate.quad(Q, fatal=TRUE, repair=FALSE, announce=TRUE) # Extract data points X <- Q$data } else if(verifyclass(Q, "ppp", fatal = FALSE)) { # point pattern - create default quadrature scheme X <- Q Q <- quadscheme(X, nd=nd, eps=eps) } else stop("First argument Q should be a point pattern or a quadrature scheme") # # # Data and dummy points together P <- union.quad(Q) } # # secret exit if(justQ) return(Q) # # computed <- if(savecomputed) list(X=X, Q=Q, U=P) else NULL # # Validate main arguments if(!is.null(trend) && !inherits(trend, "formula")) stop(paste("Argument", sQuote("trend"), "must be a formula")) if(!is.null(interaction) && !inherits(interaction, "interact")) 
stop(paste("Argument", sQuote("interaction"), "has incorrect format")) # check.1.real(rbord, "In ppm") explain.ifnot(rbord >= 0, "In ppm") # rbord applies only to border correction if(correction != "border") rbord <- 0 # # # Interpret the call want.trend <- !is.null(trend) && !identical.formulae(trend, ~1) want.inter <- !is.null(interaction) && !is.null(interaction$family) trend.formula <- if(want.trend) trend else (~1) # Stamp with spatstat version number spv <- package_version(versionstring.spatstat()) the.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2013/10/16 07:35:44 $") if(want.inter) { # ensure we're using the latest version of the interaction object if(outdated.interact(interaction)) interaction <- update(interaction) } # if(!want.trend && !want.inter && !forcefit && !allcovar) { # the model is the uniform Poisson process # The MPLE (= MLE) can be evaluated directly npts <- npoints(X) W <- as.owin(X) if(correction == "border" && rbord > 0) { npts <- sum(bdist.points(X) >= rbord) areaW <- eroded.areas(W, rbord) } else { npts <- npoints(X) areaW <- area.owin(W) } volume <- areaW * markspace.integral(X) lambda <- npts/volume # fitted canonical coefficient co <- log(lambda) # asymptotic variance of canonical coefficient varcov <- matrix(1/npts, 1, 1) fisher <- matrix(npts, 1, 1) se <- sqrt(1/npts) # give names tag <- if(rename.intercept) "log(lambda)" else "(Intercept)" names(co) <- tag dimnames(varcov) <- dimnames(fisher) <- list(tag, tag) # maximised log likelihood maxlogpl <- if(npts == 0) 0 else npts * (log(lambda) - 1) # rslt <- list( method = "mpl", fitter = "exact", projected = FALSE, coef = co, trend = NULL, interaction = NULL, fitin = fii(), Q = Q, maxlogpl = maxlogpl, internal = list(computed=computed, se=se), covariates = covariates, # covariates are still retained! 
covfunargs = covfunargs, correction = correction, rbord = rbord, terms = terms(trend.formula), fisher = fisher, varcov = varcov, version = the.version, problems = list()) class(rslt) <- "ppm" return(rslt) } ################# P r e p a r e D a t a ###################### prep <- mpl.prepare(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, "quadrature points", callstring, allcovar=allcovar, precomputed=precomputed, savecomputed=savecomputed, covfunargs=covfunargs, weightfactor=weightfactor, ...) # back door if(preponly) { # exit now, returning prepared data frame and internal information prep$info <- list(want.trend=want.trend, want.inter=want.inter, correction=correction, rbord=rbord, interaction=interaction) return(prep) } fmla <- prep$fmla glmdata <- prep$glmdata problems <- prep$problems likelihood.is.zero <- prep$likelihood.is.zero is.identifiable <- prep$is.identifiable computed <- append(computed, prep$computed) IsOffset <- prep$IsOffset ################# F i t i t #################################### if(!is.identifiable) stop(paste("in", callstring, ":", problems$unidentifiable$print), call.=FALSE) # to avoid problem with package checker .mpl.W <- glmdata$.mpl.W .mpl.SUBSET <- glmdata$.mpl.SUBSET # determine algorithm control parameters if(is.null(gcontrol)) gcontrol <- list() else stopifnot(is.list(gcontrol)) gc <- if(use.gam) "gam.control" else "glm.control" gcontrol <- do.call(gc, gcontrol) # Fit the generalized linear/additive model. if(is.null(famille)) { # the sanctioned technique, using `quasi' family if(want.trend && use.gam) FIT <- gam(fmla, family=quasi(link=log, variance=mu), weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) else FIT <- glm(fmla, family=quasi(link=log, variance=mu), weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol, model=FALSE) } else { # for experimentation only! 
if(is.function(famille)) famille <- famille() stopifnot(inherits(famille, "family")) if(want.trend && use.gam) FIT <- gam(fmla, family=famille, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol) else FIT <- glm(fmla, family=famille, weights=.mpl.W, data=glmdata, subset=.mpl.SUBSET, control=gcontrol, model=FALSE) } environment(FIT$terms) <- sys.frame(sys.nframe()) ################ I n t e r p r e t f i t ####################### # Fitted coefficients co <- FIT$coef # glm covariates W <- glmdata$.mpl.W SUBSET <- glmdata$.mpl.SUBSET Z <- is.data(Q) Vnames <- prep$Vnames # attained value of max log pseudolikelihood maxlogpl <- if(likelihood.is.zero) { -Inf } else -(deviance(FIT)/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) # fitted interaction object fitin <- if(want.inter) fii(interaction, co, Vnames, IsOffset) else fii() ###################################################################### # Clean up & return rslt <- list( method = "mpl", fitter = if(use.gam) "gam" else "glm", projected = FALSE, coef = co, trend = if(want.trend) trend else NULL, interaction = if(want.inter) interaction else NULL, fitin = fitin, Q = Q, maxlogpl = maxlogpl, internal = list(glmfit=FIT, glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset, fmla=fmla, computed=computed), covariates = covariates, covfunargs = covfunargs, correction = correction, rbord = rbord, terms = terms(trend.formula), version = the.version, problems = problems) class(rslt) <- "ppm" return(rslt) } ########################################################################## ### ///////////////////////////////////////////////////////////////////// ########################################################################## mpl.prepare <- function(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, Pname="quadrature points", callstring="", ..., covfunargs=list(), allcovar=FALSE, precomputed=NULL, savecomputed=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL, 
warn.illegal=TRUE, warn.unidentifiable=TRUE, weightfactor=NULL, skip.border=FALSE) { # Q: quadrature scheme # X = data.quad(Q) # P = union.quad(Q) if(missing(want.trend)) want.trend <- !is.null(trend) && !identical.formulae(trend, ~1) if(missing(want.inter)) want.inter <- !is.null(interaction) && !is.null(interaction$family) computed <- list() problems <- list() names.precomputed <- names(precomputed) likelihood.is.zero <- FALSE is.identifiable <- TRUE if(!missing(vnamebase)) { if(length(vnamebase) == 1) vnamebase <- rep.int(vnamebase, 2) if(!is.character(vnamebase) || length(vnamebase) != 2) stop("Internal error: illegal format of vnamebase") } if(!is.null(vnameprefix)) { if(!is.character(vnameprefix) || length(vnameprefix) != 1) stop("Internal error: illegal format of vnameprefix") } ################ C o m p u t e d a t a #################### # Extract covariate values if((allcovar || want.trend) && !is.null(covariates)) { if("covariates.df" %in% names.precomputed) covariates.df <- precomputed$covariates.df else covariates.df <- mpl.get.covariates(covariates, P, Pname, covfunargs) if(savecomputed) computed$covariates.df <- covariates.df } ### Form the weights and the ``response variable''. if("dotmplbase" %in% names.precomputed) .mpl <- precomputed$dotmplbase else { nQ <- n.quad(Q) wQ <- w.quad(Q) mQ <- marks.quad(Q) # is NULL for unmarked patterns zQ <- is.data(Q) yQ <- numeric(nQ) yQ[zQ] <- 1/wQ[zQ] zeroes <- attr(wQ, "zeroes") sQ <- if(is.null(zeroes)) rep.int(TRUE, nQ) else !zeroes # tweak weights ONLY if(!is.null(weightfactor)) wQ <- wQ * weightfactor # pack up .mpl <- list(W = wQ, Z = zQ, Y = yQ, MARKS = mQ, SUBSET = sQ) } if(savecomputed) computed$dotmplbase <- .mpl glmdata <- data.frame(.mpl.W = .mpl$W, .mpl.Y = .mpl$Y) # count data and dummy points in specified subset izdat <- .mpl$Z[.mpl$SUBSET] ndata <- sum(izdat) ndummy <- sum(!izdat) # Determine the domain of integration for the pseudolikelihood. 
if(correction == "border") { if("bdP" %in% names.precomputed) bdP <- precomputed$bdP else bdP <- bdist.points(P) if(savecomputed) computed$bdP <- bdP .mpl$DOMAIN <- (bdP >= rbord) } skip.border <- skip.border && (correction == "border") ####################### T r e n d ############################## internal.names <- c(".mpl.W", ".mpl.Y", ".mpl.Z", ".mpl.SUBSET", "SUBSET", ".mpl") reserved.names <- c("x", "y", "marks", internal.names) check.clashes <- function(forbidden, offered, where) { name.match <- outer(forbidden, offered, "==") if(any(name.match)) { is.matched <- apply(name.match, 2, any) matched.names <- (offered)[is.matched] if(sum(is.matched) == 1) { return(paste("The variable",sQuote(matched.names), "in", where, "is a reserved name")) } else { return(paste("The variables", paste(sQuote(matched.names), collapse=", "), "in", where, "are reserved names")) } } return("") } if(allcovar || want.trend) { trendvariables <- variablesinformula(trend) # Check for use of internal names in trend cc <- check.clashes(internal.names, trendvariables, "the model formula") if(cc != "") stop(cc) # Standard variables if(allcovar || "x" %in% trendvariables) glmdata <- data.frame(glmdata, x=P$x) if(allcovar || "y" %in% trendvariables) glmdata <- data.frame(glmdata, y=P$y) if(("marks" %in% trendvariables) || !is.null(.mpl$MARKS)) { if(is.null(.mpl$MARKS)) stop("Model formula depends on marks, but data do not have marks", call.=FALSE) glmdata <- data.frame(glmdata, marks=.mpl$MARKS) } # # Check covariates if(!is.null(covariates)) { # Check for duplication of reserved names cc <- check.clashes(reserved.names, names(covariates), sQuote("covariates")) if(cc != "") stop(cc) # Take only those covariates that are named in the trend formula if(!allcovar) needed <- names(covariates.df) %in% trendvariables else needed <- rep.int(TRUE, ncol(covariates.df)) if(any(needed)) { covariates.needed <- covariates.df[, needed, drop=FALSE] # Append to `glmdata' glmdata <- 
data.frame(glmdata,covariates.needed) # Ignore any quadrature points that have NA's in the covariates nbg <- is.na(covariates.needed) if(any(nbg)) { offending <- matcolany(nbg) covnames.na <- names(covariates.needed)[offending] quadpoints.na <- matrowany(nbg) n.na <- sum(quadpoints.na) n.tot <- length(quadpoints.na) errate <- n.na/n.tot pcerror <- round(signif(100 * errate, 2), 2) complaint <- paste("Values of the", ngettext(length(covnames.na), "covariate", "covariates"), paste(sQuote(covnames.na), collapse=", "), "were NA or undefined at", paste(pcerror, "%", " (", n.na, " out of ", n.tot, ")", sep=""), "of the", Pname) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. = FALSE) .mpl$SUBSET <- .mpl$SUBSET & !quadpoints.na details <- list(covnames.na = covnames.na, quadpoints.na = quadpoints.na, print = complaint) problems <- append(problems, list(na.covariates=details)) } } } } ###################### I n t e r a c t i o n #################### Vnames <- NULL IsOffset <- NULL if(want.inter) { # Form the matrix of "regression variables" V. 
# The rows of V correspond to the rows of P (quadrature points) # while the column(s) of V are the regression variables (log-potentials) E <- equalpairs.quad(Q) if(!skip.border) { # usual case V <- evalInteraction(X, P, E, interaction, correction, ..., precomputed=precomputed, savecomputed=savecomputed) } else { # evaluate only in eroded domain Retain <- .mpl$DOMAIN Psub <- P[Retain] # map serial numbers in 'P[Retain]' to serial numbers in 'Psub' Pmap <- cumsum(Retain) keepE <- Retain[ E[,2] ] # adjust equal pairs matrix Esub <- E[ keepE, , drop=FALSE] Esub[,2] <- Pmap[Esub[,2]] # call evaluator on reduced data # with 'W=NULL' (currently detected only by AreaInter) V <- evalInteraction(X, Psub, Esub, interaction, correction, ..., W=NULL, precomputed=precomputed, savecomputed=savecomputed) } if(!is.matrix(V)) stop("interaction evaluator did not return a matrix") # extract information about offsets IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- FALSE if(skip.border) { # fill in the values in the border region with zeroes. Vnew <- matrix(0, nrow=npoints(P), ncol=ncol(V)) colnames(Vnew) <- colnames(V) Vnew[Retain, ] <- V # retain attributes attr(Vnew, "IsOffset") <- IsOffset attr(Vnew, "computed") <- attr(V, "computed") attr(Vnew, "POT") <- attr(V, "POT") V <- Vnew } # extract intermediate computation results if(savecomputed) computed <- append(computed, attr(V, "computed")) # Augment data frame by appending the regression variables for interactions. # # First determine the names of the variables # Vnames <- dimnames(V)[[2]] if(is.null(Vnames)) { # No names were provided for the columns of V. # Give them default names. # In ppm the names will be "Interaction" or "Interact.1", "Interact.2", ... # In mppm an alternative tag will be specified by vnamebase. 
nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1] else paste(vnamebase[2], 1:nc, sep="") dimnames(V) <- list(dimnames(V)[[1]], Vnames) } else if(!is.null(vnameprefix)) { # Variable names were provided by the evaluator (e.g. MultiStrauss). # Prefix the variable names by a string # (typically required by mppm) Vnames <- paste(vnameprefix, Vnames, sep="") dimnames(V) <- list(dimnames(V)[[1]], Vnames) } # Check the names are valid as column names in a dataframe okVnames <- make.names(Vnames, unique=TRUE) if(any(Vnames != okVnames)) { warning("Names of interaction terms contained illegal characters; names have been repaired.") Vnames <- okVnames } # Check for name clashes between the interaction variables # and the formula cc <- check.clashes(Vnames, termsinformula(trend), "model formula") if(cc != "") stop(cc) # and with the variables in 'covariates' if(!is.null(covariates)) { cc <- check.clashes(Vnames, names(covariates), sQuote("covariates")) if(cc != "") stop(cc) } # OK. append variables. glmdata <- data.frame(glmdata, V) # check IsOffset matches Vnames if(length(IsOffset) != length(Vnames)) { if(length(IsOffset) == 1) IsOffset <- rep.int(IsOffset, length(Vnames)) else stop("Internal error: IsOffset has wrong length", call.=FALSE) } # Keep only those quadrature points for which the # conditional intensity is nonzero. #KEEP <- apply(V != -Inf, 1, all) .mpl$KEEP <- matrowall(V != -Inf) .mpl$SUBSET <- .mpl$SUBSET & .mpl$KEEP # Check that there are at least some data and dummy points remaining datremain <- .mpl$Z[.mpl$SUBSET] somedat <- any(datremain) somedum <- !all(datremain) if(warn.unidentifiable && !(somedat && somedum)) { # Model would be unidentifiable if it were fitted. 
# Register problem is.identifiable <- FALSE if(ndata == 0) { complaint <- "model is unidentifiable: data pattern is empty" } else { offending <- !c(somedat, somedum) offending <- c("all data points", "all dummy points")[offending] offending <- paste(offending, collapse=" and ") complaint <- paste("model is unidentifiable:", offending, "have zero conditional intensity") } details <- list(data=!somedat, dummy=!somedum, print=complaint) problems <- append(problems, list(unidentifiable=details)) } # check whether the model has zero likelihood: # check whether ANY data points have zero conditional intensity if(any(.mpl$Z & !.mpl$KEEP)) { howmany <- sum(.mpl$Z & !.mpl$KEEP) complaint <- paste(howmany, "data point(s) are illegal", "(zero conditional intensity under the model)") details <- list(illegal=howmany, print=complaint) problems <- append(problems, list(zerolikelihood=details)) if(warn.illegal && is.identifiable) warning(paste(complaint, ". Occurred while executing: ", callstring, sep=""), call. 
= FALSE) likelihood.is.zero <- TRUE } } ################## D a t a f r a m e ################### if(correction == "border") .mpl$SUBSET <- .mpl$DOMAIN & .mpl$SUBSET glmdata <- cbind(glmdata, data.frame(.mpl.SUBSET=.mpl$SUBSET, stringsAsFactors=FALSE)) ################# F o r m u l a ################################## if(!want.trend) trend <- ~1 trendpart <- paste(as.character(trend), collapse=" ") if(!want.inter) rhs <- trendpart else { VN <- Vnames # enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") rhs <- paste(c(trendpart, VN), collapse= "+") } fmla <- paste(".mpl.Y ", rhs) fmla <- as.formula(fmla) #### character string of trend formula (without Vnames) trendfmla <- paste(".mpl.Y ", trendpart) #### return(list(fmla=fmla, trendfmla=trendfmla, glmdata=glmdata, Vnames=Vnames, IsOffset=IsOffset, problems=problems, likelihood.is.zero=likelihood.is.zero, is.identifiable=is.identifiable, computed=computed)) } #################################################################### #################################################################### mpl.get.covariates <- function(covariates, locations, type="locations", covfunargs=list()) { covargname <- sQuote(short.deparse(substitute(covariates))) locargname <- sQuote(short.deparse(substitute(locations))) if(is.null(covfunargs)) covfunargs <- list() # x <- locations$x y <- locations$y if(is.null(x) || is.null(y)) { xy <- xy.coords(locations) x <- xy$x y <- xy$y } if(is.null(x) || is.null(y)) stop(paste("Can't interpret", locargname, "as x,y coordinates")) n <- length(x) if(is.data.frame(covariates)) { if(nrow(covariates) != n) stop(paste("Number of rows in", covargname, "does not equal the number of", type)) return(covariates) } else if(is.list(covariates)) { if(length(covariates) == 0) return(as.data.frame(matrix(, n, 0))) is.number <- function(x) { is.numeric(x) && (length(x) == 1) } isim <- unlist(lapply(covariates, is.im)) isfun <- 
unlist(lapply(covariates, is.function)) iswin <- unlist(lapply(covariates, is.owin)) isnum <- unlist(lapply(covariates, is.number)) if(!all(isim | isfun | isnum | iswin)) stop(paste("Each entry in the list", covargname, "should be an image, a function, a window or a single number")) if(sum(nzchar(names(covariates))) < length(covariates)) stop(paste("Some entries in the list", covargname, "are un-named")) # look up values of each covariate at the quadrature points values <- covariates evalfxy <- function(f, x, y, extra) { if(length(extra) == 0) return(f(x,y)) # extra arguments must be matched explicitly by name ok <- names(extra) %in% names(formals(f)) z <- do.call(f, append(list(x,y), extra[ok])) return(z) } insidexy <- function(w, x, y) { inside.owin(x, y, w) } values[isim] <- lapply(covariates[isim], lookup.im, x=x, y=y, naok=TRUE, strict=FALSE) values[isfun] <- lapply(covariates[isfun], evalfxy, x=x, y=y, extra=covfunargs) values[isnum] <- lapply(covariates[isnum], rep, length(x)) values[iswin] <- lapply(covariates[iswin], insidexy, x=x, y=y) return(as.data.frame(values)) } else stop(paste(covargname, "must be either a data frame or a list")) } bt.frame <- function(Q, trend=~1, interaction=NULL, ..., covariates=NULL, correction="border", rbord=0, use.gam=FALSE, allcovar=FALSE) { prep <- mpl.engine(Q=Q, trend=trend, interaction=interaction, ..., covariates=covariates, correction=correction, rbord=rbord, use.gam=use.gam, allcovar=allcovar, preponly=TRUE, forcefit=TRUE) class(prep) <- c("bt.frame", class(prep)) return(prep) } print.bt.frame <- function(x, ...) 
{ cat("Model frame for Berman-Turner device\n") df <- x$glmdata cat(paste("$glmdata: Data frame with", nrow(df), "rows and", ncol(df), "columns\n")) cat(" Column names:\t") cat(paste(paste(names(df),collapse="\t"), "\n")) cat("Complete model formula ($fmla):\t") print(x$fmla) info <- x$info if(info$want.trend) { cat("Trend:\tyes\nTrend formula string ($trendfmla):\t") cat(paste(x$trendfmla, "\n")) } else cat("Trend:\tno\n") cat("Interaction ($info$interaction):\t") inte <- info$interaction if(is.null(inte)) inte <- Poisson() print(inte, family=FALSE, brief=TRUE) if(!is.poisson.interact(inte)) { cat("Internal names of interaction variables ($Vnames):\t") cat(paste(x$Vnames, collapse="\t")) cat("\n") } edge <- info$correction cat(paste("Edge correction ($info$correction):\t", sQuote(edge), "\n")) if(edge == "border") cat(paste("\tBorder width ($info$rbord):\t", info$rbord, "\n")) if(length(x$problems) > 0) { cat("Problems:\n") print(x$problems) } if(length(x$computed) > 0) cat(paste("Frame contains saved computations for", commasep(dQuote(names(x$computed))))) return(invisible(NULL)) } partialModelMatrix <- function(X, D, model, callstring="", ...) { # X = 'data' # D = 'dummy' Q <- quad(X,D) P <- union.quad(Q) trend <- model$trend inter <- model$interaction covar <- model$covariates prep <- mpl.prepare(Q, X, P, trend, inter, covar, correction=model$correction, rbord=model$rbord, Pname="data points", callstring=callstring, warn.unidentifiable=FALSE, ...) 
fmla <- prep$fmla glmdata <- prep$glmdata mof <- model.frame(fmla, glmdata) mom <- model.matrix(fmla, mof) if(!identical(all.equal(colnames(mom), names(coef(model))), TRUE)) warning("Internal error: mismatch between column names of model matrix and names of coefficient vector in fitted model") attr(mom, "mplsubset") <- glmdata$.mpl.SUBSET return(mom) } oversize.quad <- function(Q, ..., nU, nX) { # Determine whether the quadrature scheme is # too large to handle in one piece (in mpl) # for a generic interaction # nU = number of quadrature points # nX = number of data points if(missing(nU)) nU <- n.quad(Q) if(missing(nX)) nX <- npoints(Q$data) nmat <- as.double(nU) * nX nMAX <- spatstat.options("maxmatrix") needsplit <- (nmat > nMAX) return(needsplit) } # function that should be called to evaluate interaction terms # between quadrature points and data points evalInteraction <- function(X, P, E = equalpairs(P, X), interaction, correction, ..., precomputed=NULL, savecomputed=FALSE) { # evaluate the interaction potential # (does not assign/touch the variable names) verifyclass(interaction, "interact") # handle Poisson case if(is.poisson(interaction)) { out <- matrix(, nrow=npoints(P), ncol=0) attr(out, "IsOffset") <- logical(0) return(out) } # determine whether to use fast evaluation in C fastok <- (spatstat.options("fasteval") %in% c("on", "test")) if(fastok) { cando <- interaction$can.do.fast par <- interaction$par dofast <- !is.null(cando) && cando(X, correction, par) } else dofast <- FALSE # determine whether to split quadscheme into blocks if(dofast) { dosplit <- FALSE } else { # decide whether the quadrature scheme is too large to handle in one piece needsplit <- oversize.quad(nU=npoints(P), nX=npoints(X)) # not implemented when savecomputed=TRUE dosplit <- needsplit && !savecomputed if(needsplit && savecomputed) warning(paste("Oversize quadscheme cannot be split into blocks", "because savecomputed=TRUE;", "memory allocation error may occur")) } if(!dosplit) { # 
normal case V <- evalInterEngine(X=X, P=P, E=E, interaction=interaction, correction=correction, ..., precomputed=precomputed, savecomputed=savecomputed) } else { # Too many quadrature points: split into blocks nX <- npoints(X) nP <- npoints(P) # Determine which evaluation points are data points Pdata <- E[,2] # hence which are dummy points Pall <- seq_len(nP) Pdummy <- if(length(Pdata) > 0) Pall[-Pdata] else Pall nD <- length(Pdummy) # size of full matrix nmat <- (nD + nX) * nX nMAX <- spatstat.options("maxmatrix") # Calculate number of dummy points in largest permissible X * (X+D) matrix nperblock <- max(1, floor(nMAX/nX - nX)) # determine number of such blocks nblocks <- ceiling(nD/nperblock) nfull <- nblocks - 1 # announce if(nblocks > 1) message(paste("Large quadrature scheme", "split into blocks to avoid memory size limits;", nD, "dummy points", "split into", nblocks, "blocks,", "the first", if(nfull > 1) paste(nfull, "blocks") else "block", "containing", nperblock, "dummy", ngettext(nperblock, "point", "points"), "and the last block containing", nD - nperblock * nfull, "dummy points")) # # Ei <- cbind(1:nX, 1:nX) # for(iblock in 1:nblocks) { first <- min(nD, (iblock - 1) * nperblock + 1) last <- min(nD, iblock * nperblock) # extract dummy points Di <- P[Pdummy[first:last]] Pi <- superimpose(X, Di, check=FALSE, W=X$window) # evaluate potential Vi <- evalInterEngine(X=X, P=Pi, E=Ei, interaction=interaction, correction=correction, ..., savecomputed=FALSE) if(iblock == 1) { V <- Vi } else { # tack on the glm variables for the extra DUMMY points only V <- rbind(V, Vi[-(1:nX), , drop=FALSE]) } } } return(V) } # workhorse function that actually calls relevant code to evaluate interaction evalInterEngine <- function(X, P, E, interaction, correction, ..., precomputed=NULL, savecomputed=FALSE) { # fast evaluator (C code) may exist fasteval <- interaction$fasteval cando <- interaction$can.do.fast par <- interaction$par feopt <- spatstat.options("fasteval") dofast <- 
!is.null(fasteval) && (is.null(cando) || cando(X, correction,par)) && (feopt %in% c("on", "test")) V <- NULL if(dofast) { if(feopt == "test") message("Calling fasteval") V <- fasteval(X, P, E, interaction$pot, interaction$par, correction, ...) } if(is.null(V)) { # use generic evaluator for family evaluate <- interaction$family$eval Reach <- reach(interaction) if("precomputed" %in% names(formals(evaluate))) { # Use precomputed data # version 1.9-3 onward (pairwise and pairsat families) V <- evaluate(X, P, E, interaction$pot, interaction$par, correction, ..., Reach=Reach, precomputed=precomputed, savecomputed=savecomputed) } else { # Cannot use precomputed data # Object created by earlier version of ppm # or not pairwise/pairsat interaction V <- evaluate(X, P, E, interaction$pot, interaction$par, correction, ..., Reach=Reach) } } return(V) } deltasuffstat <- local({ deltasuffstat <- function(model, ..., restrict=TRUE, dataonly=TRUE, force=FALSE) { stopifnot(is.ppm(model)) if(dataonly) { X <- data.ppm(model) nX <- npoints(X) } else { X <- quad.ppm(model) nX <- n.quad(X) } ncoef <- length(coef(model)) inte <- as.interact(model) zeroes <- array(0, dim=c(nX, nX, ncoef)) if(is.poisson(inte)) return(zeroes) # look for member function $delta2 in the interaction v <- NULL if(!is.null(delta2 <- inte$delta2) && is.function(delta2)) { v <- delta2(X, inte, model$correction) } # look for generic $delta2 function for the family if(is.null(v) && !is.null(delta2 <- inte$family$delta2) && is.function(delta2)) v <- delta2(X, inte, model$correction) # no luck? if(is.null(v)) { if(!force) return(NULL) # use brute force algorithm v <- if(dataonly) deltasufX(model) else deltasufQ(model) } # make it a 3D array if(length(dim(v)) == 2) v <- array(v, dim=c(dim(v), 1)) if(restrict) { # kill contributions from points outside the domain of pseudolikelihood # (e.g. 
points in the border region) use <- if(dataonly) getppmdatasubset(model) else getglmsubset(model) if(any(kill <- !use)) { kill <- array(outer(kill, kill, "&"), dim=dim(v)) v[kill] <- 0 } } if(all(dim(v) == dim(zeroes))) return(v) # Pad to correct dimensions # Determine which coefficients correspond to interaction terms f <- fitin(model) Inames <- f$Vnames[!f$IsOffset] Imap <- match(Inames, names(coef(model))) if(length(Imap) == 0) return(v) if(any(is.na(Imap))) stop("Internal error: cannot match interaction coefficients") # insert 'v' into array result <- zeroes result[ , , Imap] <- v return(result) } # compute deltasuffstat using partialModelMatrix deltasufX <- function(model) { stopifnot(is.ppm(model)) X <- data.ppm(model) nX <- npoints(X) p <- length(coef(model)) isdata <- is.data(quad.ppm(model)) m <- model.matrix(model)[isdata, ] ok <- getppmdatasubset(model) # canonical statistic before and after deleting X[j] # mbefore[ , i, j] = h(X[i] | X) # mafter[ , i, j] = h(X[i] | X[-j]) mafter <- mbefore <- array(t(m), dim=c(p, nX, nX)) # identify close pairs R <- reach(model) if(is.finite(R)) { cl <- closepairs(X, R, what="indices") I <- cl$i J <- cl$j cl2 <- closepairs(X, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } else { # either infinite reach, or something wrong IJ <- expand.grid(I=1:nX, J=1:nX) IJ <- subset(IJ, I != J) I2 <- I <- IJ$I J2 <- J <- IJ$J } # filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] # if(length(I) > 0 && length(J) > 0) { # .............. loop over pairs ........................ 
# The following ensures that 'empty' and 'X' have compatible marks empty <- X[integer(0)] # Run through pairs for(i in unique(I)) { # all points within 2R J2i <- unique(J2[I2==i]) # all points within R Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { Xi <- X[i] # neighbours of X[i] XJi <- X[Ji] # replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) # index of XJi in X.i J.i <- match(Ji, J2i) if(any(is.na(J.i))) stop("Internal error: Ji not a subset of J2i") # equalpairs matrix E.i <- cbind(J.i, seq_len(nJi)) # values of sufficient statistic # h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] # for all j pmj <- partialModelMatrix(X.i, empty, model)[J.i, , drop=FALSE] # sufficient statistic in reverse order # h(X[i] | X[-j]) = h(X[i] | X[-c(i,j)] # for all j pmi <- matrix(, nJi, p) for(k in 1:nJi) { j <- Ji[k] # X.ij <- X[-c(i,j)] X.ij <- X.i[-J.i[k]] pmi[k, ] <- partialModelMatrix(X.ij, Xi, model)[nX.i, ] } # mafter[ , Ji, i] <- t(pmj) mafter[ , i, Ji] <- t(pmi) } } } # delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) delta <- mbefore - mafter # delta[i, j, ] = h(X[i] | X) - h(X[i] | X[-j]) delta <- aperm(delta, c(2,3,1)) return(delta) } deltasufQ <- function(model) { stopifnot(is.ppm(model)) Q <- quad.ppm(model) X <- data.ppm(model) U <- union.quad(Q) nU <- npoints(U) nX <- npoints(X) isdata <- is.data(Q) isdummy <- !isdata p <- length(coef(model)) m <- model.matrix(model)[isdata, ] ok <- getglmsubset(model) # canonical statistic before and after adding/deleting U[j] mafter <- mbefore <- array(t(m), dim=c(p, nU, nU)) delta <- array(0, dim=dim(mafter)) # mbefore[ , i, j] = h(U[i] | X) # For data points X[j] # mafter[ , i, j] = h(U[i] | X[-j]) # delta[ , i, j] = h(U[i] | X) - h(U[i] | X[-j]) # For dummy points X[j] # mafter[ , i, j] = h(U[i] | X \cup U[j]) # delta[ , i, j] = h(U[i] | X \cup U[j]) - h(U[i] | X) changesign <- ifelseAB(isdata, -1, 1) # identify close pairs of quadrature points R <- reach(model) if(is.finite(R)) { cl <- closepairs(U, R, 
what="indices") I <- cl$i J <- cl$j cl2 <- closepairs(U, 2*R, what="indices") I2 <- cl2$i J2 <- cl2$j } else { # either infinite reach, or something wrong IJ <- expand.grid(I=1:nU, J=1:nX) IJ <- IJ[ with(IJ, I != J), ] I2 <- I <- IJ$I J2 <- J <- IJ$J } # filter: I and J must both belong to the nominated subset okIJ <- ok[I] & ok[J] I <- I[okIJ] J <- J[okIJ] # if(length(I) > 0 && length(J) > 0) { # .............. loop over pairs of quadrature points ............... # Run through pairs uI <- unique(I) zI <- isdata[uI] uIdata <- uI[zI] uIdummy <- uI[!zI] # Run through pairs i, j where 'i' is a data point for(i in uIdata) { # all DATA points within 2R of X[i] # This represents X[-i] J2i <- unique(J2[I2==i]) J2i <- J2i[isdata[J2i]] # all QUADRATURE points within R of X[i] Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { isd <- isdata[Ji] # data points which are neighbours of X[i] XJi <- X[Ji[isd]] # dummy points which are neighbours of X[i] DJi <- U[Ji[!isd]] # replace X[-i] by X[-i] \cap b(0, 2R) X.i <- X[J2i] nX.i <- length(J2i) # index of XJi in X.i J.i <- match(Ji[isd], J2i) if(any(is.na(J.i))) stop("Internal error: Ji[isd] not a subset of J2i") # index of DJi in superimpose(X.i, DJi) JDi <- nX.i + seq_len(sum(!isd)) # values of sufficient statistic # h(X[j] | X[-i]) = h(X[j] | X[-c(i,j)] # for all j pmj <- partialModelMatrix(X.i, DJi, model)[c(J.i, JDi), , drop=FALSE] # mafter[ , Ji, i] <- t(pmj) } } # Run through pairs i, j where 'i' is a dummy point for(i in uIdummy) { # all DATA points within 2R of U[i] J2i <- unique(J2[I2==i]) J2i <- J2i[isdata[J2i]] # all QUADRATURE points within R of U[i] Ji <- unique(J[I==i]) nJi <- length(Ji) if(nJi > 0) { isd <- isdata[Ji] JiData <- Ji[isd] JiDummy <- Ji[!isd] # data points which are neighbours of U[i] XJi <- X[JiData] # dummy points which are neighbours of U[i] DJi <- U[JiDummy] # replace X \cup U[i] by (X \cap b(0, 2R)) \cup U[i] J2Ui <- c(J2i, i) XUi <- U[J2Ui] nXUi <- length(J2Ui) # index of XJi in X.i J.i <- 
match(JiData, J2Ui) if(any(is.na(J.i))) stop("Internal error: Ji[isd] not a subset of J2i") # index of DJi in superimpose(X.i, DJi) JDi <- nXUi + seq_len(length(JiDummy)) # values of sufficient statistic # h(X[j] | X \cup U[i]) # for all j pmj <- partialModelMatrix(XUi, DJi, model)[c(J.i, JDi), , drop=FALSE] # mafter[ , c(JiData, JiDummy), i] <- t(pmj) } } } # delta[ ,i,j] = h(X[i] | X) - h(X[i] | X[-j]) delta[ , , isdata] <- mbefore[, , isdata] - mafter[ , , isdata] # delta[ ,i,j] = h(X[i] | X \cup U[j]) - h(X[i] | X) delta[ , , isdummy] <- mafter[, , isdummy] - mbefore[ , , isdummy] # rearrange: new delta[i,j,] = old delta[, i, j] delta <- aperm(delta, c(2,3,1)) return(delta) } deltasuffstat }) spatstat/R/flipxy.R0000755000176000001440000000234012237642727014022 0ustar ripleyusers# # flipxy.R # # flip x and y coordinates # # $Revision: 1.1 $ $Date: 2011/08/02 03:10:23 $ # flipxy <- function(X) { UseMethod("flipxy") } flipxy.ppp <- function(X) { stopifnot(is.ppp(X)) ppp(X$y, X$x, marks=X$marks, window=flipxy(X$window), unitname=unitname(X), check=FALSE) } flipxypolygon <- function(p) { # flip x and y coordinates, and reinstate anticlockwise order oldy <- p$y p$y <- rev(p$x) p$x <- rev(oldy) # area and hole status unchanged return(p) } flipxy.owin <- function(X) { verifyclass(X, "owin") switch(X$type, rectangle={ W <- owin(X$yrange, X$xrange) }, polygonal={ bdry <- lapply(X$bdry, flipxypolygon) W <- owin(poly=bdry, check=FALSE, unitname=unitname(X)) }, mask={ W <- owin(mask=t(X$m), xy=list(x=X$yrow, y=X$xcol)) }, stop("Unrecognised window type") ) return(W) } flipxy.psp <- function(X) { stopifnot(is.psp(X)) flipends <- (X$ends)[, c(2,1,4,3), drop=FALSE] as.psp(flipends, window=flipxy(X$window), marks=X$marks, unitname=unitname(X), check=FALSE) } flipxy.im <- function(X) { im(t(X$v), xcol=X$yrow, yrow=X$xcol, unitname=unitname(X)) } spatstat/R/blur.R0000755000176000001440000000543012237642727013456 0ustar ripleyusers# # blur.R # # apply Gaussian blur to an image # # 
$Revision: 1.12 $ $Date: 2009/12/16 18:23:52 $ # fillNA <- function(x, value=0) { stopifnot(is.im(x)) v <- x$v v[is.na(v)] <- value x$v <- v return(x) } blur <- function(x, sigma=NULL, ..., normalise=FALSE, bleed=TRUE, varcov=NULL) { stopifnot(is.im(x)) # determine smoothing kernel sigma.given <- !is.null(sigma) varcov.given <- !is.null(varcov) if (sigma.given) { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1, 2)) stopifnot(all(sigma > 0)) } if (varcov.given) stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov) == 2) ngiven <- varcov.given + sigma.given switch(ngiven + 1, { sigma <- (1/8) * min(diff(x$xrange), diff(x$yrange)) }, { if (sigma.given && length(sigma) == 2) varcov <- diag(sigma^2) if (!is.null(varcov)) sigma <- NULL }, { stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) }) # replace NA's in image raster by zeroes X <- fillNA(x, 0) # convolve with Gaussian Y <- second.moment.calc(X, sigma=sigma, varcov=varcov, what="smooth") # if no bleeding, we restrict data to the original boundary if(!bleed) Y$v[is.na(x$v)] <- NA # if(!normalise) return(Y) # normalisation: # convert original image to window (0/1 image) Xone <- x isna <- is.na(x$v) Xone$v[isna] <- 0 Xone$v[!isna] <- 1 # convolve with Gaussian Ydenom <- second.moment.calc(Xone, sigma=sigma, ..., varcov=varcov, what="smooth") # normalise Z <- eval.im(Y/Ydenom) return(Z) } safelookup <- function(Z, X, factor=2, warn=TRUE) { # X is a ppp # evaluates Z[X], replacing any NA's by blur(Z)[X] Zvals <- Z[X, drop=FALSE] if(any(isna <- is.na(Zvals))) { # First pass - look up values at neighbouring pixels if valid XX <- X[isna] rc <- nearest.valid.pixel(XX$x, XX$y, Z) Zvals[isna] <- Z$v[cbind(rc$row, rc$col)] } if(any(isna <- is.na(Zvals))) { # Second pass - extrapolate XX <- X[isna] pixdiam <- sqrt(Z$xstep^2 + Z$ystep^2) # expand domain of Z RX <- as.rectangle(X) RZ <- as.rectangle(Z) bb <- bounding.box(RX, RZ) big <- grow.rectangle(bb, 2 * pixdiam) 
Z <- rebound.im(Z, big) # now blur Zblur <- blur(Z, factor * pixdiam, bleed=TRUE, normalise=TRUE) Bvals <- Zblur[XX, drop=FALSE] if(any(is.na(Bvals))) stop("Internal error: pixel values were NA, even after blurring") Zvals[isna] <- Bvals if(warn) warning(paste(sum(isna), "out of", X$n, "pixel values", "were outside the pixel image domain", "and were estimated by convolution")) } return(Zvals) } spatstat/R/distfun.R0000755000176000001440000000627312252245773014171 0ustar ripleyusers# # distfun.R # # distance function (returns a function of x,y) # # $Revision: 1.20 $ $Date: 2013/12/12 05:38:59 $ # distfun <- function(X, ...) { UseMethod("distfun") } distfun.ppp <- function(X, ..., k=1) { # this line forces X to be bound stopifnot(is.ppp(X)) stopifnot(length(k) == 1) g <- function(x,y=NULL) { Y <- xy.coords(x, y)[c("x", "y")] nncross(Y, X, what="dist", k=k) } attr(g, "Xclass") <- "ppp" g <- funxy(g, as.rectangle(as.owin(X))) attr(g, "k") <- k class(g) <- c("distfun", class(g)) return(g) } distfun.psp <- function(X, ...) 
{ # this line forces X to be bound stopifnot(is.psp(X)) g <- function(x,y=NULL) { Y <- xy.coords(x, y)[c("x", "y")] nncross(Y, X, what="dist") } attr(g, "Xclass") <- "psp" g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("distfun", class(g)) return(g) } distfun.owin <- function(X, ..., invert=FALSE) { # this line forces X to be bound stopifnot(is.owin(X)) # P <- as.psp(as.polygonal(X)) # g <- function(x,y=NULL) { Y <- xy.coords(x, y) inside <- inside.owin(Y$x, Y$y, X) D <- nncross(Y, P, what="dist") out <- if(!invert) ifelseAX(inside, 0, D) else ifelseXB(inside, D, 0) return(out) } attr(g, "Xclass") <- "owin" g <- funxy(g, as.rectangle(as.owin(X))) class(g) <- c("distfun", class(g)) return(g) } as.owin.distfun <- function(W, ..., fatal=TRUE) { X <- get("X", envir=environment(W)) result <- if(is.owin(X)) as.rectangle(X) else as.owin(X, ..., fatal=fatal) return(result) } as.im.distfun <- function(X, W=NULL, ..., eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) { k <- attr(X, "k") if(is.null(W) && (is.null(k) || (k == 1))) { # use 'distmap' for speed env <- environment(X) Xdata <- get("X", envir=env) if(is.owin(Xdata)) { invert <- get("invert", envir=env) if(invert) Xdata <- complement.owin(Xdata) } D <- distmap(Xdata, eps=eps, dimyx=dimyx, xy=xy) if(!is.null(na.replace)) D$v[is.null(D$v)] <- na.replace } else if(identical(attr(X, "Xclass"), "ppp")) { # point pattern --- use nngrid/knngrid env <- environment(X) Xdata <- get("X", envir=env) D <- nnmap(Xdata, W=W, what="dist", k=k, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace, ...) } else { # evaluate function at pixel centres D <- as.im.function(X, W=W, eps=eps, dimyx=dimyx, xy=xy, na.replace=na.replace) } return(D) } print.distfun <- function(x, ...) 
{ xtype <- attr(x, "Xclass") typestring <- switch(xtype, ppp="point pattern", psp="line segment pattern", owin="window", "unrecognised object") objname <- switch(xtype, ppp="point", psp="line segment", "object") cat(paste("Distance function for", typestring, "\n")) X <- get("X", envir=environment(x)) print(X) if(!is.null(k <- attr(x, "k")) && k > 1) cat(paste("Distance to", ordinal(k), "nearest", objname, "will be computed\n")) return(invisible(NULL)) } spatstat/R/residppm.R0000755000176000001440000000664112237642727014342 0ustar ripleyusers# # residppm.R # # computes residuals for fitted point process model # # # $Revision: 1.18 $ $Date: 2013/04/25 06:37:43 $ # residuals.ppm <- function(object, type="raw", ..., check=TRUE, drop=FALSE, fittedvalues = fitted.ppm(object, check=check, drop=drop), coefs=NULL, quad=NULL) { verifyclass(object, "ppm") type <- pickoption("type", type, c(inverse="inverse", raw="raw", pearson="pearson", Pearson="pearson", score="score")) typenames <- c(inverse="inverse-lambda residuals", raw="raw residuals", pearson="Pearson residuals", score="score residuals") typename <- typenames[[type]] given.fitted <- !missing(fittedvalues) && !is.null(fittedvalues) # ................. determine fitted values ................. 
if(is.null(coefs) && is.null(quad)) { # use 'object' without modification # validate 'object' if(check && missing(fittedvalues) && damaged.ppm(object)) stop("object format corrupted; try update(object, use.internal=TRUE)") } else { # determine a new set of model coefficients if(!is.null(coefs)) { # use specified model parameters modelcoef <- coefs } else { # estimate model parameters using a (presumably) denser set of dummy pts # Determine new quadrature scheme if(inherits(quad, "quad")) hi.res.quad <- quad else if(is.ppp(quad)) hi.res.quad <- quadscheme(data=data.ppm(object), dummy=quad) else { # assume 'quad' is a list of arguments to 'quadscheme' hi.res.quad <- do.call("quadscheme", append(list(data.ppm(object)), quad)) } # refit the model with new quadscheme hi.res.fit <- update(object, hi.res.quad) modelcoef <- coef(hi.res.fit) } # now compute fitted values using new coefficients if(!given.fitted) fittedvalues <- fitted(object, drop=drop, new.coef=modelcoef) } # ..................... compute residuals ..................... 
# Extract quadrature points and weights Q <- quad.ppm(object, drop=drop) U <- union.quad(Q) # quadrature points Z <- is.data(Q) # indicator data/dummy # W <- w.quad(Q) # quadrature weights # Compute fitted conditional intensity at quadrature points lambda <- fittedvalues # indicator is 1 if lambda > 0 # (adjusted for numerical behaviour of predict.glm) indicator <- (lambda > .Machine$double.eps) if(type == "score") { # need the covariates X <- model.matrix(object) if(drop) { gs <- getglmsubset(object) ok <- !is.na(gs) && gs X <- X[ok,] } } # Evaluate residual measure components discrete <- switch(type, raw = rep.int(1, sum(Z)), inverse = 1/lambda[Z], pearson = 1/sqrt(lambda[Z]), score = X[Z, ] ) density <- switch(type, raw = -lambda, inverse = -indicator, pearson = -indicator * sqrt(lambda), score = -lambda * X) # Residual measure (return value) res <- msr(Q, discrete, density) # name the residuals attr(res, "type") <- type attr(res, "typename") <- typename return(res) } spatstat/R/levelset.R0000755000176000001440000000175512237642727014343 0ustar ripleyusers# levelset.R # # $Revision: 1.4 $ $Date: 2013/05/01 07:22:05 $ # # level set of an image levelset <- function(X, thresh, compare="<=") { # force X and thresh to be evaluated in this frame verifyclass(X, "im") thresh <- thresh switch(compare, "<" = { A <- eval.im(X < thresh) }, ">" = { A <- eval.im(X > thresh) }, "<=" = { A <- eval.im(X <= thresh) }, ">=" = { A <- eval.im(X >= thresh) }, "==" = { A <- eval.im(X == thresh) }, "!=" = { A <- eval.im(X != thresh) }, stop(paste("unrecognised comparison operator", sQuote(compare)))) W <- as.owin(eval.im(ifelse1NA(A))) return(W) } # compute owin containing all pixels where image expression is TRUE solutionset <- function(..., envir) { if(missing(envir)) envir <- parent.frame() A <- eval.im(..., envir=envir) if(A$type != "logical") stop("Evaluating the expression did not yield a logical-valued image") W <- as.owin(eval.im(ifelse1NA(A))) return(W) } 
spatstat/R/strauss.R0000755000176000001440000001176012243543436014213 0ustar ripleyusers# # # strauss.R # # $Revision: 2.29 $ $Date: 2013/11/22 01:01:39 $ # # The Strauss process # # Strauss() create an instance of the Strauss process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Strauss <- local({ # create blank template object without family and pars BlankStrauss <- list( name = "Strauss process", creator = "Strauss", family = "pairwise.family", # evaluated later pot = function(d, par) { d <= par$r }, par = list(r = NULL), # to be filled in parnames = "interaction distance", init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=round(gamma,4))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) return(is.finite(loggamma) && (loggamma <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(any(is.na(coeffs))) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(0) else return(r) }, version=NULL, # to be filled in # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) 
{ # fast evaluator for Strauss interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Strauss") r <- potpars$r answer <- strausscounts(U, X, r, EqualPairs) return(matrix(answer, ncol=1)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral gamma <- exp(as.numeric(coeffs[1])) r <- self$par$r return((1-gamma) * pi * r^2) }, delta2 = function(X, inte, correction, ...) { if(!(correction %in% c("border", "none"))) return(NULL) r <- inte$par$r X <- as.ppp(X) # algorithm is the same for data and dummy points nX <- npoints(X) cl <- closepairs(X, r, what="indices") I <- factor(cl$i, levels=1:nX) J <- factor(cl$j, levels=1:nX) v <- table(I, J) return(v) } ) class(BlankStrauss) <- "interact" # Finally define main function Strauss <- function(r) { instantiate.interact(BlankStrauss, list(r=r)) } Strauss }) # generally accessible functions strausscounts <- function(U, X, r, EqualPairs=NULL) { answer <- crosspaircounts(U,X,r) # subtract counts of identical pairs if(length(EqualPairs) > 0) { nU <- npoints(U) idcount <- as.integer(table(factor(EqualPairs[,2], levels=1:nU))) answer <- answer - idcount } return(answer) } crosspaircounts <- function(X, Y, r) { stopifnot(is.ppp(X)) stopifnot(is.numeric(r) && length(r) == 1) stopifnot(is.finite(r)) stopifnot(r >= 0) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine DUP <- spatstat.options("dupC") out <- .C("Ccrosspaircounts", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), rrmax = as.double(r), counts = as.integer(integer(nX)), DUP = DUP) # PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$counts return(answer) } closepaircounts <- function(X, r) { stopifnot(is.ppp(X)) 
stopifnot(is.numeric(r) && length(r) == 1) stopifnot(is.finite(r)) stopifnot(r >= 0) # sort in increasing order of x coordinate oX <- fave.order(X$x) Xsort <- X[oX] nX <- npoints(X) # call C routine DUP <- spatstat.options("dupC") out <- .C("Cclosepaircounts", nxy = as.integer(nX), x = as.double(Xsort$x), y = as.double(Xsort$y), rmaxi = as.double(r), counts = as.integer(integer(nX)), DUP = DUP) answer <- integer(nX) answer[oX] <- out$counts return(answer) } spatstat/R/options.R0000755000176000001440000002366512243070150014175 0ustar ripleyusers# # options.R # # Spatstat Options # # $Revision: 1.47 $ $Date: 2013/07/17 03:07:17 $ # # .spEnv <- new.env() assign(".Spatstat.Options", list(), envir = .spEnv) ".Spat.Stat.Opt.Table" <- list( scalable = list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), npixel=list( default=128, check=function(x){ is.numeric(x) && (length(x) %in% c(1,2)) && is.finite(x) && all(x == ceiling(x)) && all(x > 1) }, valid="an integer, or a pair of integers, greater than 1" ), maxedgewt=list( default=100, check=function(x){ is.numeric(x) && length(x) == 1 && is.finite(x) && x >= 1 }, valid="a finite numeric value, not less than 1" ), par.binary=list( default=list(), check=is.list, valid="a list" ), par.persp=list( default=list(), check=is.list, valid="a list" ), par.points=list( default=list(), check=is.list, valid="a list" ), par.contour=list( default=list(), check=is.list, valid="a list" ), par.fv=list( default=list(), check=is.list, valid="a list" ), image.colfun=list( default=function(n){topo.colors(n)}, check=function(x) { is.function(x) && length(formals(x)) > 0 && all(is.character(x(42))) }, valid="a function f(n) that returns character values" ), ndummy.min=list( default=32, check=function(x) { is.numeric(x) && length(x) <= 2 && all(x == ceiling(x)) && all(x > 1) }, valid="a single integer or a pair of integers, greater than 1" ), dupC = list( default=FALSE, check=function(x) { 
is.logical(x) && length(x) == 1}, valid="a single logical value" ), progress = list( default="tty", check=function(x){ x %in% c("tty", "txtbar") }, valid=paste("one of the strings", dQuote("tty"), "or", dQuote("txtbar")) ), checkpolygons = list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), checksegments = list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1}, valid="a single logical value" ), ngrid.disc=list( default=128, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1 }, valid="a single integer, greater than 1" ), gpclib=list( default=FALSE, check=function(x) { message("gpclib is no longer needed") return(TRUE) }, valid="a single logical value" ), maxmatrix=list( default=2^24, # 16,777,216 check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1024 }, valid="a single integer, greater than 1024" ), huge.npoints=list( default=1e6, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 1024 }, valid="a single integer, greater than 1024" ), expand=list( default=2, check=function(x) { is.numeric(x) && length(x) == 1 && x > 1 }, valid="a single numeric value, greater than 1" ), fasteval=list( default="on", check=function(x) { x %in% c("off", "on", "test") }, valid="one of the strings \'off\', \'on\' or \'test\'" ), densityC=list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), exactdt.checks.data=list( default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), closepairs.newcode=list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), crosspairs.newcode=list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), psstG.remove.zeroes=list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a 
single logical value" ), Kcom.remove.zeroes=list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), psstA.ngrid=list( default=32, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x >= 8 }, valid="a single integer, greater than or equal to 8" ), psstA.nr=list( default=30, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x >= 4 }, valid="a single integer, greater than or equal to 4" ), crossing.psp.useCall=list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), selfcrossing.psp.useCall=list( default=TRUE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), n.bandwidth=list( default=32, check=function(x) { is.numeric(x) && (length(x) == 1) && (x == ceiling(x)) && (x > 2) }, valid="a single integer, greater than 2" ), project.fast=list( default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), rmh.p=list( default=0.9, check=function(x) { is.numeric(x) && length(x) == 1 && x >= 0 && x <= 1 }, valid="a single numerical value, between 0 and 1" ), rmh.q=list( default=0.9, check=function(x) { is.numeric(x) && length(x) == 1 && x > 0 && x < 1 }, valid="a single numerical value, strictly between 0 and 1" ), rmh.nrep=list( default=5e5, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 0 }, valid="a single integer, greater than 0" ), print.ppm.SE=list( default="poisson", check=function(x) { is.character(x) && length(x) == 1 && x %in% c("always", "poisson", "never") }, valid="one of the strings \'always\', \'poisson\' or \'never\'" ), nvoxel=list( default=2^22, check=function(x) { is.numeric(x) && length(x) == 1 && (x == ceiling(x)) && x > 2^12 }, valid="a single integer, greater than 2^12" ), fastK.lgcp=list( default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" 
), allow.logi.influence=list( default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ), old.morpho.psp=list( default=FALSE, check=function(x) { is.logical(x) && length(x) == 1 }, valid="a single logical value" ) ) # end of options list reset.spatstat.options <- function() { .Spatstat.Options <- lapply(.Spat.Stat.Opt.Table, function(z) { z$default }) assign(".Spatstat.Options", .Spatstat.Options, envir = .spEnv) invisible(.Spatstat.Options) } reset.spatstat.options() "spatstat.options" <- function (...) { .Spatstat.Options <- get(".Spatstat.Options", envir = .spEnv) called <- list(...) if(length(called) == 0) return(.Spatstat.Options) if(is.null(names(called)) && length(called)==1) { # spatstat.options(x) x <- called[[1]] if(is.null(x)) return(.Spatstat.Options) # spatstat.options(NULL) if(is.list(x)) called <- x } if(is.null(names(called))) { # spatstat.options("par1", "par2", ...) ischar <- unlist(lapply(called, is.character)) if(all(ischar)) { choices <- unlist(called) ok <- choices %in% names(.Spatstat.Options) if(!all(ok)) stop(paste("Unrecognised option(s):", called[!ok])) if(length(called) == 1) return(.Spatstat.Options[[choices]]) else return(.Spatstat.Options[choices]) } else { wrong <- called[!ischar] offending <- unlist(lapply(wrong, function(x) { y <- x; short.deparse(substitute(y)) })) offending <- paste(offending, collapse=",") stop(paste("Unrecognised mode of argument(s) [", offending, "]: should be character string or name=value pair")) } } # spatstat.options(name=value, name2=value2,...) 
assignto <- names(called) if (is.null(assignto) || !all(nzchar(assignto))) stop("options must all be identified by name=value") recog <- assignto %in% names(.Spat.Stat.Opt.Table) if(!all(recog)) stop(paste("Unrecognised option(s):", assignto[!recog])) # validate new values for(i in seq_along(assignto)) { nama <- assignto[i] valo <- called[[i]] entry <- .Spat.Stat.Opt.Table[[nama]] ok <- do.call(entry$check, list(valo)) if(!ok) stop(paste("Parameter", dQuote(nama), "should be", entry$valid)) } # reassign changed <- .Spatstat.Options[assignto] .Spatstat.Options[assignto] <- called assign(".Spatstat.Options", .Spatstat.Options, envir = .spEnv) # return invisible(changed) } spatstat/R/ppp.R0000755000176000001440000004147012237642727013315 0ustar ripleyusers# # ppp.R # # A class 'ppp' to define point patterns # observed in arbitrary windows in two dimensions. # # $Revision: 4.84 $ $Date: 2013/01/08 07:16:16 $ # # A point pattern contains the following entries: # # $window: an object of class 'owin' # defining the observation window # # $n: the number of points (for efficiency) # # $x: # $y: vectors of length n giving the Cartesian # coordinates of the points. # # It may also contain the entry: # # $marks: a vector of length n # whose entries are interpreted as the # 'marks' attached to the corresponding points. # #-------------------------------------------------------------------------- ppp <- function(x, y, ..., window, marks, check=TRUE ) { # Constructs an object of class 'ppp' # if(!missing(window)) verifyclass(window, "owin") else window <- owin(...) 
if((missing(x) && missing(y)) || (length(x) == 0 && length(y) == 0)) x <- y <- numeric(0) n <- length(x) if(length(y) != n) stop("coordinate vectors x and y are not of equal length") # validate x, y coordinates stopifnot(is.numeric(x)) stopifnot(is.numeric(y)) ok <- is.finite(x) & is.finite(y) if(any(!ok)) { nbg <- is.na(x) | is.na(y) if(any(nbg)) { howmany <- if(all(nbg)) "all" else paste(sum(nbg), "out of", length(nbg)) stop(paste(howmany, "coordinate values are NA or NaN")) } howmany <- if(!any(ok)) "all" else paste(sum(!ok), "out of", length(ok)) stop(paste(howmany, "coordinate values are infinite")) } names(x) <- NULL names(y) <- NULL # check (x,y) points lie inside window if(check && n > 0) { ok <- inside.owin(x, y, window) nout <- sum(!ok) if(nout > 0) { warning(paste(nout, ngettext(nout, "point was", "points were"), "rejected as lying outside the specified window")) rr <- ripras(x,y) bb <- bounding.box.xy(x,y) bb <- bounding.box(rr, bb, window) rejectwindow <- if(!is.null(rr)) rebound.owin(rr, bb) else bb rejects <- ppp(x[!ok], y[!ok], window=rejectwindow, check=FALSE) # discard illegal points x <- x[ok] y <- y[ok] n <- length(x) } } else nout <- 0 # initialise ppp object pp <- list(window=window, n=n, x=x, y=y) # coerce marks to appropriate forma if(missing(marks)) marks <- NULL if(is.hyperframe(marks)) stop("Hyperframes of marks are not implemented for ppp objects; use ppx") if(is.matrix(marks)) marks <- as.data.frame(marks) if(is.data.frame(marks)) { nc <- ncol(marks) if(nc == 0) marks <- NULL else if(nc == 1) marks <- marks[,,drop=TRUE] } # attach marks if(is.null(marks)) { # no marks pp$markformat <- "none" } else if(is.data.frame(marks)) { # data frame of marks pp$markformat <- "dataframe" if(nout > 0) { marks <- marks[ok, ] marks(rejects) <- marks[!ok,] } if(nrow(marks) != n) stop("number of rows of marks != length of x and y") pp$marks <- marks } else { # should be a vector or factor # To recognise vector, strip attributes if(!is.factor(marks)) 
attributes(marks) <- NULL if(!(is.vector(marks) || is.factor(marks))) stop("Format of marks not understood") # OK, it's a vector or factor pp$markformat <- "vector" if(nout > 0) { marks(rejects) <- marks[!ok] marks <- marks[ok] } if(length(marks) != n) stop("length of marks vector != length of x and y") names(marks) <- NULL pp$marks <- marks } class(pp) <- "ppp" if(check && any(duplicated(pp))) warning("data contain duplicated points") if(nout > 0) attr(pp, "rejects") <- rejects pp } # #-------------------------------------------------------------------------- # is.ppp <- function(x) { inherits(x, "ppp") } # #-------------------------------------------------------------------------- # as.ppp <- function(X, ..., fatal=TRUE) { UseMethod("as.ppp") } as.ppp.ppp <- function(X, ..., fatal=TRUE) { check <- resolve.defaults(list(...), list(check=FALSE))$check return(ppp(X$x, X$y, window=X$window, marks=X$marks, check=check)) } as.ppp.quad <- function(X, ..., fatal=TRUE) { return(union.quad(X)) } as.ppp.data.frame <- function(X, W = NULL, ..., fatal=TRUE) { check <- resolve.defaults(list(...), list(check=TRUE))$check if(ncol(X) < 2) return(complaining("X must have at least two columns", fatal, value=NULL)) if(is.null(W)) return(complaining("x,y coords given but no window specified", fatal, value=NULL)) # columns 1 and 2 are assumed to be coordinates # marks from other columns marx <- if(ncol(X) > 2) X[, -(1:2)] else NULL if(is.function(W)) Z <- cobble.xy(X[,1], X[,2], W, fatal, marks=marx, check=check) else { win <- as.owin(W) Z <- ppp(X[,1], X[,2], window = win, marks=marx, check=check) } return(Z) } as.ppp.matrix <- function(X, W = NULL, ..., fatal=TRUE) { check <- resolve.defaults(list(...), list(check=TRUE))$check if(!verifyclass(X, "matrix", fatal=fatal) || !is.numeric(X)) return(complaining("X must be a numeric matrix", fatal, value=NULL)) if(ncol(X) < 2) return(complaining("X must have at least two columns", fatal, value=NULL)) if(is.null(W)) return(complaining("x,y 
coords given but no window specified", fatal, value=NULL)) if(is.function(W)) Z <- cobble.xy(X[,1], X[,2], W, fatal) else { win <- as.owin(W) Z <- ppp(X[,1], X[,2], window = win, check=check) } # add marks from other columns if(ncol(X) > 2) marks(Z) <- X[, -(1:2)] return(Z) } as.ppp.default <- function(X, W=NULL, ..., fatal=TRUE) { # tries to coerce data X to a point pattern # X may be: # 1. a structure with entries x, y, xl, xu, yl, yu # 2. a structure with entries x, y, area where # 'area' has entries xl, xu, yl, yu # 3. a structure with entries x, y # 4. a vector of length 2, interpreted as a single point. # The second argument W is coerced to an object of class 'owin' by the # function "as.owin" in window.S # If X also has an entry X$marks # then this will be interpreted as the marks vector for the pattern. # check <- resolve.defaults(list(...), list(check=TRUE))$check if(checkfields(X, c("x", "y", "xl", "xu", "yl", "yu"))) { xrange <- c(X$xl, X$xu) yrange <- c(X$yl, X$yu) if(is.null(X$marks)) Z <- ppp(X$x, X$y, xrange, yrange, check=check) else Z <- ppp(X$x, X$y, xrange, yrange, marks=X$marks, check=check) return(Z) } else if(checkfields(X, c("x", "y", "area")) && checkfields(X$area, c("xl", "xu", "yl", "yu"))) { win <- as.owin(X$area) if (is.null(X$marks)) Z <- ppp(X$x, X$y, window=win, check=check) else Z <- ppp(X$x, X$y, window=win, marks = X$marks, check=check) return(Z) } else if(checkfields(X, c("x", "y"))) { if(is.function(W)) return(cobble.xy(X$x, X$y, W, fatal)) if(is.null(W)) { if(fatal) stop("x,y coords given but no window specified") else return(NULL) } win <- as.owin(W) if(is.null(X$marks)) Z <- ppp(X$x, X$y, window=win, check=check) else Z <- ppp(X$x, X$y, window=win, marks=X$marks, check=check) return(Z) } else if(is.vector(X) && length(X) == 2) { win <- as.owin(W) Z <- ppp(X[1], X[2], window=win, check=check) return(Z) } else { if(fatal) stop("Can't interpret X as a point pattern") else return(NULL) } } cobble.xy <- function(x, y, f=ripras, 
fatal=TRUE, ...) { if(!is.function(f)) stop("f is not a function") w <- f(x,y) if(!is.owin(w)) { gripe <- "Supplied function f did not return an owin object" if(fatal) stop(gripe) else { warning(gripe) return(NULL) } } return(ppp(x, y, window=w, ...)) } # -------------------------------------------------------------- "[.ppp" <- function(x, i, j, drop, ...) { verifyclass(x, "ppp") if(missing(i) && missing(j)) return(x) if(!missing(i)) { if(inherits(i, "owin")) { # i is a window window <- i ok <- inside.owin(x$x, x$y, window) x <- ppp(x$x[ok], x$y[ok], window=window, #SIC marks=marksubset(x$marks, ok), check=FALSE) } else if(inherits(i, "im")) { # i is an image if(i$type != "logical") stop(paste("Subset operator X[i] undefined", "when i is a pixel image", "unless it has logical values"), call.=FALSE) # convert logical image to window e <- sys.frame(sys.nframe()) window <- solutionset(i, e) ok <- inside.owin(x$x, x$y, window) x <- ppp(x$x[ok], x$y[ok], window=window, #SIC marks=marksubset(x$marks, ok), check=FALSE) } else { # assume i is a subset index if(x$n == 0) return(x) subset <- i x <- ppp(x$x[subset], x$y[subset], window=x$window, marks=marksubset(x$marks, subset), check=FALSE) } } if(!missing(j)) x <- x[j] # invokes code above return(x) } # ------------------------------------------------------------------ # # scanpp <- function(filename, window, header=TRUE, dir="", multitype=FALSE) { filename <- if(dir=="") filename else paste(dir, filename, sep=.Platform$file.sep) df <- read.table(filename, header=header) if(header) { # check whether there are columns named 'x' and 'y' colnames <- dimnames(df)[[2]] xycolumns <- match(c("x", "y"), colnames, 0) named <- all(xycolumns > 0) } else { named <- FALSE } if(named) { x <- df$x y <- df$y } else { # assume x, y given in columns 1, 2 respectively x <- df[,1] y <- df[,2] xycolumns <- c(1,2) } if(ncol(df) == 2) X <- ppp(x, y, window=window) else { marks <- df[ , -xycolumns] if(multitype) marks <- factor(marks) X <- ppp(x, 
y, window=window, marks = marks) } X } #------------------------------------------------------------------- "markspace.integral" <- function(X) { verifyclass(X, "ppp") if(!is.marked(X, dfok=TRUE)) return(1) if(is.multitype(X)) return(length(levels(marks(X)))) else stop("Don't know how to compute total mass of mark space") } #------------------------------------------------------------------- print.ppp <- function(x, ...) { verifyclass(x, "ppp") ism <- is.marked(x, dfok=TRUE) cat(paste(if(ism) "marked" else NULL, "planar point pattern:", x$n, ngettext(x$n, "point", "points"), "\n")) if(ism) { mks <- marks(x, dfok=TRUE) if(is.data.frame(mks)) { # data frame of marks cat(paste("Mark variables: ", paste(names(mks), collapse=", "), "\n")) } else { # vector of marks if(is.factor(mks)) { cat("multitype, with ") cat(paste("levels =", paste(levels(mks), collapse="\t"),"\n")) } else { cat(paste("marks are", if(is.numeric(mks)) "numeric,", "of type", sQuote(typeof(mks)), "\n")) } } } print(x$window) if(!is.null(rejects <- attr(x, "rejects"))) { nrejects <- rejects$n cat(paste("\n***", nrejects, ngettext(nrejects, "illegal point", "illegal points"), "stored in", paste("attr(,", dQuote("rejects"), ")", sep=""), "***\n")) } if(!is.null(info <- attr(x, "info")) && inherits(info, "rmhInfoList")) cat("\nPattern was generated by Metropolis-Hastings simulation.\n") return(invisible(NULL)) } summary.ppp <- function(object, ..., checkdup=TRUE) { verifyclass(object, "ppp") result <- list() result$is.marked <- is.marked(object, dfok=TRUE) result$n <- object$n result$window <- summary(object$window) result$intensity <- result$n/result$window$area if(checkdup) { result$nduplicated <- sum(duplicated(object)) result$rounding <- rounding(object) } if(result$is.marked) { mks <- marks(object, dfok=TRUE) if(result$multiple.marks <- is.data.frame(mks)) { result$marknames <- names(mks) result$is.numeric <- FALSE result$marktype <- "dataframe" result$is.multitype <- FALSE } else { result$is.numeric 
<- is.numeric(mks) result$marknames <- "marks" result$marktype <- typeof(mks) result$is.multitype <- is.multitype(object) } if(result$is.multitype) { tm <- as.vector(table(mks)) tfp <- data.frame(frequency=tm, proportion=tm/sum(tm), intensity=tm/result$window$area, row.names=levels(mks)) result$marks <- tfp } else result$marks <- summary(mks) } class(result) <- "summary.ppp" if(!is.null(rejects <- attr(object, "rejects"))) result$rejects <- rejects$n if(!is.null(info <- attr(object, "info")) && inherits(info, "rmhInfoList")) result$rmhinfo <- info return(result) } print.summary.ppp <- function(x, ..., dp=3) { verifyclass(x, "summary.ppp") cat(paste(if(x$is.marked) "Marked planar " else "Planar ", "point pattern: ", x$n, " points\n", sep="")) oneline <- resolve.defaults(list(...), list(oneline=FALSE))$oneline if(oneline) return(invisible(NULL)) unitinfo <- summary(x$window$units) cat(paste("Average intensity", signif(x$intensity,dp), "points per square", unitinfo$singular, unitinfo$explain, "\n")) ndup <- x$nduplicated if((!is.null(ndup)) && (ndup > 0)) cat("\n*Pattern contains duplicated points*\n") rndg <- x$rounding if(!is.null(rndg)) { if(rndg >= 4) { cat(paste("\nCoordinates are given to", rndg, "decimal places\n")) } else if(rndg > 0) { cat(paste("\nCoordinates are given to", rndg, "decimal places", "\ni.e. rounded to the nearest multiple of", 10^(-rndg), unitinfo$plural, unitinfo$explain, "\n")) } else if(rndg == 0) { cat(paste("\nCoordinates are integers", "\ni.e. 
rounded to the nearest 1", unitinfo$singular, unitinfo$explain, "\n")) } else { cat(paste("\nCoordinates are multiples of", 10^(-rndg), unitinfo$plural, unitinfo$explain, "\n")) } cat("\n") } if(x$is.marked) { if(x$multiple.marks) { cat(paste("Mark variables: ", paste(x$marknames, collapse=", "), "\n")) cat("Summary:\n") print(x$marks) } else if(x$is.multitype) { cat("Multitype:\n") print(signif(x$marks,dp)) } else { cat(paste("marks are ", if(x$is.numeric) "numeric, ", "of type", sQuote(x$marktype), "\n")) cat("Summary:\n") print(x$marks) } cat("\n") } print(x$window) if(!is.null(nrejects <- x$rejects)) cat(paste("\n***", nrejects, ngettext(nrejects, "illegal point", "illegal points"), "stored in", paste("attr(,", dQuote("rejects"), ")", sep=""), "***\n")) if(!is.null(info <- x$rmhinfo)) { cat("\nPattern was generated by Metropolis-Hastings algorithm rmh") print(info) } return(invisible(x)) } # --------------------------------------------------------------- identify.ppp <- function(x, ...) { verifyclass(x, "ppp") if(!is.marked(x) || "labels" %in% names(list(...))) identify(x$x, x$y, ...) else { marx <- marks(x, dfok=FALSE) marques <- if(is.numeric(marx)) paste(signif(marx, 3)) else paste(marx) id <- identify(x$x, x$y, labels=marques, ...) mk <- marx[id] if(is.factor(marx)) mk <- levels(marx)[mk] cbind(id=id, marks=mk) } } rebound <- function(x, rect) { UseMethod("rebound") } rebound.ppp <- function(x, rect) { verifyclass(x, "ppp") x$window <- rebound.owin(x$window, rect) return(x) } as.data.frame.ppp <- function(x, row.names=NULL, ...) 
{ df <- data.frame(x=x$x, y=x$y, row.names=row.names) marx <- marks(x, dfok=TRUE) if(is.null(marx)) return(df) if(is.data.frame(marx)) df <- cbind(df, marx) else df <- data.frame(df, marks=marx) return(df) } is.empty.ppp <- function(x) { return(x$n == 0) } npoints <- function(x) { UseMethod("npoints") } nobjects <- function(x) { UseMethod("nobjects") } nobjects.ppp <- npoints.ppp <- function(x) { x$n } spatstat/R/clickpoly.R0000755000176000001440000000207512237642727014505 0ustar ripleyusers# # clickpoly.R # # # $Revision: 1.2 $ $Date: 2007/11/02 18:03:05 $ # # clickpoly <- function(add=FALSE, nv=NULL, np=1, ...) { if((!add) | dev.cur() == 1) { plot(0,0,type="n", xlab="", ylab="", xlim=c(0,1), ylim=c(0,1), asp=1.0, axes=FALSE) rect(0,0,1,1) } gon <- list() stopifnot(np >= 1) for(i in 1:np) { if(np > 1) cat(paste(".... Polygon number", i, ".....\n")) if(!is.null(nv)) cat(paste("click", nv, "times in window\n")) else cat(paste("to add points: click left mouse button in window\n", " to exit: click middle mouse button\n", "[The last point should NOT repeat the first point]\n")) xy <- do.call("locator", resolve.defaults(if(!is.null(nv)) list(n=nv) else list(), list(...), list(type="o"))) if(area.xypolygon(xy) < 0) xy <- lapply(xy, rev) gon[[i]] <- xy plot(owin(poly=xy), add=TRUE) } result <- owin(poly=gon) plot(result, add=TRUE) return(result) } spatstat/R/quadscheme.R0000755000176000001440000002177312237642727014641 0ustar ripleyusers# # # quadscheme.S # # $Revision: 4.29 $ $Date: 2013/05/20 00:55:25 $ # # quadscheme() generate a quadrature scheme from # data and dummy point patterns. # # quadscheme.spatial() case where both patterns are unmarked # # quadscheme.replicated() case where data are multitype # # #--------------------------------------------------------------------- quadscheme <- function(data, dummy, method="grid", ...) { # # generate a quadrature scheme from data and dummy patterns. 
# # Other arguments control how the quadrature weights are computed # data <- as.ppp(data) if(missing(dummy)) { # create dummy points dummy <- default.dummy(data, method=method, ...) # extract full set of parameters used to create dummy points dp <- attr(dummy, "dummy.parameters") # extract recommended parameters for computing weights wp <- attr(dummy, "weight.parameters") } else { # user-supplied dummy points if(!is.ppp(dummy)) { # convert to ppp object dummy <- as.ppp(dummy, data$window, check=FALSE) # confine dummy points to data window dummy <- dummy[data$window] wp <- dp <- list() } else { # if it's already a ppp, it may have been created by default.dummy dp <- attr(dummy, "dummy.parameters") wp <- attr(dummy, "weight.parameters") } } # arguments supplied directly to quadscheme() # override any arguments passed as attributes wp <- resolve.defaults(list(method=method), list(...), wp) mX <- is.marked(data) mD <- is.marked(dummy) if(!mX && !mD) Q <- do.call("quadscheme.spatial", append(list(data, dummy, check=FALSE), wp)) else if(mX && !mD) Q <- do.call("quadscheme.replicated", append(list(data, dummy, check=FALSE), wp)) else if(!mX && mD) stop("dummy points are marked but data are unmarked") else stop("marked data and marked dummy points -- sorry, this case is not implemented") # record parameters used to make dummy points Q$param$dummy <- dp return(Q) } quadscheme.spatial <- function(data, dummy, method="grid", ...) { # # generate a quadrature scheme from data and dummy patterns. # # The 'method' may be "grid" or "dirichlet" # # '...' are passed to gridweights() or dirichlet.weights() # # quadscheme.spatial: # for unmarked point patterns. # # weights are determined only by spatial locations # (i.e. 
weight computations ignore any marks) # # No two points should have the same spatial location # check <- resolve.defaults(list(...), list(check=TRUE))$check data <- as.ppp(data, check=check) dummy <- as.ppp(dummy, data$window, check=check) # note data$window is the DEFAULT quadrature window # applicable when 'dummy' does not contain a window if(is.marked(data, dfok=TRUE)) warning("marks in data pattern - ignored") if(is.marked(dummy, dfok=TRUE)) warning("marks in dummy pattern - ignored") both <- as.ppp(concatxy(data, dummy), dummy$window, check=check) switch(method, grid={ w <- gridweights(both, window= dummy$window, ...) }, dirichlet = { w <- dirichlet.weights(both, window=dummy$window, ...) }, { stop(paste("unrecognised method", sQuote(method))) } ) # parameters actually used to make weights wp <- attr(w, "weight.parameters") param <- list(weight = wp, dummy = NULL) Q <- quad(data, dummy, w, param) return(Q) } "quadscheme.replicated" <- function(data, dummy, method="grid", ...) { # # generate a quadrature scheme from data and dummy patterns. # # The 'method' may be "grid" or "dirichlet" # # '...' are passed to gridweights() or dirichlet.weights() # # quadscheme.replicated: # for multitype point patterns. # # No two points in 'data'+'dummy' should have the same spatial location check <- resolve.defaults(list(...), list(check=TRUE))$check data <- as.ppp(data, check=check) dummy <- as.ppp(dummy, data$window, check=check) # note data$window is the DEFAULT quadrature window # unless otherwise specified in 'dummy' ndata <- data$n ndummy <- dummy$n if(!is.marked(data)) stop("data pattern does not have marks") if(is.marked(dummy, dfok=TRUE)) warning("dummy points have marks --- ignored") # first, ignore marks and compute spatial weights P <- quadscheme.spatial(unmark(data), dummy, method, ...) 
W <- w.quad(P) iz <- is.data(P) Wdat <- W[iz] Wdum <- W[!iz] # find the set of all possible marks if(!is.multitype(data)) stop("data pattern is not multitype") data.marks <- marks(data) markset <- levels(data.marks) nmarks <- length(markset) # replicate dummy points, one copy for each possible mark # -> dummy x {1,..,K} dumdum <- cartesian(dummy, markset) Wdumdum <- rep.int(Wdum, nmarks) Idumdum <- rep.int((ndata + 1):(ndata + ndummy), nmarks) # also make dummy marked points at same locations as data points # but with different marks dumdat <- cartesian(unmark(data), markset) Wdumdat <- rep.int(Wdat, nmarks) Mdumdat <- marks(dumdat) Idumdat <- rep.int(1:ndata, nmarks) Mrepdat <- rep.int(data.marks, nmarks) ok <- (Mdumdat != Mrepdat) dumdat <- dumdat[ok,] Wdumdat <- Wdumdat[ok] Idumdat <- Idumdat[ok] # combine the two dummy patterns dumb <- superimpose(dumdum, dumdat, W=dummy$window) Wdumb <- c(Wdumdum, Wdumdat) Idumb <- c(Idumdum, Idumdat) # record the quadrature parameters param <- list(weight = P$param$weight, dummy = NULL, sourceid=c(1:ndata, Idumb)) # wrap up Q <- quad(data, dumb, c(Wdat, Wdumb), param) return(Q) } "cartesian" <- function(pp, markset, fac=TRUE) { # given an unmarked point pattern 'pp' # and a finite set of marks, # create the marked point pattern which is # the Cartesian product, consisting of all pairs (u,k) # where u is a point of 'pp' and k is a mark in 'markset' nmarks <- length(markset) result <- ppp(rep.int(pp$x, nmarks), rep.int(pp$y, nmarks), window=pp$window, check=FALSE) marx <- rep.int(markset, rep.int(pp$n, nmarks)) if(fac) marx <- factor(marx, levels=markset) marks(result) <- marx return(result) } validate.quad <- function(Q, fatal=FALSE, repair=TRUE, announce=FALSE) { X <- Q$data D <- Q$dummy mX <- is.marked(X) mD <- is.marked(D) nbg <- function(whinge, fatal=FALSE, announce=FALSE) { if(fatal) stop(whinge, call.=FALSE) else { if(announce) warning(whinge, call.=FALSE) return(FALSE) } } if(mX != mD) { whinge <- if(mX) "data points 
are marked, but dummy points are not" else "dummy points are marked, but data points are not" return(nbg(whinge, fatal, announce)) } if(!mX) return(TRUE) # marked points fX <- is.factor(Xmarx <- marks(X)) fD <- is.factor(Dmarx <- marks(D)) if(fX != fD) { whinge <- if(fX) "data points are multitype, but dummy points are not" else "dummy points are multitype, but data points are not" return(nbg(whinge, fatal, announce)) } if(!fX) return(TRUE) # multitype points lX <- levels(Xmarx) lD <- levels(Dmarx) if(length(lX) != length(lD) || any(lX != lD)) { whinge <- "data and dummy points have different sets of possible marks" return(nbg(whinge, fatal, announce)) } return(TRUE) } pixelquad <- function(X, W=as.owin(X)) { # make a quadscheme with a dummy point at every pixel verifyclass(X, "ppp") # convert window to mask if not already one W <- as.owin(W) M <- as.mask(W) MM <- M$m pixelarea <- M$xstep * M$ystep # create pixel coordinates and corresponding row, column indices xx <- as.vector(raster.x(M)[MM]) yy <- as.vector(raster.y(M)[MM]) cc <- as.vector(col(MM)[MM]) rr <- as.vector(row(MM)[MM]) Nr <- M$dim[1] Nc <- M$dim[2] # discretise data points ij <- nearest.raster.point(X$x, X$y, M) ijrow <- ij$row ijcol <- ij$col # tabulate pixel locations of data points Xtab <- table(row=factor(ijrow, levels=1:Nr), col=factor(ijcol, levels=1:Nc)) # every pixel contains exactly one dummy point, # so the total count of quadrature points in each pixel is: Qtab <- Xtab + 1 # compute counting weights for data points wdat <- 1/Qtab[cbind(ijrow, ijcol)] # compute counting weights for dummy points wdum <- 1/Qtab[cbind(rr, cc)] wboth <- pixelarea * c(wdat, wdum) # create quadrature scheme dum <- ppp(xx, yy, window=W, check=FALSE) Q <- quad(X, dum, wboth) attr(Q, "M") <- M return(Q) } spatstat/R/lohboot.R0000644000176000001440000000715212237642727014160 0ustar ripleyusers# # lohboot.R # # $Revision: 1.7 $ $Date: 2013/08/01 09:40:03 $ # # Loh's bootstrap CI's for local pcf, local K etc # lohboot 
<- function(X, fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom"), ..., nsim=200, confidence=0.95, global=FALSE, type=7) { stopifnot(is.ppp(X)) fun <- match.arg(fun) # validate confidence level stopifnot(confidence > 0.5 && confidence < 1) alpha <- 1 - confidence if(!global) { probs <- c(alpha/2, 1-alpha/2) rank <- nsim * probs[2] } else { probs <- 1-alpha rank <- nsim * probs } if(abs(rank - round(rank)) > 0.001) warning(paste("confidence level", confidence, "corresponds to a non-integer rank", paren(rank), "so quantiles will be interpolated")) n <- npoints(X) # compute local functions localfun <- switch(fun, pcf=localpcf, Kest=localK, Lest=localK, pcfinhom=localpcfinhom, Kinhom=localKinhom, Linhom=localKinhom) f <- localfun(X, ...) theo <- f$theo # parse edge correction info correction <- attr(f, "correction") switch(correction, none = { ctag <- "un"; cadj <- "uncorrected" }, border = { ctag <- "bord"; cadj <- "border-corrected" }, translate = { ctag <- "trans"; cadj <- "translation-corrected" }, isotropic = { ctag <- "iso"; cadj <- "Ripley isotropic corrected" }) # first n columns are the local pcfs (etc) for the n points of X y <- as.matrix(as.data.frame(f))[, 1:n] # average them ymean <- rowMeans(y, na.rm=TRUE) # resample ystar <- matrix(, nrow=nrow(y), ncol=nsim) for(i in 1:nsim) { # resample n points with replacement ind <- sample(n, replace=TRUE) # average their local pcfs ystar[,i] <- rowMeans(y[,ind], na.rm=TRUE) } # compute quantiles if(!global) { # pointwise quantiles hilo <- apply(ystar, 1, quantile, probs=probs, na.rm=TRUE, type=type) } else { # quantiles of deviation ydif <- sweep(ystar, 1, ymean) ydev <- apply(abs(ydif), 2, max, na.rm=TRUE) crit <- quantile(ydev, probs=probs, na.rm=TRUE, type=type) hilo <- rbind(ymean - crit, ymean + crit) } # now transform from K to L if required if(fun %in% c("Lest", "Linhom")) { ymean <- sqrt(ymean/pi) theo <- sqrt(theo/pi) hilo <- sqrt(hilo/pi) } # create fv object df <- data.frame(r=f$r, theo=theo, 
ymean, lo=hilo[1,], hi=hilo[2,]) colnames(df)[3] <- ctag CIlevel <- paste(100 * confidence, "%% confidence", sep="") desc <- c("distance argument r", "theoretical Poisson %s", paste(cadj, "estimate of %s"), paste("lower", CIlevel, "limit for %s"), paste("upper", CIlevel, "limit for %s")) clabl <- paste("hat(%s)[", ctag, "](r)", sep="") labl <- c("r", "%s[pois](r)", clabl, "%s[loCI](r)", "%s[hiCI](r)") switch(fun, pcf={ fname <- "g" ; ylab <- quote(g(r)) }, Kest={ fname <- "K" ; ylab <- quote(K(r)) }, Lest={ fname <- "L" ; ylab <- quote(L(r)) }, pcfinhom={ fname <- "g[inhom]" ; ylab <- quote(g[inhom](r)) }, Kinhom={ fname <- "K[inhom]" ; ylab <- quote(K[inhom](r)) }, Linhom={ fname <- "L[inhom]" ; ylab <- quote(L[inhom](r)) }) g <- fv(df, "r", ylab, ctag, , c(0, max(f$r)), labl, desc, fname=fname) formula(g) <- . ~ r fvnames(g, ".") <- c(ctag, "hi", "lo", "theo") fvnames(g, ".s") <- c("hi", "lo") unitname(g) <- unitname(X) g } spatstat/R/rho2hat.R0000755000176000001440000002047012237642727014062 0ustar ripleyusers# # rho2hat.R # # Relative risk for pairs of covariate values # # $Revision: 1.17 $ $Date: 2013/04/25 06:37:43 $ # rho2hat <- function(object, cov1, cov2, ..., method=c("ratio", "reweight")) { cov1name <- short.deparse(substitute(cov1)) cov2name <- short.deparse(substitute(cov2)) callstring <- short.deparse(sys.call()) method <- match.arg(method) # validate model if(is.ppp(object) || inherits(object, "quad")) { model <- ppm(object, ~1, forcefit=TRUE) reference <- "area" modelcall <- NULL } else if(is.ppm(object)) { model <- object reference <- "model" modelcall <- model$call if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) } else stop("object should be a point pattern or a point process model") # interpret string "x" or "y" as a coordinate function getxyfun <- function(s) { switch(s, x = { function(x,y) { x } }, y = { function(x,y) { y } }, stop(paste("Unrecognised covariate name", sQuote(s)))) } if(is.character(cov1) && length(cov1) == 
1) { cov1name <- cov1 cov1 <- getxyfun(cov1name) } if(is.character(cov2) && length(cov2) == 1) { cov2name <- cov2 cov2 <- getxyfun(cov2name) } if( (cov1name == "x" && cov2name == "y") || (cov1name == "y" && cov2name == "x")) { # spatial relative risk isxy <- TRUE needflip <- (cov1name == "y" && cov2name == "x") X <- data.ppm(model) if(needflip) X <- flipxy(X) switch(method, ratio = { # ratio of smoothed intensity estimates den <- density(X, ...) sigma <- attr(den, "sigma") varcov <- attr(den, "varcov") W <- as.owin(den) rslt <- switch(reference, area = { den }, model = { lam <- predict(model, locations=W) if(needflip) lam <- flipxy(lam) lam <- blur(lam, sigma=sigma, varcov=varcov, normalise=TRUE) eval.im(den/lam) }) }, reweight = { # smoothed point pattern with weights = 1/reference W <- do.call.matched("as.mask", append(list(w=as.owin(X)), list(...))) gstarX <- switch(reference, area = { rep.int(area.owin(W), npoints(X)) }, model = { lam <- predict(model, locations=W) if(needflip) lam <- flipxy(lam) lam[X] }) rslt <- density(X, ..., weights=1/gstarX) sigma <- attr(rslt, "sigma") varcov <- attr(rslt, "varcov") }) Z12points <- X r1 <- W$xrange r2 <- W$yrange } else { # general case isxy <- FALSE # harmonise covariates if(is.function(cov1) && is.im(cov2)) { cov1 <- as.im(cov1, W=cov2) } else if(is.im(cov1) && is.function(cov2)) { cov2 <- as.im(cov2, W=cov1) } # evaluate each covariate at data points and at pixels stuff1 <- evalCovar(model, cov1) stuff2 <- evalCovar(model, cov2) # unpack values1 <- stuff1$values values2 <- stuff2$values # covariate values at each data point Z1X <- values1$ZX Z2X <- values2$ZX # covariate values at each pixel Z1values <- values1$Zvalues Z2values <- values2$Zvalues # model intensity lambda <- values1$lambda # ranges of each covariate r1 <- range(Z1X, Z1values, finite=TRUE) r2 <- range(Z2X, Z2values, finite=TRUE) scal <- function(x, r) { (x - r[1])/diff(r) } # scatterplot coordinates Z12points <- ppp(scal(Z1X, r1), scal(Z2X, r2), c(0,1), 
c(0,1)) Z12pixels <- ppp(scal(Z1values, r1), scal(Z2values, r2), c(0,1), c(0,1)) # normalising constants nX <- length(Z1X) npixel <- length(lambda) area <- area.owin(as.owin(model)) pixelarea <- area/npixel baseline <- if(reference == "area") rep.int(1, npixel) else lambda wts <- baseline * pixelarea switch(method, ratio = { # estimate intensities fhat <- density(Z12points, ...) sigma <- attr(fhat, "sigma") varcov <- attr(fhat, "varcov") ghat <- do.call("density.ppp", resolve.defaults(list(Z12pixels, weights=wts), list(...), list(sigma=sigma, varcov=varcov))) # compute ratio of smoothed densities rslt <- eval.im(fhat/ghat) }, reweight = { # compute smoothed intensity with weight = 1/reference ghat <- density(Z12pixels, weights=wts, ...) rslt <- density(Z12points, weights=1/ghat[Z12points], ...) sigma <- attr(rslt, "sigma") varcov <- attr(rslt, "varcov") }) } # add scale and label info attr(rslt, "stuff") <- list(isxy=isxy, cov1name=cov1name, cov2name=cov2name, r1=r1, r2=r2, reference=reference, modelcall=modelcall, callstring=callstring, Z12points=Z12points, sigma=sigma, varcov=varcov) class(rslt) <- c("rho2hat", class(rslt)) rslt } plot.rho2hat <- function(x, ..., do.points=FALSE) { xname <- short.deparse(substitute(x)) s <- attr(x, "stuff") # resolve "..." 
arguments rd <- resolve.defaults(list(...), list(add=FALSE, axes=!s$isxy, xlab=s$cov1name, ylab=s$cov2name)) # plot image plotparams <- get("plotparams", environment(plot.im)) do.call.matched("plot.im", resolve.defaults(list(x=x, axes=FALSE), list(...), list(main=xname)), extrargs=c(plotparams, "add", "zlim", "breaks")) # add axes if(rd$axes) { axisparams <- get("axisparams", environment(plot.im)) Axis <- function(..., extrargs=axisparams) { do.call.matched("axis", resolve.defaults(list(...)), extrargs=extrargs) } if(s$isxy) { # for (x,y) plots the image is at the correct physical scale xr <- x$xrange yr <- x$yrange spak <- 0.05 * max(diff(xr), diff(yr)) Axis(side=1, ..., at=pretty(xr), pos=yr[1] - spak) Axis(side=2, ..., at=pretty(yr), pos=xr[1] - spak) } else { # for other plots the image was scaled to the unit square rx <- s$r1 ry <- s$r2 px <- pretty(rx) py <- pretty(ry) Axis(side=1, labels=px, at=(px - rx[1])/diff(rx), ...) Axis(side=2, labels=py, at=(py - ry[1])/diff(ry), ...) } title(xlab=rd$xlab) title(ylab=rd$ylab) } if(do.points) { do.call.matched("plot.ppp", resolve.defaults(list(x=s$Z12points, add=TRUE), list(...)), extrargs=c("pch", "col", "cols", "bg", "cex", "lwd", "lty")) } invisible(NULL) } print.rho2hat <- function(x, ...) 
{
  ## Conclusion of print.rho2hat: describe how the scatterplot intensity
  ## estimate was computed, then print the underlying image values.
  s <- attr(x, "stuff")
  cat("Scatterplot intensity estimate (class rho2hat)\n")
  cat(paste("for the covariates", s$cov1name, "and", s$cov2name, "\n"))
  ## s$reference records whether values are absolute ("area")
  ## or relative to a fitted model ("model")
  switch(s$reference,
         area=cat("Function values are absolute intensities\n"),
         model={
           cat("Function values are relative to fitted model\n")
           print(s$modelcall)
         })
  cat(paste("Call:", s$callstring, "\n"))
  ## s$isxy is TRUE when the two covariates were the spatial coordinates,
  ## in which case no rescaling to the unit square was performed
  if(s$isxy) {
    cat("Obtained by spatial smoothing of original data\n")
    cat("Smoothing parameters used by density.ppp:\n")
  } else {
    cat("Obtained by transforming to the unit square and smoothing\n")
    cat("Smoothing parameters (on unit square) used by density.ppp:\n")
  }
  ## report whichever bandwidth specification (sigma and/or varcov) is present
  if(!is.null(s$sigma)) cat(paste("\tsigma = ", signif(s$sigma, 5), "\n"))
  if(!is.null(s$varcov)) { cat("\tvarcov =\n") ; print(s$varcov) }
  cat("Intensity values:\n")
  ## delegate printing of the pixel values to the next (image) print method
  NextMethod("print")
}
spatstat/R/rescue.rectangle.R0000755000176000001440000000145312237642727015744 0ustar ripleyusers#
# rescue.rectangle.R
#
# $Revision: 1.6 $  $Date: 2008/06/15 14:53:11 $
#
# Detect when a window of class "owin" is effectively a rectangle --
# either a binary mask whose pixels are all TRUE, or a single polygon
# with exactly 4 vertices and only two distinct x and y values (up to
# rounding error) -- and return the equivalent rectangle window.
# Otherwise the window is returned unchanged.
rescue.rectangle <- function(W) {
  verifyclass(W, "owin")
  # a mask with every pixel TRUE fills its entire bounding rectangle
  if(W$type == "mask" && all(W$m))
    return(owin(W$xrange, W$yrange, unitname=unitname(W)))
  if(W$type == "polygonal" && length(W$bdry) == 1) {
    x <- W$bdry[[1]]$x
    y <- W$bdry[[1]]$y
    if(length(x) == 4 && length(y) == 4) {
      # could be a rectangle
      # collapse coordinate values that agree up to numerical rounding error
      veryunique <- function(z) {
        uz <- sort(unique(z))
        epsilon <- 2 * .Machine$double.eps * diff(range(uz))
        close <- (diff(uz) <= epsilon)
        uz <- uz[c(TRUE, !close)]
        return(uz)
      }
      ux <- veryunique(x)
      uy <- veryunique(y)
      # exactly two distinct x values and two distinct y values => rectangle
      if(length(ux) == 2 && length(uy) == 2)
        return(owin(ux,uy, unitname=unitname(W)))
    }
  }
  return(W)
}
spatstat/R/distfunlpp.R0000644000176000001440000000126012237642727014674 0ustar ripleyusers#
# distfunlpp.R
#
# method for 'distfun' for class 'lpp'
#
# $Revision: 1.4 $ $Date: 2012/10/21 02:52:11 $
#
# Builds a 'linfun' (function on a linear network) giving the distance
# from any location on the network to the nearest point of X.
distfun.lpp <- local({
  distfun.lpp <- function(X, ...) {
    stopifnot(inherits(X, "lpp"))
    # force evaluation so X is captured in the closure's environment
    force(X)
    L <- as.linnet(X)
    f <- function(x, y=NULL, seg=NULL, tp=NULL, ...)
{ # L is part of the environment Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) d <- nncross.lpp(Y, X, what="dist") return(d) } f <- linfun(f, L) attr(f, "explain") <- uitleggen return(f) } uitleggen <- function(x, ...) { cat("Distance function for lpp object\n") X <- get("X", envir=environment(x)) print(X) } distfun.lpp }) spatstat/R/fii.R0000755000176000001440000001333112240743610013243 0ustar ripleyusers# # fii.R # # Class of fitted interpoint interactions # # fii <- function(interaction=NULL, coefs=numeric(0), Vnames=character(0), IsOffset=NULL) { if(is.null(interaction)) interaction <- Poisson() stopifnot(is.interact(interaction)) if(is.poisson.interact(interaction)) { if(length(Vnames) > 0) stop("Coefficients inappropriate for Poisson process") } if(is.null(IsOffset)) IsOffset <- rep.int(FALSE, length(Vnames)) else { stopifnot(is.logical(IsOffset)) stopifnot(length(IsOffset) == length(Vnames)) } out <- list(interaction=interaction, coefs=coefs, Vnames=Vnames, IsOffset=IsOffset) class(out) <- c("fii", class(out)) return(out) } summary.fii <- function(object, ...) { y <- unclass(object) INTERACT <- object$interaction coefs <- object$coefs Vnames <- object$Vnames IsOffset <- object$IsOffset y$poisson <- is.poisson.interact(INTERACT) thumbnail <- NULL if(y$poisson) { thumbnail <- "Poisson()" } else { if(!is.null(INTERACT$interpret)) { # invoke auto-interpretation feature sensible <- if(newstyle.coeff.handling(INTERACT)) (INTERACT$interpret)(coefs[Vnames[!IsOffset]], INTERACT) else (INTERACT$interpret)(coefs, INTERACT) if(!is.null(sensible)) { header <- paste("Fitted", sensible$inames) printable <- sensible$printable # Try to make a thumbnail description param <- sensible$param ipar <- INTERACT$par if(all(unlist(lapply(param, length)) == 1) && all(unlist(lapply(ipar, length)) == 1)) { allargs <- append(ipar, param) allargs <- lapply(allargs, signif, digits=4) thumbnail <- fakecallstring(INTERACT$creator, allargs) } } else { # no fitted interaction parameters (e.g. 
Hard Core) header <- NULL printable <- NULL thumbnail <- paste0(INTERACT$creator, "()") } } else { # fallback sensible <- NULL VN <- Vnames[!IsOffset] if(length(VN) > 0) { header <- "Fitted interaction terms" icoef <- coefs[VN] printable <- exp(unlist(icoef)) ricoef <- lapply(icoef, signif, digits=4) thumbnail <- fakecallstring(INTERACT$creator, ricoef) } else { header <- NULL printable <- NULL thumbnail <- paste0(INTERACT$creator, "()") } } y <- append(y, list(sensible=sensible, header=header, printable=printable, thumbnail=thumbnail)) } class(y) <- c("summary.fii", class(y)) return(y) } print.fii <- function(x, ...) { tiny <- resolve.1.default("tiny", list(...), list(tiny=FALSE)) print(summary(x), brief=TRUE, tiny=tiny) return(invisible(NULL)) } print.summary.fii <- function(x, ...) { secret <- resolve.defaults(list(...), list(prefix="Interaction: ", family=TRUE, brief=FALSE, tiny=FALSE)) if(secret$tiny) { # use thumbnail if available thumbnail <- x$thumbnail if(!is.null(thumbnail)) { cat(paste(thumbnail, "\n")) return(invisible(NULL)) } } brief <- secret$brief if(!brief) cat(secret$prefix) if(x$poisson) cat("Poisson process\n") else { print(x$interaction, family=secret$family, brief=TRUE) if(!is.null(x$printable)) { nvalues <- length(x$printable) nheader <- length(x$header) if(nvalues == 1) { cat(paste(x$header, ":\t", x$printable, "\n", sep="")) } else if(nvalues == nheader) { for(i in 1:nheader) { cat(x$header[i]) xpi <- x$printable[[i]] if(!is.list(xpi) && length(xpi) == 1) { cat(":\t", xpi, "\n") } else { cat(":\n") print(xpi) } } } else { cat(paste(x$header, ":\n", sep="")) print(x$printable) } } } if(!brief) { co <- x$coefs[x$Vnames[!x$IsOffset]] if(length(co) > 0) { cat("\nRelevant coefficients:\n") print(co) } } return(invisible(NULL)) } coef.summary.fii <- function(object, ...) 
{ object$printable }

# Interaction range ('reach') of a fitted interaction (class "fii").
#   x ........ fitted interaction object
#   epsilon .. tolerance passed to the interaction's own 'irange' function
# Returns 0 for a Poisson (no-interaction) model, Inf when the interaction
# does not supply an 'irange' function or when that function returns NA,
# and otherwise the range computed from the fitted coefficients.
reach.fii <- function(x, ..., epsilon=0) {
  inte <- x$interaction
  coeffs <- x$coefs
  Vnames <- x$Vnames
  if(is.poisson.interact(inte))
    return(0)
  # get 'irange' function from interaction object
  irange <- inte$irange
  if(is.null(irange))
    return(Inf)
  # apply 'irange' function using fitted coefficients
  # (new-style interactions receive only the coefficients named in Vnames)
  if(newstyle.coeff.handling(inte))
    ir <- irange(inte, coeffs[Vnames], epsilon=epsilon)
  else
    ir <- irange(inte, coeffs, epsilon=epsilon)
  if(is.na(ir))
    ir <- Inf
  return(ir)
}

# Plot method for fitted interactions.
# Delegates to the plot function supplied by the interaction's family;
# does nothing (with a message) for Poisson, and stops if the family
# provides no plot function.
plot.fii <- function(x, ...) {
  if(is.poisson.interact(x$interaction)) {
    message("Poisson interaction; nothing plotted")
    return(invisible(NULL))
  }
  plfun <- x$interaction$family$plot
  if(is.null(plfun))
    stop("Plotting not implemented for this type of interaction")
  plfun(x, ...)
}

# Generic: extract the fitted interaction from a fitted model object.
fitin <- function(object) {
  UseMethod("fitin")
}

# Method for fitted point process models (class "ppm").
# Uses the precomputed $fitin component when present; otherwise
# reconstructs an "fii" object from the model's interaction,
# coefficients and internal variable names.
fitin.ppm <- function(object) {
  f <- object$fitin
  if(!is.null(f))
    return(f)
  # For compatibility with older versions
  inte <- object$interaction
  if(is.null(inte))
    f <- fii() # Poisson
  else {
    coefs <- coef(object)
    Vnames <- object$internal$Vnames
    IsOffset <- object$internal$IsOffset
    # Internal names of regressor variables
    f <- fii(inte, coefs, Vnames, IsOffset)
  }
  return(f)
}

# Extract the underlying (unfitted) interaction object.
as.interact.fii <- function(object) {
  verifyclass(object, "fii")
  return(object$interaction)
}

# Extract the fitted coefficients of the interaction terms.
coef.fii <- function(object, ...)
{ verifyclass(object, "fii") return(object$coefs) } spatstat/R/psst.R0000755000176000001440000001356512237642727013513 0ustar ripleyusers# # psst.R # # Computes the GNZ contrast of delta-f for any function f # # $Revision: 1.5 $ $Date: 2013/04/25 06:37:43 $ # ################################################################################ # psst <- function(object, fun, r=NULL, breaks=NULL, ..., trend=~1, interaction=Poisson(), rbord=reach(interaction), truecoef=NULL, hi.res=NULL, funargs=list(correction="best"), verbose=TRUE) { if(inherits(object, "ppm")) fit <- object else if(inherits(object, "ppp")) fit <- ppm(quadscheme(object, ...), trend=trend, interaction=interaction, rbord=rbord) else if(inherits(object, "quad")) fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord) else stop("object should be a fitted point process model or a point pattern") rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) } else USED <- rep.int(TRUE, U$n) # basic statistics Win <- X$window npoints <- X$n area <- area.owin(Win) lambda <- npoints/area # adjustments to account for restricted domain of pseudolikelihood if(any(!USED)) { XUSED <- USED[Z] npoints.used <- sum(Z & USED) area.used <- sum(WQ[USED]) lambda.used <- npoints.used/area.used } else { XUSED <- rep.int(TRUE, npoints) npoints.used <- npoints area.used <- area lambda.used <- lambda } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", Win, lambda) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals resid <- residuals(fit, 
type="raw",drop=FALSE, coefs=truecoef, quad=hi.res) rescts <- with(resid, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~S(r), NULL), "theo", . ~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~S") # evaluate fun(X) for data fX <- do.call(fun, append(list(X, r=rvals), funargs)) fXunits <- unitname(fX) # Extract 'best' estimate only fX <- with(fX, .y) zero <- numeric(length(fX)) # sum over all quadrature points iused <- seq(U$n)[USED] nused <- length(iused) if(verbose) cat(paste("\nProcessing", nused, "quadrature points...")) # running sums & integrals sumX <- zero integ <- integ2 <- zero # template for X \cup {u} uX <- superimpose(U[1], X, W=Win, check=FALSE) Ux <- U$x Uy <- U$y # for(j in seq(nused)) { i <- iused[j] wi <- wc[i] if(Z[i]) { # data point fXi <- do.call(fun, append(list(X[-i], r=rvals), funargs)) fXi <- with(fXi, .y) deltaf <- fX - fXi sumX <- sumX + deltaf } else { # dummy point uX$x[1] <- Ux[i] uX$y[1] <- Uy[i] fuX <- do.call(fun, append(list(uX, r=rvals), funargs)) fuX <- with(fuX, .y) deltaf <- fuX - fX } integ <- integ + wi * deltaf integ2 <- integ2 + wi * deltaf^2 # if(j %% 500 == 0) { cat("[garbage ") gc() cat("collected]") } if(verbose) progressreport(j, nused) } sdv <- sqrt(integ2) res <- sumX - integ ans <- bind.fv(ans, data.frame(dat=sumX, com=integ, var=integ2, sd=sdv, hi=2*sdv, lo=-2*sdv, res=res, stdres=res/sdv), c("Sigma~Delta~S(r)", "bold(C)~Delta~S(r)", "bold(C)^2~Delta~S(r)", "sqrt(bold(C)^2~Delta~S(r))", "%s[hi](r)", "%s[lo](r)", "bold(R)~Delta~S(r)", "bold(T)~Delta~S(r)"), c("data pseudosum (contribution to %s)", "model compensator (contribution to %s)", "pseudovariance of %s", "sqrt(pseudovariance) of %s", "upper 2 sigma critical band for %s", "lower 2 sigma critical band for %s", "pseudoresidual function %s", 
"standardised pseudoresidual function %s"), "res") fvnames(ans,".") <- c("res", "hi", "lo", "theo") unitname(ans) <- fXunits # return(ans) } npfun <- function(X, ..., r) { npts <- npoints(X) # initialise fv object df <- data.frame(r=r, theo=0, npoint=npts) desc <- c("distance argument r", "value 0", "value equal to number of points") ans <- fv(df, "r", substitute(npoints(r), NULL), "npoint", . ~ r, alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"), desc, fname="npoints") unitname(ans) <- unitname(X) return(ans) } nndcumfun <- function(X, ..., r) { nn <- nndist(X) bk <- breakpts.from.r(r) # nn <- nn[nn <= bdist.points(X)] h <- whist(nn, bk$val) # initialise fv object df <- data.frame(r=r, theo=0, obs=h) desc <- c("distance argument r", "value 0", "observed count") ans <- fv(df, "r", substitute(nndcount(r), NULL), "obs", . ~ r, alim=c(0, max(r)), c("r","%s[theo](r)", "%s[obs](r)"), desc, fname="nndcount") unitname(ans) <- unitname(X) return(ans) } spatstat/R/datasetup.R0000755000176000001440000000064212237642727014504 0ustar ripleyusers# # When the package is installed, this tells us # the directory where the .tab files are stored # # Typically data/murgatroyd.R reads data-raw/murgatroyd.tab # and applies special processing # spatstat.rawdata.location <- function(...) { locn <- system.file("data-raw", package="spatstat") if(length(list(...)) != 0) locn <- paste(c(locn, ...), collapse=.Platform$file.sep) return(locn) } spatstat/R/unique.ppp.R0000755000176000001440000000674312242562146014616 0ustar ripleyusers# # unique.ppp.R # # $Revision: 1.21 $ $Date: 2013/11/19 03:16:20 $ # unique.ppp <- function(x, ..., warn=FALSE) { verifyclass(x, "ppp") dupe <- duplicated.ppp(x, ...) 
  # (conclusion of unique.ppp: drop duplicated points, warning if requested)
  if(!any(dupe)) return(x)
  if(warn) warning(paste(sum(dupe), "duplicated points were removed"),
                   call.=FALSE)
  return(x[!dupe])
}

# Method for duplicated() for point patterns (class "ppp").
# Returns a logical vector flagging points that duplicate an earlier point.
# rule="deldir" delegates to duplicatedxy (coordinates only);
# rule="spatstat" also requires equal marks, handled per mark format below.
duplicated.ppp <- function(x, ..., rule=c("spatstat", "deldir")) {
  verifyclass(x, "ppp")
  rule <- match.arg(rule)
  if(rule == "deldir")
    return(duplicatedxy(x))
  n <- npoints(x)
  switch(markformat(x),
         none = {
           # unmarked points
           # check for duplication of x and y separately (a necessary condition)
           xx <- x$x
           yy <- x$y
           possible <- duplicated(xx) & duplicated(yy)
           if(!any(possible))
             return(possible)
           # split by x coordinate of duplicated x values
           result <- possible
           xvals <- unique(xx[possible])
           for(xvalue in xvals) {
             sub <- (xx == xvalue)
             # compare y values
             result[sub] <- duplicated(yy[sub])
           }
         },
         vector = {
           # marked points - split by mark value
           # (recurse on the unmarked sub-pattern for each mark level)
           m <- marks(x)
           um <- if(is.factor(m)) levels(m) else unique(m)
           xx <- unmark(x)
           result <- logical(n)
           for(i in seq_along(um)) {
             sub <- (m == um[i])
             result[sub] <- duplicated.ppp(xx[sub])
           }
         },
         dataframe = {
           result <- duplicated(as.data.frame(x))
         },
         # the following are currently not supported
         hyperframe = {
           result <- duplicated(as.data.frame(x))
         },
         listof = {
           result <- duplicated(as.data.frame(as.hyperframe(x)))
         },
         stop(paste("Unknown mark type", sQuote(markformat(x))))
         )
  return(result)
}

# Generic: number of occurrences of each point/row in x.
multiplicity <- function(x) {
  UseMethod("multiplicity")
}

# Method for point patterns: for each point, 1 + the number of other
# points at distance 0 from it (with equal marks, if the pattern is marked).
multiplicity.ppp <- function(x) {
  verifyclass(x, "ppp")
  np <- npoints(x)
  # empty pattern => empty result
  if(np == 0) return(integer(0))
  # index pairs (i, j) of distinct points separated by distance 0,
  # i.e. coincident locations
  cl <- closepairs(x, 0, what="indices")
  I <- cl$i
  J <- cl$j
  if(length(I) == 0)
    return(rep.int(1, np))
  # for marked patterns, keep only coincident pairs whose marks agree
  switch(markformat(x),
         none = { },
         vector = {
           marx <- marks(x)
           agree <- (marx[I] == marx[J])
           I <- I[agree]
           J <- J[agree]
         },
         dataframe = {
           marx <- marks(x)
           agree <- apply(marx[I, ,drop=FALSE] == marx[J, ,drop=FALSE],
                          1, all)
           I <- I[agree]
           J <- J[agree]
         },
         hyperframe =,
         listof = stop("Not implemented for hyperframes or lists of marks")
         )
  if(length(I) == 0)
    return(rep.int(1, np))
  # multiplicity of point i = 1 + number of matching partners j
  JbyI <- split(J, factor(I, levels=1:np))
  result <- 1 + sapply(JbyI, length)
  return(result)
}
# Method for data frames: for each row of x, the total number of rows of x
# (including itself) that are identical to it.
multiplicity.data.frame <- local({
  # TRUE iff row i of a is identical to row j of b
  id <- function(i,j, a, b) identical(a[i,], b[j,])
  IdenticalRows <- Vectorize(id, c("i", "j"))
  multiplicity.data.frame <- function(x) {
    dup <- duplicated(x)
    nx <- nrow(x)
    # no duplicated rows => every row occurs exactly once
    if(!any(dup)) return(rep.int(1, nx))
    # ux: first occurrences; dx: the duplicated (repeat) rows
    ux <- x[!dup, , drop=FALSE]
    dx <- x[dup, , drop=FALSE]
    row.names(ux) <- NULL
    row.names(dx) <- NULL
    nu <- nrow(ux)
    nd <- nrow(dx)
    # hit[i, j] is TRUE when unique row i matches duplicate row j
    hit <- outer(seq_len(nu), seq_len(nd), IdenticalRows, a=ux, b=dx)
    # count = 1 (the row itself) + number of duplicates matching it
    counts <- 1 + rowSums(hit)
    result <- numeric(nx)
    result[!dup] <- counts
    # map each duplicate row back to its first matching unique row
    dumap <- apply(hit, 2, function(z) min(which(z)))
    result[dup] <- counts[dumap]
    return(result)
  }
  multiplicity.data.frame
})
spatstat/R/resid4plot.R0000755000176000001440000003523112237642727014575 0ustar ripleyusers#
#
# Residual plots:
#          resid4plot     four panels with matching coordinates
#          resid1plot     one or more unrelated individual plots
#          resid1panel    one panel of resid1plot
#
# $Revision: 1.17 $ $Date: 2013/07/17 02:37:41 $
#
#
resid4plot <- function(RES, plot.neg="image", plot.smooth="imagecontour",
                       spacing=0.1, srange=NULL, monochrome=FALSE,
                       main=NULL, ...)
{ clip <- RES$clip Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type typename <- RES$typename Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] # set up 2 x 2 plot with space wide <- diff(W$xrange) high <- diff(W$yrange) space <- spacing * max(wide,high) width <- wide + space + wide height <- high + space + high outerspace <- 3 * space plot(c(0, width) + outerspace * c(-1,1), c(0, height) + outerspace * c(-1,1), type="n", asp=1.0, axes=FALSE, xlab="", ylab="") # determine colour map if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(0, Yrange, Zrange), na.rm=TRUE) } else { stopifnot(is.numeric(srange) && length(srange) == 2) stopifnot(all(is.finite(srange))) } cols <- beachcolours(srange, if(type=="eem") 1 else 0, monochrome) # ------ plot residuals/marks (in top left panel) ------------ Xlowleft <- c(W$xrange[1],W$yrange[1]) vec <- c(0, high) + c(0, space) - Xlowleft # shift the original window Ws <- shift(W, vec) # shift the residuals Ys <- shift(Yclip,vec) # determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") # pre-plot the window(s) if(!redundant) { if(!clip) plot(Ys$window, add=TRUE, ...) else ploterodewin(Ws, Ys$window, add=TRUE, ...) } switch(plot.neg, discrete={ neg <- (Ys$marks < 0) if(any(c("maxsize", "markscale") %in% names(list(...)))) plot(Ys[neg], add=TRUE, ...) else { hackmax <- 0.5 * sqrt(area.owin(Wclip)/Yclip$n) plot(Ys[neg], add=TRUE, maxsize=hackmax, ...) } plot(Ys[!neg], add=TRUE, ...) }, image={ Yds <- shift(Ydens, vec) Yms <- shift(Ymass, vec) if(redundant) ploterodeimage(Ws, Yds, rangeZ=srange, colsZ=cols, ...) else if(type != "eem") image(Yds, add=TRUE, ribbon=FALSE, col=cols, zlim=srange, ...) plot(Yms, add=TRUE, ...) 
} ) # --------- plot smoothed surface (in bottom right panel) ------------ vec <- c(wide, 0) + c(space, 0) - Xlowleft Zs <- shift.im(Z, vec) switch(plot.smooth, image={ image(Zs, add=TRUE, col=cols, zlim=srange, ribbon=FALSE, ...)}, contour={contour(Zs, add=TRUE, ...)}, persp={ warning("persp not available in 4-panel plot") }, imagecontour={ image(Zs, add=TRUE, col=cols, zlim=srange, ribbon=FALSE, ...) contour(Zs, add=TRUE, ...) } ) lines(Zs$xrange[c(1,2,2,1,1)], Zs$yrange[c(1,1,2,2,1)]) # -------------- lurking variable plots ----------------------- do.lines <- function(x, y, defaulty=1, ...) { do.call("lines", resolve.defaults(list(x, y), list(...), list(lty=defaulty))) } # --------- lurking variable plot for x coordinate ------------------ # (cumulative or marginal) # in bottom left panel if(!is.null(RES$xmargin)) { a <- RES$xmargin observedV <- a$xZ observedX <- a$x theoreticalV <- a$ExZ theoreticalX <- a$x theoreticalSD <- NULL ylabel <- paste("marginal of", typename) } else if(!is.null(RES$xcumul)) { a <- RES$xcumul observedX <- a$empirical$covariate observedV <- a$empirical$value theoreticalX <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd ylabel <- paste("cumulative sum of", typename) } # pretty axis marks pX <- pretty(theoreticalX) if(is.null(theoreticalSD)) pV <- pretty(c(0,observedV,theoreticalV)) else pV <- pretty(c(0,observedV,theoreticalV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD)) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { high * (y - rr[1])/diff(rr) } xscale <- function(x) { x - W$xrange[1] } do.lines(xscale(observedX), yscale(observedV), 1, ...) do.lines(xscale(theoreticalX), yscale(theoreticalV), 2, ...) if(!is.null(theoreticalSD)) { do.lines(xscale(theoreticalX), yscale(theoreticalV + 2 * theoreticalSD), 3, ...) do.lines(xscale(theoreticalX), yscale(theoreticalV - 2 * theoreticalSD), 3, ...) 
} axis(side=1, pos=0, at=xscale(pX), labels=pX) text(xscale(mean(theoreticalX)), - outerspace, "x coordinate") axis(side=2, pos=0, at=yscale(pV), labels=pV) text(-outerspace, yscale(mean(pV)), ylabel, srt=90) # --------- lurking variable plot for y coordinate ------------------ # (cumulative or marginal) # in top right panel if(!is.null(RES$ymargin)) { a <- RES$ymargin observedV <- a$yZ observedY <- a$y theoreticalV <- a$EyZ theoreticalY <- a$y theoreticalSD <- NULL ylabel <- paste("marginal of", typename) } else if(!is.null(RES$ycumul)) { a <- RES$ycumul observedV <- a$empirical$value observedY <- a$empirical$covariate theoreticalY <- a$theoretical$covariate theoreticalV <- a$theoretical$mean theoreticalSD <- a$theoretical$sd ylabel <- paste("cumulative sum of", typename) } # pretty axis marks pY <- pretty(theoreticalY) if(is.null(theoreticalSD)) pV <- pretty(c(0,observedV,theoreticalV)) else pV <- pretty(c(0,observedV,theoreticalV, theoreticalV+2*theoreticalSD, theoreticalV-2*theoreticalSD)) # rescale smoothed values rr <- range(c(0, observedV, theoreticalV, pV)) yscale <- function(y) { y - W$yrange[1] + high + space} xscale <- function(x) { wide + space + wide * (rr[2] - x)/diff(rr) } do.lines(xscale(observedV), yscale(observedY), 1, ...) do.lines(xscale(theoreticalV), yscale(theoreticalY), 2, ...) if(!is.null(theoreticalSD)) { do.lines(xscale(theoreticalV+2*theoreticalSD), yscale(theoreticalY), 3, ...) do.lines(xscale(theoreticalV-2*theoreticalSD), yscale(theoreticalY), 3, ...) } axis(side=4, pos=width, at=yscale(pY), labels=pY) text(width + outerspace, yscale(mean(theoreticalY)), "y coordinate", srt=90) axis(side=3, pos=height, at=xscale(pV), labels=pV) text(xscale(mean(pV)), height + outerspace, ylabel) # if(!is.null(main)) title(main=main) invisible(NULL) } # # # Residual plot: single panel(s) # # resid1plot <- function(RES, opt, plot.neg="image", plot.smooth="imagecontour", srange=NULL, monochrome=FALSE, main=NULL, ...) 
{ clip <- RES$clip Y <- RES$Y Yclip <- RES$Yclip Z <- RES$smooth$Z W <- RES$W Wclip <- Yclip$window type <- RES$type Ydens <- RES$Ydens[Wclip, drop=FALSE] Ymass <- RES$Ymass[Wclip] # determine colour map if(opt$all || opt$marks || opt$smooth) { if(is.null(srange)) { Yrange <- if(!is.null(Ydens)) summary(Ydens)$range else NULL Zrange <- if(!is.null(Z)) summary(Z)$range else NULL srange <- range(c(0, Yrange, Zrange), na.rm=TRUE) } cols <- beachcolours(srange, if(type=="eem") 1 else 0, monochrome) } # determine main heading if(is.null(main)) { prefix <- if(opt$marks) NULL else if(opt$smooth) "Smoothed" else if(opt$xcumul) "Lurking variable plot for x coordinate\n" else if(opt$ycumul) "Lurking variable plot for y coordinate\n" else if(opt$xmargin) "Lurking variable plot for x coordinate\n" else if(opt$ymargin) "Lurking variable plot for y coordinate\n" main <- paste(prefix, RES$typename) } # ------------- residuals --------------------------------- if(opt$marks) { # determine whether pre-plotting the window(s) is redundant redundant <- (plot.neg == "image") && (type != "eem") && (Yclip$window$type == "mask") # pre-plot the window(s) if(redundant) plot(as.rectangle(W), box=FALSE, main="", ...) else { if(!clip) plot(W, main="", ...) else ploterodewin(W, Wclip, main="", ...) } switch(plot.neg, discrete={ neg <- (Y$marks < 0) if(any(c("maxsize", "markscale") %in% names(list(...)))) plot(Y[neg], add=TRUE, ...) else { hackmax <- 0.5 * sqrt(area.owin(Wclip)/Yclip$n) plot(Y[neg], add=TRUE, maxsize=hackmax, ...) } plot(Y[!neg], add=TRUE, ...) }, image={ if(redundant) ploterodeimage(W, Ydens, rangeZ=srange, colsZ=cols, ...) else if(type != "eem") image(Ydens, col=cols, zlim=srange, add=TRUE, ribbon=FALSE, ...) plot(Ymass, add=TRUE, ...) 
} ) title(main=main) } # ------------- smooth ------------------------------------- if(opt$smooth) { if(!clip) { switch(plot.smooth, image={image(Z, main=main, axes=FALSE, xlab="", ylab="", col=cols, zlim=srange, ribbon=FALSE, ...)}, contour={contour(Z, main=main, axes=FALSE, xlab="", ylab="", ...)}, persp={persp(Z, main=main, axes=FALSE, xlab="", ylab="", ...)}, imagecontour={ image(Z, main=main, axes=FALSE, xlab="", ylab="", col=cols, zlim=srange, ribbon=FALSE, ...) contour(Z, add=TRUE, ...) } ) } else { switch(plot.smooth, image={ plot(as.rectangle(W), box=FALSE, main=main, ...) ploterodeimage(W, Z, colsZ=cols, rangeZ=srange, ...) }, contour={ plot(W, main=main, ...) contour(Z, add=TRUE, ...) }, persp={ persp(Z, main=main, axes=FALSE, xlab="", ylab="", ...) # there is no 'add' option for 'persp' }, imagecontour={ plot(as.rectangle(W), box=FALSE, main=main, ...) ploterodeimage(W, Z, colsZ=cols, rangeZ=srange, ...) contour(Z, add=TRUE, ...) } ) } } # ------------ cumulative x ----------------------------------------- if(opt$xcumul) { a <- RES$xcumul obs <- a$empirical theo <- a$theoretical resid1panel(obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "x coordinate", "cumulative mark", main=main, ...) } # ------------ cumulative y ----------------------------------------- if(opt$ycumul) { a <- RES$ycumul obs <- a$empirical theo <- a$theoretical resid1panel(obs$covariate, obs$value, theo$covariate, theo$mean, theo$sd, "y coordinate", "cumulative mark", main=main, ...) } # ------------ x margin ----------------------------------------- if(opt$xmargin) { a <- RES$xmargin resid1panel(a$x, a$xZ, a$x, a$ExZ, NULL, "x coordinate", "marginal of residuals", main=main, ...) } # ------------ y margin ----------------------------------------- if(opt$ymargin) { a <- RES$ymargin resid1panel(a$y, a$yZ, a$y, a$EyZ, NULL, "y coordinate", "marginal of residuals", main=main, ...) 
} return(invisible(NULL)) } resid1panel <- function(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab, ylab, ...) { # work out plot range rX <- range(observedX, theoreticalX) rV <- range(c(0, observedV, theoreticalV)) if(!is.null(theoreticalSD)) rV <- range(c(rV, theoreticalV + 2*theoreticalSD, theoreticalV - 2*theoreticalSD)) # argument handling do.lines <- function(x, y, defaulty=1, ...) { do.call("lines", resolve.defaults(list(x, y), list(...), list(lty=defaulty))) } # start plot plot(rX, rV, type="n", xlab=xlab, ylab=ylab, ...) do.lines(observedX, observedV, 1, ...) do.lines(theoreticalX, theoreticalV, 2, ...) if(!is.null(theoreticalSD)) { do.lines(theoreticalX, theoreticalV + 2 * theoreticalSD, 3, ...) do.lines(theoreticalX, theoreticalV - 2 * theoreticalSD, 3, ...) } } # # ploterodewin <- function(W1, W2, col.edge=grey(0.75), col.inside=rgb(1,0,0), ...) { # internal use only # W2 is assumed to be an erosion of W1 switch(W1$type, rectangle={ plot(W1, ...) plot(W2, add=TRUE, lty=2) }, polygonal={ plot(W1, ...) 
plot(W2, add=TRUE, lty=2) }, mask={ Z <- as.im(W1) x <- as.vector(raster.x(W1)) y <- as.vector(raster.y(W1)) ok <- inside.owin(x, y, W2) Z$v[ok] <- 2 plot(Z, ..., col=c(col.edge, col.inside), add=TRUE, ribbon=FALSE) } ) } ploterodeimage <- function(W, Z, ..., Wcol=grey(0.75), rangeZ, colsZ) { # Internal use only # Image Z is assumed to live on a subset of mask W # colsZ are the colours for the values in the range 'rangeZ' if(W$type != "mask") { plot(W, add=TRUE) W <- as.mask(W) } # Extend the colour map to include an extra colour for pixels in W # (1) Add the desired colour of W to the colour map pseudocols <- c(Wcol, colsZ) # (2) Breakpoints bks <- seq(from=rangeZ[1], to=rangeZ[2], length=length(colsZ)+1) dZ <- diff(bks)[1] pseudobreaks <- c(rangeZ[1] - dZ, bks) # (3) Determine a fake value for pixels in W Wvalue <- rangeZ[1] - dZ/2 # Create composite image on W grid # (with W-pixels initialised to Wvalue) X <- as.im(Wvalue, W) # Look up Z-values of W-pixels xx <- as.vector(raster.x(W)) yy <- as.vector(raster.y(W)) Zvalues <- lookup.im(Z, xx, yy, naok = TRUE, strict=FALSE) # Overwrite pixels in Z inZ <- !is.na(Zvalues) X$v[inZ] <- Zvalues[inZ] image(X, ..., add=TRUE, ribbon=FALSE, col=pseudocols, breaks=pseudobreaks) return(list(X, pseudocols, pseudobreaks)) } spatstat/R/rmhstart.R0000755000176000001440000000465412237642727014365 0ustar ripleyusers# # # rmhstart.R # # $Revision: 1.10 $ $Date: 2012/08/14 06:39:11 $ # # rmhstart <- function(start, ...) { UseMethod("rmhstart") } rmhstart.rmhstart <- function(start, ...) { return(start) } rmhstart.list <- function(start, ...) 
{ st <- do.call.matched("rmhstart.default", start) return(st) } rmhstart.default <- function(start=NULL, ..., n.start=NULL, x.start=NULL) { if(!is.null(start) || length(list(...)) > 0) stop("Syntax should be rmhstart(n.start) or rmhstart(x.start)") ngiven <- !is.null(n.start) xgiven <- !is.null(x.start) # n.start and x.start are incompatible if(ngiven && xgiven) stop("Give only one of the arguments n.start and x.start") given <- if(ngiven) "n" else if(xgiven) "x" else "none" # Validate arguments if(ngiven && !is.numeric(n.start)) stop("n.start should be numeric") if(xgiven) { # We can't check x.start properly because we don't have the relevant window # Just check that it is INTERPRETABLE as a point pattern xx <- as.ppp(x.start, W=ripras, fatal=FALSE) if(is.null(xx)) stop(paste("x.start should be a point pattern object,", "or coordinate data in a format recognised by as.ppp")) } else xx <- NULL ################################################################### # return augmented list out <- list(n.start=n.start, x.start=x.start, given=given, xx=xx) class(out) <- c("rmhstart", class(out)) return(out) } print.rmhstart <- function(x, ...) { verifyclass(x, "rmhstart") cat("Metropolis-Hastings algorithm starting parameters\n") cat("Initial state: ") switch(x$given, none={ cat("not given\n") }, x = { cat("given as x.start\n") if(is.ppp(x$x.start)) print(x$x.start) else cat(paste("(x,y) coordinates of", x$xx$n, "points (window unspecified)\n")) cat("\n") }, n = { n.start <- x$n.start nstring <- if(length(n.start) == 1) paste(n.start) else paste("(", paste(n.start, collapse=","), ")", sep="") cat(paste("number fixed at n.start =", nstring, "\n")) } ) } update.rmhstart <- function(object, ...) 
{ do.call.matched("rmhstart.default", resolve.defaults(list(...), as.list(object))) } spatstat/R/distances.psp.R0000755000176000001440000001130012237642727015261 0ustar ripleyusers# # distances.psp.R # # Hausdorff distance and Euclidean separation for psp objects # # $Revision: 1.9 $ $Date: 2013/04/25 05:13:34 $ # # pairdist.psp <- function(X, ..., method="Fortran", type="Hausdorff") { verifyclass(X, "psp") if(X$n == 0) return(matrix(, 0, 0)) type <- pickoption("type", type, c(Hausdorff="Hausdorff", hausdorff="Hausdorff", separation="separation")) D12 <- AsymmDistance.psp(X, X, metric=type, method=method) switch(type, Hausdorff={ # maximum is Hausdorff metric D <- array(pmax.int(D12, t(D12)), dim=dim(D12)) }, separation={ # Take minimum of endpoint-to-segment distances D <- array(pmin.int(D12, t(D12)), dim=dim(D12)) # Identify any pairs of segments which cross cross <- test.selfcrossing.psp(X) # Assign separation = 0 to such pairs D[cross] <- 0 }) return(D) } crossdist.psp <- function(X, Y, ..., method="Fortran", type="Hausdorff") { verifyclass(X, "psp") Y <- as.psp(Y) if(X$n * Y$n == 0) return(matrix(, X$n, Y$n)) type <- pickoption("type", type, c(Hausdorff="Hausdorff", hausdorff="Hausdorff", separation="separation")) DXY <- AsymmDistance.psp(X, Y, metric=type, method=method) DYX <- AsymmDistance.psp(Y, X, metric=type, method=method) switch(type, Hausdorff={ # maximum is Hausdorff metric D <- array(pmax.int(DXY, t(DYX)), dim=dim(DXY)) }, separation={ # Take minimum of endpoint-to-segment distances D <- array(pmin.int(DXY, t(DYX)), dim=dim(DXY)) # Identify pairs of segments which cross cross <- test.crossing.psp(X, Y) # Assign separation = 0 to such pairs D[cross] <- 0 }) return(D) } nndist.psp <- function(X, ..., k=1, method="Fortran") { verifyclass(X, "psp") if(!(is.vector(k) && all(k %% 1 == 0) && all(k >= 1))) stop("k should be a positive integer or integers") n <- nobjects(X) kmax <- max(k) lenk <- length(k) result <- if(lenk == 1) numeric(n) else matrix(, 
nrow=n, ncol=lenk) if(n == 0) return(result) if(kmax >= n) { # not enough objects # fill with Infinite values result[] <- Inf if(any(ok <- (kmax < n))) { # compute the lower-order nnd's result[, ok] <- nndist.psp(X, ..., k=k[ok], method=method) } return(result) } # normal case: D <- pairdist.psp(X, ..., method=method) diag(D) <- Inf if(kmax == 1) NND <- apply(D, 1, min) else NND <- t(apply(D, 1, function(z,k) { sort(z)[k] }, k=k))[, , drop=TRUE] return(NND) } # ..... AsymmDistance.psp ..... # # If metric="Hausdorff": # this function computes, for each pair of segments A = X[i] and B = Y[j], # the value max_{a in A} d(a,B) = max_{a in A} min_{b in B} ||a-b|| # which appears in the definition of the Hausdorff metric. # Since the distance function d(a,B) of a segment B is a convex function, # the maximum is achieved at an endpoint of A. So the algorithm # actually computes h(A,B) = max (d(e_1,B), d(e_2,B)) where e_1, e_2 # are the endpoints of A. And H(A,B) = max(h(A,B),h(B,A)). # # If metric="separation": # the function computes, for each pair of segments A = X[i] and B = Y[j], # the MINIMUM distance from an endpoint of A to any point of B. # t(A,B) = min (d(e_1,B), d(e_2,B)) # where e_1, e_2 are the endpoints of A. # Define the separation distance # s(A,B) = min_{a in A} min_{b in B} ||a-b||. # The minimum (a*, b*) occurs either when a* is an endpoint of A, # or when b* is an endpoint of B, or when a* = b* (so A and B intersect). # (If A and B are parallel, the minimum is still achieved at an endpoint) # Thus s(A,B) = min(t(A,B), t(B,A)) unless A and B intersect. 
AsymmDistance.psp <- function(X, Y, metric="Hausdorff", method=c("Fortran", "C", "interpreted")) { method <- match.arg(method) # Extract endpoints of X EX <- endpoints.psp(X, "both") idX <- attr(EX, "id") # compute shortest dist from each endpoint of X to each segment of Y DPL <- distppll(cbind(EX$x,EX$y), Y$ends, mintype=0, method=method) # for each segment in X, maximise or minimise over the two endpoints Dist <- as.vector(DPL) Point <- as.vector(idX[row(DPL)]) Segment <- as.vector(col(DPL)) switch(metric, Hausdorff={ DXY <- tapply(Dist, list(factor(Point), factor(Segment)), max) }, separation={ DXY <- tapply(Dist, list(factor(Point), factor(Segment)), min) }) return(DXY) } spatstat/R/lineardisc.R0000755000176000001440000001441012237642727014625 0ustar ripleyusers# # # disc.R # # $Revision: 1.17 $ $Date: 2013/05/01 07:22:48 $ # # Compute the disc of radius r in a linear network # # lineardisc <- function(L, x=locator(1), r, plotit=TRUE, cols=c("blue", "red", "green")) { # L is the linear network (object of class "linnet") # x is the centre point of the disc # r is the radius of the disc # stopifnot(inherits(L, "linnet")) check.1.real(r) lines <- L$lines vertices <- L$vertices lengths <- lengths.psp(lines) win <- L$window # # project x to nearest segment if(missing(x)) x <- clickppp(1, win, add=TRUE) else x <- as.ppp(x, win) pro <- project2segment(x, lines) # which segment? startsegment <- pro$mapXY # parametric position of x along this segment startfraction <- pro$tp # vertices at each end of this segment A <- L$from[startsegment] B <- L$to[startsegment] # distances from x to A and B dxA <- startfraction * lengths[startsegment] dxB <- (1-startfraction) * lengths[startsegment] # is r large enough to reach both A and B? 
startfilled <- (max(dxA, dxB) <= r)
  # compute vector of shortest path distances from x to each vertex j,
  # going through A:
  dxAv <- dxA + L$dpath[A,]
  # going through B:
  dxBv <- dxB + L$dpath[B,]
  # going either through A or through B:
  dxv <- pmin.int(dxAv, dxBv)
  # Thus dxv[j] is the shortest path distance from x to vertex j.
  #
  # Determine which vertices are inside the disc of radius r
  covered <- (dxv <= r)
  # Thus covered[j] is TRUE if the j-th vertex is inside the disc.
  #
  # Determine which line segments are completely inside the disc
  #
  from <- L$from
  to   <- L$to
  # ( a line segment is inside the disc if the shortest distance
  # from x to one of its endpoints, plus the length of the segment,
  # is less than r ....
  allinside <- (dxv[from] + lengths <= r) | (dxv[to] + lengths <= r)
  # ... or alternatively, if the sum of the
  # two residual distances exceeds the length of the segment )
  residfrom <- pmax.int(0, r - dxv[from])
  residto   <- pmax.int(0, r - dxv[to])
  allinside <- allinside | (residfrom + residto >= lengths)
  # start segment is special: use the direct test computed above
  allinside[startsegment] <- startfilled
  # Thus allinside[k] is TRUE if the k-th segment is inside the disc
  # Collect all these segments
  disclines <- lines[allinside]
  #
  # Determine which line segments cross the boundary of the disc
  # (at least one endpoint covered, but the segment not entirely inside)
  boundary <- (covered[from] | covered[to]) & !allinside
  # For each of these, calculate the remaining distance at each end
  resid.from <- ifelseXB(boundary, pmax.int(r - dxv[from], 0), 0)
  resid.to   <- ifelseXB(boundary, pmax.int(r - dxv[to],   0), 0)
  # Where the remaining distance is nonzero, create segment and endpoint
  okfrom <- (resid.from > 0)
  okfrom[startsegment] <- FALSE
  if(any(okfrom)) {
    # partial segments emanating from the 'from' vertex;
    # tp is the fraction of the segment length that lies inside the disc
    v0 <- vertices[from[okfrom]]
    v1 <- vertices[to[okfrom]]
    tp <- (resid.from/lengths)[okfrom]
    vfrom <- ppp((1-tp)*v0$x + tp*v1$x,
                 (1-tp)*v0$y + tp*v1$y,
                 window=win)
    extralinesfrom <- as.psp(from=v0, to=vfrom)
  } else vfrom <- extralinesfrom <- NULL
  #
  okto <- (resid.to > 0)
  okto[startsegment] <- FALSE
  if(any(okto)) {
    # partial segments emanating from the 'to' vertex
    v0 <- vertices[to[okto]]
    v1 <- vertices[from[okto]]
    tp <- (resid.to/lengths)[okto]
    vto <- ppp((1-tp)*v0$x + tp*v1$x,
               (1-tp)*v0$y + tp*v1$y,
               window=win)
    extralinesto <- as.psp(from=v0, to=vto)
  } else vto <- extralinesto <- NULL
  #
  # deal with special case where start segment is not fully covered
  if(!startfilled) {
    # clip the disc to the start segment itself, parametrised by
    # fraction t in [0,1] from vertex A to vertex B
    vA <- vertices[A]
    vB <- vertices[B]
    rfrac <- r/lengths[startsegment]
    tleft <- pmax.int(startfraction-rfrac, 0)
    tright <- pmin.int(startfraction+rfrac, 1)
    vleft <- ppp((1-tleft) * vA$x + tleft * vB$x,
                 (1-tleft) * vA$y + tleft * vB$y,
                 window=win)
    vright <- ppp((1-tright) * vA$x + tright * vB$x,
                  (1-tright) * vA$y + tright * vB$y,
                  window=win)
    startline <- as.psp(from=vleft, to=vright)
    # endpoints of the disc on the start segment (only where the disc
    # does not reach the corresponding vertex)
    startends <- superimpose(if(!covered[A]) vleft else NULL,
                             if(!covered[B]) vright else NULL)
  } else startline <- startends <- NULL
  #
  # combine all lines
  disclines <- superimpose(disclines, extralinesfrom, extralinesto, startline,
                           W=win, check=FALSE)
  # combine all disc endpoints
  # (vertices at exactly distance r are themselves endpoints of the disc)
  discends <- superimpose(vfrom, vto, vertices[dxv == r], startends,
                          W=win, check=FALSE)
  #
  if(plotit) {
    if(dev.cur() == 1) {
      # null device - initialise a plot
      plot(L, main="")
    }
    points(x, col=cols[1], pch=16)
    plot(disclines, add=TRUE, col=cols[2], lwd=2)
    plot(discends, add=TRUE, col=cols[3], pch=16)
  }
  return(list(lines=disclines, endpoints=discends))
}

# countends
#   Count the endpoints of the disc of radius r[i] centred at the point x[i]
#   of the linear network L, for each i, delegating the computation to the
#   C routine 'Ccountends'. Returns an integer vector of counts.
countends <- function(L, x=locator(1), r) {
  # L is the linear network (object of class "linnet")
  # x is the centre point of the disc
  # r is the radius of the disc
  #
  stopifnot(inherits(L, "linnet"))
  lines <- L$lines
  vertices <- L$vertices
  lengths <- lengths.psp(lines)
  dpath <- L$dpath
  win <- L$window
  nv <- vertices$n
  ns <- lines$n
  # get x (interactively if not supplied)
  if(missing(x))
    x <- clickppp(1, win, add=TRUE)
  else
    x <- as.ppp(x, win)
  #
  np <- npoints(x)
  # one radius per centre point is required
  if(length(r) != np)
    stop("Length of vector r does not match number of points in x")
  # project x to nearest segment
  pro <- project2segment(x, lines)
  # which segment?
startsegment <- pro$mapXY # parametric position of x along this segment startfraction <- pro$tp # convert indices to C seg0 <- startsegment - 1L from0 <- L$from - 1L to0 <- L$to - 1L toler <- 0.001 * min(lengths) DUP <- spatstat.options("dupC") zz <- .C("Ccountends", np = as.integer(np), f = as.double(startfraction), seg = as.integer(seg0), r = as.double(r), nv = as.integer(vertices$n), xv = as.double(vertices$x), yv = as.double(vertices$y), ns = as.integer(ns), from = as.integer(from0), to = as.integer(to0), dpath = as.double(dpath), lengths = as.double(lengths), toler=as.double(toler), nendpoints = as.integer(integer(np)), DUP=DUP) # PACKAGE="spatstat") zz$nendpoints } spatstat/R/deltametric.R0000755000176000001440000000136712237642727015014 0ustar ripleyusers# # deltametric.R # # Delta metric # # $Revision: 1.3 $ $Date: 2010/07/10 11:42:08 $ # deltametric <- function(A, B, p=2, c=Inf, ...) { stopifnot(is.numeric(p) && length(p) == 1 && p > 0) # ensure frames are identical bb <- bounding.box(as.rectangle(A), as.rectangle(B)) # enforce identical frames A <- rebound(A, bb) B <- rebound(B, bb) # compute distance functions dA <- distmap(A, ...) dB <- distmap(B, ...) 
if(!is.infinite(c)) { dA <- eval.im(pmin.int(dA, c)) dB <- eval.im(pmin.int(dB, c)) } if(is.infinite(p)) { # L^infinity Z <- eval.im(abs(dA-dB)) delta <- summary(Z)$max } else { # L^p Z <- eval.im(abs(dA-dB)^p) iZ <- summary(Z)$mean delta <- iZ^(1/p) } return(delta) } spatstat/R/eem.R0000755000176000001440000000062212237642727013256 0ustar ripleyusers# eem.R # # Computes the Stoyan-Grabarnik "exponential energy weights" # # $Revision: 1.4 $ $Date: 2008/07/25 19:51:05 $ # eem <- function(fit, check=TRUE) { verifyclass(fit, "ppm") lambda <- fitted.ppm(fit, check=check) Q <- quad.ppm(fit) Z <- is.data(Q) eemarks <- 1/lambda[Z] attr(eemarks, "type") <- "eem" attr(eemarks, "typename") <- "exponential energy marks" return(eemarks) } spatstat/R/exactdt.R0000755000176000001440000000415712237642727014153 0ustar ripleyusers# # exactdt.S # S function exactdt() for exact distance transform # # $Revision: 4.14 $ $Date: 2012/04/06 09:47:31 $ # "exactdt"<- function(X, ...) { verifyclass(X, "ppp") w <- X$window if(spatstat.options("exactdt.checks.data")) { # check validity of ppp structure bb <- as.rectangle(w) xr <- bb$xrange yr <- bb$yrange rx <- range(X$x) ry <- range(X$y) die <- function(why) { stop(paste("ppp object format corrupted:", why)) } if(rx[1] < xr[1] || rx[2] > xr[2]) die("x-coordinates out of bounds") if(ry[1] < yr[1] || ry[2] > yr[2]) die("y-coordinates out of bounds") if(length(X$x) != length(X$y)) die("x and y vectors have different length") if(length(X$x) != X$n) die("length of x,y vectors does not match n") } w <- as.mask(w, ...) 
# dimensions of result nr <- w$dim[1] nc <- w$dim[2] # margins in C array mr <- 2 mc <- 2 # full dimensions of allocated storage Nnr <- nr + 2 * mr Nnc <- nc + 2 * mc N <- Nnr * Nnc # output rows & columns (R indexing) rmin <- mr + 1 rmax <- Nnr - mr cmin <- mc + 1 cmax <- Nnc - mc # go DUP <- spatstat.options("dupC") res <- .C("exact_dt_R", as.double(X$x), as.double(X$y), as.integer(X$n), as.double(w$xrange[1]), as.double(w$yrange[1]), as.double(w$xrange[2]), as.double(w$yrange[2]), nr = as.integer(nr), nc = as.integer(nc), mr = as.integer(mr), mc = as.integer(mc), distances = as.double(double(N)), indices = as.integer(integer(N)), boundary = as.double(double(N)), DUP=DUP) # PACKAGE="spatstat") # extract dist <- matrix(res$distances, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] inde <- matrix(res$indices, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] bdry <- matrix(res$boundary, ncol=Nnc, nrow=Nnr, byrow = TRUE)[rmin:rmax, cmin:cmax] # convert index from C to R indexing inde <- inde + 1L return(list(d = dist, i = inde, b = bdry, w=w)) } spatstat/R/quadratresample.R0000755000176000001440000000226112237642727015703 0ustar ripleyusers# # quadratresample.R # # resample a point pattern by resampling quadrats # # $Revision: 1.5 $ $Date: 2010/11/25 02:58:49 $ # quadratresample <- function(X, nx, ny=nx, ..., replace=FALSE, nsamples=1, verbose=(nsamples > 1)) { stopifnot(is.ppp(X)) if(X$window$type != "rectangle") stop("Resampling is only implemented for rectangular windows") # create tessellation A <- quadrats(X, nx=nx, ny=ny) # split data over tessellation B <- split(X, A) nq <- length(B) # determine bottom left corner of each tile V <- lapply(B, function(z) { w <- z$window; c(w$xrange[1], w$yrange[1]) }) out <- list() if(verbose) cat("Generating resampled patterns...") for(i in 1:nsamples) { # resample tiles ind <- sample(1:nq, nq, replace=replace) Xresampled <- X Bresampled <- B for(j in 1:nq) { k <- ind[j] Bresampled[[j]] <- shift(B[[k]], 
unlist(V[[j]]) - unlist(V[[k]])) } split(Xresampled, A) <- Bresampled out[[i]] <- Xresampled if(verbose) progressreport(i, nsamples) } if(nsamples == 1) return(out[[1]]) return(as.listof(out)) } spatstat/R/quadratmtest.R0000644000176000001440000000074212237642730015220 0ustar ripleyusers# # method for 'quadrat.test' for class mppm # # $Revision: 1.7 $ $Date: 2012/09/06 03:50:17 $ # quadrat.test.mppm <- function(X, ...) { Xname <- short.deparse(substitute(X)) if(!is.poisson.mppm(X)) stop("Model is not a Poisson point process") subs <- subfits(X) tests <- lapply(subs, quadrat.test.ppm, ..., fitname=Xname) class(tests) <- c("listof", class(tests)) df.est <- length(coef(X)) return(pool.quadrattest(tests, Xname=Xname, df.est=df.est)) } spatstat/R/images.R0000755000176000001440000006532012237642727013763 0ustar ripleyusers# # images.R # # $Revision: 1.103 $ $Date: 2013/07/25 09:58:59 $ # # The class "im" of raster images # # im() object creator # # is.im() tests class membership # # rasterx.im(), rastery.im() # raster X and Y coordinates # # nearest.pixel() # lookup.im() # facilities for looking up pixel values # ################################################################ ######## basic support for class "im" ################################################################ # # creator im <- function(mat, xcol=seq_len(ncol(mat)), yrow=seq_len(nrow(mat)), xrange=NULL, yrange=NULL, unitname=NULL) { typ <- typeof(mat) if(typ == "double") typ <- "real" miss.xcol <- missing(xcol) miss.yrow <- missing(yrow) # determine dimensions if(is.matrix(mat)) { nr <- nrow(mat) nc <- ncol(mat) if(length(xcol) != nc) stop("Length of xcol does not match ncol(mat)") if(length(yrow) != nr) stop("Length of yrow does not match nrow(mat)") } else { if(miss.xcol || miss.yrow) stop(paste(sQuote("mat"), "is not a matrix and I can't guess its dimensions")) stopifnot(length(mat) == length(xcol) * length(yrow)) nc <- length(xcol) nr <- length(yrow) } # deal with factor case if(is.factor(mat)) { 
typ <- "factor" } else if(!is.null(lev <- levels(mat))) { typ <- "factor" mat <- factor(mat, levels=lev) } # Ensure 'mat' is a matrix (without destroying factor information) if(!is.matrix(mat)) dim(mat) <- c(nr, nc) # set up coordinates if((miss.xcol || length(xcol) <= 1) && !is.null(xrange) ) { # use 'xrange' xstep <- diff(xrange)/nc xcol <- seq(from=xrange[1] + xstep/2, to=xrange[2] - xstep/2, length.out=nc) } else if(length(xcol) > 1) { # use 'xcol' # ensure spacing is constant xcol <- seq(from=min(xcol), to=max(xcol), length.out=length(xcol)) xstep <- diff(xcol)[1] xrange <- range(xcol) + c(-1,1) * xstep/2 } else stop("Cannot determine pixel width") if((miss.yrow || length(yrow) <= 1) && !is.null(yrange)) { # use 'yrange' ystep <- diff(yrange)/nr yrow <- seq(from=yrange[1] + ystep/2, to=yrange[2] - ystep/2, length.out=nr) } else if(length(yrow) > 1) { # use 'yrow' # ensure spacing is constant yrow <- seq(from=min(yrow), to=max(yrow), length.out=length(yrow)) ystep <- diff(yrow)[1] yrange <- range(yrow) + c(-1,1) * ystep/2 } else stop("Cannot determine pixel height") unitname <- as.units(unitname) # get rid of those annoying 8.67e-19 printouts swat <- function(x) {ifelseAX(abs(x) < .Machine$double.eps, 0, x)} xrange <- swat(xrange) yrange <- swat(yrange) out <- list(v = mat, dim = c(nr, nc), xrange = xrange, yrange = yrange, xstep = xstep, ystep = ystep, xcol = xcol, yrow = yrow, type = typ, units = unitname) class(out) <- "im" return(out) } is.im <- function(x) { inherits(x,"im") } levels.im <- function(x) { levels(x$v) } "levels<-.im" <- function(x, value) { if(x$type != "factor") stop("image is not factor-valued") levels(x$v) <- value x } ################################################################ ######## methods for class "im" ################################################################ shift.im <- function(X, vec=c(0,0), ..., origin=NULL) { verifyclass(X, "im") if(!is.null(origin)) { stopifnot(is.character(origin)) if(!missing(vec)) 
warning("argument vec ignored; overruled by argument origin") origin <- pickoption("origin", origin, c(centroid="centroid", midpoint="midpoint", bottomleft="bottomleft")) W <- as.owin(X) locn <- switch(origin, centroid={ unlist(centroid.owin(W)) }, midpoint={ c(mean(W$xrange), mean(W$yrange)) }, bottomleft={ c(W$xrange[1], W$yrange[1]) }) return(shift(X, -locn)) } X$xrange <- X$xrange + vec[1] X$yrange <- X$yrange + vec[2] X$xcol <- X$xcol + vec[1] X$yrow <- X$yrow + vec[2] return(X) } "[.im" <- function(x, i, j, ..., drop=TRUE, raster=NULL, rescue=is.owin(i)) { # detect 'blank' arguments like second argument in x[i, ] ngiven <- length(sys.call()) nmatched <- length(match.call()) nblank <- ngiven - nmatched itype <- if(missing(i)) "missing" else "given" jtype <- if(missing(j)) "missing" else "given" if(nblank == 1) { if(!missing(i)) jtype <- "blank" if(!missing(j)) itype <- "blank" } else if(nblank == 2) { itype <- jtype <- "blank" } if(missing(rescue) && itype != "given") rescue <- FALSE if(itype == "missing" && jtype == "missing") { # no indices: return entire image out <- if(is.null(raster)) x else as.im(raster) xy <- expand.grid(y=out$yrow,x=out$xcol) if(!is.null(raster)) { # resample image on new pixel raster values <- lookup.im(x, xy$x, xy$y, naok=TRUE) out <- im(values, out$xcol, out$yrow, unitname=unitname(out)) } if(!drop) return(out) else { v <- out$v return(v[!is.na(v)]) } } if(itype == "given") { # ................................................................. # Try spatial index # ................................................................. 
if(verifyclass(i, "owin", fatal=FALSE)) { if(jtype == "given") warning("Argument j ignored") # 'i' is a window # if drop = FALSE, just set values outside window to NA # if drop = TRUE, extract values for all pixels inside window # as an image (if 'i' is a rectangle) # or as a vector (otherwise) out <- if(is.null(raster)) x else as.im(raster) xy <- expand.grid(y=out$yrow,x=out$xcol) if(!is.null(raster)) { # resample image on new pixel raster values <- lookup.im(x, xy$x, xy$y, naok=TRUE) out <- im(values, out$xcol, out$yrow, unitname=unitname(out)) } inside <- inside.owin(xy$x, xy$y, i) if(!drop) { out$v[!inside] <- NA return(out) } else if(!rescue || i$type != "rectangle") { values <- out$v[inside] return(values) } else { disjoint <- function(r, s) { (r[2] < s[1]) || (r[1] > s[2]) } clip <- function(r, s) { c(max(r[1],s[1]), min(r[2],s[2])) } inrange <- function(x, r) { (x >= r[1]) & (x <= r[2]) } if(disjoint(i$xrange, x$xrange) || disjoint(i$yrange, x$yrange)) # empty intersection return(numeric(0)) xr <- clip(i$xrange, x$xrange) yr <- clip(i$yrange, x$yrange) colsub <- inrange(out$xcol, xr) rowsub <- inrange(out$yrow, yr) ncolsub <- sum(colsub) nrowsub <- sum(rowsub) if(ncolsub == 0 || nrowsub == 0) return(numeric(0)) marg <- list(mat=out$v[rowsub, colsub, drop=FALSE], unitname=unitname(x)) xarg <- if(ncolsub > 1) list(xcol = out$xcol[colsub]) else list(xrange=xr) yarg <- if(nrowsub > 1) list(yrow = out$yrow[rowsub]) else list(yrange=yr) result <- do.call("im", c(marg, xarg, yarg)) return(result) } } if(verifyclass(i, "im", fatal=FALSE)) { if(jtype == "given") warning("Argument j ignored") # logical images OK if(i$type == "logical") { # convert to window w <- as.owin(eval.im(ifelse1NA(i))) return(x[w, drop=drop, ..., raster=raster]) } else stop("Subset argument \'i\' is an image, but not of logical type") } if(is.ppp(i)) { # 'i' is a point pattern if(jtype == "given") warning("Argument j ignored") # Look up the greyscale values for the points of the pattern values 
<- lookup.im(x, i$x, i$y, naok=TRUE) if(drop) values <- values[!is.na(values)] if(length(values) == 0) # ensure the zero-length vector is of the right type values <- switch(x$type, factor={ factor(, levels=levels(x)) }, integer = { integer(0) }, logical = { logical(0) }, real = { numeric(0) }, complex = { complex(0) }, character = { character(0) }, { values } ) return(values) } } # ............... not a spatial index ............................. # Try indexing as a matrix # Construct a matrix index call for possible re-use M <- as.matrix(x) ycall <- switch(itype, given = { switch(jtype, given = quote(M[i, j, drop=FALSE]), blank = quote(M[i, , drop=FALSE]), missing = quote(M[i, drop=FALSE])) }, blank = { switch(jtype, given = quote(M[ , j, drop=FALSE]), blank = quote(M[ , , drop=FALSE]), missing = quote(M[ , drop=FALSE])) }, missing = { switch(jtype, given = quote(M[j=j, drop=FALSE]), blank = quote(M[j= , drop=FALSE]), missing = quote(M[ drop=FALSE])) }) # try it y <- try(eval(as.call(ycall)), silent=TRUE) if(!inherits(y, "try-error")) { # valid subset index for a matrix if(rescue) { # check whether it's a rectangular block, in correct order RR <- row(x$v) CC <- col(x$v) rcall <- ycall rcall[[2]] <- quote(RR) ccall <- ycall ccall[[2]] <- quote(CC) rr <- eval(as.call(rcall)) cc <- eval(as.call(ccall)) rseq <- sort(unique(as.vector(rr))) cseq <- sort(unique(as.vector(cc))) if(all(diff(rseq) == 1) && all(diff(cseq) == 1) && (length(rr) == length(rseq) * length(cseq)) && all(rr == RR[rseq, cseq]) && all(cc == CC[rseq,cseq])) { # yes - make image dim(y) <- c(length(rseq), length(cseq)) Y <- x Y$v <- y Y$dim <- dim(y) Y$xcol <- x$xcol[cseq] Y$yrow <- x$yrow[rseq] Y$xrange <- range(Y$xcol) + c(-1,1) * x$xstep/2 Y$yrange <- range(Y$yrow) + c(-1,1) * x$ystep/2 return(Y) } } # return pixel values (possibly as matrix) return(y) } # Last chance! 
if(itype == "given" && !is.matrix(i) && !is.null(ip <- as.ppp(i, W=as.owin(x), fatal=FALSE, check=FALSE))) { # 'i' is convertible to a point pattern # Look up the greyscale values for the points of the pattern values <- lookup.im(x, ip$x, ip$y, naok=TRUE) if(drop) values <- values[!is.na(values)] if(length(values) == 0) # ensure the zero-length vector is of the right type values <- switch(x$type, factor={ factor(, levels=levels(x)) }, integer = { integer(0) }, logical = { logical(0) }, real = { numeric(0) }, complex = { complex(0) }, character = { character(0) }, { values } ) return(values) } stop("The subset operation is undefined for this type of index") } "[<-.im" <- function(x, i, j, value) { # detect 'blank' arguments like second argument of x[i, ] ngiven <- length(sys.call()) nmatched <- length(match.call()) nblank <- ngiven - nmatched itype <- if(missing(i)) "missing" else "given" jtype <- if(missing(j)) "missing" else "given" if(nblank == 1) { if(!missing(i)) jtype <- "blank" if(!missing(j)) itype <- "blank" } else if(nblank == 2) { itype <- jtype <- "blank" } X <- x W <- as.owin(X) if(is.im(value)) { value <- value$v } stopifnot(is.vector(value) || is.matrix(value) || is.factor(value)) if(itype == "missing" && jtype == "missing") { # no index provided # set all pixels to 'value' v <- X$v v[!is.na(v)] <- value X$v <- v return(X) } if(itype == "given") { # ..................... Try a spatial index .................... 
if(verifyclass(i, "owin", fatal=FALSE)) { if(jtype == "given") warning("Index j ignored") # 'i' is a window if(is.empty(i)) return(X) xx <- as.vector(raster.x(W)) yy <- as.vector(raster.y(W)) ok <- inside.owin(xx, yy, i) X$v[ok] <- value return(X) } if(verifyclass(i, "im", fatal=FALSE) && i$type == "logical") { if(jtype == "given") warning("Index j ignored") # convert logical vector to window where entries are TRUE i <- as.owin(eval.im(ifelse1NA(i))) # continue as above xx <- as.vector(raster.x(W)) yy <- as.vector(raster.y(W)) ok <- inside.owin(xx, yy, i) X$v[ok] <- value return(X) } if(is.ppp(i)) { # 'i' is a point pattern if(jtype == "given") warning("Index j ignored") nv <- length(value) np <- npoints(i) if(nv != np && nv != 1) stop("Length of replacement value != number of point locations") # test whether all points are inside window FRAME ok <- inside.owin(i$x, i$y, as.rectangle(W)) if(any(!ok)) { warning("Some points are outside the outer frame of the image") if(nv == np) value <- value[ok] i <- i[ok] } if(npoints(i) > 0) { # determine row & column positions for each point loc <- nearest.pixel(i$x, i$y, X) # set values X$v[cbind(loc$row, loc$col)] <- value } return(X) } } # .................. 'i' is not a spatial index .................... # Construct a matrix replacement call ycall <- switch(itype, given = { switch(jtype, given = quote(X$v[i, j] <- value), blank = quote(X$v[i, ] <- value), missing = quote(X$v[i] <- value)) }, blank = { switch(jtype, given = quote(X$v[ , j] <- value), blank = quote(X$v[ , ] <- value), missing = quote(X$v[ ] <- value)) }, missing = { switch(jtype, given = quote(X$v[j=j] <- value), blank = quote(X$v[j= ] <- value), missing = quote(X$v[] <- value)) }) # try it litmus <- try(eval(as.call(ycall)), silent=TRUE) if(!inherits(litmus, "try-error")) return(X) # Last chance! 
if(itype == "given" && !is.matrix(i) && !is.null(ip <- as.ppp(i, W=W, fatal=FALSE, check=TRUE))) { # 'i' is convertible to a point pattern if(jtype == "given") warning("Index j ignored") nv <- length(value) np <- npoints(ip) if(nv != np && nv != 1) stop("Length of replacement value != number of point locations") # test whether all points are inside window FRAME ok <- inside.owin(ip$x, ip$y, as.rectangle(W)) if(any(!ok)) { warning("Some points are outside the outer frame of the image") if(nv == np) value <- value[ok] ip <- ip[ok] } if(npoints(ip) > 0) { # determine row & column positions for each point loc <- nearest.pixel(ip$x, ip$y, X) # set values X$v[cbind(loc$row, loc$col)] <- value } return(X) } stop("The subset operation is undefined for this type of index") } ################################################################ ######## other tools ################################################################ # # This function is similar to nearest.raster.point except for # the third argument 'im' and the different idiom for calculating # row & column - which could be used in nearest.raster.point() nearest.pixel <- function(x,y,im) { verifyclass(im, "im") if(length(x) > 0) { nr <- im$dim[1] nc <- im$dim[2] cc <- round(1 + (x - im$xcol[1])/im$xstep) rr <- round(1 + (y - im$yrow[1])/im$ystep) cc <- pmax.int(1,pmin.int(cc, nc)) rr <- pmax.int(1,pmin.int(rr, nr)) } else cc <- rr <- integer(0) return(list(row=rr, col=cc)) } # Explores the 3 x 3 neighbourhood of nearest.pixel # and finds the nearest pixel that is not NA nearest.valid.pixel <- function(x,y,im) { rc <- nearest.pixel(x,y,im) rr <- rc$row cc <- rc$col # check whether any pixels are outside image domain outside <- is.na(im$v) miss <- outside[cbind(rr, cc)] if(!any(miss)) return(rc) # for offending pixels, explore 3 x 3 neighbourhood nr <- im$dim[1] nc <- im$dim[2] xcol <- im$xcol yrow <- im$yrow for(i in which(miss)) { rows <- rr[i] + c(-1,0,1) cols <- cc[i] + c(-1,0,1) rows <- unique(pmax.int(1, 
pmin.int(rows, nr))) cols <- unique(pmax.int(1, pmin.int(cols, nc))) rcp <- expand.grid(row=rows, col=cols) ok <- !outside[as.matrix(rcp)] if(any(ok)) { # At least one of the neighbours is valid # Find the closest one rcp <- rcp[ok,] dsq <- with(rcp, (x[i] - xcol[col])^2 + (y[i] - yrow[row])^2) j <- which.min(dsq) rc$row[i] <- rcp$row[j] rc$col[i] <- rcp$col[j] } } return(rc) } # This function is a generalisation of inside.owin() # to images other than binary-valued images. lookup.im <- function(Z, x, y, naok=FALSE, strict=TRUE) { verifyclass(Z, "im") if(Z$type == "factor") Z <- repair.old.factor.image(Z) if(length(x) != length(y)) stop("x and y must be numeric vectors of equal length") # initialise answer to NA if(Z$type != "factor") { niets <- NA mode(niets) <- mode(Z$v) } else { niets <- factor(NA, levels=levels(Z)) } value <- rep.int(niets, length(x)) # test whether inside bounding rectangle xr <- Z$xrange yr <- Z$yrange eps <- sqrt(.Machine$double.eps) frameok <- (x >= xr[1] - eps) & (x <= xr[2] + eps) & (y >= yr[1] - eps) & (y <= yr[2] + eps) if(!any(frameok)) { # all points OUTSIDE range - no further work needed if(!naok) warning("Internal error: all values NA") return(value) # all NA } # consider only those points which are inside the frame xf <- x[frameok] yf <- y[frameok] # map locations to raster (row,col) coordinates if(strict) loc <- nearest.pixel(xf,yf,Z) else loc <- nearest.valid.pixel(xf,yf,Z) # look up image values vf <- Z$v[cbind(loc$row, loc$col)] # insert into answer value[frameok] <- vf if(!naok && any(is.na(value))) warning("Internal error: NA's generated") return(value) } rasterx.im <- function(x) { verifyclass(x, "im") v <- x$v xx <- x$xcol matrix(xx[col(v)], ncol=ncol(v), nrow=nrow(v)) } rastery.im <- function(x) { verifyclass(x, "im") v <- x$v yy <- x$yrow matrix(yy[row(v)], ncol=ncol(v), nrow=nrow(v)) } rasterxy.im <- function(x, drop=FALSE) { verifyclass(x, "im") v <- x$v xx <- x$xcol yy <- x$yrow if(!drop) { ans <- 
cbind(as.vector(xx[col(v)]), as.vector(yy[row(v)])) } else { ok <- !is.null(x$v) ans <- cbind(as.vector(xx[col(v)[ok]]), as.vector(yy[row(v)[ok]])) } colnames(ans) <- c("x", "y") return(ans) } ############## # methods for other functions xtfrm.im <- function(x) { as.numeric(as.matrix.im(x)) } as.matrix.im <- function(x, ...) { return(x$v) } as.array.im <- function(x, ...) { m <- as.matrix(x) a <- do.call(array, resolve.defaults(list(m), list(...), list(dim=c(dim(m), 1)))) return(a) } as.data.frame.im <- function(x, ...) { verifyclass(x, "im") v <- x$v xx <- x$xcol[col(v)] yy <- x$yrow[row(v)] ok <- !is.na(v) xx <- as.vector(xx[ok]) yy <- as.vector(yy[ok]) # extract pixel values without losing factor info vv <- v[ok] dim(vv) <- NULL # data.frame(x=xx, y=yy, value=vv, ...) } mean.im <- function(x, ...) { verifyclass(x, "im") xvalues <- x[drop=TRUE] return(mean(xvalues)) } sum.im <- function(x, ...) { verifyclass(x, "im") xvalues <- x[drop=TRUE] return(sum(xvalues, ...)) } median.im <- function(x, ...) { verifyclass(x, "im") xvalues <- x[drop=TRUE] return(median(xvalues, ...)) } range.im <- function(x, ...) { verifyclass(x, "im") xvalues <- x[drop=TRUE] return(range(xvalues, ...)) } max.im <- function(x, ...) { verifyclass(x, "im") xvalues <- x[drop=TRUE] return(max(xvalues, ...)) } min.im <- function(x, ...) 
{ verifyclass(x, "im") xvalues <- x[drop=TRUE] return(min(xvalues, ...)) } hist.im <- function(x, ..., probability=FALSE) { xname <- short.deparse(substitute(x)) verifyclass(x, "im") main <- paste("Histogram of", xname) # default plot arguments # extract pixel values values <- as.matrix(x) dim(values) <- NULL # barplot or histogram if(x$type %in% c("logical", "factor")) { # barplot tab <- table(values) probs <- tab/sum(tab) if(probability) { heights <- probs ylab <- "Probability" } else { heights <- tab ylab <- "Number of pixels" } mids <- do.call("barplot", resolve.defaults(list(heights), list(...), list(xlab=paste("Pixel value"), ylab=ylab, main=main))) out <- list(counts=tab, probs=probs, heights=heights, mids=mids, xname=xname) class(out) <- "barplotdata" } else { # histogram values <- values[!is.na(values)] plotit <- resolve.defaults(list(...), list(plot=TRUE))$plot if(plotit) { ylab <- if(probability) "Probability density" else "Number of pixels" out <- do.call("hist.default", resolve.defaults(list(values), list(...), list(probability=probability), list(xlab=paste("Pixel value"), ylab=ylab, main=main))) out$xname <- xname } else { # plot.default whinges if `probability' given when plot=FALSE out <- do.call("hist.default", resolve.defaults(list(values), list(...))) # hack! out$xname <- xname } } return(invisible(out)) } plot.barplotdata <- function(x, ...) { do.call("barplot", resolve.defaults(list(height=x$heights), list(...), list(main=paste("Histogram of ", x$xname)))) } cut.im <- function(x, ...) { verifyclass(x, "im") vcut <- cut(as.numeric(as.matrix(x)), ...) return(im(vcut, xcol=x$xcol, yrow=x$yrow, unitname=unitname(x))) } quantile.im <- function(x, ...) { verifyclass(x, "im") q <- do.call("quantile", resolve.defaults(list(as.numeric(as.matrix(x))), list(...), list(na.rm=TRUE))) return(q) } integral.im <- function(x, ...) 
{ verifyclass(x, "im") typ <- x$type if(!any(typ == c("integer", "real", "complex", "logical"))) stop(paste("Don't know how to integrate an image of type", sQuote(typ))) a <- with(x, sum(v, na.rm=TRUE) * xstep * ystep) return(a) } conform.imagelist <- function(X, Zlist) { # determine points of X where all images in Zlist are defined ok <- rep.int(TRUE, length(X$x)) for(i in seq_along(Zlist)) { Zi <- Zlist[[i]] ZiX <- Zi[X, drop=FALSE] ok <- ok & !is.na(ZiX) } return(ok) } split.im <- function(x, f, ..., drop=FALSE) { stopifnot(is.im(x)) if(inherits(f, "tess")) subsets <- tiles(f) else if(is.im(f)) { if(f$type != "factor") f <- eval.im(factor(f)) subsets <- tiles(tess(image=f)) } else stop("f should be a tessellation or a factor-valued image") if(!is.subset.owin(as.owin(x), as.owin(f))) stop("f does not cover the window of x") n <- length(subsets) out <- vector(mode="list", length=n) names(out) <- names(subsets) for(i in 1:n) out[[i]] <- x[subsets[[i]], drop=drop] if(drop) return(out) else return(as.listof(out)) } by.im <- function(data, INDICES, FUN, ...) { stopifnot(is.im(data)) V <- split(data, INDICES) U <- lapply(V, FUN, ...) 
return(as.listof(U))
}

# rebound.im:
#   Embed the image 'x' in a larger rectangle 'rect', padding with NA.
#   'rect' must contain the frame of 'x' (checked below); the exact new
#   x/y ranges are chosen as whole multiples of the pixel steps so that
#   the original pixel centre locations are preserved.
rebound.im <- function(x, rect) {
  stopifnot(is.im(x))
  stopifnot(is.owin(rect))
  rect <- as.rectangle(rect)
  stopifnot(is.subset.owin(as.rectangle(x), rect))
  # compute number of extra rows/columns
  dx <- x$xstep
  nleft  <- max(0, floor((x$xrange[1]-rect$xrange[1])/dx))
  nright <- max(0, floor((rect$xrange[2]-x$xrange[2])/dx))
  dy <- x$ystep
  nbot <- max(0, floor((x$yrange[1]-rect$yrange[1])/dy))
  ntop <- max(0, floor((rect$yrange[2]-x$yrange[2])/dy))
  # determine exact x and y ranges (to preserve original pixel locations)
  xrange.new <- x$xrange + c(-nleft, nright) * dx
  yrange.new <- x$yrange + c(-nbot, ntop) * dy
  # expand pixel data matrix
  nr <- x$dim[1]
  nc <- x$dim[2]
  nrnew <- nbot + nr + ntop
  ncnew <- nleft + nc + nright
  # NA constant of the same storage mode as the pixel values
  # (note: 'factor=,' falls through to NA_integer_, the factor code type)
  naval <- switch(x$type,
                  factor=,
                  integer=NA_integer_,
                  real=NA_real_,
                  character=NA_character_,
                  complex=NA_complex_,
                  NA)
  vnew <- matrix(naval, nrnew, ncnew)
  if(x$type != "factor") {
    vnew[nbot + (1:nr), nleft + (1:nc)] <- x$v
  } else {
    # factor images: copy the integer codes, rebuild the factor with the
    # original levels, then restore the matrix dimensions (factor() drops dim)
    vnew[nbot + (1:nr), nleft + (1:nc)] <- as.integer(x$v)
    vnew <- factor(vnew, labels=levels(x))
    dim(vnew) <- c(nrnew, ncnew)
  }
  # build new image object
  xnew <- im(vnew,
             xrange = xrange.new,
             yrange = yrange.new,
             unitname = unitname(x))
  return(xnew)
}

# sort.im: method for sort() — sort the pixel values of an image
# into a plain numeric vector (NA handling is delegated to sort(), via ...)
sort.im <- function(x, ...) {
  verifyclass(x, "im")
  sort(as.vector(as.matrix(x)), ...)
}

# dim.im: method for dim() — matrix dimensions (nrow, ncol) of the pixel array
dim.im <- function(x) { x$dim }

# colour images

# rgbim: build a colour-valued image from R, G, B channel images.
# rgbNA is a spatstat internal defined elsewhere — presumably an
# NA-tolerant analogue of rgb(); TODO confirm at its definition.
rgbim <- function(R, G, B, maxColorValue=255) {
  eval.im(factor(rgbNA(as.vector(R), as.vector(G), as.vector(B),
                       maxColorValue=maxColorValue)))
}

# hsvim: build a colour-valued image from H, S, V channel images.
# hsvNA is a spatstat internal — presumably an NA-tolerant hsv(); confirm there.
hsvim <- function(H, S, V) {
  eval.im(factor(hsvNA(as.vector(H), as.vector(S), as.vector(V))))
}

# scaletointerval: generic — rescale values linearly onto [from, to]
scaletointerval <- function(x, from=0, to=1) {
  UseMethod("scaletointerval")
}

# default method: linear map taking range(x) onto [from, to], ignoring NAs
scaletointerval.default <- function(x, from=0, to=1) {
  rr <- range(x, na.rm=TRUE)
  b <- (to - from)/diff(rr)
  y <- from + b * (x - rr[1])
  return(y)
}

# method for images: rescale the pixel matrix, keep the same grid and units
scaletointerval.im <- function(x, from=0, to=1) {
  v <- scaletointerval(x$v, from, to)
  y <- im(v, x$xcol, x$yrow, x$xrange, x$yrange, unitname(x))
  return(y)
}

# zapsmall.im: apply zapsmall() to pixel values; the missing() test keeps
# zapsmall's own default for 'digits' when the caller does not supply one
zapsmall.im <- function(x, digits) {
  if(missing(digits))
    return(eval.im(zapsmall(x)))
  return(eval.im(zapsmall(x, digits=digits)))
}
spatstat/R/ripras.R0000755000176000001440000000267312237642727014010 0ustar ripleyusers#
# ripras.S   Ripley-Rasson estimator of domain
#
#
# $Revision: 1.13 $ $Date: 2012/07/07 09:20:45 $
#
#
#
#
#-------------------------------------

# bounding.box.xy: smallest axis-aligned rectangle (owin) containing the
# points; returns NULL for an empty coordinate set
bounding.box.xy <- function(x, y=NULL) {
  xy <- xy.coords(x,y)
  if(length(xy$x) == 0)
    return(NULL)
  owin(range(xy$x), range(xy$y), check=FALSE)
}

# convexhull.xy: convex hull of the points as a polygonal window.
# Returns NULL when fewer than 3 points (no polygon possible).
convexhull.xy <- function(x, y=NULL) {
  xy <- xy.coords(x, y)
  x <- xy$x
  y <- xy$y
  if(length(x) < 3)
    return(NULL)
  h <- rev(chull(x, y)) # must be anticlockwise
  if(length(h) < 3)
    return(NULL)
  w <- owin(poly=list(x=x[h], y=y[h]), check=FALSE)
  return(w)
}

# ripras: Ripley-Rasson estimator of the spatial domain of a point pattern —
# the convex hull (or bounding rectangle), inflated about its centroid by an
# expansion factor f.  If f is not given, it is estimated from the sample
# size n (and, for the convex case, the number of hull vertices m).
ripras <- function(x, y=NULL, shape="convex", f) {
  xy <- xy.coords(x, y)
  n <- length(xy$x)
  w <- switch(shape,
              convex = convexhull.xy(xy),
              rectangle = bounding.box.xy(xy),
              stop(paste("Unrecognised option: shape=", dQuote(shape))))
  if(is.null(w))
    return(NULL)
  # expansion factor
  if(!missing(f))
    stopifnot(is.numeric(f) && length(f) == 1 && f >= 1)
  else switch(shape,
              convex = {
                # number of vertices
                m <- summary(w)$nvertices
                # 1/sqrt(1 - m/n); capped at 2 when m >= n
                f <- if(m < n) 1/sqrt(1 - m/n) else 2
              },
              rectangle = {
                f <- (n+1)/(n-1)
              })
  # centroid
  ce <- unlist(centroid.owin(w))
  # shift centroid to
origin W <- shift(w, -ce) # rescale W <- affine(W, mat=diag(c(f,f))) # shift origin to centroid W <- shift(W, ce) return(W) } spatstat/R/leverage.R0000755000176000001440000002624112237642727014307 0ustar ripleyusers# # leverage.R # # leverage and influence # # $Revision: 1.32 $ $Date: 2013/09/25 06:00:07 $ # leverage <- function(model, ...) { UseMethod("leverage") } leverage.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=list()) { fitname <- short.deparse(substitute(model)) u <- list(fit=model, fitname=fitname) s <- ppm.influence(model, what="leverage", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ...) a <- append(u, s) class(a) <- "leverage.ppm" return(a) } influence.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=list()) { fitname <- short.deparse(substitute(model)) u <- list(fit=model,fitname=fitname) s <- ppm.influence(model, what="influence", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ...) a <- append(u, s) class(a) <- "influence.ppm" return(a) } dfbetas.ppm <- function(model, ..., drop=FALSE, iScore=NULL, iHessian=NULL, iArgs=list()) { fitname <- short.deparse(substitute(model)) u <- list(fit=model,fitname=fitname) s <- ppm.influence(model, what="dfbetas", drop=drop, iScore=iScore, iHessian=iHessian, iArgs=iArgs, ...) 
a <- s$dfbetas attr(a, "info") <- u return(a) } ppm.influence <- function(fit, what=c("leverage", "influence", "dfbetas", "derivatives", "increments"), ..., iScore=NULL, iHessian=NULL, iArgs=list(), drop=FALSE, method=c("C", "interpreted"), precomputed=list()) { stopifnot(is.ppm(fit)) what <- match.arg(what, several.ok=TRUE) method <- match.arg(method) gotScore <- !is.null(iScore) gotHess <- !is.null(iHessian) needHess <- gotScore && any(what %in% c("leverage", "influence", "dfbetas")) if(!gotHess && needHess) stop("Must supply iHessian") # if(fit$method == "logi" && !spatstat.options("allow.logi.influence")) stop("ppm influence measures are not yet implemented for method=logi") # # extract precomputed values if given theta <- precomputed$coef %orifnull% coef(fit) lam <- precomputed$lambda %orifnull% fitted(fit, check=FALSE) mom <- precomputed$mom %orifnull% model.matrix(fit) # p <- length(theta) vc <- vcov(fit, hessian=TRUE) fush <- hess <- solve(vc) Q <- quad.ppm(fit) # hess = negative hessian of log (pseudo) likelihood # fush = E(hess) # invhess = solve(hess) # vc = solve(fush) # w <- w.quad(Q) loc <- union.quad(Q) isdata <- is.data(Q) # if(length(w) != length(lam)) stop(paste("Internal error: length(w) = ", length(w), "!=", length(lam), "= length(lam)\n")) # # second order interaction terms # ddS[i,j, ] = Delta_i Delta_j S(x) ddS <- NULL if(!all(what == "derivatives") && !is.poisson(fit)) { ddS <- deltasuffstat(fit, dataonly=FALSE) if(is.null(ddS)) warning("Second order interaction terms are not implemented for this model; they are treated as zero") } # # if(!is.null(iScore)) { # evaluate additional (`irregular') components of score iscoredf <- mpl.get.covariates(iScore, loc, covfunargs=iArgs) iscoremat <- as.matrix(iscoredf) # count regular and irregular parameters nreg <- ncol(mom) nirr <- ncol(iscoremat) # add extra columns to model matrix mom <- cbind(mom, iscoremat) # add extra planes of zeroes to second-order model matrix # (zero because the irregular 
components are part of the trend) if(!is.null(ddS)) { paddim <- c(dim(ddS)[1:2], nirr) ddS <- abind(ddS, array(0, dim=paddim), along=3) } # evaluate additional (`irregular') entries of Hessian if(gotHess) { ihessdf <- mpl.get.covariates(iHessian, loc, covfunargs=iArgs) ihessmat <- as.matrix(ihessdf) } # recompute negative Hessian of log PL and its mean fush <- hessextra <- matrix(0, ncol(mom), ncol(mom)) sub <- nreg + 1:nirr # integral over domain switch(method, interpreted = { for(i in seq(loc$n)) { # weight for integrand wti <- lam[i] * w[i] if(all(is.finite(wti))) { # integral of outer product of score momi <- mom[i, ] v1 <- outer(momi, momi, "*") * wti if(all(is.finite(v1))) fush <- fush + v1 # integral of Hessian # contributions nonzero for irregular parameters if(gotHess) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) * wti if(all(is.finite(v2))) hessextra[sub, sub] <- hessextra[sub, sub] + v2 } } } # subtract sum over data points if(gotHess) { for(i in which(isdata)) { v2 <- matrix(as.numeric(ihessmat[i,]), nirr, nirr) if(all(is.finite(v2))) hessextra[sub, sub] <- hessextra[sub, sub] - v2 } hess <- fush + hessextra invhess <- solve(hess) } else { invhess <- hess <- NULL } }, C = { wlam <- lam * w fush <- sumouter(mom, wlam) if(gotHess) { # integral term ok <- is.finite(wlam) & apply(is.finite(ihessmat), 1, all) vintegral <- if(all(ok)) wlam %*% ihessmat else wlam[ok] %*% ihessmat[ok,, drop=FALSE] # sum over data points vdata <- colSums(ihessmat[isdata, , drop=FALSE], na.rm=TRUE) vcontrib <- vintegral - vdata hessextra[sub, sub] <- hessextra[sub, sub] + matrix(vcontrib, nirr, nirr) hess <- fush + hessextra invhess <- solve(hess) } else { invhess <- hess <- NULL } }) vc <- solve(fush) } if(!needHess) { hess <- fush invhess <- vc } # if(drop) { ok <- complete.cases(mom) Q <- Q[ok] mom <- mom[ok, , drop=FALSE] loc <- loc[ok] lam <- lam[ok] w <- w[ok] isdata <- isdata[ok] if(!is.null(ddS)) ddS <- ddS[ok, ok, , drop=FALSE] } # ........ 
start assembling results ..................... # result <- list() # if("derivatives" %in% what) { rawresid <- isdata - lam * w score <- matrix(rawresid, nrow=1) %*% mom result$deriv <- list(mom=mom, score=score, fush=fush, vc=vc, hess=hess, invhess=invhess) } if(all(what == "derivatives")) return(result) # compute effect of adding/deleting each quadrature point # columns index the point being added/deleted # rows index the points affected eff <- mom if(!is.poisson(fit) && !is.null(ddS)) { # effect of addition/deletion of U[j] on score contribution from data points ddSX <- ddS[isdata, , , drop=FALSE] eff.data <- apply(ddSX, c(2,3), sum) # model matrix after addition/deletion of each U[j] # mombefore[i,j,] <- mom[i,] di <- dim(ddS) mombefore <- array(apply(mom, 2, rep, times=di[2]), dim=di) changesign <- ifelse(isdata, -1, 1) momchange <- ddS momchange[ , isdata, ] <- - momchange[, isdata, ] momafter <- mombefore + momchange # effect of addition/deletion of U[j] on lambda(U[i], X) lamratio <- exp(tensor(momchange, theta, 3, 1)) lamratio <- array(lamratio, dim=dim(momafter)) # integrate ddSintegrand <- lam * (momafter * lamratio - mombefore) eff.back <- changesign * tensor(ddSintegrand, w, 1, 1) # total eff <- eff + eff.data - eff.back } else ddSintegrand <- NULL # if("increments" %in% what) { result$increm <- list(ddS=ddS, ddSintegrand=ddSintegrand, isdata=isdata, wQ=w) } if(!any(c("leverage", "influence", "dfbetas") %in% what)) return(result) # ............ compute leverage, influence, dfbetas .............. # compute basic contribution from each quadrature point nloc <- npoints(loc) switch(method, interpreted = { b <- numeric(nloc) for(i in seq(nloc)) { effi <- eff[i,, drop=FALSE] momi <- mom[i,, drop=FALSE] b[i] <- momi %*% invhess %*% t(effi) } }, C = { b <- bilinearform(mom, invhess, eff) }) # .......... leverage ............. 
if("leverage" %in% what) { # values of leverage (diagonal) at points of 'loc' h <- b * lam levval <- loc %mark% h levsmo <- Smooth(levval, sigma=max(nndist(loc))) # nominal mean level a <- area.owin(loc$window) levmean <- p/a lev <- list(val=levval, smo=levsmo, ave=levmean) result$lev <- lev } # .......... influence ............. if("influence" %in% what) { # values of influence at data points X <- loc[isdata] M <- (1/p) * b[isdata] V <- X %mark% M result$infl <- V } # .......... dfbetas ............. if("dfbetas" %in% what) { vex <- invhess %*% t(eff) switch(method, interpreted = { dis <- con <- matrix(0, nloc, ncol(mom)) for(i in seq(nloc)) { vexi <- vex[,i, drop=FALSE] dis[i, ] <- isdata[i] * vexi con[i, ] <- - lam[i] * vexi } }, C = { tvex <- t(vex) dis <- isdata * tvex con <- - lam * tvex }) colnames(dis) <- colnames(con) <- colnames(mom) result$dfbetas <- msr(Q, dis[isdata, ], con) } return(result) } plot.leverage.ppm <- function(x, ..., showcut=TRUE) { fitname <- x$fitname defaultmain <- paste("Leverage for", fitname) y <- x$lev do.call("plot.im", resolve.defaults(list(y$smo), list(...), list(main=defaultmain))) if(showcut) contour(y$smo, levels=y$ave, add=TRUE, drawlabels=FALSE) invisible(NULL) } plot.influence.ppm <- function(x, ...) { fitname <- x$fitname defaultmain <- paste("Influence for", fitname) do.call("plot.ppp", resolve.defaults(list(x$infl), list(...), list(main=defaultmain))) } as.im.leverage.ppm <- function(X, ...) { return(X$lev$smo) } as.ppp.influence.ppm <- function(X, ...) { return(X$infl) } print.leverage.ppm <- function(x, ...) { cat("Point process leverage function\n") fitname <- x$fitname cat(paste("for model:", fitname, "\n")) lev <- x$lev cat("\nExact values:\n") print(lev$val) cat("\nSmoothed values:\n") print(lev$smo) if(is.poisson(x$fit)) cat(paste("\nAverage value:", lev$ave, "\n")) return(invisible(NULL)) } print.influence.ppm <- function(x, ...) 
{ cat("Point process influence measure\n") fitname <- x$fitname cat(paste("for model:", fitname, "\n")) cat("\nExact values:\n") print(x$infl) return(invisible(NULL)) } spatstat/R/rmhcontrol.R0000755000176000001440000001611612237642727014704 0ustar ripleyusers# # # rmhcontrol.R # # $Revision: 1.24 $ $Date: 2012/08/14 10:23:55 $ # # rmhcontrol <- function(...) { UseMethod("rmhcontrol") } rmhcontrol.rmhcontrol <- function(...) { argz <- list(...) if(length(argz) == 1) return(argz[[1]]) stop("Arguments not understood") } rmhcontrol.list <- function(...) { argz <- list(...) nama <- names(argz) if(length(argz) == 1 && !any(nzchar(nama))) do.call("rmhcontrol.default", argz[[1]]) else do.call.matched("rmhcontrol.default", argz) } rmhcontrol.default <- function(..., p=0.9, q=0.5, nrep=5e5, expand=NULL, periodic=NULL, ptypes=NULL, x.cond=NULL, fixall=FALSE, nverb=0, nsave=NULL, nburn=nsave, track=FALSE) { argh <- list(...) nargh <- length(argh) if(nargh > 0) { # allow rmhcontrol(NULL), otherwise flag an error if(!(nargh == 1 && is.null(argh[[1]]))) stop(paste("Unrecognised arguments to rmhcontrol;", "valid arguments are listed in help(rmhcontrol.default)")) } # impose default values if(missing(p)) p <- spatstat.options("rmh.p") if(missing(q)) q <- spatstat.options("rmh.q") if(missing(nrep)) nrep <- spatstat.options("rmh.nrep") # validate arguments if(!is.numeric(p) || length(p) != 1 || p < 0 || p > 1) stop("p should be a number in [0,1]") if(!is.numeric(q) || length(q) != 1 || q < 0 || q > 1) stop("q should be a number in [0,1]") if(!is.numeric(nrep) || length(nrep) != 1 || nrep < 1) stop("nrep should be an integer >= 1") nrep <- as.integer(nrep) if(!is.numeric(nverb) || length(nverb) != 1 || nverb < 0 || nverb > nrep) stop("nverb should be an integer <= nrep") nverb <- as.integer(nverb) if(!is.logical(fixall) || length(fixall) != 1) stop("fixall should be a logical value") if(!is.null(periodic) && (!is.logical(periodic) || length(periodic) != 1)) 
stop(paste(sQuote("periodic"), "should be a logical value or NULL")) if(saving <- !is.null(nsave)) { if(!is.numeric(nsave) || length(nsave) != 1 || nsave < 0 || nsave >= nrep) stop("nsave should be an integer < nrep") if(is.null(nburn)) nburn <- min(nsave, nrep-nsave) if(!is.null(nburn)) stopifnot(nburn + nsave <= nrep) } stopifnot(is.logical(track)) ################################################################# # Conditioning on point configuration # # condtype = "none": no conditioning # condtype = "Palm": conditioning on the presence of specified points # condtype = "window": conditioning on the configuration in a subwindow # if(is.null(x.cond)) { condtype <- "none" n.cond <- NULL } else if(is.ppp(x.cond)) { condtype <- "window" n.cond <- x.cond$n } else if(is.data.frame(x.cond)) { if(ncol(x.cond) %in% c(2,3)) { condtype <- "Palm" n.cond <- nrow(x.cond) } else stop("Wrong number of columns in data frame x.cond") } else if(is.list(x.cond)) { if(length(x.cond) %in% c(2,3)) { x.cond <- as.data.frame(x.cond) condtype <- "Palm" n.cond <- nrow(x.cond) } else stop("Wrong number of components in list x.cond") } else stop("Unrecognised format for x.cond") if(condtype == "Palm" && n.cond == 0) { warning(paste("Ignored empty configuration x.cond;", "conditional (Palm) simulation given an empty point pattern", "is equivalent to unconditional simulation")) condtype <- "none" x.cond <- NULL n.cond <- NULL } ################################################################# # Fixing the number of points? # # fixcode = 1 <--> no conditioning # fixcode = 2 <--> conditioning on n = number of points # fixcode = 3 <--> conditioning on the number of points of each type. fixcode <- 2 - (p<1) + fixall - fixall*(p<1) fixing <- switch(fixcode, "none", "n.total", "n.each.type") # Warn about silly combination if(fixall && p < 1) warning("fixall = TRUE conflicts with p < 1. 
Ignored.\n") ############################################################### # `expand' determines expansion of the simulation window expand <- rmhexpand(expand) # No expansion is permitted if we are conditioning on the # number of points if(fixing != "none") { if(expand$force.exp) stop(paste("When conditioning on the number of points,", "no expansion may be done.\n"), call.=FALSE) # no expansion expand <- .no.expansion } ################################################################### # return augmented list out <- list(p=p, q=q, nrep=nrep, nverb=nverb, expand=expand, periodic=periodic, ptypes=ptypes, fixall=fixall, fixcode=fixcode, fixing=fixing, condtype=condtype, x.cond=x.cond, saving=saving, nsave=nsave, nburn=nburn, track=track) class(out) <- c("rmhcontrol", class(out)) return(out) } print.rmhcontrol <- function(x, ...) { verifyclass(x, "rmhcontrol") cat("Metropolis-Hastings algorithm control parameters\n") cat(paste("Probability of shift proposal: p =", x$p, "\n")) if(x$fixing == "none") { cat(paste("Conditional probability of death proposal: q =", x$q, "\n")) if(!is.null(x$ptypes)) { cat("Birth proposal probabilities for each type of point:\n") print(x$ptypes) } } switch(x$fixing, none={}, n.total=cat("The total number of points is fixed\n"), n.each.type=cat("The number of points of each type is fixed\n")) switch(x$condtype, none={}, window={ cat(paste("Conditional simulation given the", "configuration in a subwindow\n")) print(x$x.cond$window) }, Palm={ cat("Conditional simulation of Palm type\n") }) cat(paste("Number of M-H iterations: nrep =", x$nrep, "\n")) if(x$saving) cat(paste("Save point pattern every", x$nsave, "iterations", "after a burn-in of", x$nburn, "iterations\n")) cat(paste("Track proposal type and acceptance/rejection?", if(x$track) "yes" else "no", "\n")) if(x$nverb > 0) cat(paste("Progress report every nverb=", x$nverb, "iterations\n")) else cat("No progress reports (nverb = 0).\n") # invoke print.rmhexpand print(x$expand) 
cat("Periodic edge correction? ") if(is.null(x$periodic)) cat("Not yet determined.\n") else if(x$periodic) cat("Yes.\n") else cat("No.\n") # return(invisible(NULL)) } default.rmhcontrol <- function(model) { # set default for 'expand' return(rmhcontrol(expand=default.expand(model))) } update.rmhcontrol <- function(object, ...) { do.call.matched("rmhcontrol.default", resolve.defaults(list(...), as.list(object))) } rmhResolveControl <- function(control, model) { # adjust control information once the model is known stopifnot(inherits(control, "rmhcontrol")) # change *default* expansion rule to something appropriate for model # (applies only if expansion rule is undecided) control$expand <- change.default.expand(control$expand, default.expand(model)) return(control) } spatstat/R/distances.R0000755000176000001440000001465712237642727014502 0ustar ripleyusers# # distances.R # # $Revision: 1.44 $ $Date: 2013/11/03 05:20:13 $ # # # Interpoint distances between pairs # # pairdist <- function(X, ...) 
{ UseMethod("pairdist") }

# Method for point patterns (class "ppp").
# Computes the matrix of distances between all pairs of points in X.
# If periodic=TRUE, distances are measured on the torus obtained by
# identifying opposite edges of the (rectangular) window; this is only
# valid for rectangular windows, hence the check below.
# 'squared' and 'method' are passed through to pairdist.default.
pairdist.ppp <- function(X, ..., periodic=FALSE, method="C", squared=FALSE) {
  verifyclass(X, "ppp")
  if(!periodic)
    return(pairdist.default(X$x, X$y, method=method, squared=squared))
  # periodic case
  W <- X$window
  if(W$type != "rectangle")
    stop(paste("periodic edge correction can't be applied",
               "in a non-rectangular window"))
  # period = side lengths of the rectangle
  wide <- diff(W$xrange)
  high <- diff(W$yrange)
  return(pairdist.default(X$x, X$y, period=c(wide,high),
                          method=method, squared=squared))
}

# Default method: X, Y are coordinate data acceptable to xy.coords().
# Returns the n x n matrix of Euclidean (or squared Euclidean, if
# squared=TRUE) distances between the n points.
# If 'period' is given (length 1 or 2, positive), distances are computed
# on the torus with those periods.
# method="C" uses compiled code (fast); method="interpreted" is the
# pure-R reference implementation.
pairdist.default <-
  function(X, Y=NULL, ..., period=NULL, method="C", squared=FALSE)
{
  xy <- xy.coords(X,Y)[c("x","y")]
  x <- xy$x
  y <- xy$y
  n <- length(x)
  if(length(y) != n)
    stop("lengths of x and y do not match")
  # special cases: 0 points -> 0 x 0 matrix; 1 point -> 1 x 1 zero matrix
  if(n == 0)
    return(matrix(numeric(0), nrow=0, ncol=0))
  else if(n == 1)
    return(matrix(0,nrow=1,ncol=1))
  if((periodic<- !is.null(period))) {
    # validate the period: one common value or one per axis
    stopifnot(is.numeric(period))
    stopifnot(length(period) == 2 || length(period) == 1)
    stopifnot(all(period > 0))
    if(length(period) == 1) period <- rep.int(period, 2)
    wide <- period[1]
    high <- period[2]
  }
  switch(method,
         interpreted={
           xx <- matrix(rep.int(x, n), nrow = n)
           yy <- matrix(rep.int(y, n), nrow = n)
           if(!periodic) {
             d2 <- (xx - t(xx))^2 + (yy - t(yy))^2
           } else {
             # toroidal distance: take the minimum over the three candidate
             # displacements (direct, wrapped left/down, wrapped right/up)
             dx <- xx - t(xx)
             dy <- yy - t(yy)
             dx2 <- pmin.int(dx^2, (dx + wide)^2, (dx - wide)^2)
             dy2 <- pmin.int(dy^2, (dy + high)^2, (dy - high)^2)
             d2 <- dx2 + dy2
           }
           if(squared)
             dout <- d2
           else
             dout <- sqrt(d2)
         },
         C={
           # result buffer filled by the C routine
           d <- numeric( n * n)
           # DUP flag for .C, controlled by spatstat.options("dupC")
           DUP <- spatstat.options("dupC")
           if(!periodic) {
             z<- .C("Cpairdist",
                    n = as.integer(n),
                    x= as.double(x),
                    y= as.double(y),
                    squared=as.integer(squared),
                    d= as.double(d),
                    DUP=DUP)
           } else {
             z <- .C("CpairPdist",
                     n = as.integer(n),
                     x= as.double(x),
                     y= as.double(y),
                     xwidth=as.double(wide),
                     yheight=as.double(high),
                     squared = as.integer(squared),
                     d= as.double(d),
                     DUP=DUP)
           }
           # C code returns the matrix in a flat vector; reshape to n x n
           dout <- matrix(z$d, nrow=n, ncol=n)
         },
         stop(paste("Unrecognised method", sQuote(method)))
         )
  return(dout)
}

# Generic for distances between two different point patterns.
crossdist <- function(X, Y, ...)
{ UseMethod("crossdist") }

# Method for point patterns (class "ppp").
# Returns the matrix of distances from each point of X (rows) to each
# point of Y (columns).
#
# Arguments:
#   X, Y     - point patterns; Y is coerced with as.ppp().
#   periodic - if TRUE, measure distances on the torus obtained by
#              wrapping the (common, rectangular) window.
#   method   - "C" or "interpreted", passed to crossdist.default.
#   squared  - if TRUE, return squared distances.
#
# BUG FIX: the non-periodic branch previously called
# crossdist.default(..., method=method) without forwarding 'squared',
# so crossdist(X, Y, squared=TRUE) silently returned unsquared
# distances.  It now forwards squared=squared, consistent with
# pairdist.ppp above.
crossdist.ppp <- function(X, Y, ..., periodic=FALSE, method="C", squared=FALSE) {
  verifyclass(X, "ppp")
  Y <- as.ppp(Y)
  if(!periodic)
    return(crossdist.default(X$x, X$y, Y$x, Y$y,
                             method=method, squared=squared))
  # periodic case: requires both windows to be the same rectangle
  WX <- X$window
  WY <- Y$window
  if(WX$type != "rectangle" || WY$type != "rectangle")
    stop(paste("periodic edge correction can't be applied",
               "in non-rectangular windows"))
  if(!is.subset.owin(WX,WY) || !is.subset.owin(WY, WX))
    stop(paste("periodic edge correction is not implemented",
               "for the case when X and Y lie in different rectangles"))
  # period = side lengths of the common rectangle
  wide <- diff(WX$xrange)
  high <- diff(WX$yrange)
  return(crossdist.default(X$x, X$y, Y$x, Y$y, period=c(wide,high),
                           method=method, squared=squared))
}

# Default method: coordinates given as four numeric vectors.
# Returns matrix[i,j] = distance from (x1[i],y1[i]) to (x2[j],y2[j]).
crossdist.default <-
  function(X, Y, x2, y2, ..., period=NULL, method="C", squared=FALSE)
{
  x1 <- X
  y1 <- Y
  # returns matrix[i,j] = distance from (x1[i],y1[i]) to (x2[j],y2[j])
  if(length(x1) != length(y1))
    stop("lengths of x and y do not match")
  if(length(x2) != length(y2))
    stop("lengths of x2 and y2 do not match")
  n1 <- length(x1)
  n2 <- length(x2)
  if(n1 == 0 || n2 == 0)
    return(matrix(numeric(0), nrow=n1, ncol=n2))
  if((periodic<- !is.null(period))) {
    stopifnot(is.numeric(period))
    stopifnot(length(period) == 2 || length(period) == 1)
    stopifnot(all(period > 0))
    if(length(period) == 1) period <- rep.int(period, 2)
    wide <- period[1]
    high <- period[2]
  }
  switch(method,
         interpreted = {
           X1 <- matrix(rep.int(x1, n2), ncol = n2)
           Y1 <- matrix(rep.int(y1, n2), ncol = n2)
           X2 <- matrix(rep.int(x2, n1), ncol = n1)
           Y2 <- matrix(rep.int(y2, n1), ncol = n1)
           if(!periodic)
             d2 <- (X1 - t(X2))^2 + (Y1 - t(Y2))^2
           else {
             dx <- X1 - t(X2)
             dy <- Y1 - t(Y2)
             dx2 <- pmin.int(dx^2, (dx + wide)^2, (dx - wide)^2)
             dy2 <- pmin.int(dy^2, (dy + high)^2, (dy - high)^2)
             d2 <- dx2 + dy2
           }
           return(if(squared) d2 else sqrt(d2))
         },
         C = {
           DUP <- spatstat.options("dupC")
           if(!periodic) {
             z<- .C("Ccrossdist",
                    nfrom = as.integer(n1),
                    xfrom = as.double(x1),
                    yfrom = as.double(y1),
                    nto = as.integer(n2),
xto = as.double(x2), yto = as.double(y2), squared = as.integer(squared), d = as.double(matrix(0, nrow=n1, ncol=n2)), DUP=DUP) } else { z<- .C("CcrossPdist", nfrom = as.integer(n1), xfrom = as.double(x1), yfrom = as.double(y1), nto = as.integer(n2), xto = as.double(x2), yto = as.double(y2), xwidth = as.double(wide), yheight = as.double(high), squared = as.integer(squared), d = as.double(matrix(0, nrow=n1, ncol=n2)), DUP=DUP) } return(matrix(z$d, nrow=n1, ncol=n2)) }, stop(paste("Unrecognised method", method)) ) } spatstat/R/reduceformula.R0000644000176000001440000000634712237642730015346 0ustar ripleyusers# # reduceformula.R # # $Revision: 1.3 $ $Date: 2007/04/02 06:28:17 $ # # delete variable from formula # #...................................................... # reduceformula <- function(fmla, deletevar, verbose=FALSE) { # removes the variable `deletevar' from the formula `fmla' # returns a simplified formula, or NULL if it can't simplify. stopifnot(inherits(fmla, "formula")) stopifnot(is.character(deletevar) && length(deletevar) == 1) if(!(deletevar %in% all.vars(as.expression(fmla)))) { if(verbose) message(paste("The formula does not involve", dQuote(deletevar), "and is therefore unchanged")) return(fmla) } lhs <- if(length(fmla) < 3) NULL else fmla[[2]] # create terms object tt <- attributes(terms(fmla)) formula.has.intercept <- (tt$intercept == 1) # extract all variables appearing in the model vars <- as.list(tt$variables)[-1] nvars <- length(vars) varstrings <- sapply(vars, function(x) paste(as.expression(x))) # identify any offsets offs <- tt$offset v.is.offset <- if(!is.null(offs)) (1:nvars) %in% offs else rep(FALSE, nvars) # remove the response repo <- tt$response if(repo != 0) { vars <- vars[-repo] varstrings <- varstrings[-repo] v.is.offset <- v.is.offset[-repo] } # a term may be a variable name v.is.name <- sapply(vars, is.name) # a term may be an expression like sin(x), poly(x,y,degree=2) v.args <- lapply(vars, function(x) all.vars(as.expression(x))) 
v.has.delete <- sapply(v.args, function(x,d) { d %in% x }, d=deletevar) v.has.other <- sapply(v.args, function(x,d) { !all(x == d) }, d=deletevar) v.is.mixed <- v.has.delete & v.has.other # we can't handle mixed terms like sin(x-d), poly(x,d) # where d is to be deleted. Handling these would require # knowledge about the functions sin and poly. if(any(v.is.mixed)) { nmixed <- sum(v.is.mixed) if(verbose) message(paste("Don't know how to reduce the", ngettext(nmixed, "term", "terms"), paste(dQuote(varstrings[v.is.mixed]), collapse=","))) return(NULL) } # OK. We have identified all first order terms to be deleted. condemned.names <- varstrings[v.has.delete] # Determine the terms of all orders that include these first order terms # (1) terms with model coefficients fax <- tt$factors if(prod(dim(fax)) == 0) retained.terms <- character(0) else { # Rows are first order terms condemned.row <- rownames(fax) %in% condemned.names # Columns are the terms of all orders allterms <- colnames(fax) # Find all columns containing a 1 in a row that is to be deleted if(any(condemned.row)) { condemned.column <- apply(fax[condemned.row, , drop=FALSE] != 0, 2, any) retained.terms <- allterms[!condemned.column] } else retained.terms <- allterms } # (2) offsets if any if(any(v.is.offset)) retained.terms <- c(retained.terms, varstrings[v.is.offset & !v.has.delete]) # (3) intercept forced? if(length(retained.terms) == 0) retained.terms <- "1" # OK. 
Cut-and-paste f <- paste(lhs, "~", paste(retained.terms, collapse=" + ")) return(as.formula(f)) } spatstat/R/iplotlayered.R0000644000176000001440000002125712237642727015211 0ustar ripleyusers# # interactive plot # # $Revision: 1.5 $ $Date: 2013/04/25 06:37:43 $ # # iplot.default <- function(x, ..., xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) x <- layered(x) iplot(x, ..., xname=xname) } iplot.layered <- local({ faster.layers <- function(x) { if(any(islinnet <- unlist(lapply(x, inherits, what="linnet")))) { # convert linnet layers to psp, for efficiency x[islinnet] <- lapply(x[islinnet], as.psp) } repeat{ islpp <- unlist(lapply(x, inherits, what="lpp")) if(!any(islpp)) break # convert an lpp layer to two layers: psp and ppp, for efficiency ii <- min(which(islpp)) pl <- layerplotargs(x) n <- length(x) xpre <- if(ii == 1) NULL else x[1:ii] xpost <- if(ii == n) NULL else x[(ii+1):n] ppre <- if(ii == 1) NULL else pl[1:ii] ppost <- if(ii == n) NULL else pl[(ii+1):n] a <- as.psp(as.linnet(x[[ii]])) b <- as.ppp(x[[ii]]) x <- layered(LayerList=c(xpre, list(a, b), xpost), plotargs=unname(c(ppre, pl[ii], pl[ii], ppost))) } return(x) } iplot.layered <- function(x, ..., xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "layered") require(rpanel) x <- faster.layers(x) x <- freeze.colourmaps(x) bb <- as.rectangle(as.owin(x)) bbmid <- unlist(centroid.owin(bb)) lnames <- names(x) if(sum(nzchar(lnames)) != length(x)) lnames <- paste("Layer", seq_len(length(x))) ## p <- rp.control(paste("iplot(", xname, ")", sep=""), x=x, w=as.owin(x), xname=xname, layernames=lnames, bb=bb, bbmid=bbmid, zoomfactor=1, zoomcentre=bbmid, which = rep.int(TRUE, length(x)), size=c(700, 400)) # Split panel into three # Left: plot controls # Middle: data # Right: navigation/zoom rp.grid(p, "gcontrols", pos=list(row=0,column=0)) rp.grid(p, "gdisplay", pos=list(row=0,column=1)) rp.grid(p, "gnavigate", pos=list(row=0,column=2)) #----- Data display ------------ 
# This line is to placate the package checker mytkr <- NULL # Create data display panel rp.tkrplot(p, mytkr, plotfun=do.iplot.layered, action=click.iplot.layered, pos=list(row=0,column=0,grid="gdisplay")) #----- Plot controls ------------ nextrow <- 0 pozzie <- function(n=nextrow, ...) append(list(row=n,column=0,grid="gcontrols"), list(...)) # main title rp.textentry(p, xname, action=redraw.iplot.layered, title="Plot title", pos=pozzie(0)) nextrow <- 1 # select some layers nx <- length(x) which <- rep.int(TRUE, nx) if(nx > 1) { rp.checkbox(p, which, labels=lnames, action=redraw.iplot.layered, title="Select layers to plot", pos=pozzie(nextrow), sticky="") nextrow <- nextrow + 1 } # button to print a summary at console rp.button(p, title="Print summary information", pos=pozzie(nextrow), action=function(panel) { lapply(panel$x, function(z) print(summary(z))) return(panel) }) # #----- Navigation controls ------------ nextrow <- 0 navpos <- function(n=nextrow,cc=0, ...) append(list(row=n,column=cc,grid="gnavigate"), list(...)) rp.button(p, title="Up", pos=navpos(nextrow,1,sticky=""), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2] stepsize <- (height/4)/zo panel$zoomcentre <- ce + c(0, stepsize) redraw.iplot.layered(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Left", pos=navpos(nextrow,0,sticky="w"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1] stepsize <- (width/4)/zo panel$zoomcentre <- ce - c(stepsize, 0) redraw.iplot.layered(panel) return(panel) }) rp.button(p, title="Right", pos=navpos(nextrow,2,sticky="e"), action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb width <- sidelengths(bb)[1] stepsize <- (width/4)/zo panel$zoomcentre <- ce + c(stepsize, 0) redraw.iplot.layered(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Down", pos=navpos(nextrow,1,sticky=""), 
action=function(panel) { zo <- panel$zoomfactor ce <- panel$zoomcentre bb <- panel$bb height <- sidelengths(bb)[2] stepsize <- (height/4)/zo panel$zoomcentre <- ce - c(0, stepsize) redraw.iplot.layered(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Zoom In", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor * 2 redraw.iplot.layered(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Zoom Out", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- panel$zoomfactor / 2 redraw.iplot.layered(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Reset", pos=navpos(nextrow,1,sticky=""), action=function(panel) { panel$zoomfactor <- 1 panel$zoomcentre <- panel$bbmid redraw.iplot.layered(panel) return(panel) }) nextrow <- nextrow + 1 rp.button(p, title="Redraw", pos=navpos(nextrow,1,sticky=""), action=redraw.iplot.layered) nextrow <- nextrow+1 # quit button rp.button(p, title="Quit", quitbutton=TRUE, pos=navpos(nextrow, 1, sticky=""), action= function(panel) { panel }) invisible(NULL) } # Function to redraw the whole shebang redraw.iplot.layered <- function(panel) { rp.tkrreplot(panel, mytkr) panel } # Function executed when data display is clicked click.iplot.layered <- function(panel, x, y) { panel$zoomcentre <- panel$zoomcentre + (c(x,y) - panel$bbmid)/panel$zoomfactor redraw.iplot.layered(panel) return(panel) } # function that updates the plot when the control panel is operated do.iplot.layered <- function(panel) { # scale and clip the pattern x <- panel$x[panel$which] w <- panel$w z <- panel$zoomfactor if(is.null(z)) z <- 1 ce <- panel$zoomcentre bb <- panel$bb bbmid <- panel$bbmid scalex <- shift(affine(shift(x, -ce), diag(c(z,z))), bbmid) scalew <- shift(affine(shift(w, -ce), diag(c(z,z))), bbmid) scalex <- scalex[, bb] scalew <- intersect.owin(scalew, bb, fatal=FALSE) # determine what is plotted under the clipped pattern blankargs <- list(type="n") 
dashargs <- list(lty=3, border="red") panel.begin <- if(is.null(scalew)) { # empty intersection; just create the plot space layered(bb, plotargs=list(blankargs)) } else if(identical(bb, scalew)) { if(z == 1) { # original state # window is rectangular # plot the data window as a solid black rectangle layered(bb, scalew, plotargs=list(blankargs, list(lwd=2))) } else { # zoom view is entirely inside window # plot the clipping region as a red dashed rectangle layered(bb, plotargs=list(dashargs)) } } else { # field of view is not a subset of window # plot the clipping region as a red dashed rectangle # Then add the data window layered(bb, scalew, plotargs=list(dashargs, list(invert=TRUE))) } # draw it opa <- par(ask=FALSE) plot(panel.begin, main=panel$xname) plot(scalex, add=TRUE) par(opa) panel } freeze.colourmaps <- function(x) { # tweak a layered object to ensure that # the colours of image layers don't change with zoom/pan isim <- unlist(lapply(x, is.im)) if(any(isim)) { # ensure there are plotargs pl <- attr(x, "plotargs") if(is.null(pl)) pl <- rep.int(list(list()), length(x)) # make sure the plotargs include 'zlim' for(i in which(isim)) { x.i <- x[[i]] if(x.i$type %in% c("integer", "real")) pl[[i]] <- resolve.defaults(pl[[i]], list(zlim=range(x.i))) } # put back attr(x, "plotargs") <- pl } return(x) } iplot.layered }) spatstat/R/nncross.R0000755000176000001440000001632112252030011014146 0ustar ripleyusers# # nncross.R # # # $Revision: 1.26 $ $Date: 2013/12/11 08:42:46 $ # # Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2012 # Licence: GNU Public Licence >= 2 nncross <- function(X, Y, ...) { UseMethod("nncross") } nncross.default <- function(X, Y, ...) { X <- as.ppp(X, W=bounding.box.xy) nncross(X, Y, ...) 
} nncross.ppp <- function(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), ..., k = 1, sortby=c("range", "var", "x", "y"), is.sorted.X = FALSE, is.sorted.Y = FALSE) { stopifnot(is.ppp(Y) || is.psp(Y)) sortby <- match.arg(sortby) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) want.dist <- "dist" %in% what want.which <- "which" %in% what want.both <- want.dist && want.which if(!missing(k)) { # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } } k <- as.integer(k) kmax <- max(k) nk <- length(k) # trivial cases nX <- npoints(X) nY <- nobjects(Y) # deal with null cases if(nX == 0) return(as.data.frame(list(dist=matrix(0, nrow=0, ncol=nk), which=matrix(0L, nrow=0, ncol=nk))[what])) if(nY == 0) return(as.data.frame(list(dist=matrix(Inf, nrow=nX, ncol=nk), which=matrix(NA, nrow=nX, ncol=nk))[what])) # Y is a line segment pattern if(is.psp(Y)) { if(!identical(k, 1L)) stop("Sorry, the case k > 1 is not yet implemented for psp objects") return(ppllengine(X,Y,"distance")[, what]) } # Y is a point pattern if(is.null(iX) != is.null(iY)) stop("If one of iX, iY is given, then both must be given") exclude <- (!is.null(iX) || !is.null(iY)) if(exclude) { stopifnot(is.integer(iX) && is.integer(iY)) if(length(iX) != nX) stop("length of iX does not match the number of points in X") if(length(iY) != nY) stop("length of iY does not match the number of points in Y") } if((is.sorted.X || is.sorted.Y) && !(sortby %in% c("x", "y"))) stop(paste("If data are already sorted,", "the sorting coordinate must be specified explicitly", "using sortby = \"x\" or \"y\"")) # decide whether to sort on x or y coordinate switch(sortby, range = { WY <- as.owin(Y) sortby.y <- (diff(WY$xrange) < diff(WY$yrange)) }, var = { 
sortby.y <- (var(Y$x) < var(Y$y)) }, x={ sortby.y <- FALSE}, y={ sortby.y <- TRUE} ) # The C code expects points to be sorted by y coordinate. if(sortby.y) { Xx <- X$x Xy <- X$y Yx <- Y$x Yy <- Y$y } else { Xx <- X$y Xy <- X$x Yx <- Y$y Yy <- Y$x } # sort only if needed if(!is.sorted.X){ oX <- fave.order(Xy) Xx <- Xx[oX] Xy <- Xy[oX] if(exclude) iX <- iX[oX] } if (!is.sorted.Y){ oY <- fave.order(Yy) Yx <- Yx[oY] Yy <- Yy[oY] if(exclude) iY <- iY[oY] } # number of neighbours that are well-defined kmaxcalc <- min(nY, kmax) if(kmaxcalc == 1) { # ............... single nearest neighbour .................. # call C code nndv <- if(want.dist) numeric(nX) else numeric(1) nnwh <- if(want.which) integer(nX) else integer(1) if(!exclude) iX <- iY <- integer(1) DUP <- spatstat.options("dupC") huge <- 1.1 * diameter(bounding.box(as.rectangle(X), as.rectangle(Y))) z <- .C("nnXinterface", n1=as.integer(nX), x1=as.double(Xx), y1=as.double(Xy), id1=as.integer(iX), n2=as.integer(nY), x2=as.double(Yx), y2=as.double(Yy), id2=as.integer(iY), exclude = as.integer(exclude), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd=as.double(nndv), nnwhich=as.integer(nnwh), huge=as.double(huge), DUP=DUP) if(want.which) { nnwcode <- z$nnwhich #sic. C code now increments by 1 if(any(uhoh <- (nnwcode == 0))) { warning("NA's produced in nncross()$which") nnwcode[uhoh] <- NA } } # reinterpret in original ordering if(is.sorted.X){ if(want.dist) nndv <- z$nnd if(want.which) nnwh <- if(is.sorted.Y) nnwcode else oY[nnwcode] } else { if(want.dist) nndv[oX] <- z$nnd if(want.which) nnwh[oX] <- if(is.sorted.Y) nnwcode else oY[nnwcode] } if(want.both) return(data.frame(dist=nndv, which=nnwh)) return(if(want.dist) nndv else nnwh) } else { # ............... k nearest neighbours .................. 
# call C code nndv <- if(want.dist) numeric(nX * kmaxcalc) else numeric(1) nnwh <- if(want.which) integer(nX * kmaxcalc) else integer(1) if(!exclude) iX <- iY <- integer(1) DUP <- spatstat.options("dupC") huge <- 1.1 * diameter(bounding.box(as.rectangle(X), as.rectangle(Y))) z <- .C("knnXinterface", n1=as.integer(nX), x1=as.double(Xx), y1=as.double(Xy), id1=as.integer(iX), n2=as.integer(nY), x2=as.double(Yx), y2=as.double(Yy), id2=as.integer(iY), kmax=as.integer(kmaxcalc), exclude = as.integer(exclude), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd=as.double(nndv), nnwhich=as.integer(nnwh), huge=as.double(huge), DUP=DUP) # extract results nnD <- z$nnd nnW <- z$nnwhich # map 0 to NA if(want.which && any(uhoh <- (nnW == 0))) { nnW[uhoh] <- NA if(want.dist) nnD[uhoh] <- Inf } # reinterpret indices in original ordering if(!is.sorted.Y) nnW <- oY[nnW] # reform as matrices NND <- if(want.dist) matrix(nnD, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0 NNW <- if(want.which) matrix(nnW, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0 if(!is.sorted.X){ # rearrange rows to correspond to original ordering of points if(want.dist) NND[oX, ] <- NND if(want.which) NNW[oX, ] <- NNW } # the return value should correspond to the original vector k if(kmax > kmaxcalc) { # add columns of NA / Inf kextra <- kmax - kmaxcalc if(want.dist) NND <- cbind(NND, matrix(Inf, nrow=nX, ncol=kextra)) if(want.which) NNW <- cbind(NNW, matrix(NA_integer_, nrow=nX, ncol=kextra)) } if(length(k) < kmax) { # select only the specified columns if(want.dist) NND <- NND[, k, drop=TRUE] if(want.which) NNW <- NNW[, k, drop=TRUE] } result <- as.data.frame(list(dist=NND, which=NNW)[what]) colnames(result) <- c(if(want.dist) paste0("dist.", k) else NULL, if(want.which) paste0("which.",k) else NULL) if(ncol(result) == 1) result <- result[, , drop=TRUE] return(result) } } spatstat/R/discarea.R0000755000176000001440000000545412237642727014273 0ustar ripleyusers# # discarea.R # # $Revision: 1.16 $ 
$Date: 2013/10/06 04:36:07 $ # # # Compute area of intersection between a disc and a window, # discpartarea <- function(X, r, W=as.owin(X)) { if(!missing(W)) { verifyclass(W, "owin") if(!inherits(X, "ppp")) X <- as.ppp(X, W) } verifyclass(X, "ppp") n <- X$n if(is.matrix(r) && nrow(r) != n) stop("the number of rows of r should match the number of points in X") if(!is.matrix(r)) { nr <- length(r) r <- matrix(r, nrow=n, ncol=nr, byrow=TRUE) } else { nr <- ncol(r) } W <- as.polygonal(W) # convert polygon to line segments Y <- as.psp(W) # remove vertical segments (contribution is zero) vert <- (Y$ends$x1 == Y$ends$x0) Y <- Y[!vert] # go DUP <- spatstat.options("dupC") z <- .C("discareapoly", nc=as.integer(n), xc=as.double(X$x), yc=as.double(X$y), nr=as.integer(nr), rmat=as.double(r), nseg=as.integer(Y$n), x0=as.double(Y$ends$x0), y0=as.double(Y$ends$y0), x1=as.double(Y$ends$x1), y1=as.double(Y$ends$y1), eps=as.double(.Machine$double.eps), out=as.double(numeric(length(r))), DUP=DUP) # PACKAGE="spatstat") areas <- matrix(z$out, n, nr) return(areas) } # Compute area of dilation of point pattern # using Dirichlet tessellation or distmap # (areas of other dilations using distmap) dilated.areas <- function(X, r, W=as.owin(X), ..., constrained=TRUE, exact=FALSE) { if(is.matrix(r)) { if(sum(dim(r) > 1) > 1) stop("r should be a vector or single value") r <- as.vector(r) } if(exact && !is.ppp(X)) { exact <- FALSE warning("Option exact=TRUE is only available for ppp objects") } if(!constrained) { # unconstrained dilation bb <- as.rectangle(X) W <- grow.rectangle(bb, max(r)) if(is.owin(X)) X <- rebound.owin(X, W) else X$window <- W } else W <- as.owin(W) if(!exact) { D <- distmap(X) pixelarea <- D$xstep * D$ystep Dvals <- D[W, drop=TRUE] if(is.im(Dvals)) Dvals <- as.vector(as.matrix(Dvals)) Dvals <- Dvals[!is.na(Dvals)] rr <- c(-1, r) h <- cumsum(whist(Dvals, rr)) return(h * pixelarea) } X <- unique(X) npoints <- X$n nr <- length(r) if(npoints == 0) return(numeric(nr)) else 
if(npoints == 1) return(discpartarea(X, r, W)) samebox <- (W$type == "rectangle") && identical(all.equal(W, as.owin(X)), "TRUE") needclip <- constrained && !samebox dd <- dirichlet(X) til <- tiles(dd) out <- matrix(0, npoints, nr) for(i in 1:npoints) { Ti <- til[[i]] if(needclip) Ti <- intersect.owin(Ti, W) out[i,] <- discpartarea(X[i], r, Ti) } return(apply(out, 2, sum)) } spatstat/R/anova.ppm.R0000755000176000001440000000410212237642727014404 0ustar ripleyusers# # anova.ppm.R # # $Revision: 1.9 $ $Date: 2013/08/19 09:00:49 $ # anova.ppm <- function(object, ..., test=NULL, override=FALSE) { # list of models objex <- append(list(object), list(...)) if(!all(unlist(lapply(objex, is.ppm)))) stop(paste("Arguments must all be", sQuote("ppm"), "objects")) # non-Poisson models? pois <- all(unlist(lapply(objex, is.poisson.ppm))) if(!pois) { whinge <- paste("Some of the fitted models are not Poisson processes:", "p-values are not supported by any theory") if(override) warning(whinge) else stop(whinge) } # all models fitted by MPL? mplfit <- unlist(lapply(objex, function(x) { x$method=="mpl" })) if(!all(mplfit)) stop(paste("Not all models fitted by maximum pseudolikelihood;", "comparison not possible")) # Extract glmfit objects fitz <- lapply(objex, getglmfit) # Any trivial models? 
(uniform Poisson) trivial <- unlist(lapply(fitz, is.null)) # force all non-trivial models to be fitted using same method # (all using GLM or all using GAM) isgam <- unlist(lapply(fitz, function(x) { inherits(x, "gam") })) isglm <- unlist(lapply(fitz, function(x) { inherits(x, "glm") })) usegam <- any(isgam) if(usegam && any(isglm)) { warning("Some, but not all, models were fitted with use.gam=TRUE;", "refitting all models with use.gam=TRUE.") objex[isglm] <- lapply(objex[isglm], update.ppm, forcefit=TRUE, use.gam=TRUE, envir=parent.frame()) fitz[isglm] <- lapply(objex[isglm], getglmfit) } # Force any trivial models to be refitted using GLM or GAM if(any(trivial)) { # force them to be fitted using glm objex[trivial] <- lapply(objex[trivial], update.ppm, forcefit=TRUE, use.gam=usegam, envir=parent.frame()) fitz[trivial] <- lapply(objex[trivial], getglmfit) } # Finally do the appropriate ANOVA result <- do.call("anova", append(fitz, list(test=test, dispersion=1))) return(result) } spatstat/R/qqplotppm.R0000755000176000001440000002211512237642727014546 0ustar ripleyusers# # QQ plot of smoothed residual field against model # # qqplot.ppm() QQ plot (including simulation) # # $Revision: 1.24 $ $Date: 2013/04/25 06:37:43 $ # qqplot.ppm <- function(fit, nsim=100, expr=NULL, ..., type="raw", style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE, dimyx=NULL, nrep=if(fast) 5e4 else 1e5, control=update(default.rmhcontrol(fit), nrep=nrep), saveall=FALSE, monochrome=FALSE, limcol=if(monochrome) "black" else "red", maxerr=max(100, ceiling(nsim/10)), check=TRUE, repair=TRUE) { verifyclass(fit, "ppm") if(check && damaged.ppm(fit)) { if(!repair) stop("object format corrupted; try update(fit, use.internal=TRUE)") message("object format corrupted; repairing it.") fit <- update(fit, use.internal=TRUE) } if(fast) { oldnpixel <- spatstat.options("npixel") if(is.null(dimyx)) dimyx <- pmin(40, rev(oldnpixel)) spatstat.options(npixel=rev(dimyx)) } ################ How to evaluate residuals 
########################## # Quantiles of the residual field will be computed. residualfield <- function(fit, ...) { d <- diagnose.ppm(fit, which="smooth", plot.it=FALSE, compute.cts=FALSE, compute.sd=FALSE, check=FALSE, ...) return(d$smooth$Z$v) } # Data values dat <- residualfield(fit, type=type, ..., dimyx=dimyx) # How to refit the model properly! refit <- function(fit, pattern) { update.ppm(fit, Q=pattern, use.internal=TRUE) } ################## How to perform simulations? ####################### simulate.from.fit <- is.null(expr) how.simulating <- if(simulate.from.fit) "simulating from fitted model" else paste("evaluating", sQuote("expr")) if(!simulate.from.fit) { # 'expr' will be evaluated 'nsim' times if(!is.expression(expr)) stop(paste("Argument", sQuote("expr"), "should be an expression")) } else{ # We will simulate from the fitted model 'nsim' times # and refit the model to these simulations # prepare rmh arguments rcontrol <- rmhcontrol(control) rmodel <- rmhmodel(fit, control=rcontrol, project=FALSE, verbose=verbose) rstart <- rmhstart(n.start=data.ppm(fit)$n) # pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontrol, preponly=TRUE, verbose=FALSE) # expression to be evaluated each time expr <- expression( refit(fit, rmhEngine(rmhinfolist, verbose=FALSE))) } ###### Perform simulations if(verbose) cat(paste("Simulating", nsim, "realisations... 
")) simul.sizes <- numeric(nsim) i <- 0 ierr <- 0 repeat { # protect from randomly-generated crashes in gam ei <- try(eval(expr),silent=!verbose) if(inherits(ei, "try-error")) { # error encountered in evaluating 'expr' ierr <- ierr + 1 if(ierr > maxerr) stop(paste("Exceeded maximum of", maxerr, "failures in", how.simulating, "after generating only", i, "realisations")) else break } else { # simulation successful i <- i + 1 fiti <- if(simulate.from.fit) ei else if(is.ppm(ei)) ei else if(is.ppp(ei)) refit(fit, ei) else stop("result of eval(expr) is not a ppm or ppp object") # diagnostic info simul.sizes[i] <- data.ppm(fiti)$n # compute residual field resi <- residualfield(fiti, type=type, ..., dimyx=dimyx) if(i == 1) sim <- array(, dim=c(dim(resi), nsim)) sim[,,i] <- resi if(verbose) progressreport(i, nsim) if(i >= nsim) break } } ###### Report diagnostics if(ierr > 0) cat(paste("\n\n**Alert:", ierr, "failures occurred in", how.simulating, "\n\n")) nempty <- sum(simul.sizes == 0) if(nempty > 0) cat(paste("\n\n**Alert:", nempty, "out of", nsim, "simulated patterns were empty.\n\n")) else cat(paste("\nDiagnostic info:\n", "simulated patterns contained an average of", mean(simul.sizes), "points.\n")) if(nempty == nsim) warning("All simulated patterns were empty") ############ Plot them switch(style, classical = { rr <- range(c(dat,sim)) result <- qqplot(sim, dat, xlim=rr, ylim=rr, asp=1.0, xlab="Quantiles of simulation", ylab="Quantiles of data",plot.it=plot.it) title(sub=paste("Residuals:", type)) abline(0,1, lty=2) result <- append(result, list(data=dat, sim=sim, xlim=rr, ylim=rr, xlab="Quantiles of simulation", ylab="Quantiles of data", rtype=type, nsim=nsim, fit=fit, expr= if(simulate.from.fit) NULL else deparse(expr), simulate.from.fit=simulate.from.fit ) ) }, mean = { # compute quantiles corresponding to probabilities p[i] # separately in each realisation. 
if(verbose) cat("Calculating quantiles...") if(fast) { p <- ppoints(min(100,length(dat)), 3/8) qsim <- apply(sim, 3, quantile, probs=p, na.rm=TRUE) } else { qsim <- apply(sim, 3, sort, na.last=TRUE) } if(verbose) cat("averaging...") # sample mean of each quantile meanq <- apply(qsim, 1, mean, na.rm=TRUE) # et cetera varq <- apply(qsim, 1, var, na.rm=TRUE) sdq <- sqrt(varq) q.025 <- apply(qsim, 1, quantile, probs=0.025, na.rm=TRUE) q.975 <- apply(qsim, 1, quantile, probs=0.975, na.rm=TRUE) rr <- range(c(meanq,dat), na.rm=TRUE) dats <- if(fast) quantile(dat, probs=p, na.rm=TRUE) else sort(dat, na.last=TRUE) if(verbose) cat("..Done.\n") if(plot.it) { plot(meanq, dats, xlab="Mean quantile of simulations", ylab="data quantile", xlim=rr, ylim=rr, asp=1.0) abline(0,1) lines(meanq, q.025, lty=2, col=limcol) lines(meanq, q.975, lty=2, col=limcol) title(sub=paste("Residuals:", type)) } result <- list(x=meanq, y=dats, sdq=sdq, q.025=q.025, q.975=q.975, data=dat, sim=sim, xlim=rr, ylim=rr, xlab="Mean quantile of simulations", ylab="data quantile", rtype=type, nsim=nsim, fit=fit, expr=if(simulate.from.fit) NULL else deparse(expr), simulate.from.fit=simulate.from.fit) }, stop(paste("Unrecognised option for", sQuote("style"))) ) # Throw out baggage if not wanted if(!saveall) { result$fit <- summary(fit, quick=TRUE) result$sim <- NULL } # reset npixel if(fast) spatstat.options(npixel=oldnpixel) # class(result) <- c("qqppm", class(result)) return(invisible(result)) } plot.qqppm <- function(x, ..., limits=TRUE, monochrome=FALSE, limcol=if(monochrome) "black" else "red") { stopifnot(inherits(x, "qqppm")) default.type <- if(length(x$x) > 150) "l" else "p" myplot <- function(object, xlab = object$xlab, ylab = object$ylab, xlim = object$xlim, ylim = object$ylim, asp = 1, type = default.type, ..., limits=TRUE) { plot(object$x, object$y, xlab = xlab, ylab = ylab, xlim = xlim, ylim = ylim, asp = asp, type = type, ...) 
abline(0, 1) if(limits) { if(!is.null(object$q.025)) lines(object$x, object$q.025, lty = 2, col=limcol) if(!is.null(object$q.975)) lines(object$x, object$q.975, lty = 2, col=limcol) } title(sub=paste("Residuals:", object$rtype)) } myplot(x, ..., limits=limits) return(invisible(x)) } print.qqppm <- function(x, ...) { stopifnot(inherits(x, "qqppm")) cat(paste("Q-Q plot of point process residuals ", "of type", sQuote(x$rtype), "\n", "based on ", x$nsim, " simulations\n", sep="")) if(x$simulate.from.fit) { fit <- x$fit sumfit <- if(is.ppm(fit)) summary(fit, quick=TRUE) else if(inherits(fit, "summary.ppm")) fit else list(name="(unrecognised format)") cat(paste("\nSimulations from fitted model:", sumfit$name, "\n")) } else { cat("Simulations obtained by evaluating the following expression:\n") print(x$expr) } invisible(NULL) } spatstat/R/eval.im.R0000755000176000001440000001375712237642727014060 0ustar ripleyusers# # eval.im.R # # eval.im() Evaluate expressions involving images # # compatible.im() Check whether two images are compatible # # harmonise.im() Harmonise images # commonGrid() # # $Revision: 1.30 $ $Date: 2013/04/25 06:37:43 $ # eval.im <- function(expr, envir, harmonize=TRUE) { e <- as.expression(substitute(expr)) # get names of all variables in the expression varnames <- all.vars(e) allnames <- all.names(e, unique=TRUE) funnames <- allnames[!(allnames %in% varnames)] if(length(varnames) == 0) stop("No variables in this expression") # get the values of the variables if(missing(envir)) envir <- sys.parent() vars <- lapply(as.list(varnames), function(x, e) get(x, envir=e), e=envir) names(vars) <- varnames funs <- lapply(as.list(funnames), function(x, e) get(x, envir=e), e=envir) names(funs) <- funnames # find out which variables are images ims <- unlist(lapply(vars, is.im)) if(!any(ims)) stop("No images in this expression") images <- vars[ims] nimages <- length(images) # test that the images are compatible if(!(ok <- do.call("compatible", unname(images)))) { 
compatible.im <- function(A, B, ..., tol=1e-6) {
  ## Decide whether pixel images share an identical raster:
  ## same dimensions, same grid coordinates and compatible units,
  ## up to a tolerance 'tol' relative to the pixel size.
  verifyclass(A, "im")
  if(missing(B)) return(TRUE)
  verifyclass(B, "im")
  if(!all(A$dim == B$dim)) return(FALSE)
  ## worst-case discrepancy in each coordinate direction
  dx <- max(abs(A$xrange - B$xrange),
            abs(A$xstep - B$xstep),
            abs(A$xcol - B$xcol))
  dy <- max(abs(A$yrange - B$yrange),
            abs(A$ystep - B$ystep),
            abs(A$yrow - B$yrow))
  agree.x <- (dx < tol * min(A$xstep, B$xstep))
  agree.y <- (dy < tol * min(A$ystep, B$ystep))
  agree.units <- compatible.units(unitname(A), unitname(B))
  if(!(agree.x && agree.y && agree.units)) return(FALSE)
  ## A and B are compatible; check any further images recursively
  if(length(list(...)) == 0) return(TRUE)
  return(compatible.im(B, ..., tol=tol))
}
result[isim] <- imgs Wtemplate <- imgs[[1]] } else { # test for compatible units un <- lapply(imgs, unitname) uok <- unlist(lapply(un, compatible.units, y=un[[1]])) if(!all(uok)) stop("Images have incompatible units of length") # find the image with the highest resolution xsteps <- unlist(lapply(imgs, function(a) { a$xstep })) which.finest <- which.min(xsteps) finest <- imgs[[which.finest]] # get the bounding box bb <- do.call("bounding.box", lapply(unname(imgs), as.rectangle)) if(!is.null(bb0)) bb <- bounding.box(bb, bb0) # determine new raster coordinates xcol <- prolongseq(finest$xcol, bb$xrange) yrow <- prolongseq(finest$yrow, bb$yrange) xy <- list(x=xcol, y=yrow) # resample all images on new raster newimgs <- lapply(imgs, as.im, xy=xy) result[isim] <- newimgs Wtemplate <- newimgs[[which.finest]] } # convert other data to images if(any(notim <- !isim)) result[notim] <- lapply(argz[notim], as.im, W=as.mask(Wtemplate)) names(result) <- names(argz) return(result) } # Return just the corresponding template window commonGrid <- local({ # auxiliary function gettype <- function(x) { if(is.im(x) || is.mask(x)) "raster" else if(is.owin(x) || is.ppp(x) || is.psp(x)) "spatial" else "none" } commonGrid <- function(...) { argz <- list(...) 
suffstat <- function(model, X=data.ppm(model)) {
  ## Compute the sufficient statistic of a fitted point process model,
  ## evaluated at the point pattern X (default: the data pattern).
  ## Dispatches to the most specific evaluator available.
  callstring <- short.deparse(sys.call())
  verifyclass(model, "ppm")
  if(missing(X)) {
    X <- NULL
  } else {
    verifyclass(X, "ppp")
  }
  inter <- model$interaction
  ## Choose an evaluator, in decreasing order of specificity:
  ## Poisson special case, interaction-specific routine,
  ## family-specific routine, and finally the slow generic algorithm.
  if(is.null(inter) || is.poisson(inter)) {
    func <- suffstat.poisson
  } else if(!is.null(ssinter <- inter$suffstat)) {
    func <- ssinter
  } else if(!is.null(ssfamily <- inter$family$suffstat)) {
    func <- ssfamily
  } else {
    func <- suffstat.generic
  }
  return(func(model, X, callstring))
}
killinteraction <- function(model) {
  ## Remove the interpoint interaction from a fitted ppm object,
  ## returning the corresponding model with only the trend terms.
  verifyclass(model, "ppm")
  if(summary(model, quick=TRUE)$poisson) {
    ## already a Poisson model: nothing to strip
    return(model)
  }
  ## surgery required: delete the interaction and its coefficients
  stripped <- model
  stripped$interaction <- NULL
  Vnames <- model$internal$Vnames
  if(!is.null(Vnames)) {
    keep <- !(names(model$coef) %in% Vnames)
    stripped$coef <- model$coef[keep]
    stripped$internal$Vnames <- NULL
  }
  ## NOTE: other entries of 'internal' may still refer to the
  ## removed interaction
  return(stripped)
}
clickjoin <- function(X, ..., add=TRUE, m=NULL, join=TRUE) {
  ## Interactively add (or, when join=FALSE, delete) edges between
  ## vertices of a point pattern by clicking pairs of points on the
  ## current plot.  Returns the updated logical adjacency matrix.
  verifyclass(X, "ppp")
  if(!(is.logical(join) && length(join) == 1))
    stop("join should be a single logical value")
  plot(X, add=add, pch=16)
  if(is.null(m)) {
    ## start with no edges
    m <- matrix(FALSE, npoints(X), npoints(X))
  } else {
    ## validate the supplied adjacency matrix and draw its edges
    stopifnot(is.matrix(m) && is.logical(m))
    stopifnot(all(dim(m) == npoints(X)))
    ii <- as.vector(row(m)[m])
    jj <- as.vector(col(m)[m])
    with(X, segments(x[ii], y[ii], x[jj], y[jj]))
  }
  repeat {
    ## ask the user to click two points; an empty click ends the session
    picked <- identify(X, plot=FALSE, n=2)
    if(length(picked) == 0) break
    if(length(picked) == 2) {
      ## record the edge symmetrically and draw it
      m[picked[1], picked[2]] <- m[picked[2], picked[1]] <- join
      lines(X$x[picked], X$y[picked], ...)
    }
  }
  return(m)
}
{ if(is.null(cmd)) return(NULL) if(is.function(cmd)) { do.call(cmd, resolve.defaults(list(i, xi, ...))) } else { do.call(plot, resolve.defaults(list(cmd, ...))) } } plot.splitppp <- function(x, ..., main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), panel.begin=NULL, panel.end=NULL, panel.args=NULL, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2) { xname <- short.deparse(substitute(x)) # `boomerang despatch' cl <- match.call() if(missing(plotcommand) && all(unlist(lapply(x, is.im)))) { cl[[1]] <- as.name("image.listof") parenv <- sys.parent() return(eval(cl, envir=parenv)) } n <- length(x) names(x) <- good.names(names(x), "Component_", 1:n) if(is.null(main.panel)) main.panel <- names(x) else { stopifnot(is.character(main.panel) || is.expression(main.panel)) nmp <- length(main.panel) if(nmp == 1) main.panel <- rep.int(main.panel, n) else if(nmp != n) stop("Incorrect length for main.panel") } if(!arrange) { # sequence of plots for(i in 1:n) { xi <- x[[i]] exec.or.plot(panel.begin, i, xi, main=main.panel[i]) extraplot(i, xi, ..., add=!is.null(panel.begin), main=main.panel[i], panel.args=panel.args, plotcommand=plotcommand) exec.or.plot(panel.end, i, xi, add=TRUE) } if(!is.null(adorn.left)) warning("adorn.left was ignored because arrange=FALSE") if(!is.null(adorn.right)) warning("adorn.right was ignored because arrange=FALSE") if(!is.null(adorn.top)) warning("adorn.top was ignored because arrange=FALSE") if(!is.null(adorn.bottom)) warning("adorn.bottom was ignored because arrange=FALSE") return(invisible(NULL)) } # ARRAY of plots # decide whether to plot a main header main <- if(!missing(main) && !is.null(main)) main else xname if(!is.character(main)) { # main title could be an expression nlines <- 1 banner <- TRUE } else { # main title is character string/vector, possibly "" banner <- any(nzchar(main)) if(length(main) > 1) main <- paste(main, collapse="\n") nlines <- 
length(unlist(strsplit(main, "\n"))) } # determine arrangement of plots # arrange like mfrow(nrows, ncols) plus a banner at the top if(is.null(nrows) && is.null(ncols)) { nrows <- as.integer(floor(sqrt(n))) ncols <- as.integer(ceiling(n/nrows)) } else if(!is.null(nrows) && is.null(ncols)) ncols <- as.integer(ceiling(n/nrows)) else if(is.null(nrows) && !is.null(ncols)) nrows <- as.integer(ceiling(n/ncols)) else stopifnot(nrows * ncols >= length(x)) nblank <- ncols * nrows - n # determine approximate relative dimensions for equal scale plots boxes <- try(lapply(x, as.rectangle), silent=TRUE) sizes.known <- !inherits(boxes, "try-error") # set up layout mat <- matrix(c(seq_len(n), integer(nblank)), byrow=TRUE, ncol=ncols, nrow=nrows) if(sizes.known) { xwidths <- unlist(lapply(boxes, function(z) { diff(z$xrange) })) xheights <- unlist(lapply(boxes, function(z) { diff(z$yrange) })) heights <- apply(mat, 1, function(j,h) { max(h[j[j>0]]) }, h=xheights) widths <- apply(mat, 2, function(i,w) { max(w[i[i>0]]) }, w=xwidths) } else { heights <- rep.int(1, nrows) widths <- rep.int(1, ncols) } meanheight <- mean(heights) meanwidth <- mean(widths) nall <- n if(!is.null(adorn.left)) { # add margin at left, of width adorn.size * meanwidth nall <- i.left <- n+1 mat <- cbind(i.left, mat) widths <- c(adorn.size * meanwidth, widths) } if(!is.null(adorn.right)) { # add margin at right, of width adorn.size * meanwidth nall <- i.right <- nall+1 mat <- cbind(mat, i.right) widths <- c(widths, adorn.size * meanwidth) } if(!is.null(adorn.bottom)) { # add margin at bottom, of height adorn.size * meanheight nall <- i.bottom <- nall+1 mat <- rbind(mat, i.bottom) heights <- c(heights, adorn.size * meanheight) } if(!is.null(adorn.top)) { # add margin at top, of height adorn.size * meanheight nall <- i.top <- nall + 1 mat <- rbind(i.top, mat) heights <- c(adorn.size * meanheight, heights) } if(banner) { # Increment existing panel numbers # New panel 1 is the banner panels <- (mat > 0) mat[panels] 
density.splitppp <- function(x, ...) {
  ## Apply kernel density estimation to each component pattern of a
  ## split point pattern; return the estimates as a 'listof' object.
  results <- lapply(x, density, ...)
  return(as.listof(results))
}
dgsTerms <- function(X, Y, idX, idY, rho) { stopifnot(is.numeric(rho)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] idXsort <- idX[oX] idYsort <- idY[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine DUP <- spatstat.options("dupC") out <- .C("Ediggatsti", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), rrho = as.double(rho), values = as.double(double(nX)), DUP = DUP) # PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$values return(answer) } # ...... template object ...................... BlankDGS <- list( name = "Diggle-Gates-Stibbard process", creator = "DiggleGatesStibbard", family = "pairwise.family", # evaluated later pot = function(d, par) { rho <- par$rho v <- log(sin((pi/2) * d/rho)^2) v[ d > par$rho ] <- 0 attr(v, "IsOffset") <- TRUE v }, par = list(rho = NULL), # to be filled in later parnames = "interaction range", init = function(self) { rho <- self$par$rho if(!is.numeric(rho) || length(rho) != 1 || rho <= 0) stop("interaction range rho must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { return(NULL) }, valid = function(coeffs, self) { return(TRUE) }, project = function(coeffs, self) { return(NULL) }, irange = function(self, coeffs=NA, epsilon=0, ...) { rho <- self$par$rho return(rho) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) 
{ # fast evaluator for DiggleGatesStibbard interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for DiggleGatesStibbard") rho <- potpars$rho idX <- seq_len(npoints(X)) idU <- rep.int(-1, npoints(U)) idU[EqualPairs[,2]] <- EqualPairs[,1] v <- dgsTerms(U, X, idU, idX, rho) v <- matrix(v, ncol=1) attr(v, "IsOffset") <- TRUE return(v) }, Mayer=function(coeffs, self) { # second Mayer cluster integral rho <- self$par$rho return((pi/2 - 2/pi) * rho^2) } ) class(BlankDGS) <- "interact" DiggleGatesStibbard <- function(rho) { instantiate.interact(BlankDGS, list(rho = rho)) } DiggleGatesStibbard }) spatstat/R/Gres.R0000755000176000001440000000502512237642727013412 0ustar ripleyusers# # Gres.R # # Residual G # # $Revision: 1.3 $ $Date: 2013/04/25 06:37:43 $ # ############################################################################# # Gres <- function(object, ...) { if(!is.fv(object)) { # usual case where 'object' is a ppm, ppp or quad G <- Gcom(object, ...) } else { # case where 'object' is the output of 'Gcom' a <- attr(object, "maker") if(is.null(a) || a != "Gcom") stop("fv object was not created by Gcom") G <- object if(length(list(...)) > 0) warning("Extra arguments ignored") } # initialise fv object df <- data.frame(r=G$r, theo=numeric(length(G$r))) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~hat(G)(r), NULL), "theo", . 
~ r, attr(G, "alim"), c("r","bold(R)~%s[theo](r)"), desc, fname="G") # add residual estimates nam <- names(G) if(all(c("border","bcom") %in% nam)) ans <- bind.fv(ans, data.frame(bres=with(G, border-bcom)), "bold(R)~hat(%s)[bord](r)", "border corrected residual of %s", "bres") if(all(c("han","hcom") %in% nam)) ans <- bind.fv(ans, data.frame(hres=with(G, han-hcom)), "bold(R)~hat(%s)[han](r)", "Hanisch corrected residual of %s", "hres") if("hvar" %in% nam) { savedotnames <- fvnames(ans, ".") hsd <- with(G, sqrt(hvar)) ans <- bind.fv(ans, data.frame(hvar=with(G, hvar), hsd = hsd, hi = 2*hsd, lo = -2*hsd), c("bold(C)^2~hat(%s)[han](r)", "sqrt(bold(C)^2~hat(%s)[han](r))", "bold(R)~hat(%s)[Hi](r)", "bold(R)~hat(%s)[Lo](r)"), c("pseudovariance of Hanisch corrected residual %s", "pseudo-SD of Hanisch corrected residual %s", "upper critical band for Hanisch corrected residual %s", "lower critical band for Hanisch corrected residual %s"), "hres") ans <- bind.fv(ans, data.frame(hstdres=with(ans, hres/hsd)), "bold(T)~hat(%s)[han](r)", "standardised Hanisch-corrected residual %s", "hres") fvnames(ans, ".") <- c(savedotnames, c("hi", "lo")) } unitname(ans) <- unitname(G) return(ans) } spatstat/R/kppm.R0000755000176000001440000005703112240447357013461 0ustar ripleyusers# # kppm.R # # kluster/kox point process models # # $Revision: 1.82 $ $Date: 2013/11/12 14:17:38 $ # kppm <- function(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), covariates = NULL, ..., method = c("mincon", "clik"), weightfun=NULL, control=list(), statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) { Xname <- short.deparse(substitute(X)) clusters <- match.arg(clusters) method <- match.arg(method) if(method == "mincon") statistic <- pickoption("summary statistic", statistic, c(K="K", g="pcf", pcf="pcf")) isquad <- inherits(X, "quad") if(!is.ppp(X) && !isquad) stop("X should be a point pattern (ppp) or quadrature scheme (quad)") 
if(is.marked(X)) stop("Sorry, cannot handle marked point patterns") po <- ppm(Q=X, trend=trend, covariates=covariates, forcefit=TRUE, rename.intercept=FALSE, covfunargs=covfunargs, use.gam=use.gam, nd=nd, eps=eps) XX <- if(isquad) X$data else X # fit out <- switch(method, mincon = kppmMinCon(X=XX, Xname=Xname, po=po, clusters=clusters, statistic=statistic, statargs=statargs, control=control, rmax=rmax, ...), clik = kppmComLik(X=XX, Xname=Xname, po=po, clusters=clusters, control=control, weightfun=weightfun, rmax=rmax, ...)) # class(out) <- c("kppm", class(out)) return(out) } kppmMinCon <- function(X, Xname, po, clusters, statistic, statargs, ...) { # Minimum contrast fit stationary <- is.stationary(po) # compute summary function if(stationary) { StatFun <- if(statistic == "K") "Kest" else "pcf" StatName <- if(statistic == "K") "K-function" else "pair correlation function" Stat <- do.call(StatFun, resolve.defaults(list(X=X), statargs, list(correction="best"))) lambda <- summary(po)$trend$value } else { StatFun <- if(statistic == "K") "Kinhom" else "pcfinhom" StatName <- if(statistic == "K") "inhomogeneous K-function" else "inhomogeneous pair correlation function" # compute intensity at high resolution if available w <- as.owin(po, from="covariates") if(!is.mask(w)) w <- NULL lambda <- predict(po, locations=w) Stat <- do.call(StatFun, resolve.defaults(list(X=X, lambda=lambda), statargs, list(correction="best"))) } # determine initial values of parameters selfstart <- spatstatClusterModelInfo(clusters)$selfstart startpar0 <- selfstart(X) # fit switch(clusters, Thomas={ FitFun <- if(statistic == "K") "thomas.estK" else "thomas.estpcf" mcfit <- do.call(FitFun, resolve.defaults( list(X=Stat, lambda=lambda), list(...), list(startpar=startpar0, dataname=Xname))) # kappa = intensity of parents kappa <- mcfit$par[["kappa"]] # mu = mean number of points per cluster mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) isPCP <- TRUE }, MatClust={ FitFun <- if(statistic 
== "K") "matclust.estK" else "matclust.estpcf" mcfit <- do.call(FitFun, resolve.defaults( list(X=Stat, lambda=lambda), list(...), list(startpar=startpar0, dataname=Xname))) # kappa = intensity of parents kappa <- mcfit$par[["kappa"]] # mu = mean number of points per cluster mu <- if(stationary) lambda/kappa else eval.im(lambda/kappa) isPCP <- TRUE }, Cauchy = { FitFun <- if (statistic == "K") "cauchy.estK" else "cauchy.estpcf" mcfit <- do.call(FitFun, resolve.defaults(list(X = Stat, lambda = lambda), list(...), list(startpar = startpar0, dataname = Xname))) # kappa = intensity of parents kappa <- mcfit$par[["kappa"]] # mu = mean number of points per cluster mu <- if (stationary) lambda/kappa else eval.im(lambda/kappa) isPCP <- TRUE }, VarGamma = { FitFun <- if (statistic == "K") "vargamma.estK" else "vargamma.estpcf" mcfit <- do.call(FitFun, resolve.defaults(list(X = Stat, lambda = lambda), list(...), list(startpar = startpar0, dataname = Xname, nu = 0.5))) kappa <- mcfit$par[["kappa"]] mu <- if (stationary) lambda/kappa else eval.im(lambda/kappa) isPCP <- TRUE }, LGCP={ FitFun <- if(statistic == "K") "lgcp.estK" else "lgcp.estpcf" mcfit <- do.call(FitFun, resolve.defaults( list(X=Stat, lambda=lambda), list(...), list(startpar=startpar0, dataname=Xname))) sigma2 <- mcfit$par[["sigma2"]] # mu = mean of log intensity mu <- if(stationary) log(lambda) - sigma2/2 else eval.im(log(lambda) - sigma2/2) isPCP <- FALSE }) # all info that depends on the fitting method: Fit <- list(method = "mincon", statistic = statistic, Stat = Stat, StatFun = StatFun, StatName = StatName, FitFun = FitFun, statargs = statargs, mcfit = mcfit) # results out <- list(Xname = Xname, X = X, stationary = stationary, clusters = clusters, modelname = mcfit$info$modelname, isPCP = isPCP, po = po, lambda = lambda, mu = mu, par = mcfit$par, modelpar = mcfit$modelpar, covmodel = mcfit$covmodel, Fit = Fit) return(out) } kppmComLik <- function(X, Xname, po, clusters, control, weightfun, rmax, ...) 
{ W <- as.owin(X) if(is.null(rmax)) rmax <- rmax.rule("K", W, intensity(X)) # identify pairs of points that contribute cl <- closepairs(X, rmax) I <- cl$i J <- cl$j dIJ <- cl$d # compute weights for pairs of points if(is.function(weightfun)) { wIJ <- weightfun(dIJ) sumweight <- sum(wIJ) } else { npairs <- length(dIJ) wIJ <- rep.int(1, npairs) sumweight <- npairs } # convert window to mask, saving other arguments for later dcm <- do.call.matched("as.mask", append(list(w=W), list(...)), sieve=TRUE) M <- dcm$result otherargs <- dcm$otherargs # compute intensity at pairs of data points # and c.d.f. of interpoint distance in window if(stationary <- is.stationary(po)) { # stationary unmarked Poisson process lambda <- intensity(X) lambdaIJ <- lambda^2 # compute cdf of distance between two uniform random points in W g <- distcdf(W) # scaling constant is (area * intensity)^2 gscale <- npoints(X)^2 } else { # compute fitted intensity at data points and in window lambdaX <- fitted(po, dataonly=TRUE) lambda <- lambdaM <- predict(po, locations=M) # lambda(x_i) * lambda(x_j) lambdaIJ <- lambdaX[I] * lambdaX[J] # compute cdf of distance between two random points in W # with density proportional to intensity function g <- distcdf(M, dW=lambdaM) # scaling constant is (integral of intensity)^2 gscale <- integral.im(lambdaM)^2 } # trim 'g' to [0, rmax] g <- g[with(g, .x) <= rmax,] # get pair correlation function (etc) for model info <- spatstatClusterModelInfo(clusters) pcfun <- info$pcf funaux <- info$funaux selfstart <- info$selfstart isPCP <- info$isPCP parhandler <- info$parhandler modelname <- info$modelname # Assemble information required for computing pair correlation pcfunargs <- list(funaux=funaux) if(is.function(parhandler)) { # Additional parameters of cluster model are required. 
# These may be given as individual arguments, # or in a list called 'covmodel' clustargs <- if("covmodel" %in% names(otherargs)) otherargs[["covmodel"]] else otherargs clargs <- do.call(parhandler, clustargs) pcfunargs <- append(clargs, pcfunargs) } else clargs <- NULL # determine starting parameter values startpar <- selfstart(X) # create local function to evaluate pair correlation # (with additional parameters 'pcfunargs' in its environment) paco <- function(d, par) { do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) } # define objective function if(!is.function(weightfun)) { # pack up necessary information objargs <- list(dIJ=dIJ, sumweight=sumweight, g=g, envir=environment(paco)) # define objective function (with 'paco' in its environment) # Note that this is 1/2 of the log composite likelihood, # minus the constant term # sum(log(lambdaIJ)) - npairs * log(gscale) obj <- function(par, objargs) { with(objargs, sum(log(paco(dIJ, par))) - sumweight * log(unlist(stieltjes(paco, g, par=par))), enclos=objargs$envir) } } else { # create local function to evaluate pair correlation(d) * weight(d) # (with additional parameters 'pcfunargs', 'weightfun' in its environment) force(weightfun) wpaco <- function(d, par) { y <- do.call(pcfun, append(list(par=par, rvals=d), pcfunargs)) w <- weightfun(d) return(y * w) } # pack up necessary information objargs <- list(dIJ=dIJ, wIJ=wIJ, sumweight=sumweight, g=g, envir=environment(wpaco)) # define objective function (with 'paco', 'wpaco' in its environment) # Note that this is 1/2 of the log composite likelihood, # minus the constant term # sum(wIJ * log(lambdaIJ)) - sumweight * log(gscale) obj <- function(par, objargs) { with(objargs, sum(wIJ * log(paco(dIJ, par))) - sumweight * log(unlist(stieltjes(wpaco, g, par=par))), enclos=objargs$envir) } } # optimize it ctrl <- resolve.defaults(list(fnscale=-1), control, list(trace=0)) opt <- optim(startpar, obj, objargs=objargs, control=ctrl) # raise warning/error if something went 
is.kppm <- function(x) {
  ## Test whether an object is a fitted cluster/Cox point process
  ## model of class "kppm".
  return(inherits(x, "kppm"))
}
plot.kppm <- function(x, ..., what=c("intensity", "statistic")) {
  ## Plot a fitted cluster/Cox point process model: the fitted
  ## intensity, the summary statistic used in minimum contrast
  ## fitting, or both.
  xname <- short.deparse(substitute(x))
  ## tiny helper: supply a default main title via 'dmain'
  showpanel <- function(x, ..., main=dmain, dmain) { plot(x, ..., main=main) }
  what <- pickoption("plot type", what,
                     c(statistic="statistic",
                       intensity="intensity"),
                     multi=TRUE)
  ## handle older objects, which stored fit details at top level
  Fit <- x$Fit
  if(is.null(Fit)) {
    warning("kppm object is in outdated format")
    Fit <- x
    Fit$method <- "mincon"
  }
  ## drop panels that make no sense for this model
  unsuitable <- ((what == "intensity") & (x$stationary)) |
                ((what == "statistic") & (Fit$method != "mincon"))
  if(any(unsuitable)) {
    what <- what[!unsuitable]
    if(length(what) == 0) return(invisible(NULL))
  }
  ## pause between panels in interactive sessions
  ask.user <- interactive() && (length(what) > 1)
  if(ask.user) oldpar <- par(ask=TRUE)
  for(w in what) {
    switch(w,
           intensity={
             showpanel(x$po, ..., dmain=c(xname, "Intensity"),
                       how="image", se=FALSE)
           },
           statistic={
             showpanel(Fit$mcfit, ..., dmain=c(xname, Fit$StatName))
           })
  }
  if(ask.user) par(oldpar)
  return(invisible(NULL))
}
} simulate.kppm <- function(object, nsim=1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, retry=10) { starttime <- proc.time() verbose <- verbose && (nsim > 1) check.1.real(retry) # .... copied from simulate.lm .... if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } # .................................. # determine window for simulation results if(!is.null(window)) { stopifnot(is.owin(window)) win <- window } else { win <- as.owin(object) } # .................................. # determine parameters mp <- as.list(object$modelpar) # parameter 'mu' # = parent intensity of cluster process # = mean log intensity of log-Gaussian Cox process if(is.null(covariates) && (object$stationary || is.null(window))) { # use existing 'mu' (scalar or image) mu <- object$mu } else { # recompute 'mu' using new data switch(object$clusters, Cauchy=, VarGamma=, Thomas=, MatClust={ # Poisson cluster process kappa <- mp$kappa lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(lambda/kappa) }, LGCP={ # log-Gaussian Cox process sigma2 <- mp$sigma2 lambda <- predict(object, window=win, covariates=covariates) mu <- eval.im(log(lambda) - sigma2/2) }, stop(paste("Simulation of", sQuote(object$clusters), "processes is not yet implemented")) ) } # prepare data for execution out <- list() switch(object$clusters, Thomas={ kappa <- mp$kappa sigma <- mp$sigma cmd <- expression(rThomas(kappa,sigma,mu,win)) }, MatClust={ kappa <- mp$kappa r <- mp$R cmd <- expression(rMatClust(kappa,r,mu,win)) }, Cauchy = { kappa <- mp$kappa omega <- mp$omega cmd <- expression(rCauchy(kappa, omega, mu, win)) }, VarGamma = { kappa <- mp$kappa omega <- mp$omega nu.ker <- object$covmodel$margs$nu.ker cmd <- 
expression(rVarGamma(kappa, nu.ker, omega, mu, win)) }, LGCP={ sigma2 <- mp$sigma2 alpha <- mp$alpha cm <- object$covmodel model <- cm$model margs <- cm$margs param <- c(0, sigma2, 0, alpha, unlist(margs)) if(!is.im(mu)) { # simulate in 'win' cmd <- expression(rLGCP(model=model, mu=mu, param=param, ..., win=win)) } else { # simulate in as.owin(mu), then change window cmd <- expression(rLGCP(model=model, mu=mu, param=param, ...)[win]) } }) # run if(verbose) cat(paste("Generating", nsim, "simulations... ")) for(i in 1:nsim) { out[[i]] <- try(eval(cmd)) if(verbose) progressreport(i, nsim) } # detect failures if(any(bad <- unlist(lapply(out, inherits, what="try-error")))) { nbad <- sum(bad) gripe <- paste(nbad, ngettext(nbad, "simulation was", "simulations were"), "unsuccessful") if(verbose) cat(paste(gripe, "\n")) if(retry <= 0) { fate <- "returned as NULL" out[bad] <- list(NULL) } else { if(verbose) cat("Retrying...") ntried <- 0 while(ntried < retry) { ntried <- ntried + 1 for(j in which(bad)) out[[j]] <- try(eval(cmd)) bad <- unlist(lapply(out, inherits, what="try-error")) nbad <- sum(bad) if(nbad == 0) break } if(verbose) cat("Done.\n") fate <- if(nbad == 0) "all recomputed" else paste(nbad, "simulations still unsuccessful") fate <- paste(fate, "after", ntried, ngettext(ntried, "further try", "further tries")) } warning(paste(gripe, fate, sep=": ")) } if(verbose) cat("Done.\n") # pack up out <- as.listof(out) names(out) <- paste("Simulation", 1:nsim) attr(out, "seed") <- RNGstate out <- timed(out, starttime=starttime) return(out) } formula.kppm <- function(x, ...) { formula(x$po, ...) } terms.kppm <- function(x, ...) { terms(x$po, ...) } labels.kppm <- function(object, ...) { labels(object$po, ...) 
} update.kppm <- function(object, trend=~1, ..., clusters=NULL) { if(!missing(trend)) trend <- update(formula(object), trend) if(is.null(clusters)) clusters <- object$clusters out <- do.call(kppm, resolve.defaults(list(trend=trend, clusters=clusters), list(...), list(X=object$X))) out$Xname <- object$Xname return(out) } unitname.kppm <- function(x) { return(unitname(x$X)) } "unitname<-.kppm" <- function(x, value) { unitname(x$X) <- value if(!is.null(x$Fit$mcfit)) { unitname(x$Fit$mcfit) <- value } else if(is.null(x$Fit)) { warning("kppm object in outdated format") if(!is.null(x$mcfit)) unitname(x$mcfit) <- value } return(x) } as.fv.kppm <- function(x) as.fv(x$Fit$mcfit) coef.kppm <- function(object, ...) { return(coef(object$po)) } Kmodel.kppm <- function(model, ...) { Kpcf.kppm(model, what="K") } pcfmodel.kppm <- function(model, ...) { Kpcf.kppm(model, what="pcf") } Kpcf.kppm <- function(model, what=c("K", "pcf")) { what <- match.arg(what) # Extract function definition from internal table clusters <- model$clusters tableentry <- spatstatClusterModelInfo(clusters) if(is.null(tableentry)) stop("No information available for", sQuote(clusters), "cluster model") fun <- tableentry[[what]] if(is.null(fun)) stop("No expression available for", what, "for", sQuote(clusters), "cluster model") # Extract model parameters par <- model$par # Extract auxiliary definitions (if applicable) funaux <- tableentry$funaux # Extract covariance model (if applicable) cm <- model$covmodel model <- cm$model margs <- cm$margs # f <- function(r) as.numeric(fun(par=par, rvals=r, funaux=funaux, model=model, margs=margs)) return(f) } is.stationary.kppm <- function(x) { return(x$stationary) } is.poisson.kppm <- function(x) { switch(x$clusters, Cauchy=, VarGamma=, Thomas=, MatClust={ # Poisson cluster process mu <- x$mu return(!is.null(mu) && (max(mu) == 0)) }, LGCP = { # log-Gaussian Cox process sigma2 <- x$par[["sigma2"]] return(sigma2 == 0) }, return(FALSE)) } # extract ppm component as.ppm.kppm 
<- function(object) { object$po } # other methods that pass through to 'ppm' as.owin.kppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) { from <- match.arg(from) as.owin(as.ppm(W), ..., from=from, fatal=fatal) } model.images.kppm <- function(object, W=as.owin(object), ...) { model.images(as.ppm(object), W=W, ...) } model.matrix.kppm <- function(object, data=model.frame(object), ..., keepNA=TRUE) { model.matrix(as.ppm(object), data=data, ..., keepNA=keepNA) } model.frame.kppm <- function(formula, ...) { model.frame(as.ppm(formula), ...) } spatstat/R/ppm.R0000755000176000001440000001114612237642727013307 0ustar ripleyusers# # $Revision: 1.32 $ $Date: 2013/07/19 03:55:36 $ # # ppm() # Fit a point process model to a two-dimensional point pattern # # "ppm" <- function(Q, trend = ~1, interaction = Poisson(), ..., covariates = NULL, covfunargs = list(), correction="border", rbord = reach(interaction), use.gam=FALSE, method = "mpl", forcefit=FALSE, project=FALSE, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL ) { Qname <- short.deparse(substitute(Q)) if(!(method %in% c("mpl", "ho", "logi"))) stop(paste("Unrecognised fitting method", sQuote(method))) cl <- match.call() if(is.null(callstring)) callstring <- paste(short.deparse(sys.call()), collapse="") if(is.ppp(Q) && is.marked(Q) && !is.multitype(Q)) stop(paste("ppm is not yet implemented for marked point patterns,", "other than multitype patterns.")) if(!(is.ppp(Q) || inherits(Q, "quad"))) stop("Argument Q must be a point pattern or a quadrature scheme") # Ensure interaction is fully defined if(is.null(interaction)) interaction <- Poisson() if(!is.null(ss <- interaction$selfstart)) { # invoke selfstart mechanism to fix all parameters X <- if(is.ppp(Q)) Q else Q$data interaction <- ss(X, interaction) } # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", 
isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # validate rbord if(correction == "border") { # rbord for border correction rbord.given <- !missing(rbord) && !is.null(rbord) if(is.null(rbord)) rbord <- reach(interaction) infin <- is.infinite(rbord) too.large <- infin || (eroded.areas(as.owin(Q), rbord) == 0) if(too.large) { whinge <- paste(if(rbord.given) "rbord" else "the reach of this interaction", if(infin) "is infinite or unknown;" else "is too large for this window;", "please specify", if(rbord.given) "a smaller value of", "rbord, or use a different edge correction") stop(whinge) } } else { # rbord must be numeric to satisfy mpl.engine if(is.null(rbord)) rbord <- 0 } if(method == "logi") { fitLOGI <- logi.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, gcontrol=gcontrol, callstring=callstring, ...) fitLOGI$Qname <- Qname fitLOGI$call <- cl fitLOGI$callstring <- callstring fitLOGI$callframe <- parent.frame() if(project && !valid.ppm(fitLOGI)) fitLOGI <- project.ppm(fitLOGI) return(fitLOGI) } # fit by maximum pseudolikelihood fitMPL <- mpl.engine(Q=Q, trend=trend, interaction=interaction, covariates=covariates, covfunargs=covfunargs, correction=correction, rbord=rbord, use.gam=use.gam, forcefit=forcefit, nd = nd, eps = eps, gcontrol=gcontrol, callstring=callstring, ...) 
fitMPL$Qname <- Qname if(!is.ppm(fitMPL)) { # internal use only - returns some other data return(fitMPL) } fitMPL$call <- cl fitMPL$callstring <- callstring fitMPL$callframe <- parent.frame() if(project && !valid.ppm(fitMPL)) fitMPL <- project.ppm(fitMPL) if(method == "mpl" || is.poisson.ppm(fitMPL)) return(fitMPL) fitHO <- ho.engine(fitMPL, nsim=nsim, nrmh=nrmh, start=start, control=control, verb=verb) if(is.null(fitHO)) return(fitMPL) if(project && !valid.ppm(fitHO)) fitHO <- project.ppm(fitHO) return(fitHO) } spatstat/R/nncross3D.R0000644000176000001440000001627412237642727014373 0ustar ripleyusers# # nncross3D.R # # $Revision: 1.6 $ $Date: 2013/11/03 03:17:02 $ # # Copyright (C) Adrian Baddeley, Jens Oehlschlaegel and Rolf Turner 2000-2013 # Licence: GNU Public Licence >= 2 nncross.pp3 <- function(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), ..., k = 1, sortby=c("range", "var", "x", "y", "z"), is.sorted.X = FALSE, is.sorted.Y = FALSE) { stopifnot(is.pp3(Y)) sortby <- match.arg(sortby) what <- match.arg(what, choices=c("dist", "which"), several.ok=TRUE) want.dist <- "dist" %in% what want.which <- "which" %in% what want.both <- want.dist && want.which if(!missing(k)) { # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } } k <- as.integer(k) kmax <- max(k) nk <- length(k) # trivial cases nX <- npoints(X) nY <- nobjects(Y) # deal with null cases if(nX == 0) return(as.data.frame(list(dist=matrix(0, nrow=0, ncol=nk), which=matrix(0L, nrow=0, ncol=nk))[what])) if(nY == 0) return(as.data.frame(list(dist=matrix(Inf, nrow=nX, ncol=nk), which=matrix(NA, nrow=nX, ncol=nk))[what])) if(is.null(iX) != is.null(iY)) stop("If one of iX, iY is given, then both must be given") exclude <- (!is.null(iX) || 
!is.null(iY)) if(exclude) { stopifnot(is.integer(iX) && is.integer(iY)) if(length(iX) != nX) stop("length of iX does not match the number of points in X") if(length(iY) != nY) stop("length of iY does not match the number of points in Y") } if((is.sorted.X || is.sorted.Y) && !(sortby %in% c("x", "y", "z"))) stop(paste("If data are already sorted,", "the sorting coordinate must be specified explicitly", "using sortby = \"x\" or \"y\" or \"z\"")) # decide which coordinate to sort on switch(sortby, range = { s <- sidelengths(as.box3(Y)) sortcoord <- c("x", "y", "z")[which.min(s)] }, var = { v <- apply(coords(Y), 2, var) sortcoord <- c("x", "y", "z")[which.min(v)] }, x={ sortcoord <- "x" }, y={ sortcoord <- "y" }, z={ sortcoord <- "z" } ) # The C code expects points to be sorted by z coordinate. XX <- coords(X) YY <- coords(Y) switch(sortcoord, x = { # rotate x axis to z axis XX <- XX[, c(3,2,1)] YY <- YY[, c(3,2,1)] }, y = { # rotate y axis to z axis XX <- XX[, c(3,1,2)] YY <- YY[, c(3,1,2)] }, z = { }) # sort only if needed if(!is.sorted.X){ oX <- fave.order(XX[,3]) XX <- XX[oX, , drop=FALSE] if(exclude) iX <- iX[oX] } if (!is.sorted.Y){ oY <- fave.order(YY[,3]) YY <- YY[oY, , drop=FALSE] if(exclude) iY <- iY[oY] } # number of neighbours that are well-defined kmaxcalc <- min(nY, kmax) if(kmaxcalc == 1) { # ............... single nearest neighbour .................. 
# call C code nndv <- if(want.dist) numeric(nX) else numeric(1) nnwh <- if(want.which) integer(nX) else integer(1) if(!exclude) iX <- iY <- integer(1) DUP <- spatstat.options("dupC") huge <- 1.1 * diameter(bounding.box3(as.box3(X),as.box3(Y))) z <- .C("nnX3Dinterface", n1=as.integer(nX), x1=as.double(XX[,1]), y1=as.double(XX[,2]), z1=as.double(XX[,3]), id1=as.integer(iX), n2=as.integer(nY), x2=as.double(YY[,1]), y2=as.double(YY[,2]), z2=as.double(YY[,3]), id2=as.integer(iY), exclude = as.integer(exclude), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd=as.double(nndv), nnwhich=as.integer(nnwh), huge=as.double(huge), DUP=DUP) if(want.which) { # conversion to R indexing is done in C code nnwcode <- z$nnwhich if(any(uhoh <- (nnwcode == 0))) { warning("Internal error: NA's produced in nncross()$which") nnwcode[uhoh] <- NA } } # reinterpret in original ordering if(is.sorted.X){ if(want.dist) nndv <- z$nnd if(want.which) nnwh <- if(is.sorted.Y) nnwcode else oY[nnwcode] } else { if(want.dist) nndv[oX] <- z$nnd if(want.which) nnwh[oX] <- if(is.sorted.Y) nnwcode else oY[nnwcode] } if(want.both) return(data.frame(dist=nndv, which=nnwh)) return(if(want.dist) nndv else nnwh) } else { # ............... k nearest neighbours .................. 
# call C code nndv <- if(want.dist) numeric(nX * kmaxcalc) else numeric(1) nnwh <- if(want.which) integer(nX * kmaxcalc) else integer(1) if(!exclude) iX <- iY <- integer(1) DUP <- spatstat.options("dupC") huge <- 1.1 * diameter(bounding.box3(as.box3(X),as.box3(Y))) z <- .C("knnX3Dinterface", n1=as.integer(nX), x1=as.double(XX[,1]), y1=as.double(XX[,2]), z1=as.double(XX[,3]), id1=as.integer(iX), n2=as.integer(nY), x2=as.double(YY[,1]), y2=as.double(YY[,2]), z2=as.double(YY[,3]), id2=as.integer(iY), kmax=as.integer(kmaxcalc), exclude = as.integer(exclude), wantdist = as.integer(want.dist), wantwhich = as.integer(want.which), nnd=as.double(nndv), nnwhich=as.integer(nnwh), huge=as.double(huge), DUP=DUP) # PACKAGE="spatstat") # extract results nnD <- z$nnd nnW <- z$nnwhich # map 0 to NA if(want.which && any(uhoh <- (nnW == 0))) { nnW[uhoh] <- NA if(want.dist) nnD[uhoh] <- Inf } # reinterpret indices in original ordering if(!is.sorted.Y) nnW <- oY[nnW] # reform as matrices NND <- if(want.dist) matrix(nnD, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0 NNW <- if(want.which) matrix(nnW, nrow=nX, ncol=kmaxcalc, byrow=TRUE) else 0 if(!is.sorted.X){ # rearrange rows to correspond to original ordering of points if(want.dist) NND[oX, ] <- NND if(want.which) NNW[oX, ] <- NNW } # the return value should correspond to the original vector k if(kmax > kmaxcalc) { # add columns of NA / Inf kextra <- kmax - kmaxcalc if(want.dist) NND <- cbind(NND, matrix(Inf, nrow=nX, ncol=kextra)) if(want.which) NNW <- cbind(NNW, matrix(NA_integer_, nrow=nX, ncol=kextra)) } if(length(k) < kmax) { # select only the specified columns if(want.dist) NND <- NND[, k, drop=TRUE] if(want.which) NNW <- NNW[, k, drop=TRUE] } result <- as.data.frame(list(dist=NND, which=NNW)[what]) if(ncol(result) == 1) result <- result[, , drop=TRUE] return(result) } } spatstat/R/localpcf.R0000755000176000001440000001434212237642727014277 0ustar ripleyusers# # localpcf.R # # $Revision: 1.18 $ $Date: 2013/04/25 06:37:43 $ # # 
localpcf <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15) { if(length(list(...)) > 0) warning("Additional arguments ignored") stopifnot(is.ppp(X)) localpcfengine(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan) } localpcfinhom <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, sigma=NULL, varcov=NULL) { stopifnot(is.ppp(X)) if(is.null(lambda)) { # No intensity data provided # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- as.numeric(lambda) } else { # validate if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.ppm(lambda)) lambda <- predict(lambda, locations=X, type="trend") else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npoints(X)) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) } localpcfengine(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan, lambda=lambda) } localpcfengine <- function(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL) { m <- localpcfmatrix(X, delta=delta, rmax=rmax, nr=nr, stoyan=stoyan, lambda=lambda) r <- attr(m, "r") delta <- attr(m, "delta") nX <- npoints(X) if(nX == 0) { df <- data.frame(r=r, theo=rep.int(1, length(r))) nama <- desc <- labl <- NULL } else { # border correction dbord <- bdist.points(X) m[r[row(m)] > dbord[col(m)]] <- NA # df <- data.frame(m, r=r, theo=rep.int(1, length(r))) icode <- unlist(lapply(seq_len(nX), numalign, nmax=nX)) nama <- paste("est", icode, sep="") desc <- paste("estimate of %s for point", icode) labl <- paste("%s[", icode, "](r)", sep="") } names(df) <- c(nama, "r", "theo") desc <- c(desc, "distance argument r", "theoretical Poisson %s") labl <- c(labl, "r", "%s[pois](r)") # create fv object g <- fv(df, "r", quote(localg(r)), "theo", , c(0, max(r)), labl, desc, fname="localg") # default is to display them all 
formula(g) <- . ~ r fvnames(g, ".") <- names(df)[names(df) != "r"] unitname(g) <- unitname(X) attr(g, "delta") <- delta attr(g, "correction") <- "border" return(g) } localpcfmatrix <- function(X, i=seq_len(npoints(X)), ..., lambda = NULL, delta=NULL, rmax=NULL, nr=512, stoyan=0.15) { missi <- missing(i) weighted <- !is.null(lambda) nX <- npoints(X) nY <- if(missi) nX else length(seq_len(nX)[i]) W <- as.owin(X) lambda.ave <- nX/area.owin(W) if(is.null(delta)) delta <- stoyan/sqrt(lambda.ave) if(is.null(rmax)) rmax <- rmax.rule("K", W, lambda.ave) # if(nX == 0 || nY == 0) { out <- matrix(0, nr, 0) } else { # sort points in increasing order of x coordinate oX <- fave.order(X$x) Xsort <- X[oX] idXsort <- (1:nX)[oX] if(weighted) { lambdaXsort <- lambda[oX] weightXsort <- 1/lambdaXsort } if(missi) { Y <- X oY <- oX Ysort <- Xsort idYsort <- idXsort } else { # i is some kind of index Y <- X[i] idY <- (1:nX)[i] oY <- fave.order(Y$x) Ysort <- Y[oY] idYsort <- idY[oY] } nY <- npoints(Y) force(nr) # call C DUP <- spatstat.options("dupC") if(!weighted) { zz <- .C("locpcfx", nn1 = as.integer(nY), x1 = as.double(Ysort$x), y1 = as.double(Ysort$y), id1 = as.integer(idYsort), nn2 = as.integer(nX), x2 = as.double(Xsort$x), y2 = as.double(Xsort$y), id2 = as.integer(idXsort), nnr = as.integer(nr), rmaxi=as.double(rmax), del=as.double(delta), pcf=as.double(double(nr * nY)), DUP=DUP) # PACKAGE="spatstat") } else { zz <- .C("locWpcfx", nn1 = as.integer(nY), x1 = as.double(Ysort$x), y1 = as.double(Ysort$y), id1 = as.integer(idYsort), nn2 = as.integer(nX), x2 = as.double(Xsort$x), y2 = as.double(Xsort$y), id2 = as.integer(idXsort), w2 = as.double(weightXsort), nnr = as.integer(nr), rmaxi=as.double(rmax), del=as.double(delta), pcf=as.double(double(nr * nY)), DUP=DUP) # PACKAGE="spatstat") } out <- matrix(zz$pcf, nr, nY) # reorder columns to match original out[, oY] <- out # rescale out <- out/(2 * pi * if(!weighted) lambda.ave else 1) } # dress up attr(out, "r") <- seq(from=0, to=rmax, 
length.out=nr) attr(out, "delta") <- delta class(out) <- c("localpcfmatrix", class(out)) return(out) } print.localpcfmatrix <- function(x, ...) { cat("Matrix of local pair correlation estimates\n") nc <- ncol(x) nr <- nrow(x) cat(paste("pcf estimates for", nc, ngettext(nc, "point", "points"), "\n")) rval <- attr(x, "r") cat(paste("r values from 0 to", max(rval), "in", nrow(x), "steps\n")) return(invisible(NULL)) } plot.localpcfmatrix <- function(x, ...) { xname <- short.deparse(substitute(x)) rval <- attr(x, "r") do.call("matplot", resolve.defaults(list(rval, x), list(...), list(type="l", main=xname, xlab="r", ylab="pair correlation"))) } "[.localpcfmatrix" <- function(x, i, ...) { r <- attr(x, "r") delta <- attr(x, "delta") class(x) <- "matrix" if(missing(i)) { x <- x[ , ...] } else { x <- x[i, ...] if(is.matrix(i)) return(x) r <- r[i] } if(!is.matrix(x)) x <- matrix(x, nrow=length(r)) attr(x, "r") <- r attr(x, "delta") <- delta class(x) <- c("localpcfmatrix", class(x)) return(x) } spatstat/R/rmh.default.R0000755000176000001440000007224312237642727014731 0ustar ripleyusers# # $Id: rmh.default.R,v 1.97 2013/04/25 06:37:43 adrian Exp adrian $ # rmh.default <- function(model,start=NULL, control=default.rmhcontrol(model), ..., verbose=TRUE, snoop=FALSE) { # # Function rmh. To simulate realizations of 2-dimensional point # patterns, given the conditional intensity function of the # underlying process, via the Metropolis-Hastings algorithm. # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # V A L I D A T E # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== if(verbose) cat("Checking arguments..") # validate arguments and fill in the defaults model <- rmhmodel(model) start <- rmhstart(start) if(is.null(control)) { control <- default.rmhcontrol(model) } else { control <- rmhcontrol(control) } # override if(length(list(...)) > 0) control <- update(control, ...) 
control <- rmhResolveControl(control, model) # retain "..." arguments unrecognised by rmhcontrol # These are assumed to be arguments of functions defining the trend argh <- list(...) known <- names(argh) %in% names(formals(rmhcontrol.default)) f.args <- argh[!known] #### Multitype models # Decide whether the model is multitype; if so, find the types. types <- rmhResolveTypes(model, start, control) ntypes <- length(types) mtype <- (ntypes > 1) # If the model is multitype, check that the model parameters agree with types # and digest them if(mtype && !is.null(model$check)) { model <- rmhmodel(model, types=types) } else { model$types <- types } ######## Check for illegal combinations of model, start and control ######## # No expansion can be done if we are using x.start if(start$given == "x") { if(control$expand$force.exp) stop("Cannot expand window when using x.start.\n", call.=FALSE) control$expand <- .no.expansion } # Warn about a silly value of fixall: if(control$fixall & ntypes==1) { warning("control$fixall applies only to multitype processes. Ignored. 
\n") control$fixall <- FALSE if(control$fixing == "n.each.type") control$fixing <- "n.total" } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # M O D E L P A R A M E T E R S # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ####### Determine windows ################################ if(verbose) cat("determining simulation windows...") # these may be NULL w.model <- model$w x.start <- start$x.start trend <- model$trend trendy <- !is.null(trend) singletrend <- trendy && (is.im(trend) || is.function(trend) || (is.numeric(trend) && length(trend) == 1)) trendlist <- if(singletrend) list(trend) else trend # window implied by trend image, if any w.trend <- if(is.im(trend)) as.owin(trend) else if(is.list(trend) && any(ok <- unlist(lapply(trend, is.im)))) as.owin((trend[ok])[[1]]) else NULL ## Clipping window (for final result) w.clip <- if(!is.null(model$w)) model$w else if(!will.expand(control$expand)) { if(start$given == "x" && is.ppp(x.start)) x.start$window else if(is.owin(w.trend)) w.trend } else NULL if(!is.owin(w.clip)) stop("Unable to determine window for pattern") ## Simulation window xpn <- rmhResolveExpansion(w.clip, control, trendlist, "trend") w.sim <- xpn$wsim expanded <- xpn$expanded ## Check the fine print if(expanded) { if(control$fixing != "none") stop(paste("If we're conditioning on the number of points,", "we cannot clip the result to another window.\n")) if(!is.subset.owin(w.clip, w.sim)) stop("Expanded simulation window does not contain model window") } ####### Trend ################################ # Check that the expanded window fits inside the window # upon which the trend(s) live if there are trends and # if any trend is given by an image. 
if(expanded && !is.null(trend)) { trends <- if(is.im(trend)) list(trend) else trend images <- unlist(lapply(trends, is.im)) if(any(images)) { iwindows <- lapply(trends[images], as.owin) nimages <- length(iwindows) misfit <- !unlist(lapply(iwindows, function(x,w) { is.subset.owin(w,x) }, w = w.sim)) nmisfit <- sum(misfit) if(nmisfit > 1) stop(paste("Expanded simulation window is not contained in", "several of the trend windows.\n", "Bailing out.\n")) else if(nmisfit == 1) { warning(paste("Expanded simulation window is not contained in", if(nimages == 1) "the trend window.\n" else "one of the trend windows.\n", "Expanding to this trend window (only).\n")) w.sim <- iwindows[[which(misfit)]] } } } # Extract the 'beta' parameters if(length(model$cif) == 1) { # single interaction beta <- model$C.beta betalist <- list(beta) } else { # hybrid betalist <- model$C.betalist # multiply beta vectors for each component beta <- Reduce("*", betalist) } ##### .................. CONDITIONAL SIMULATION ................... 
##### #|| Determine windows for conditional simulation #|| #|| w.state = window for the full configuration #|| #|| w.sim = window for the 'free' (random) points #|| w.state <- w.sim condtype <- control$condtype x.cond <- control$x.cond n.cond <- control$n.cond switch(condtype, none={ w.cond <- NULL }, window={ # conditioning on the realisation inside a subwindow w.cond <- as.owin(x.cond) # subtract from w.sim w.sim <- setminus.owin(w.state, w.cond) if(is.empty(w.sim)) stop(paste("Conditional simulation is undefined;", "the conditioning window", sQuote("as.owin(control$x.cond)"), "covers the entire simulation window")) }, Palm={ # Palm conditioning w.cond <- NULL }) ##### #|| Convert conditioning points to appropriate format x.condpp <- switch(condtype, none=NULL, window=x.cond, Palm=as.ppp(x.cond, w.state)) # validate if(!is.null(x.condpp)) { if(mtype) { if(!is.marked(x.condpp)) stop("Model is multitype, but x.cond is unmarked") if(!identical(all.equal(types, levels(marks(x.condpp))), TRUE)) stop("Types of points in x.cond do not match types in model") } } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # S T A R T I N G S T A T E # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ###################### Starting state data ############################ # There must now be a starting state. if(start$given == "none") { # For conditional simulation, the starting state must be given if(condtype != "none") stop("No starting state given") # Determine integral of beta * trend over data window. # This is the expected number of points in the reference Poisson process. 
area.w.clip <- area.owin(w.clip) if(trendy) { tsummaries <- summarise.trend(trend, w=w.clip, a=area.w.clip) En <- beta * unlist(lapply(tsummaries, function(x) { x$integral })) } else { En <- beta * area.w.clip } # Fix n.start equal to this integral n.start <- if(spatstat.options("scalable")) round(En) else ceiling(En) start <- rmhstart(n.start=n.start) } # In the case of conditional simulation, the start data determine # the 'free' points (i.e. excluding x.cond) in the initial state. switch(start$given, none={ stop("No starting state given") }, x = { # x.start was given # coerce it to a ppp object if(!is.ppp(x.start)) x.start <- as.ppp(x.start, w.state) if(condtype == "window") { # clip to simulation window xs <- x.start[w.sim] nlost <- x.start$n - xs$n if(nlost > 0) warning(paste(nlost, ngettext(nlost, "point","points"), "of x.start", ngettext(nlost, "was", "were"), "removed because", ngettext(nlost, "it", "they"), "fell in the window of x.cond")) x.start <- xs } npts.free <- x.start$n }, n = { # n.start was given n.start <- start$n.start # Adjust the number of points in the starting state in accordance # with the expansion that has occurred. if(expanded) { holnum <- if(spatstat.options("scalable")) round else ceiling n.start <- holnum(n.start * area.owin(w.sim)/area.owin(w.clip)) } # npts.free <- sum(n.start) # The ``sum()'' is redundant if n.start # is scalar; no harm, but. }, stop("Internal error: start$given unrecognized")) #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # C O N T R O L P A R A M E T E R S # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ################### Periodic boundary conditions ######################### periodic <- control$periodic if(is.null(periodic)) { # undecided. Use default rule control$periodic <- periodic <- expanded && is.rectangle(w.state) } else if(periodic && !is.rectangle(w.state)) { # if periodic is TRUE we have to be simulating in a rectangular window. 
stop("Need rectangular window for periodic simulation.\n") } # parameter passed to C: period <- if(periodic) c(diff(w.state$xrange), diff(w.state$yrange)) else c(-1,-1) #### vector of proposal probabilities if(!mtype) ptypes <- 1 else { ptypes <- control$ptypes if(is.null(ptypes)) { # default proposal probabilities ptypes <- if(start$given == "x" && (nx <- npoints(x.start)) > 0) { table(marks(x.start, dfok=FALSE))/nx } else rep.int(1/ntypes, ntypes) } else { # Validate ptypes if(length(ptypes) != ntypes | sum(ptypes) != 1) stop("Argument ptypes is mis-specified.\n") } } ######################################################################## # Normalising constant for proposal density # # Integral of trend over the expanded window (or area of window): # Iota == Integral Of Trend (or) Area. area.w.sim <- area.owin(w.sim) if(trendy) { if(verbose) cat("Evaluating trend integral...") tsummaries <- summarise.trend(trend, w=w.sim, a=area.w.sim) nbg <- unlist(lapply(tsummaries, function(x) { x$min < 0 })) if(any(nbg)) stop("Trend has negative values") iota <- unlist(lapply(tsummaries, function(x) { x$integral })) tmax <- unlist(lapply(tsummaries, function(x) { x$max })) } else { iota <- area.w.sim tmax <- NULL } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # A.S. 
EMPTY PROCESS # # for conditional simulation, 'empty' means there are no 'free' points # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== a.s.empty <- FALSE # # Empty pattern, simulated conditional on n # if(npts.free == 0 && control$fixing != "none") { a.s.empty <- TRUE if(verbose) { mess <- paste("Initial pattern has 0 random points,", "and simulation is conditional on the number of points -") if(condtype == "none") warning(paste(mess, "returning an empty pattern\n")) else warning(paste(mess, "returning a pattern with no random points\n")) } } # # If beta = 0, the process is almost surely empty # if(all(beta < .Machine$double.eps)) { if(control$fixing == "none" && condtype == "none") { # return empty pattern if(verbose) warning("beta = 0 implies an empty pattern\n") a.s.empty <- TRUE } else stop("beta = 0 implies an empty pattern, but we are simulating conditional on a nonzero number of points") } # # If we're conditioning on the contents of a subwindow, # and the subwindow covers the clipping region, # the result is deterministic. 
if(condtype == "window" && is.subset.owin(w.clip, w.cond)) { a.s.empty <- TRUE warning(paste("Model window is a subset of conditioning window:", "result is deterministic\n")) } # # if(a.s.empty) { # create empty pattern, to be returned if(!is.null(x.condpp)) empty <- x.condpp[w.clip] else { empty <- ppp(numeric(0), numeric(0), window=w.clip) if(mtype) { vide <- factor(types[integer(0)], levels=types) empty <- empty %mark% vide } } } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # PACK UP # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ######### Store decisions Model <- model Start <- start Control <- control Model$w <- w.clip Model$types <- types Control$expand <- if(expanded) rmhexpand(w.state) else .no.expansion Control$internal <- list(w.sim=w.sim, w.state=w.state, x.condpp=x.condpp, ptypes=ptypes, period=period) Model$internal <- list(a.s.empty=a.s.empty, empty=if(a.s.empty) empty else NULL, mtype=mtype, trendy=trendy, betalist=betalist, beta=beta, iota=iota, tmax=tmax) Start$internal <- list(npts.free=npts.free) InfoList <- list(model=Model, start=Start, control=Control) class(InfoList) <- c("rmhInfoList", class(InfoList)) # go do.call("rmhEngine", append(list(InfoList, verbose=verbose, snoop=snoop, kitchensink=TRUE), f.args)) } print.rmhInfoList <- function(x, ...) { cat("\nPre-digested Metropolis-Hastings algorithm parameters (rmhInfoList)\n") print(as.listof(x)) } #--------------- rmhEngine ------------------------------------------- # # This is the interface to the C code. # # InfoList is a list of pre-digested, validated arguments # obtained from rmh.default. # # This function is called by rmh.default to generate one simulated # realisation of the model. # It's called repeatedly by ho.engine and qqplot.ppm to generate multiple # realisations (saving time by not repeating the argument checking # in rmh.default). 
# arguments: # kitchensink: whether to tack InfoList on to the return value as an attribute # preponly: whether to just return InfoList without simulating # # rmh.default digests arguments and calls rmhEngine with kitchensink=T # # qqplot.ppm first gets InfoList by calling rmh.default with preponly=T # (which digests the model arguments and calls rmhEngine # with preponly=T, returning InfoList), # then repeatedly calls rmhEngine(InfoList) to simulate. # # ------------------------------------------------------- rmhEngine <- function(InfoList, ..., verbose=FALSE, kitchensink=FALSE, preponly=FALSE, snoop=FALSE) { # Internal Use Only! # This is the interface to the C code. if(!inherits(InfoList, "rmhInfoList")) stop("data not in correct format for internal function rmhEngine") if(preponly) return(InfoList) model <- InfoList$model start <- InfoList$start control <- InfoList$control w.sim <- control$internal$w.sim w.state <- control$internal$w.state w.clip <- model$w condtype <- control$condtype x.condpp <- control$internal$x.condpp types <- model$types ntypes <- length(types) ptypes <- control$internal$ptypes period <- control$internal$period mtype <- model$internal$mtype trend <- model$trend trendy <- model$internal$trendy betalist <- model$internal$betalist beta <- model$internal$beta iota <- model$internal$iota tmax <- model$internal$tmax npts.free <- start$internal$npts.free n.start <- start$n.start x.start <- start$x.start #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # E M P T Y P A T T E R N # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== if(model$internal$a.s.empty) { if(verbose) cat("\n") empty <- model$internal$empty attr(empty, "info") <- InfoList return(empty) } #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== # # S I M U L A T I O N # #==+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+===+=== ############################################# #### #### 
Random number seed: initialisation & capture #### ############################################# if(!exists(".Random.seed")) runif(1) saved.seed <- .Random.seed ############################################# #### #### Poisson case #### ############################################# if(is.poisson.rmhmodel(model)) { if(verbose) cat("\n") intensity <- if(!trendy) beta else model$trend Xsim <- switch(control$fixing, none= { # Poisson process if(!mtype) rpoispp(intensity, win=w.sim, ...) else rmpoispp(intensity, win=w.sim, types=types) }, n.total = { # Binomial/multinomial process with fixed total number of points if(!mtype) rpoint(npts.free, intensity, win=w.sim, verbose=verbose) else rmpoint(npts.free, intensity, win=w.sim, types=types, verbose=verbose) }, n.each.type = { # Multinomial process with fixed number of points of each type npts.each <- switch(start$given, n = n.start, x = as.integer(table(marks(x.start, dfok=FALSE))), stop("No starting state given; can't condition on fixed number of points")) rmpoint(npts.each, intensity, win=w.sim, types=types, verbose=verbose) }, stop("Internal error: control$fixing unrecognised") ) # if conditioning, add fixed points if(condtype != "none") Xsim <- superimpose(Xsim, x.condpp, W=w.state) # clip result to output window Xclip <- Xsim[w.clip] attr(Xclip, "info") <- InfoList return(Xclip) } ######################################################################## # M e t r o p o l i s H a s t i n g s s i m u l a t i o n ######################################################################## if(verbose) cat("Starting simulation.\nInitial state...") #### Build starting state npts.cond <- if(condtype != "none") x.condpp$n else 0 npts.total <- npts.free + npts.cond #### FIRST generate the 'free' points #### First the marks, if any. 
#### The marks must be integers 0 to (ntypes-1) for passing to C Ctypes <- if(mtype) 0:(ntypes-1) else 0 Cmarks <- if(!mtype) 0 else switch(start$given, n = { # n.start given if(control$fixing=="n.each.type") rep.int(Ctypes,n.start) else sample(Ctypes,npts.free,TRUE,ptypes) }, x = { # x.start given as.integer(marks(x.start, dfok=FALSE))-1 }, stop("internal error: start$given unrecognised") ) # # Then the x, y coordinates # switch(start$given, x = { x <- x.start$x y <- x.start$y }, n = { xy <- if(!trendy) runifpoint(npts.free, w.sim, ...) else rpoint.multi(npts.free, trend, tmax, factor(Cmarks,levels=Ctypes), w.sim, ...) x <- xy$x y <- xy$y }) ## APPEND the free points AFTER the conditioning points if(condtype != "none") { x <- c(x.condpp$x, x) y <- c(x.condpp$y, y) if(mtype) Cmarks <- c(as.integer(marks(x.condpp))-1, Cmarks) } # decide whether to activate visual debugger if(snoop) { Xinit <- ppp(x, y, window=w.sim) if(mtype) marks(Xinit) <- Cmarks + 1 if(verbose) cat("\nCreating debugger environment..") snoopenv <- rmhSnoopEnv(Xinit=Xinit, Wclip=w.clip, R=reach(model)) if(verbose) cat("Done.\n") } else snoopenv <- "none" ####################################################################### # Set up C call ###################################################################### # Determine the name of the cif used in the C code C.id <- model$C.id ncif <- length(C.id) # Get the parameters in C-ese ipar <- model$C.ipar iparlist <- if(ncif == 1) list(ipar) else model$C.iparlist iparlen <- unlist(lapply(iparlist, length)) beta <- model$internal$beta # Absorb the constants or vectors `iota' and 'ptypes' into the beta parameters beta <- (iota/ptypes) * beta # Algorithm control parameters p <- control$p q <- control$q nrep <- control$nrep fixcode <- control$fixcode fixing <- control$fixing fixall <- control$fixall nverb <- control$nverb saving <- control$saving nsave <- control$nsave nburn <- control$nburn track <- control$track if(verbose) cat("Proposal points...") # If 
the pattern is multitype, generate the mark proposals (0 to ntypes-1) Cmprop <- if(mtype) sample(Ctypes,nrep,TRUE,prob=ptypes) else 0 # Generate the ``proposal points'' in the expanded window. xy <- if(trendy) rpoint.multi(nrep,trend,tmax, factor(Cmprop, levels=Ctypes), w.sim, ..., warn=FALSE) else runifpoint(nrep, w.sim, warn=FALSE) xprop <- xy$x yprop <- xy$y if(verbose) cat("Start simulation.\n") storage.mode(ncif) <- "integer" storage.mode(C.id) <- "character" storage.mode(beta) <- "double" storage.mode(ipar) <- "double" storage.mode(iparlen) <- "integer" storage.mode(period) <- "double" storage.mode(xprop) <- storage.mode(yprop) <- "double" storage.mode(Cmprop) <- "integer" storage.mode(ntypes) <- "integer" storage.mode(nrep) <- "integer" storage.mode(p) <- storage.mode(q) <- "double" storage.mode(nverb) <- "integer" storage.mode(x) <- storage.mode(y) <- "double" storage.mode(Cmarks) <- "integer" storage.mode(fixall) <- "integer" storage.mode(npts.cond) <- "integer" storage.mode(track) <- "integer" if(!saving) { # ////////// Single block ///////////////////////////////// nrep0 <- 0 storage.mode(nrep0) <- "integer" # Call the Metropolis-Hastings C code: out <- .Call("xmethas", ncif, C.id, beta, ipar, iparlen, period, xprop, yprop, Cmprop, ntypes, nrep, p, q, nverb, nrep0, x, y, Cmarks, npts.cond, fixall, track, snoopenv) # PACKAGE="spatstat") # Extract the point pattern returned from C X <- ppp(x=out[[1]], y=out[[2]], window=w.state, check=FALSE) if(mtype) { # convert integer marks from C to R marx <- factor(out[[3]], levels=0:(ntypes-1)) # then restore original type levels levels(marx) <- types # glue to points marks(X) <- marx } # Now clip the pattern to the ``clipping'' window: if(!control$expand$force.noexp) X <- X[w.clip] # Extract transition history: if(track) { usedout <- if(mtype) 3 else 2 proptype <- factor(out[[usedout+1]], levels=1:3, labels=c("Birth", "Death", "Shift")) accepted <- as.logical(out[[usedout+2]]) History <- 
data.frame(proposaltype=proptype, accepted=accepted) if(length(out) >= usedout + 4) { # history includes numerator & denominator of Hastings ratio numerator <- as.double(out[[usedout + 3]]) denominator <- as.double(out[[usedout + 4]]) History <- cbind(History, data.frame(numerator=numerator, denominator=denominator)) } } } else { # ////////// Multiple blocks ///////////////////////////////// # determine length of each block of simulations nblocks <- as.integer(1 + ceiling((nrep - nburn)/nsave)) block <- c(nburn, rep.int(nsave, nblocks-1)) block[nblocks] <- block[nblocks] - (sum(block)-nrep) block <- block[block >= 1] nblocks <- length(block) blockend <- cumsum(block) # set up list to contain the saved point patterns Xlist <- vector(mode="list", length=nblocks) # Call the Metropolis-Hastings C code repeatedly: xprev <- x yprev <- y Cmarksprev <- Cmarks # # ................ loop ......................... for(I in 1:nblocks) { # number of iterations for this block nrepI <- block[I] storage.mode(nrepI) <- "integer" # number of previous iterations nrep0 <- if(I == 1) 0 else blockend[I-1] storage.mode(nrep0) <- "integer" # proposals seqI <- 1:nrepI xpropI <- xprop[seqI] ypropI <- yprop[seqI] CmpropI <- Cmprop[seqI] storage.mode(xpropI) <- storage.mode(ypropI) <- "double" storage.mode(CmpropI) <- "integer" # call out <- .Call("xmethas", ncif, C.id, beta, ipar, iparlen, period, xpropI, ypropI, CmpropI, ntypes, nrepI, p, q, nverb, nrep0, xprev, yprev, Cmarksprev, npts.cond, fixall, track, snoopenv) # PACKAGE="spatstat") # Extract the point pattern returned from C X <- ppp(x=out[[1]], y=out[[2]], window=w.state, check=FALSE) if(mtype) { # convert integer marks from C to R marx <- factor(out[[3]], levels=0:(ntypes-1)) # then restore original type levels levels(marx) <- types # glue to points marks(X) <- marx } # Now clip the pattern to the ``clipping'' window: if(!control$expand$force.noexp) X <- X[w.clip] # commit to list Xlist[[I]] <- X # Extract transition history: 
if(track) { usedout <- if(mtype) 3 else 2 proptype <- factor(out[[usedout+1]], levels=1:3, labels=c("Birth", "Death", "Shift")) accepted <- as.logical(out[[usedout+2]]) HistoryI <- data.frame(proposaltype=proptype, accepted=accepted) if(length(out) >= usedout + 4) { # history includes numerator & denominator of Hastings ratio numerator <- as.double(out[[usedout + 3]]) denominator <- as.double(out[[usedout + 4]]) HistoryI <- cbind(HistoryI, data.frame(numerator=numerator, denominator=denominator)) } # concatenate with histories of previous blocks History <- if(I == 1) HistoryI else rbind(History, HistoryI) } # update 'previous state' xprev <- out[[1]] yprev <- out[[2]] Cmarksprev <- if(!mtype) 0 else out[[3]] storage.mode(xprev) <- storage.mode(yprev) <- "double" storage.mode(Cmarksprev) <- "integer" # discard used proposals xprop <- xprop[-seqI] yprop <- yprop[-seqI] Cmprop <- Cmprop[-seqI] } # .............. end loop ............................... # Result of simulation is final state 'X' # Tack on the list of intermediate states names(Xlist) <- paste("Iteration", as.integer(blockend), sep="_") attr(X, "saved") <- as.listof(Xlist) } # Append to the result information about how it was generated. 
if(kitchensink) { attr(X, "info") <- InfoList attr(X, "seed") <- saved.seed } if(track) attr(X, "history") <- History return(X) } # helper function summarise.trend <- local({ # main function summarise.trend <- function(trend, w, a=area.owin(w)) { tlist <- if(is.function(trend) || is.im(trend)) list(trend) else trend return(lapply(tlist, summarise1, w=w, a=a)) } # summarise1 <- function(x, w, a) { if(is.numeric(x)) { mini <- maxi <- x integ <- a*x } else { Z <- as.im(x, w)[w, drop=FALSE] ran <- range(Z) mini <- ran[1] maxi <- ran[2] integ <- integral.im(Z) } return(list(min=mini, max=maxi, integral=integ)) } summarise.trend }) spatstat/R/sharpen.R0000755000176000001440000000406212237642727014152 0ustar ripleyusers# # sharpen.R # # $Revision: 1.6 $ $Date: 2013/08/29 03:52:17 $ # sharpen <- function(X, ...) { UseMethod("sharpen") } sharpen.ppp <- function(X, sigma=NULL, ..., varcov=NULL, edgecorrect=FALSE) { stopifnot(is.ppp(X)) Yx <- Smooth(X %mark% X$x, at="points", sigma=sigma, varcov=varcov, edge=TRUE) Yy <- Smooth(X %mark% X$y, at="points", sigma=sigma, varcov=varcov, edge=TRUE) # trap NaN etc nbad <- sum(!(is.finite(Yx) & is.finite(Yy))) if(nbad > 0) stop(paste(nbad, ngettext(nbad, "point is", "points are"), "undefined due to numerical problems;", "smoothing parameter is probably too small")) # W <- as.owin(X) if(edgecorrect) { # convolve x and y coordinate functions with kernel xim <- as.im(function(x,y){x}, W) yim <- as.im(function(x,y){y}, W) xblur <- blur(xim, sigma=sigma, varcov=varcov, normalise=TRUE, ...) yblur <- blur(yim, sigma=sigma, varcov=varcov, normalise=TRUE, ...) 
# evaluate at data locations xx <- safelookup(xblur, X, warn=FALSE) yy <- safelookup(yblur, X, warn=FALSE) # estimated vector bias of sharpening procedure xbias <- xx - X$x ybias <- yy - X$y # adjust Yx <- Yx - xbias Yy <- Yy - ybias # check this does not place points outside window if(any(uhoh <- !inside.owin(Yx, Yy, W))) { # determine mass of edge effect edgeim <- blur(as.im(W), sigma=sigma, varcov=varcov, normalise=FALSE, ...) edg <- safelookup(edgeim, X[uhoh], warn=FALSE) # contract bias correction Yx[uhoh] <- (1 - edg) * X$x[uhoh] + edg * Yx[uhoh] Yy[uhoh] <- (1 - edg) * X$y[uhoh] + edg * Yy[uhoh] } # check again if(any(nbg <- !inside.owin(Yx, Yy, W))) { # give up Yx[nbg] <- X$x[nbg] Yy[nbg] <- X$y[nbg] } } # make point pattern Y <- ppp(Yx, Yy, marks=marks(X), window=W) # tack on smoothing information attr(Y, "sigma") <- sigma attr(Y, "varcov") <- varcov attr(Y, "edgecorrected") <- edgecorrect return(Y) } spatstat/R/predict.ppm.R0000755000176000001440000004071012240433535014724 0ustar ripleyusers# # predict.ppm.S # # $Revision: 1.77 $ $Date: 2013/11/12 14:11:10 $ # # predict.ppm() # From fitted model obtained by ppm(), # evaluate the fitted trend or conditional intensity # at a grid/list of other locations # # # ------------------------------------------------------------------- predict.ppm <- local({ # # extract undocumented arguments and trap others # xtract <- function(..., newdata=NULL, sumobj=NULL, E=NULL) { if(!is.null(newdata)) warning(paste("The use of the argument", sQuote("newdata"), "is out-of-date. 
See help(predict.ppm)")) trap.extra.arguments(..., .Context="In predict.ppm") return(list(sumobj=sumobj, E=E)) } predict.ppm <- function(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type="trend", X=data.ppm(object), correction, ..., new.coef=NULL, check=TRUE, repair=TRUE) { # # options for `type' type <- pickoption("type", type, c(trend="trend", cif="cif", lambda="cif", se="se", SE="se")) # extract undocumented arguments xarg <- xtract(...) sumobj <- xarg$sumobj E <- xarg$E # # # 'object' is the output of ppm() # model <- object verifyclass(model, "ppm") # if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } if(missing(correction) || is.null(correction)) correction <- object$correction fitcoef <- coef(object) if(!is.null(new.coef)) { # validate coefs if(length(new.coef) != length(fitcoef)) stop(paste("Argument new.coef has wrong length", length(new.coef), ": should be", length(fitcoef))) coeffs <- new.coef } else { coeffs <- fitcoef } # # find out what kind of model it is # if(is.null(sumobj)) sumobj <- summary(model, quick="entries") # undocumented hack! 
stationary <- sumobj$stationary poisson <- sumobj$poisson marked <- sumobj$marked multitype <- sumobj$multitype notrend <- sumobj$no.trend changedcoef <- sumobj$changedcoef || !is.null(new.coef) trivial <- poisson && notrend need.covariates <- sumobj$has.covars if(sumobj$antiquated) warning("The model was fitted by an out-of-date version of spatstat") # # determine mark space # if(marked) { if(!multitype) stop("Prediction not yet implemented for general marked point processes") else types <- levels(marks(sumobj$entries$data)) } # # # Standard error only available for Poisson models # if(type == "se" && !poisson) stop(paste("Standard error calculation", "is only available for Poisson models"), call.=FALSE) # # determine what kind of output is required: # (arguments present) (output) # window, ngrid -> image # locations (mask) -> image # locations (rectangle) -> treat locations as 'window' # locations (polygonal) -> treat locations as 'window' # locations (other) -> data frame # if(is.null(window) && is.owin(locations) && !is.mask(locations)) { window <- locations locations <- NULL } if(!is.null(ngrid) && !is.null(locations)) stop(paste("Only one of", sQuote("ngrid"), "and", sQuote("locations"), "should be specified")) if(is.null(ngrid) && is.null(locations)) # use regular grid ngrid <- rev(spatstat.options("npixel")) want.image <- is.null(locations) || is.mask(locations) make.grid <- !is.null(ngrid) # # ################ Determine prediction points ##################### # if(!want.image) { # (A) list of (x,y) coordinates given by `locations' xpredict <- locations$x ypredict <- locations$y if(is.null(xpredict) || is.null(ypredict)) { xy <- xy.coords(locations) xpredict <- xy$x xpredict <- xy$y } if(is.null(xpredict) || is.null(ypredict)) stop(paste("Don't know how to extract x,y coordinates from", sQuote("locations"))) # marks if required if(marked) { # extract marks from data frame `locations' mpredict <- locations$marks if(is.null(mpredict)) stop(paste("The 
argument", sQuote("locations"), "does not contain a column of marks", "(required since the fitted model", "is a marked point process)")) if(is.factor(mpredict)) { # verify mark levels match those in model if(!identical(all.equal(levels(mpredict), types), TRUE)) { if(all(levels(mpredict) %in% types)) mpredict <- factor(mpredict, levels=types) else stop(paste("The marks in", sQuote("locations"), "do not have the same levels as", "the marks in the model")) } } else { # coerce to factor if possible if(all(mpredict %in% types)) mpredict <- factor(mpredict, levels=types) else stop(paste("The marks in", sQuote("locations"), "do not have the same values as the marks in the model")) } } } else { # (B) pixel grid of points # if(!make.grid) # (B)(i) The grid is given in `locations' masque <- locations else { # (B)(ii) We have to make the grid ourselves # Validate ngrid # if(!is.null(ngrid)) { if(!is.numeric(ngrid)) stop("ngrid should be a numeric vector") nn <- length(ngrid) if(nn < 1 || nn > 2) stop("ngrid should be a vector of length 1 or 2") if(nn == 1) ngrid <- rep.int(ngrid,2) } if(is.null(window)) window <- sumobj$entries$data$window masque <- as.mask(window, dimyx=ngrid) } # Hack ----------------------------------------------- # gam with lo() will not allow extrapolation beyond the range of x,y # values actually used for the fit. Check this: tums <- termsinformula(model$trend) if(any( tums == "lo(x)" | tums == "lo(y)" | tums == "lo(x,y)" | tums == "lo(y,x)") ) { # determine range of x,y used for fit gg <- model$internal$glmdata gxr <- range(gg$x[gg$SUBSET]) gyr <- range(gg$y[gg$SUBSET]) # trim window to this range masque <- intersect.owin(masque, owin(gxr, gyr)) } # ------------------------------------ End Hack # # Finally, determine x and y vectors for grid xx <- raster.x(masque) yy <- raster.y(masque) xpredict <- xx[masque$m] ypredict <- yy[masque$m] } # ################## CREATE DATA FRAME ########################## # ... 
to be passed to predict.glm() # # First the x, y coordinates if(!marked) newdata <- data.frame(x=xpredict, y=ypredict) else if(!want.image) newdata <- data.frame(x=xpredict, y=ypredict, marks=mpredict) else { # replicate nt <- length(types) np <- length(xpredict) xpredict <- rep.int(xpredict,nt) ypredict <- rep.int(ypredict,nt) mpredict <- rep.int(types, rep.int(np, nt)) mpredict <- factor(mpredict, levels=types) newdata <- data.frame(x = xpredict, y = ypredict, marks=mpredict) } #### Next the external covariates, if any # if(need.covariates) { if(is.null(covariates)) { # Extract covariates from fitted model object # They have to be images. oldcov <- model$covariates if(is.null(oldcov)) stop("External covariates are required, and are not available") if(is.data.frame(oldcov)) stop(paste("External covariates are required.", "Prediction is not possible at new locations")) covariates <- oldcov } covfunargs <- model$covfunargs covariates.df <- mpl.get.covariates(covariates, list(x=xpredict, y=ypredict), "prediction points", covfunargs) newdata <- cbind(newdata, covariates.df) } # ######## Set up prediction variables ################################ # # # Provide SUBSET variable # if(is.null(newdata$SUBSET)) newdata$SUBSET <- rep.int(TRUE, nrow(newdata)) # # Dig out information used in Berman-Turner device # Vnames: the names for the ``interaction variables'' # glmdata: the data frame used for the glm fit # glmfit: the fitted glm object # if(!trivial) { Vnames <- model$internal$Vnames glmdata <- getglmdata(model) glmfit <- getglmfit(model) if(object$method=="logi") newdata$.logi.B <- rep(glmdata$.logi.B[1], nrow(newdata)) } ############ COMPUTE PREDICTION ############################## # # Compute the predicted value z[i] for each row of 'newdata' # Store in a vector z and reshape it later # ############################################################### if(trivial) { ############# UNIFORM POISSON PROCESS ##################### lambda <- exp(coeffs[[1]]) switch(type, 
cif=, trend={ z <- rep.int(lambda, nrow(newdata)) }, se={ npts <- npoints(data.ppm(model)) se.lambda <- lambda/sqrt(npts) z <- rep.int(se.lambda, nrow(newdata)) }, stop("Internal error: unrecognised type")) ################################################################ } else if((type %in% c("trend","se")) || poisson) { # ############# COMPUTE TREND ################################### # # set explanatory variables to zero # zeroes <- numeric(nrow(newdata)) for(vn in Vnames) newdata[[vn]] <- zeroes # # predict # lambda <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) # switch(type, cif=, trend={ z <- lambda }, se={ # extract variance-covariance matrix of parameters vc <- vcov(model) # compute model matrix fmla <- formula(model) mf <- model.frame(fmla, newdata, ..., na.action=na.pass) mm <- model.matrix(fmla, mf, ..., na.action=na.pass) if((nr <- nrow(mm)) != nrow(newdata)) stop("Internal error: row mismatch in SE calculation") # compute relative variance = diagonal of quadratic form vv <- quadform(mm, vc) # vv <- numeric(nr) # for(i in 1:nr) { # mmi <- mm[i, ] # vv[i] <- mmi %*% vc %*% mmi # } z <- lambda * sqrt(vv) }, stop("Internal error: unrecognised type")) ############################################################## } else if(type == "cif" || type =="lambda") { ######### COMPUTE FITTED CONDITIONAL INTENSITY ################ # # # set up arguments inter <- model$interaction if(!missing(X)) stopifnot(is.ppp(X)) W <- as.owin(data.ppm(model)) U <- ppp(newdata$x, y=newdata$y, window=W, check=FALSE) if(marked) marks(U) <- Umarks <- newdata$marks # determine which prediction points are data points if(is.null(E)) E <- equalpairs(U, X, marked) # evaluate interaction Vnew <- evalInteraction(X, U, E, inter, correction=correction, check=check) # Negative infinite values signify cif = zero cif.equals.zero <- matrowany(Vnew == -Inf) # Insert the potential into the relevant column(s) of `newdata' if(ncol(Vnew) == 1) # Potential is real valued (Vnew is a 
column vector) # Assign values to a column of the same name in newdata newdata[[Vnames]] <- as.vector(Vnew) # else if(is.null(dimnames(Vnew)[[2]])) { # Potential is vector-valued (Vnew is a matrix) # with unnamed components. # Assign the components, in order of their appearance, # to the columns of newdata labelled Vnames[1], Vnames[2],... for(i in seq_along(Vnames)) newdata[[Vnames[i] ]] <- Vnew[,i] # } else { # Potential is vector-valued (Vnew is a matrix) # with named components. # Match variables by name for(vn in Vnames) newdata[[vn]] <- Vnew[,vn] # } # invoke predict.glm or compute prediction z <- GLMpredict(glmfit, newdata, coeffs, changecoef=changedcoef) # reset to zero if potential was zero if(any(cif.equals.zero)) z[cif.equals.zero] <- 0 ################################################################# } else stop(paste("Unrecognised type", sQuote(type))) ################################################################# # # reshape the result # if(!want.image) out <- as.vector(z) else { # make an image of the right shape imago <- as.im(masque) imago <- eval.im(as.double(imago)) if(!marked) { # single image out <- imago # set entries out$v[masque$m] <- z } else { # list of images out <- list() for(i in seq_along(types)) { outi <- imago # set entries outi$v[masque$m] <- z[newdata$marks == types[i]] out[[i]] <- outi } out <- as.listof(out) names(out) <- paste("mark", types, sep="") } } # # FINISHED # return(out) } predict.ppm }) #################################################################### # # compute pointwise uncertainty of fitted intensity # model.se.image <- function(fit, W=as.owin(fit), ..., what="sd") { if(!is.poisson.ppm(fit)) stop("Only implemented for Poisson point process models", call.=FALSE) what <- pickoption("option", what, c(sd="sd", var="var", cv="cv", CV="cv", ce="ce", CE="ce")) W <- as.mask(as.owin(W)) # variance-covariance matrix of coefficients vc <- vcov(fit) np <- dim(vc)[1] # extract sufficient statistic for each coefficient mm 
<- model.images(fit, W, ...) # compute fitted intensity lam <- predict(fit, locations=W) # initialise resulting image U <- as.im(W) U[] <- 0 # compute pointwise matrix product, assuming vc is symmetric for(i in 1:np) { Si <- mm[[i]] aii <- vc[i,i] U <- eval.im(U + aii * Si^2) if(i > 1) { for(j in 1:(i-1)) { Sj <- mm[[j]] aij <- vc[i,j] twoaij <- 2 * aij U <- eval.im(U + twoaij * Si * Sj) } } } # the matrix product is the relative variance (CV) if(what=="cv") return(U) # relative sd if(what=="ce") { U <- eval.im(sqrt(U)) return(U) } # multiply by squared intensity to obtain variance U <- eval.im(U * lam^2) # variance if(what=="var") return(U) # compute SD and return U <- eval.im(sqrt(U)) return(U) } GLMpredict <- function(fit, data, coefs, changecoef=TRUE) { if(!changecoef) { answer <- predict(fit, newdata=data, type="response") } else { # do it by hand fmla <- formula(fit) data$.mpl.Y <- 1 fram <- model.frame(fmla, data=data) # linear predictor mm <- model.matrix(fmla, data=fram) eta <- as.vector(mm %*% coefs) # offset mo <- model.offset(fram) if(!is.null(mo)) { if(is.matrix(mo)) mo <- apply(mo, 1, sum) eta <- mo + eta } # response linkinv <- family(fit)$linkinv answer <- linkinv(eta) } # Convert from fitted logistic prob. to lambda for logistic fit if(family(fit)$family=="binomial") answer <- fit$data$.logi.B[1] * answer/(1-answer) return(answer) } # An 'equalpairs' matrix E is needed in the ppm class # to determine which quadrature points and data points are identical # (not just which quadrature points are data points). # It is a two-column matrix specifying all the identical pairs. # The first column gives the index of a data point (in the data pattern X) # and the second column gives the corresponding index in U. # The following function determines the equal pair information # from the coordinates (and marks) of U and X alone; # it should be used only if we can't figure out this information otherwise. 
equalpairs <- function(U, X, marked=FALSE) { nn <- nncross(U, X) coincides <- (nn$dist == 0) Xind <- nn$which[coincides] Uind <- which(coincides) if(marked) { samemarks <- (marks(X)[Xind] == marks(U)[Uind]) Xind <- Xind[samemarks] Uind <- Uind[samemarks] } return(cbind(Xind, Uind)) } spatstat/R/dclftest.R0000644000176000001440000001536112237642727014323 0ustar ripleyusers# # dclftest.R # # $Revision: 1.17 $ $Date: 2013/08/06 10:16:30 $ # # Monte Carlo tests for CSR (etc) # clf.test <- function(...) { .Deprecated("dclf.test", package="spatstat") dclf.test(...) } dclf.test <- function(X, ..., rinterval=NULL, use.theo=FALSE) { Xname <- short.deparse(substitute(X)) envelopeTest(X, ..., power=2, use.theo=use.theo, rinterval=rinterval, Xname=Xname) } mad.test <- function(X, ..., rinterval=NULL, use.theo=FALSE) { Xname <- short.deparse(substitute(X)) envelopeTest(X, ..., power=Inf, use.theo=use.theo, rinterval=rinterval, Xname=Xname) } envelopeTest <- function(X, ..., power=1, rinterval=NULL, use.theo=FALSE, tie.rule=c("randomise","mean"), save.envelope = savefuns || savepatterns, savefuns = FALSE, savepatterns = FALSE, Xname=NULL, verbose=TRUE, internal=NULL) { if(is.null(Xname)) Xname <- short.deparse(substitute(X)) tie.rule <- match.arg(tie.rule) force(save.envelope) check.1.real(power) explain.ifnot(power >= 0) if(use.theo) { # using theoretical function as reference. # ensure resulting envelope object includes theoretical function. 
internal <- resolve.defaults(internal, list(csr=TRUE)) } # case where X is a previous result of dclf.test, etc if(inherits(X, "htest")) { if(is.null(envX <- attr(X, "envelope"))) stop(paste(Xname, "does not contain simulation data")) X <- envX } # compute or extract simulated functions X <- envelope(X, ..., savefuns=TRUE, savepatterns=savepatterns, Yname=Xname, internal=internal, verbose=verbose) Y <- attr(X, "simfuns") # extract values r <- with(X, .x) obs <- with(X, .y) sim <- as.matrix(as.data.frame(Y))[, -1] nsim <- ncol(sim) # choose function as reference has.theo <- ("theo" %in% names(X)) if(use.theo && !has.theo) warning("No theoretical function available; use.theo ignored") if(use.theo && has.theo) { reference <- with(X, theo) used.theo <- TRUE } else { # compute sample mean of simulations *and* observed reference <- apply(cbind(sim, obs), 1, mean, na.rm=TRUE) used.theo <- FALSE } # determine interval of r values for computation if(!is.null(rinterval)) { stopifnot(is.numeric(rinterval)) stopifnot(length(rinterval) == 2) stopifnot(rinterval[1] < rinterval[2]) if(max(r) < rinterval[2]) { oldrinterval <- rinterval rinterval <- intersect.ranges(rinterval, range(r)) if(verbose) warning(paste("The interval", prange(oldrinterval), "is too large for the available data;", "it has been trimmed to", prange(rinterval))) } ok <- (rinterval[1] <= r & r <= rinterval[2]) obs <- obs[ok] sim <- sim[ok, ] reference <- reference[ok] } else { rinterval <- range(r) bad <- !apply(is.finite(as.matrix(X)), 1, all) if(any(bad)) { if(bad[1] && !any(bad[-1])) { # ditch r = 0 rinterval <- c(r[2], max(r)) if(verbose) warning(paste("Some function values were infinite or NaN", "at distance r = 0; interval of r values was reset to", prange(rinterval))) ok <- (rinterval[1] <= r & r <= rinterval[2]) obs <- obs[ok] sim <- sim[ok, ] reference <- reference[ok] } else { # problem rbadmax <- max(r[bad]) unitinfo <- summary(unitname(X)) stop(paste("Some function values were infinite or NaN", "at 
distances r up to", paste(rbadmax, ".", sep=""), "Please specify a shorter", sQuote("rinterval"))) } } } # compute test statistic if(is.infinite(power)) { # MAD devdata <- max(abs(obs-reference)) names(devdata) <- "mad" devsim <- apply(abs(sim-reference), 2, max) testname <- "Maximum absolute deviation test" } else { a <- diff(rinterval) * (if(used.theo) 1 else ((nsim+1)/nsim)^power) if(power == 2) { # Cramer-von Mises devdata <- a * mean((obs - reference)^2) names(devdata) <- "u" devsim <- a * colMeans((sim - reference)^2) testname <- "Diggle-Cressie-Loosmore-Ford test" } else if(power == 1) { # integral absolute deviation devdata <- a * mean(abs(obs - reference)) names(devdata) <- "L1" devsim <- a * colMeans(abs(sim - reference)) testname <- "Integral absolute deviation test" } else { # general p devdata <- a * mean((abs(obs - reference)^power)) names(devdata) <- "Lp" devsim <- a * colMeans((abs(sim - reference)^power)) testname <- paste("Integrated", ordinal(power), "Power Deviation test") } } # compute rank and p-value datarank <- sum(devdata < devsim) + 1 nties <- sum(devdata == devsim) if(nties > 0) { tierank <- switch(tie.rule, mean = nties/2, randomise = sample(1:nties, 1)) datarank <- datarank + tierank if(verbose) message("Ties were encountered") } pvalue <- datarank/(nsim+1) # bookkeeping statistic <- data.frame(devdata, rank=datarank) colnames(statistic)[1] <- names(devdata) e <- attr(X, "einfo") nullmodel <- if(identical(e$csr, TRUE)) "CSR" else if(!is.null(e$simtype)) { switch(e$simtype, csr = "CSR", rmh = paste("fitted", if(identical(e$pois, TRUE)) "Poisson" else "Gibbs", "model"), kppm = "fitted cluster model", expr = "model simulated by evaluating expression", list = "model simulated by drawing patterns from a list", "unrecognised model") } else "unrecognised model" fname <- deparse(attr(X, "ylab")) uname <- with(summary(unitname(X)), if(!vanilla) paste(plural, explain) else NULL) testname <- c(paste(testname, "of", nullmodel), paste("Monte Carlo 
test based on", nsim, "simulations"), paste("Summary function:", fname), paste("Reference function:", if(used.theo) "theoretical" else "sample mean"), paste("Interval of distance values:", prange(rinterval), uname) ) result <- structure(list(statistic = statistic, p.value = pvalue, method = testname, data.name = e$Yname), class="htest") attr(result, "rinterval") <- rinterval if(save.envelope) attr(result, "envelope") <- X return(result) } spatstat/R/plot.ppp.R0000755000176000001440000002532412247607350014264 0ustar ripleyusers# # plot.ppp.R # # $Revision: 1.52 $ $Date: 2013/12/04 11:10:17 $ # # #-------------------------------------------------------------------------- plot.ppp <- function(x, main, ..., chars=NULL, cols=NULL, use.marks=TRUE, which.marks=NULL, add=FALSE, type=c("p", "n"), maxsize=NULL, markscale=NULL, zap=0.01) { if(missing(main)) main <- short.deparse(substitute(x)) type <- match.arg(type) if(type == "n") { # plot the window only do.call("plot.owin", resolve.defaults(list(x$window), list(...), list(main=main, invert=TRUE, add=add))) return(invisible(NULL)) } # Handle multiple columns of marks as separate plots # (unless add=TRUE or which.marks selects a single column) if(use.marks && is.data.frame(mx <- marks(x))) { implied.all <- is.null(which.marks) do.several <- implied.all || is.data.frame(mx <- mx[,which.marks]) if(add && implied.all) { message("Plotting the first column of marks") which.marks <- 1 } else if(!add && do.several) { y <- as.listof(lapply(mx, function(z, P) setmarks(P,z), P=x)) out <- do.call("plot", resolve.defaults(list(x=y, main=main), list(...), list(chars=chars, cols=cols, maxsize=maxsize, markscale=markscale, zap=zap))) if(is.null(out)) return(invisible(NULL)) else return(out) } } # First handle `rejected' points sick <- inherits(x, "ppp") && !is.null(rejects <- attr(x, "rejects")) if(sick) { # get any parameters par.direct <- list(main=main, use.marks=use.marks, maxsize=maxsize, markscale=markscale) par.rejects.default <- 
list(pch="+") par.rejects <- resolve.defaults(list(...), list(par.rejects=par.rejects.default))$par.rejects par.rejects <- resolve.defaults(par.rejects, par.rejects.default) par.all <- resolve.defaults(par.rejects, par.direct) rw <- resolve.defaults(list(...), list(rejectwindow=NULL))$rejectwindow # determine window for rejects rwin <- if(is.null(rw)) rejects$window else if(is.logical(rw) && rw) rejects$window else if(inherits(rw, "owin")) rw else if(is.character(rw)) { switch(rw, box={bounding.box(rejects, x)}, ripras={ripras(c(rejects$x, x$x), c(rejects$y, x$y))}, stop(paste("Unrecognised option: rejectwindow=", rw))) } else stop("Unrecognised format for rejectwindow") if(is.null(rwin)) stop("Selected window for rejects pattern is NULL") # Create suitable space plot(rejects$window, add=add, type="n", main="") if(!add) title(main=main) # plot rejects window if commanded if(!is.null(rw)) { rwinpardefault <- list(lty=2,lwd=1,border=1) rwinpars <- resolve.defaults(par.rejects, rwinpardefault)[names(rwinpardefault)] do.call("plot.owin", append(list(rwin, add=TRUE), rwinpars)) } # plot window of main pattern do.call("plot.owin", resolve.defaults(list(x$window, add=TRUE), list(...), list(invert=TRUE))) # plot points do.call("plot.ppp", append(list(rejects, add=TRUE), par.all)) warning(paste(rejects$n, "illegal points also plotted")) # the rest is added add <- TRUE } # Now convert to bona fide point pattern x <- as.ppp(x) xwindow <- x$window marked <- is.marked(x, dfok=TRUE, na.action="ignore") # Plot observation window if(!add) do.call("plot.owin", resolve.defaults(list(xwindow), list(...), list(invert=TRUE, main=main))) if(x$n == 0) return(invisible()) # Handle plot parameters explicit <- list() if(!is.null(cols)) explicit <- append(explicit, list(cols=cols)) if(!is.null(chars)) explicit <- append(explicit, list(chars=chars)) defaults <- spatstat.options("par.points") # Prepare to plot points smartpoints <- function(xx, yy, ..., index=1, col=NULL, pch=NULL, cols=NULL, 
chars=NULL) { if(!is.null(cols)) col <- cols[index] if(is.null(pch) && !is.null(chars)) pch <- chars[index] do.call.matched("points", resolve.defaults(list(x=list(x=xx, y=yy), ...), if(!is.null(col)) list(col=col) else NULL, if(!is.null(pch)) list(pch=pch) else NULL), extrargs=c("col", "pch", "type", "bg", "cex", "lwd", "lty")) } if(!marked || !use.marks) { do.call("smartpoints", resolve.defaults(list(xx=x$x, yy=x$y), explicit, list(...), spatstat.options("par.points"))) return(invisible()) } # marked point pattern marx <- marks(x, dfok=TRUE) if(is.data.frame(marx)) { # select column or take first colum marx <- marx[, which.marks] } # check there are some valid marks! ok <- !is.na(marx) if(all(!ok)) { warning("All mark values are NA; plotting locations only.") do.call("smartpoints", resolve.defaults(list(xx=x$x, yy=x$y), explicit, list(...), spatstat.options("par.points"))) return(invisible()) } # otherwise ignore invalid marks if(!all(ok)) { warning(paste("Some marks are NA;", "corresponding points are omitted.")) x <- x[ok] marx <- marx[ok] } ################ convert POSIX times to real numbers ########### if(marks.are.times <- inherits(marx, "POSIXt")) { marx <- as.POSIXct(marx) tzone <- attr(marx, "tzone") earliest.time <- min(marx) marx <- as.numeric(marx - earliest.time) } ################ real-valued marks ############################ if(is.numeric(marx)) { ok <- is.finite(marx) if(!all(ok)) { warning(paste("Some marks are infinite", "corresponding points are omitted.")) x <- x[ok] marx <- marx[ok] } scal <- mark.scale.default(marx, xwindow, markscale=markscale, maxsize=maxsize) if(is.na(scal)) { # data cannot be scaled successfully; # plot as points do.call("smartpoints", resolve.defaults(list(x$x, x$y), explicit, list(...), spatstat.options("par.points"))) return(invisible()) } # scale determined. # Apply the scaling ms <- marx * scal # Finally, plot them.. 
absmarx <- abs(marx) tiny <- (absmarx <= zap * max(absmarx)) neg <- (marx < 0) & !tiny pos <- (marx > 0) & !tiny # plot positive values as circles if(any(pos)) do.call("symbols", resolve.defaults( list(x$x[pos], x$y[pos]), list(circles = ms[pos]), list(inches = FALSE, add = TRUE), if(!is.null(cols)) list(fg=cols[1]) else NULL, list(...))) # plot negative values as squares if(any(neg)) do.call("symbols", resolve.defaults( list(x$x[neg], x$y[neg]), list(squares = - ms[neg]), list(inches = FALSE, add = TRUE), if(!is.null(cols)) list(fg=cols[1]) else NULL, list(...))) # return a plottable scale bar mr <- range(marx) mp.value <- if(is.na(scal)) mr[1] else pretty(mr) mp.plotted <- mp.value * scal if(marks.are.times) mp.value <- as.POSIXct(mp.value, tz=tzone, origin=earliest.time) names(mp.plotted) <- paste(mp.value) return(mp.plotted) } ##################### non-numeric marks ############################### um <- if(is.factor(marx)) levels(marx) else sort(unique(marx)) ntypes <- length(um) if(is.null(chars)) { if(ntypes <= 25) { # numerical 'pch' chars <- 1:ntypes } else { # letters ltr <- c(letters, LETTERS) if(ntypes <= 52) { chars <- ltr[1:ntypes] } else { # wrapped sequence of letters warning("There are too many types to display every type as a different character") chars <- ltr[1 + (0:(ntypes - 1) %% 52)] } } } else if((nchars <- length(chars)) != ntypes) { if(nchars != 1) stop(paste("length of", sQuote("chars"), "is not equal to the number of types")) else explicit$chars <- chars <- rep.int(chars, ntypes) } if(!is.null(cols) && ((ncols <- length(cols)) != ntypes)) { if(ncols != 1) stop(paste("length of", sQuote("cols"), "is not equal to the number of types")) else explicit$cols <- cols <- rep.int(cols, ntypes) } for(i in seq_along(um)) { relevant <- (marx == um[i]) if(any(relevant)) do.call("smartpoints", resolve.defaults(list(x$x[relevant], x$y[relevant]), list(pch = chars[i]), explicit, list(index=i), list(...), spatstat.options("par.points"))) } names(chars) <- 
um if(length(chars) < 20) return(chars) else return(invisible(chars)) } mark.scale.default <- function(marx, w, markscale=NULL, maxsize=NULL) { # establish values of markscale, maxsize if(!is.null(maxsize) && !is.null(markscale)) stop("Only one of maxsize and markscale should be given") if(is.null(maxsize) && is.null(markscale)) { # if BOTH are absent, enforce the spatstat defaults # (which could also be null) pop <- spatstat.options("par.points") markscale <- pop$markscale maxsize <- pop$maxsize } # Now check whether markscale is fixed if(!is.null(markscale)) { stopifnot(markscale > 0) return(markscale) } # Usual case: markscale is to be determined from maximum physical size if(is.null(maxsize)) { # guess appropriate max physical size of symbols bb <- as.rectangle(w) maxsize <- 1.4/sqrt(pi * length(marx)/area.owin(bb)) maxsize <- min(maxsize, diameter(bb) * 0.07) } else stopifnot(maxsize > 0) # Examine mark values maxabs <- max(abs(marx)) tiny <- (maxabs < 4 * .Machine$double.eps) if(tiny) return(NA) else return(maxsize/maxabs) } spatstat/R/areadiff.R0000755000176000001440000002004112237642727014246 0ustar ripleyusers# # areadiff.R # # $Revision: 1.28 $ $Date: 2013/11/02 01:53:09 $ # # Computes sufficient statistic for area-interaction process # # Invokes areadiff.c # # areaLoss = area lost by removing X[i] from X areaLoss <- function(X, r, ..., W=as.owin(X), subset=NULL, exact=FALSE, ngrid=spatstat.options("ngrid.disc")) { if(exact) areaLoss.diri(X, r, ..., W=W, subset=subset) else areaLoss.grid(X, r, ..., W=W, subset=subset, ngrid=ngrid) } # areaGain = area gained by adding u[i] to X areaGain <- function(u, X, r, ..., W=as.owin(X), exact=FALSE, ngrid=spatstat.options("ngrid.disc")) { if(exact) areaGain.diri(u, X, r, ..., W=W) else areaGain.grid(u, X, r, W=W, ngrid=ngrid) } #//////////////////////////////////////////////////////////// # algorithms using Dirichlet tessellation #/////////////////////////////////////////////////////////// areaLoss.diri <- 
function(X, r, ..., W=as.owin(X), subset=NULL) { stopifnot(is.ppp(X)) npts <- npoints(X) if(is.matrix(r)) { if(sum(dim(r) > 1) > 1) stop("r should be a vector or single value") r <- as.vector(r) } nr <- length(r) if(npts == 0) return(matrix(, nrow=0, ncol=nr)) else if(npts == 1) return(matrix(discpartarea(X, r, W), nrow=1)) # set up output array indices <- 1:npts if(!is.null(subset)) indices <- indices[subset] out <- matrix(, nrow=length(indices), ncol=nr) # w <- X$window pir2 <- pi * r^2 # dirichlet neighbour relation in entire pattern dd <- deldir(X$x, X$y, rw=c(w$xrange, w$yrange)) a <- dd$delsgs[,5] b <- dd$delsgs[,6] for(k in seq_along(indices)) { i <- indices[k] # find all Delaunay neighbours of i jj <- c(b[a==i], a[b==i]) jj <- sort(unique(jj)) # extract only these points Yminus <- X[jj] Yplus <- X[c(jj, i)] # dilate aplus <- dilated.areas(Yplus, r, W, exact=TRUE) aminus <- dilated.areas(Yminus, r, W, exact=TRUE) areas <- aplus - aminus # area/(pi * r^2) must be positive and nonincreasing y <- ifelseAX(r == 0, 1, areas/pir2) y <- pmin.int(1, y) ok <- is.finite(y) y[ok] <- rev(cummax(rev(y[ok]))) areas <- pmax.int(0, y * pir2) # save out[k, ] <- areas } return(out) } areaGain.diri <- function(u, X, r, ..., W=as.owin(X)) { stopifnot(is.ppp(X)) Y <- as.ppp(u, W=W) nX <- X$n nY <- Y$n if(is.matrix(r)) { if(sum(dim(r) > 1) > 1) stop("r should be a vector or single value") r <- as.vector(r) } nr <- length(r) if(nY == 0) return(matrix(, nrow=0, ncol=nr)) if(nX == 0) return(matrix(pi * r^2, nrow=nY, ncol=nr, byrow=TRUE)) cat(paste("areaGain,", nY, "points,", nr, "r values\n")) out <- matrix(0, nrow=nY, ncol=nr) pir2 <- pi * r^2 wbox <- as.rectangle(as.owin(X)) # for(i in 1:nY) { progressreport(i, nY) V <- superimpose(Y[i], X, W=wbox, check=FALSE) # Dirichlet neighbour relation for V dd <- deldir(V$x, V$y, rw=c(wbox$xrange, wbox$yrange)) aa <- dd$delsgs[,5] bb <- dd$delsgs[,6] # find all Delaunay neighbours of Y[1] in V jj <- c(bb[aa==1], aa[bb==1]) jj <- 
sort(unique(jj)) # extract only these points Zminus <- V[jj] Zplus <- V[c(1, jj)] # dilate aplus <- dilated.areas(Zplus, r, W, exact=TRUE) aminus <- dilated.areas(Zminus, r, W, exact=TRUE) areas <- aplus - aminus # area/(pi * r^2) must be in [0,1] and nonincreasing y <- ifelseAX(r == 0, 1, areas/pir2) y <- pmin.int(1, y) ok <- is.finite(y) y[ok] <- rev(cummax(rev(y[ok]))) areas <- pmax.int(0, y * pir2) # save out[i,] <- areas } return(out) } #//////////////////////////////////////////////////////////////////////// # alternative implementations using grid counting in C #//////////////////////////////////////////////////////////////////////// areaGain.grid <- function(u, X, r, ..., W=NULL, ngrid=spatstat.options("ngrid.disc")) { verifyclass(X, "ppp") u <- as.ppp(u, W=as.owin(X)) stopifnot(is.numeric(r) && all(is.finite(r)) && all(r >= 0)) # nu <- u$n nr <- length(r) if(nr == 0) return(numeric(0)) rmax <- max(r) # constrain <- !is.null(W) if(constrain && (W$type != "rectangle")) { # Constrained to an irregular window # initialise to value for small-r result <- matrix(pi * r^2, nrow=nu, ncol=nr, byrow=TRUE) # vector of radii below which b(u,r) is disjoint from U(X,r) rcrit.u <- nncross(u, X, what="dist")/2 rcrit.min <- min(rcrit.u) # Use distance transform and set covariance D <- distmap(X, ...) 
DW <- D[W, drop=FALSE]
    # distance from (0,0) - thresholded to make digital discs
    discWin <- owin(c(-rmax,rmax),c(-rmax,rmax))
    discWin <- as.mask(discWin, eps=min(D$xstep, rmax/4))
    rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W=discWin)
    #
    # only radii above the smallest critical radius need computation;
    # smaller radii retain the initial value pi * r^2
    for(j in which(r > rcrit.min)) {
      # rj is above the critical radius rcrit.u[i] for at least one point u[i]
      rj <- r[j]
      if(any(above <- (rj > rcrit.u))) {
        # pixels of W lying at distance > rj from X (not covered by dilation)
        Uncovered <- levelset(DW, rj, ">")
        # digital disc of radius rj
        DiscRj <- levelset(rad, rj, "<=")
        # set covariance: value at u = area of intersection of the
        # uncovered set with a disc of radius rj centred at u
        AreaGainIm <- setcov(Uncovered, DiscRj)
        result[above, j] <- safelookup(AreaGainIm, u[above])
      }
    }
    return(result)
  }
  #
  # unconstrained (or rectangular-window) case: grid counting in C
  # (the file header above says this invokes areadiff.c)
  xx <- X$x
  yy <- X$y
  result <- matrix(, nrow=nu, ncol=nr)
  # NOTE(review): the DUP argument to .C() was deprecated in later R
  # versions and removed in R 3.1.0 -- appropriate for the R of this era
  DUP <- spatstat.options("dupC")
  #
  for(i in 1:nu) {
    # shift u[i] to origin
    xu <- u$x[i]
    yu <- u$y[i]
    xshift <- xx - xu
    yshift <- yy - yu
    # find points within distance 2 rmax of origin
    close <- (xshift^2 + yshift^2 < 4 * rmax^2)
    nclose <- sum(close)
    # invoke C routine on the nearby points only
    if(!constrain) {
      z <- .C("areadifs",
              rad = as.double(r),
              nrads = as.integer(nr),
              x = as.double(xshift[close]),
              y = as.double(yshift[close]),
              nn = as.integer(nclose),
              ngrid = as.integer(ngrid),
              answer = as.double(numeric(nr)),
              DUP=DUP)
      #        PACKAGE="spatstat")
      result[i,] <- z$answer
    } else {
      # constrained to the rectangle W: pass the window edges,
      # translated so that u[i] is at the origin
      z <- .C("areaBdif",
              rad = as.double(r),
              nrads = as.integer(nr),
              x = as.double(xshift[close]),
              y = as.double(yshift[close]),
              nn = as.integer(nclose),
              ngrid = as.integer(ngrid),
              x0 = as.double(W$xrange[1] - xu),
              y0 = as.double(W$yrange[1] - yu),
              x1 = as.double(W$xrange[2] - xu),
              y1 = as.double(W$yrange[2] - yu),
              answer = as.double(numeric(nr)),
              DUP=DUP)
      #        PACKAGE="spatstat")
      result[i,] <- z$answer
    }
  }
  return(result)
}

# areaLoss.grid: grid-based computation of the area lost by deleting
# each point X[i] (i in 'subset'), for each distance in r.
# Returns a matrix, one row per point, one column per r value.
areaLoss.grid <- function(X, r, ..., W=as.owin(X), subset=NULL,
                          method = c("count", "distmap"),
                          ngrid = spatstat.options("ngrid.disc"),
                          exact = FALSE) {
  verifyclass(X, "ppp")
  n <- npoints(X)
  nr <- length(r)
  # rows of the output correspond to the requested subset of points
  indices <- if(is.null(subset)) 1:n else (1:n)[subset]
  answer <- matrix(, nrow=length(indices), ncol=nr)
  # heuristic choice of algorithm: grid counting for few r values,
  # distance transform for many
  if(missing(method)) {
    method <- if(nr <= 20 || exact) "count" else "distmap"
  }
else method <- match.arg(method) switch(method, count = { # one value of r: use grid-counting for(k in seq_along(indices)) { i <- indices[k] answer[k,] <- areaGain(X[i], X[-i], r, W=W, ngrid=ngrid, exact=exact) } }, distmap = { # Many values of r: use distance transform D <- distmap(X, ...) DW <- D[W, drop=FALSE] a <- area.owin(as.owin(DW)) # empirical cdf of distance values FW <- ecdf(DW[drop=TRUE]) # radii below which there are no overlaps rcrit <- nndist(X)/2 for(k in seq_along(indices)) { i <- indices[k] Di <- distmap(X[-i], ...) FiW <- ecdf(Di[W, drop=TRUE]) answer[k, ] <- ifelseXY(r > rcrit[i], a * (FW(r) - FiW(r)), pi * r^2) } }) return(answer) } spatstat/R/units.R0000755000176000001440000001016012237642727013650 0ustar ripleyusers# # Functions for extracting and setting the name of the unit of length # # $Revision: 1.18 $ $Date: 2013/09/24 01:13:00 $ # # unitname <- function(x) { UseMethod("unitname") } unitname.owin <- function(x) { u <- as.units(x$units) return(u) } unitname.ppp <- function(x) { u <- as.units(x$window$units) return(u) } unitname.im <- function(x) { u <- as.units(x$units) return(u) } unitname.default <- function(x) { return(as.units(attr(x, "units"))) } "unitname<-" <- function(x, value) { UseMethod("unitname<-") } "unitname<-.owin" <- function(x, value) { x$units <- as.units(value) return(x) } "unitname<-.ppp" <- function(x, value) { w <- x$window unitname(w) <- value x$window <- w return(x) } "unitname<-.im" <- function(x, value) { x$units <- as.units(value) return(x) } "unitname<-.default" <- function(x, value) { attr(x, "units") <- as.units(value) return(x) } ### class 'units' makeunits <- function(sing="unit", plur="units", mul = 1) { if(!is.character(sing)) stop("In unit name, first entry should be a character string") if(!is.character(plur)) stop("In unit name, second entry should be a character string") if(!is.numeric(mul)) { mul <- try(as.numeric(mul), silent=TRUE) if(inherits(mul, "try-error")) stop("In unit name, third entry 
should be a number") } if(length(mul) != 1 || mul <= 0) stop("In unit name, third entry should be a single positive number") u <- list(singular=sing, plural=plur, multiplier=mul) if(mul != 1 && (sing=="unit" || plur=="units")) stop(paste("A multiplier is not allowed", "if the unit does not have a specific name")) class(u) <- "units" return(u) } as.units <- function(s) { s <- as.list(s) n <- length(s) if(n > 3) stop(paste("Unit name should be a character string,", "or a vector/list of 2 character strings,", "or a list(character, character, numeric)")) out <- switch(n+1, makeunits(), makeunits(s[[1]], s[[1]]), makeunits(s[[1]], s[[2]]), makeunits(s[[1]], s[[2]], s[[3]])) return(out) } print.units <- function(x, ...) { mul <- x$multiplier if(mul == 1) cat(paste(x$singular, "/", x$plural, "\n")) else cat(paste(mul, x$plural, "\n")) return(invisible(NULL)) } summary.units <- function(object, ...) { x <- object scaled <- (x$multiplier != 1) named <- (x$singular != "unit") vanilla <- !named && !scaled out <- if(vanilla) { list(legend = NULL, axis = NULL, explain = NULL, singular = "unit", plural = "units") } else if(named & !scaled) { list(legend = paste("Unit of length: 1", x$singular), axis = paste("(", x$plural, ")", sep=""), explain = NULL, singular = x$singular, plural = x$plural) } else { expanded <- paste(x$multiplier, x$plural) list(legend = paste("Unit of length:", expanded), axis = paste("(one unit = ", expanded, ")", sep=""), explain = paste("(one unit = ", expanded, ")", sep=""), singular = "unit", plural = "units") } out <- append(out, list(scaled = scaled, named = named, vanilla = vanilla)) class(out) <- "summary.units" return(out) } print.summary.units <- function(x, ...) 
{ if(x$vanilla) cat("Unit of length (unnamed)\n") else cat(paste(x$legend, "\n")) invisible(NULL) } compatible.units <- function(A, B, ..., coerce=TRUE) { stopifnot(inherits(A, "units")) if(missing(B)) return(TRUE) stopifnot(inherits(B, "units")) # check for null units Anull <- summary(A)$vanilla Bnull <- summary(B)$vanilla # `coerce' determines whether `vanilla' units are compatible with other units coerce <- as.logical(coerce) # agree <- if(!Anull && !Bnull) identical(all.equal(A,B), TRUE) else if(Anull && Bnull) TRUE else coerce # if(!agree) return(FALSE) # A and B agree if(length(list(...)) == 0) return(TRUE) # recursion return(compatible.units(B, ...)) } spatstat/R/randomonlines.R0000755000176000001440000001322612237642727015364 0ustar ripleyusers# # randomOnLines.R # # $Revision: 1.7 $ $Date: 2013/04/25 06:37:43 $ # # Generate random points on specified lines # runifpointOnLines <- function(n, L) { if(!is.numeric(n) || any(n < 0) || any(n %% 1 != 0)) stop("n should be a nonnegative integer or integers") if(!is.psp(L)) L <- as.psp(L) X <- datagen.runifpointOnLines(n, L) out <- ppp(X$x, X$y, marks=X$marks, window=as.owin(L), check=FALSE) return(out) } datagen.runifpointOnLines <- function(n, L) { stopifnot(is.psp(L)) m <- length(n) ismarked <- (m > 1) if(m == 0 || (m == 1 && n == 0)) return(data.frame(x=numeric(0), y=numeric(0), seg=integer(0), tp=numeric(0))) # extract segment information len <- lengths.psp(L) sumlen <- sum(len) cumlen <- cumsum(len) cum0len <- c(0, cumlen) Ldf <- as.data.frame(L) x0 <- with(Ldf, x0) y0 <- with(Ldf, y0) dx <- with(Ldf, x1-x0) dy <- with(Ldf, y1-y0) # determine mark space if(ismarked) { markvalues <- names(n) if(sum(nzchar(markvalues)) < m) markvalues <- paste(1:m) } # initialise output data.frame out <- data.frame(x=numeric(0), y=numeric(0), seg=integer(0), tp=numeric(0)) if(ismarked) out <- cbind(out, data.frame(marks=character(0))) # generate points of each mark in turn for(j in 1:m) { if(n[[j]] > 0) { # generate random 
# positions
      uu <- runif(n[[j]], min=0, max=sumlen)
      # identify segment for each point
      kk <- findInterval(uu, cum0len, rightmost.closed=TRUE, all.inside=TRUE)
      # parametric position along segment
      tt <- (uu - cum0len[kk])/len[kk]
      tt[!is.finite(tt)] <- 0
      # convert to (x,y)
      x <- x0[kk] + tt * dx[kk]
      y <- y0[kk] + tt * dy[kk]
      # assemble result
      if(!ismarked) {
        out <- data.frame(x=x, y=y, seg=kk, tp=tt)
      } else {
        outj <- data.frame(x=x, y=y, seg=kk, tp=tt, marks=markvalues[j])
        out <- rbind(out, outj)
      }
    }
  }
  if(ismarked)
    out$marks <- factor(out$marks, levels=markvalues)
  return(out)
}

# Generate a realisation of the uniform Poisson point process on the
# line segments L, with constant intensity lambda (a single rate, or a
# vector of rates producing one mark level per entry).
# Returns a point pattern (ppp) on the window of L.
runifpoisppOnLines <- function(lambda, L) {
  # BUG FIX: the original test was
  #     !all(is.finite(lambda) && (lambda >= 0))
  # which applies '&&' to vectors: for a vector lambda only the first
  # element was validated (and R >= 4.3 signals an error when a '&&'
  # operand has length > 1).  Validate every element explicitly.
  if(!is.numeric(lambda) || !all(is.finite(lambda)) || any(lambda < 0))
    stop("lambda should be a finite, nonnegative number or numbers")
  if(!is.psp(L))
    L <- as.psp(L)
  X <- datagen.runifpoisppOnLines(lambda, L)
  out <- ppp(X$x, X$y, marks=X$marks, window=as.owin(L), check=FALSE)
  return(out)
}

# Workhorse for runifpoisppOnLines: returns a data frame of coordinates
# (x, y, seg, tp [, marks]) rather than a ppp object.
datagen.runifpoisppOnLines <- function(lambda, L) {
  stopifnot(is.psp(L))
  # expected count for each rate = intensity * total segment length
  mu <- lambda * sum(lengths.psp(L))
  # draw one Poisson count per element of mu
  n <- rpois(rep.int(1, length(mu)), mu)
  if(length(n) > 1)
    names(n) <- names(lambda)
  df <- datagen.runifpointOnLines(n, L)
  return(df)
}

# Generate an inhomogeneous Poisson process on the segments L.
# lambda may be a number, a function(x,y,...), a pixel image, or a list
# of functions/images (one per mark level); lmax optionally supplies an
# upper bound on lambda for the rejection (Lewis-Shedler) method.
rpoisppOnLines <- function(lambda, L, lmax=NULL, ...) {
  if(!is.psp(L))
    L <- as.psp(L)
  X <- datagen.rpoisppOnLines(lambda, L, lmax=lmax, ...)
out <- ppp(X$x, X$y, marks=X$marks, window=as.owin(L)) return(out) } datagen.rpoisppOnLines <- function(lambda, L, lmax=NULL, ..., check=TRUE) { stopifnot(is.psp(L)) if(is.numeric(lambda)) return(datagen.runifpoisppOnLines(lambda, L)) # ensure lambda is a list if(is.function(lambda) || is.im(lambda)) lambda <- list(lambda) m <- length(lambda) # determine type of argument argtype <- if(all(unlist(lapply(lambda, is.im)))) "im" else if(all(unlist(lapply(lambda, is.function)))) "function" else stop(paste(sQuote("lambda"), "must be a numeric vector, a function, an image,", "a list of functions, or a list of images")) # check values of lambda if(argtype == "im") { for(j in seq_len(m)) { lamj <- lambda[[j]] if(!(lamj$type %in% c("real", "integer"))) stop("lambda must be numeric-valued or integer-valued") lrange <- range(lamj) if(any(is.infinite(lrange))) stop("Infinite pixel values not permitted") if(lrange[1] < 0) stop("Negative pixel values not permitted") } } # determine uniform bound if(!is.null(lmax)) { stopifnot(is.numeric(lmax)) if(length(lmax) != m) { if(length(lmax) == 1) { lmax <- rep.int(lmax, m) } else stop("Length of lmax does not match length of lambda") } } else { # compute lmax lmax <- numeric(m) for(j in seq_len(m)) { lamj <- lambda[[j]] if(is.function(lamj)) { X <- pointsOnLines(L, np=10000) lambdaX <- lamj(X$x, X$y, ...) lmax[j] <- max(lambdaX, na.rm=TRUE) } else if(is.im(lamj)) lmax[j] <- max(lamj) } if(!all(is.finite(lmax))) stop("Infinite values of lambda obtained") if(any(lmax < 0)) stop("Negative upper bound for lambda obtained") names(lmax) <- names(lambda) } # Lewis-Shedler (rejection) method Y <- datagen.runifpoisppOnLines(lmax, L) n <- nrow(Y) if(n == 0) return(Y) # evaluate lambda at each simulated point if(m == 1) { lambda <- lambda[[1]] markindex <- 1 if(is.function(lambda)) lambdaY <- lambda(Y$x, Y$y, ...) 
else lambdaY <- safelookup(lambda, as.ppp(Y, W=as.owin(L))) } else { lambdaY <- numeric(n) markindex <- as.integer(Y$marks) for(j in seq_len(m)) { lamj <- lambda[[j]] jrows <- (markindex == j) Yj <- Y[jrows, , drop=FALSE] if(is.function(lamj)) lambdaY[jrows] <- lamj(Yj$x, Yj$y, ...) else lambdaY[jrows] <- safelookup(lamj, as.ppp(Yj, W=as.owin(L))) } } lambdaY[is.na(lambdaY)] <- 0 # accept/reject pY <- lambdaY/lmax[markindex] if(check) { if(any(pY < 0)) warning("Negative values of lambda obtained") if(any(pY > 1)) warning("lmax is not an upper bound for lambda") } retain <- (runif(n) < pY) Y <- Y[retain, , drop=FALSE] return(Y) } spatstat/R/dg.R0000755000176000001440000001147512237642727013112 0ustar ripleyusers# # dg.S # # $Revision: 1.16 $ $Date: 2013/04/25 06:37:43 $ # # Diggle-Gratton pair potential # # DiggleGratton <- local({ # .... auxiliary functions ...... diggraterms <- function(X, Y, idX, idY, delta, rho) { stopifnot(is.numeric(delta)) stopifnot(is.numeric(rho)) stopifnot(delta < rho) # sort in increasing order of x coordinate oX <- fave.order(X$x) oY <- fave.order(Y$x) Xsort <- X[oX] Ysort <- Y[oY] idXsort <- idX[oX] idYsort <- idY[oY] nX <- npoints(X) nY <- npoints(Y) # call C routine DUP <- spatstat.options("dupC") out <- .C("Ediggra", nnsource = as.integer(nX), xsource = as.double(Xsort$x), ysource = as.double(Xsort$y), idsource = as.integer(idXsort), nntarget = as.integer(nY), xtarget = as.double(Ysort$x), ytarget = as.double(Ysort$y), idtarget = as.integer(idYsort), ddelta = as.double(delta), rrho = as.double(rho), values = as.double(double(nX)), DUP = DUP) # PACKAGE = "spatstat") answer <- integer(nX) answer[oX] <- out$values return(answer) } # .......... template object .......... 
  # Template 'interact' object for the Diggle-Gratton pairwise
  # interaction; the parameters delta, rho are filled in by the
  # creator function DiggleGratton() below via instantiate.interact().
  BlankDG <- list(
    name = "Diggle-Gratton process",
    creator = "DiggleGratton",
    family = "pairwise.family", #evaluated later
    # log pair potential: h = 1 for d > rho,
    # h = (d - delta)/(rho - delta) for delta < d <= rho,
    # h = 0 (log = -Inf, i.e. hard core) for d <= delta
    pot = function(d, par) {
      delta <- par$delta
      rho <- par$rho
      above <- (d > rho)
      inrange <- (!above) & (d > delta)
      h <- above + inrange * (d - delta)/(rho - delta)
      return(log(h))
    },
    par = list(delta=NULL, rho=NULL), # to be filled in later
    parnames = list("lower limit delta", "upper limit rho"),
    # validate the irregular parameters: 0 <= delta < rho < Inf
    init = function(self) {
      delta <- self$par$delta
      rho <- self$par$rho
      if(!is.numeric(delta) || length(delta) != 1)
        stop("lower limit delta must be a single number")
      if(!is.numeric(rho) || length(rho) != 1)
        stop("upper limit rho must be a single number")
      stopifnot(delta >= 0)
      stopifnot(rho > delta)
      stopifnot(is.finite(rho))
    },
    update = NULL, # default OK
    print = NULL,  # default OK
    # translate the fitted canonical coefficient into the exponent kappa
    interpret = function(coeffs, self) {
      kappa <- as.numeric(coeffs[1])
      return(list(param=list(kappa=kappa),
                  inames="exponent kappa",
                  printable=round(kappa,4)))
    },
    # a fitted model is valid iff kappa is finite and nonnegative
    valid = function(coeffs, self) {
      kappa <- as.numeric(coeffs[1])
      return(is.finite(kappa) && (kappa >= 0))
    },
    # project an invalid fit to a valid model: NULL means "no change
    # needed", otherwise fall back to the Poisson process
    project = function(coeffs, self) {
      kappa <- as.numeric(coeffs[1])
      if(is.finite(kappa) && (kappa >= 0))
        return(NULL)
      return(Poisson())
    },
    # range of interaction: rho in general; delta when kappa is
    # (numerically) zero, since the potential is then flat on (delta, rho]
    irange = function(self, coeffs=NA, epsilon=0, ...) {
      rho <- self$par$rho
      if(all(is.na(coeffs)))
        return(rho)
      kappa <- coeffs[1]
      delta <- self$par$delta
      if(abs(kappa) <= epsilon) return(delta) else return(rho)
    },
    version=NULL, # evaluated later
    # fast evaluation is available for the border correction only
    can.do.fast=function(X,correction,par) {
      return(all(correction %in% c("border", "none")))
    },
    fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...)
{ # fast evaluator for DiggleGratton interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for DiggleGratton") delta <- potpars$delta rho <- potpars$rho idX <- seq_len(npoints(X)) idU <- rep.int(-1, npoints(U)) idU[EqualPairs[,2]] <- EqualPairs[,1] answer <- diggraterms(U, X, idU, idX, delta, rho) answer <- log(pmax.int(0, answer)) return(matrix(answer, ncol=1)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral rho <- self$par$rho delta <- self$par$delta width <- rho - delta kappa <- coeffs[1] ans <- pi * (rho^2 - 2 * rho* width/(kappa + 1) + 2 * width^2/((kappa + 1) * (kappa + 2))) return(ans) } ) class(BlankDG) <- "interact" DiggleGratton <- function(delta, rho) { instantiate.interact(BlankDG, list(delta=delta, rho=rho)) } DiggleGratton }) spatstat/R/split.ppp.R0000755000176000001440000001704112237642727014444 0ustar ripleyusers# # split.ppp.R # # $Revision: 1.19 $ $Date: 2013/04/25 06:37:43 $ # # split.ppp and "split<-.ppp" # ######################################### split.ppp <- function(x, f = marks(x), drop=FALSE, un=NULL, ...) 
{
  verifyclass(x, "ppp")
  mf <- markformat(x)
  # by default, discard marks only when splitting by the marks themselves
  # (and they are not a data frame)
  if(is.null(un))
    un <- missing(f) && (mf != "dataframe")
  if(missing(f)) {
    # f defaults to marks of x
    switch(mf,
           none={
             stop("f is missing and there are no marks")
           },
           vector={
             if(!is.multitype(x))
               stop("f is missing and the pattern is not multitype")
             f <- fsplit <- marks(x)
           },
           dataframe={
             # use the first factor-valued column of the mark data frame
             f <- fsplit <- firstfactor(marks(x))
             if(is.null(f))
               stop("Data frame of marks contains no factors")
           })
    splittype <- "factor"
  } else {
    # f was given
    fsplit <- f
    if(is.factor(f)) {
      splittype <- "factor"
    } else if(is.tess(f)) {
      # f is a tessellation: determine the grouping
      f <- marks(cut(x, fsplit))
      splittype <- "tess"
    } else if(is.im(f)) {
      # f is an image: determine the grouping
      fsplit <- tess(image=f)
      f <- marks(cut(x, fsplit))
      splittype <- "tess"
    } else if(is.character(f) && length(f) == 1) {
      # f is the name of a column of marks
      marx <- marks(x)
      if(is.data.frame(marx) && (f %in% names(marx)))
        fsplit <- f <- marx[[f]]
      else
        stop(paste("The name", sQuote(f),
                   "does not match any column of marks"))
      splittype <- "factor"
    } else
      stop(paste("f must be",
                 "a factor, a tessellation, an image,",
                 "or the name of a column of marks"))
    if(length(f) != npoints(x))
      stop("length(f) must equal the number of points in x")
  }
  # At this point
  # 'f' is a factor that can be used to separate the points
  # 'fsplit' is the object (either a factor or a tessellation)
  # that determines the split (and can be "un-split")
  lev <- levels(f)
  if(drop) {
    # remove components that don't contain points
    retain <- (table(f) > 0)
    lev <- lev[retain]
    switch(splittype,
           tess = {
             # remove tiles that don't contain points
             fsplit <- fsplit[retain]
           },
           factor = {
             # delete levels that don't occur
             fsplit <- factor(fsplit, levels=lev)
           },
           stop("Internal error: wrong format for fsplit"))
  }
  # split the data: one sub-pattern per level (NA entries are dropped)
  out <- list()
  for(l in lev)
    out[[paste(l)]] <- x[!is.na(f) & (f == l)]
  if(un)
    out <- lapply(out, unmark)
  if(splittype == "tess") {
    # when splitting by a tessellation, each sub-pattern inherits
    # the corresponding tile as its window
    til <- tiles(fsplit)
    for(i in seq_along(out))
      out[[i]]$window <-
til[[i]] } class(out) <- c("splitppp", class(out)) attr(out, "fsplit") <- fsplit return(out) } "split<-.ppp" <- function(x, f=marks(x), drop=FALSE, un=missing(f), ..., value) { verifyclass(x, "ppp") W <- x$window mf <- markformat(x) # evaluate `un' before assigning value of 'f' force(un) # validate assignment value stopifnot(is.list(value)) if(!all(unlist(lapply(value, is.ppp)))) stop(paste("Each entry of", sQuote("value"), "must be a point pattern")) ismark <- unlist(lapply(value, is.marked)) if(any(ismark) && !all(ismark)) stop(paste("Some entries of", sQuote("value"), "are marked, and others are unmarked")) vmarked <- all(ismark) # determine type of splitting if(missing(f)) { # f defaults to marks of x switch(mf, none={ stop("f is missing and there are no marks") }, vector={ if(!is.multitype(x)) stop("f is missing and the pattern is not multitype") f <- fsplit <- marks(x) }, dataframe={ f <- fsplit <- firstfactor(marks(x)) if(is.null(f)) stop("Data frame of marks contains no factors") }) } else { # f given fsplit <- f if(is.tess(f)) { # f is a tessellation: determine the grouping f <- marks(cut(x, fsplit)) } else if(is.im(f)) { # f is an image: determine the grouping fsplit <- tess(image=f) f <- marks(cut(x, fsplit)) } else if(is.character(f) && length(f) == 1) { # f is the name of a column of marks marx <- marks(x) if(is.data.frame(marx) && (f %in% names(marx))) fsplit <- f <- marx[[f]] else stop(paste("The name", sQuote(f), "does not match any column of marks")) } else if(!is.factor(f)) stop(paste("f must be", "a factor, a tessellation, an image,", "or the name of a column of marks")) if(length(f) != x$n) stop("length(f) must equal the number of points in x") } # all.levels <- lev <- levels(f) if(!drop) levtype <- "levels of f" else { levtype <- "levels which f actually takes" # remove components that don't contain points lev <- lev[table(f) > 0] } if(length(value) != length(lev)) stop(paste("length of", sQuote("value"), "should equal the number of", levtype)) 
# ensure value[[i]] is associated with lev[i] if(!is.null(names(value))) { if(!all(names(value) %in% as.character(lev))) stop(paste("names of", sQuote("value"), "should be levels of f")) value <- value[lev] } names(value) <- NULL # restore the marks, if they were discarded if(un && is.marked(x)) { if(vmarked) warning(paste(sQuote("value"), "contains marked point patterns:", "this is inconsistent with un=TRUE; marks ignored.")) for(i in seq_along(value)) value[[i]] <- value[[i]] %mark% factor(lev[i], levels=all.levels) } # handle NA's in splitting factor if(any(isNA <- is.na(f))) { xNA <- x[isNA] if(un && is.marked(x)) xNA <- xNA %mark% factor(NA, levels=all.levels) value <- append(value, list(xNA)) } # put Humpty together again out <- do.call(superimpose,c(value,list(W=W))) return(out) } print.splitppp <- function(x, ...) { f <- attr(x, "fsplit") what <- if(is.tess(f)) "tessellation" else if(is.factor(f)) "factor" else "unknown data" cat(paste("Point pattern split by", what, "\n")) nam <- names(x) for(i in seq_along(x)) { cat(paste("\n", nam[i], ":\n", sep="")) print(x[[i]]) } return(invisible(NULL)) } summary.splitppp <- function(object, ...) { x <- lapply(object, summary, ...) class(x) <- "summary.splitppp" x } print.summary.splitppp <- function(x, ...) { class(x) <- "listof" print(x) invisible(NULL) } "[.splitppp" <- function(x, ...) { f <- attr(x, "fsplit") # invoke list method on x class(x) <- "list" y <- x[...] # then make it a 'splitppp' object too class(y) <- c("splitppp", class(y)) if(is.tess(f)) { fsplit <- f[...] } else if(is.factor(f)) { lev <- levels(f) sublev <- lev[...] subf <- f[f %in% sublev] fsplit <- factor(subf, levels=lev) } else stop("Unknown splitting type") attr(y, "fsplit") <- fsplit y } "[<-.splitppp" <- function(x, ..., value) { if(!all(unlist(lapply(value, is.ppp)))) stop("replacement value must be a list of point patterns") f <- attr(x, "fsplit") # invoke list method class(x) <- "list" x[...] 
<- value # then make it a 'splitppp' object too class(x) <- c("splitppp", class(x)) if(is.tess(f)) { fsplit <- f } else if(is.factor(f)) { lev <- levels(f) fsplit <- factor(rep.int(lev, unlist(lapply(x, npoints))), levels=lev) } attr(x, "fsplit") <- fsplit x } spatstat/R/fv.R0000755000176000001440000010217512241624725013122 0ustar ripleyusers# # # fv.R # # class "fv" of function value objects # # $Revision: 1.97 $ $Date: 2013/11/16 08:02:00 $ # # # An "fv" object represents one or more related functions # of the same argument, such as different estimates of the K function. # # It is a data.frame with additional attributes # # argu column name of the function argument (typically "r") # # valu column name of the recommended function # # ylab generic label for y axis e.g. K(r) # # fmla default plot formula # # alim recommended range of function argument # # labl recommended xlab/ylab for each column # # desc longer description for each column # # unitname name of unit of length for 'r' # # shade (optional) column names of upper & lower limits # of shading - typically a confidence interval # # Objects of this class are returned by Kest(), etc # ################################################################## # creator fv <- function(x, argu="r", ylab=NULL, valu, fmla=NULL, alim=NULL, labl=names(x), desc=NULL, unitname=NULL, fname=NULL, yexp=ylab) { stopifnot(is.data.frame(x)) # check arguments stopifnot(is.character(argu)) if(!is.null(ylab)) stopifnot(is.character(ylab) || is.language(ylab)) if(!missing(yexp)) { if(is.null(yexp)) yexp <- ylab else stopifnot(is.language(yexp)) } stopifnot(is.character(valu)) if(!(argu %in% names(x))) stop(paste(sQuote("argu"), "must be the name of a column of x")) if(!(valu %in% names(x))) stop(paste(sQuote("valu"), "must be the name of a column of x")) if(is.null(fmla)) fmla <- paste(valu, "~", argu) else if(inherits(fmla, "formula")) { # convert formula to string fmla <- deparse(fmla) } else if(!is.character(fmla)) 
stop(paste(sQuote("fmla"), "should be a formula or a string")) if(is.null(alim)) { argue <- x[[argu]] alim <- range(argue[is.finite(argue)], na.rm=TRUE) } if(!is.numeric(alim) || length(alim) != 2) stop(paste(sQuote("alim"), "should be a vector of length 2")) if(!is.character(labl)) stop(paste(sQuote("labl"), "should be a vector of strings")) stopifnot(length(labl) == ncol(x)) if(is.null(desc)) desc <- character(ncol(x)) else { stopifnot(is.character(desc)) stopifnot(length(desc) == ncol(x)) nbg <- is.na(desc) if(any(nbg)) desc[nbg] <- "" } if(!is.null(fname)) stopifnot(is.character(fname) && length(fname) == 1) # pack attributes attr(x, "argu") <- argu attr(x, "valu") <- valu attr(x, "ylab") <- ylab attr(x, "yexp") <- yexp attr(x, "fmla") <- fmla attr(x, "alim") <- alim attr(x, "labl") <- labl attr(x, "desc") <- desc attr(x, "units") <- as.units(unitname) attr(x, "fname") <- fname attr(x, "dotnames") <- NULL attr(x, "shade") <- NULL # class(x) <- c("fv", class(x)) return(x) } .Spatstat.FvAttrib <- c( "argu", "valu", "ylab", "yexp", "fmla", "alim", "labl", "desc", "units", "fname", "dotnames", "shade") as.data.frame.fv <- function(x, ...) { stopifnot(is.fv(x)) fva <- .Spatstat.FvAttrib attributes(x)[fva] <- NULL class(x) <- "data.frame" x } is.fv <- function(x) { inherits(x, "fv") } # as.fv <- function(x) { UseMethod("as.fv") } as.fv.fv <- function(x) x as.fv.data.frame <- function(x) { if(ncol(x) < 2) stop("Need at least 2 columns") return(fv(x, names(x)[1], , names(x)[2])) } as.fv.matrix <- function(x) { y <- as.data.frame(x) if(any(bad <- is.na(names(y)))) names(y)[bad] <- paste0("V", which(bad)) return(as.fv.data.frame(y)) } # other methods for as.fv are described in the files for the relevant classes. vanilla.fv <- function(x) { # remove everything except basic fv characteristics retain <- c("names", "row.names", .Spatstat.FvAttrib) attributes(x) <- attributes(x)[retain] class(x) <- c("fv", "data.frame") return(x) } print.fv <- function(x, ...) 
{ verifyclass(x, "fv") nama <- names(x) a <- attributes(x) cat(paste("Function value object (class ", sQuote("fv"), ")\n", sep="")) if(!is.null(ylab <- a$ylab)) { if(is.language(ylab)) ylab <- paste(deparse(ylab), collapse=" ") xlab <- fvlabels(x)[[a$argu]] cat(paste("for the function", xlab, "->", ylab, "\n")) } # Descriptions .. desc <- a$desc # .. may require insertion of ylab if(!is.null(ylab)) desc <- sprintf(desc, ylab) # Labels .. labl <- a$labl # .. may require insertion of function name if it is known if(!is.null(fname <- attr(x, "fname"))) labl <- sprintf(labl, fname) # Start printing cat("Entries:\n") lablen <- nchar(labl) labjump <- max(c(lablen,5)) + 3 idlen <- nchar(nama) idjump <- max(c(idlen,5)) + 3 pad <- function(n) { paste(rep(" ", n), collapse="") } cat("id", pad(idjump-2), "label", pad(labjump - 5), "description\n", sep="") cat("--", pad(idjump-2), "-----", pad(labjump - 5), "-----------\n", sep="") for(j in seq_len(ncol(x))) cat(paste(nama[j], pad(idjump - idlen[j]), labl[j],pad(labjump - lablen[j]), desc[j],"\n", sep="")) cat("--------------------------------------\n\n") cat(paste("Default plot formula:\t", deparse(as.formula(a$fmla)), "\n")) if(!is.null(a$shade)) cat(paste("\nColumns", commasep(sQuote(a$shade)), "will be plotted as shading (by default)\n")) alim <- signif(a$alim, 5) rang <- signif(range(with(x, .x)), 5) cat(paste("\nRecommended range of argument ", a$argu, ": ", prange(alim), sep="")) cat(paste("\n Available range of argument ", a$argu, ": ", prange(rang), "\n", sep="")) ledge <- summary(unitname(x))$legend if(!is.null(ledge)) cat(paste(ledge, "\n")) invisible(NULL) } # manipulating the names in fv objects .Spatstat.FvAbbrev <- c( ".x", ".y", ".s", ".", "*") fvnames <- function(X, a=".") { verifyclass(X, "fv") if(!is.character(a) || length(a) > 1) stop("argument a must be a character string") switch(a, ".y"={ return(attr(X, "valu")) }, ".x"={ return(attr(X, "argu")) }, ".s"={ return(attr(X, "shade")) }, "." 
= { # The specified 'dotnames' dn <- attr(X, "dotnames") if(is.null(dn)) dn <- fvnames(X, "*") return(dn) }, "*"={ # all column names other than the function argument allvars <- names(X) argu <- attr(X, "argu") nam <- allvars[allvars != argu] nam <- rev(nam) # convention return(nam) }, stop(paste("Unrecognised abbreviation", dQuote(a))) ) } "fvnames<-" <- function(X, a=".", value) { verifyclass(X, "fv") if(!is.character(a) || length(a) > 1) stop(paste("argument", sQuote("a"), "must be a character string")) if(a == "*") { warning(paste("Cannot reset fvnames(x,", dQuote("*"), ")")) return(X) } if(a == "." && length(value) == 0) { # clear the dotnames attr(X, "dotnames") <- NULL return(X) } # validate the names switch(a, ".x"=, ".y"={ if(!is.character(value) || length(value) != 1) stop("value should be a single string") }, ".s"={ if(!is.character(value) || length(value) != 2) stop("value should be a vector of 2 character strings") }, "."={ if(!is.character(value)) stop("value should be a character vector") }, stop(paste("Unrecognised abbreviation", dQuote(a))) ) # check the names match existing column names tags <- names(X) if(any(nbg <- !(value %in% tags))) stop(paste(ngettext(sum(nbg), "The string", "The strings"), commasep(dQuote(value[nbg])), ngettext(sum(nbg), "does not match the name of any column of X", "do not match the names of any columns of X"))) # reassign names switch(a, ".x"={ attr(X, "argu") <- value }, ".y"={ attr(X, "valu") <- value }, ".s"={ attr(X, "shade") <- value }, "."={ attr(X, "dotnames") <- value }) return(X) } fvlabels <- function(x, expand=FALSE) { lab <- attr(x, "labl") names(lab) <- names(x) if(expand) { # expand plot labels if(!is.null(fname <- attr(x, "fname"))) lab <- sprintf(lab, fname) } return(lab) } "fvlabels<-" <- function(x, value) { stopifnot(is.fv(x)) stopifnot(is.character(value)) stopifnot(length(value) == length(fvlabels(x))) attr(x, "labl") <- value return(x) } fvlabelmap <- local({ magic <- function(x) { subx <- 
paste("substitute(", x, ", NULL)") out <- try(eval(parse(text=subx)), silent=TRUE) if(inherits(out, "try-error")) out <- as.name(make.names(subx)) out } fvlabelmap <- function(x, dot=TRUE) { labl <- fvlabels(x, expand=TRUE) # construct mapping from identifiers to labels map <- as.list(labl) map <- lapply(map, magic) names(map) <- colnames(x) if(dot) { # also map "." to name of target function if(!is.null(ye <- attr(x, "yexp"))) map <- append(map, list("."=ye)) # map other fvnames to their corresponding labels map <- append(map, list(".x"=map[[fvnames(x, ".x")]], ".y"=map[[fvnames(x, ".y")]])) if(!is.null(fvnames(x, ".s"))) { shex <- unname(map[fvnames(x, ".s")]) shadexpr <- substitute(c(A,B), list(A=shex[[1]], B=shex[[2]])) map <- append(map, list(".s" = shadexpr)) } } return(map) } fvlabelmap }) fvlegend <- function(object, elang) { # Compute mathematical legend(s) for column(s) in fv object # transformed by language expression 'elang'. # The expression must already be in 'expanded' form. # The result is an expression, or expression vector. # The j-th entry of the vector is an expression for the # j-th column of function values. 
ee <- distributecbind(as.expression(elang))
  map <- fvlabelmap(object, dot = TRUE)
  eout <- as.expression(lapply(ee, function(ei, map) {
    eval(substitute(substitute(e, mp), list(e = ei, mp = map)))
  }, map = map))
  return(eout)
}

# Append extra column(s) of function values to an fv object.
#   x         fv object to be extended
#   y         an fv object, matrix or data frame supplying the new columns
#   labl      plot labels for the new columns (default: derived from y)
#   desc      descriptions of the new columns (default: derived from y)
#   preferred name of the column to become the recommended value (default: keep x's)
# Returns a new fv object; x and y are not modified.
bind.fv <- function(x, y, labl=NULL, desc=NULL, preferred=NULL) {
  verifyclass(x, "fv")
  ax <- attributes(x)
  if(is.fv(y)) {
    # y is already an fv object
    ay <- attributes(y)
    if(ax$fname != ay$fname) {
      # x and y represent different functions
      # expand the labels separately
      fvlabels(x) <- fvlabels(x, expand=TRUE)
      fvlabels(y) <- fvlabels(y, expand=TRUE)
      ax <- attributes(x)
      ay <- attributes(y)
    }
    # check compatibility of 'r' values
    xr <- ax$argu
    yr <- ay$argu
    rx <- x[[xr]]
    ry <- y[[yr]]
    # BUG FIX: previously compared length(rx) with itself, so a length
    # mismatch was never detected and rx-ry below recycled silently.
    if((length(rx) != length(ry)) || (max(abs(rx-ry)) > .Machine$double.eps))
      stop("fv objects x and y have incompatible domains")
    # reduce y to data frame and strip off 'r' values
    ystrip <- as.data.frame(y)
    yrpos <- which(colnames(ystrip) == yr)
    ystrip <- ystrip[, -yrpos, drop=FALSE]
    # determine descriptors
    if(is.null(labl)) labl <- attr(y, "labl")[-yrpos]
    if(is.null(desc)) desc <- attr(y, "desc")[-yrpos]
    #
    y <- ystrip
  } else {
    # y is a matrix or data frame
    y <- as.data.frame(y)
  }
  # check for duplicated column names
  allnames <- c(colnames(x), colnames(y))
  if(any(dup <- duplicated(allnames))) {
    nbg <- unique(allnames[dup])
    nn <- length(nbg)
    warning(paste("The column", ngettext(nn, "name", "names"),
                  commasep(sQuote(nbg)),
                  ngettext(nn, "was", "were"),
                  "duplicated. Unique names were generated"))
    allnames <- make.names(allnames, unique=TRUE, allow_ = FALSE)
    colnames(y) <- allnames[ncol(x) + seq_len(ncol(y))]
  }
  if(is.null(labl))
    labl <- paste("%s[", colnames(y), "](r)", sep="")
  else if(length(labl) != ncol(y))
    stop(paste("length of", sQuote("labl"),
               "does not match number of columns of y"))
  if(is.null(desc))
    desc <- character(ncol(y))
  else if(length(desc) != ncol(y))
    stop(paste("length of", sQuote("desc"),
               "does not match number of columns of y"))
  if(is.null(preferred))
    preferred <- ax$valu
  xy <- cbind(as.data.frame(x), y)
  z <- fv(xy, ax$argu, ax$ylab, preferred,
          ax$fmla, ax$alim,
          c(ax$labl, labl),
          c(ax$desc, desc),
          unitname=unitname(x),
          fname=ax$fname)
  return(z)
}

# cbind method for fv objects: glue several fv objects together column-wise
# by repeated calls to bind.fv. A single list argument is unwrapped first.
cbind.fv <- function(...) {
  a <- list(...)
  n <- length(a)
  if(n == 0)
    return(NULL)
  if(n == 1) {
    # single argument - extract it
    a <- a[[1]]
    # could be an fv object
    if(is.fv(a)) return(a)
    n <- length(a)
  }
  z <- a[[1]]
  if(!is.fv(z))
    stop("First argument should be an object of class fv")
  if(n > 1)
    for(i in 2:n)
      z <- bind.fv(z, a[[i]])
  return(z)
}

# Merge several fv objects, keeping columns named in 'same' once
# and columns named in 'different' once per object (prefixed).
collapse.fv <- function(..., same=NULL, different=NULL) {
  x <- list(...)
n <- length(x) if(n == 0) return(NULL) if(n == 1) { # single argument - could be a list - extract it x1 <- x[[1]] if(!is.fv(x1)) x <- x1 } if(!all(unlist(lapply(x, is.fv)))) stop("arguments should be objects of class fv") if(is.null(same)) same <- character(0) if(is.null(different)) different <- character(0) if(any(duplicated(c(same, different)))) stop(paste("The arguments", sQuote("same"), "and", sQuote("different"), "should not have entries in common")) either <- c(same, different) # validate nbg <- unlist(lapply(x, function(z, e) { e[!(e %in% names(z))] }, e=either)) nbg <- unique(nbg) if((nbad <- length(nbg)) > 0) stop(paste(ngettext(nbad, "The name", "The names"), commasep(sQuote(nbg)), ngettext(nbad, "is", "are"), "not present in the function objects")) # names for different versions versionnames <- names(x) if(is.null(versionnames)) versionnames <- paste("x", seq_along(x), sep="") shortnames <- abbreviate(versionnames) # extract the common values y <- x[[1]] if(length(same) > 0 && !(fvnames(y, ".y") %in% same)) fvnames(y, ".y") <- same[1] z <- y[, c(fvnames(y, ".x"), same)] dotnames <- same # now merge the different values for(i in seq_along(x)) { # extract values for i-th object xi <- x[[i]] wanted <- (names(xi) %in% different) y <- as.data.frame(xi)[, wanted, drop=FALSE] desc <- attr(xi, "desc")[wanted] labl <- attr(xi, "labl")[wanted] # relabel prefix <- shortnames[i] preamble <- versionnames[i] names(y) <- if(ncol(y) == 1) prefix else paste(prefix,names(y),sep="") dotnames <- c(dotnames, names(y)) # glue onto fv object z <- bind.fv(z, y, labl=paste(prefix, labl, sep="~"), desc=paste(preamble, desc)) } fvnames(z, ".") <- dotnames return(z) } # rename one of the columns of an fv object tweak.fv.entry <- function(x, current.tag, new.labl=NULL, new.desc=NULL, new.tag=NULL) { hit <- (names(x) == current.tag) if(!any(hit)) return(x) # update descriptions of column i <- min(which(hit)) if(!is.null(new.labl)) attr(x, "labl")[i] <- new.labl if(!is.null(new.desc)) 
attr(x, "desc")[i] <- new.desc
  # adjust column tag
  if(!is.null(new.tag)) {
    names(x)[i] <- new.tag
    # update dotnames
    dn <- fvnames(x, ".")
    if(current.tag %in% dn) {
      dn[dn == current.tag] <- new.tag
      fvnames(x, ".") <- dn
    }
    # if the tweaked column is the preferred value, adjust accordingly
    if(attr(x, "valu") == current.tag)
      attr(x, "valu") <- new.tag
    # if the tweaked column is the function argument, adjust accordingly
    # BUG FIX: previously assigned attr(x, "valu") here, clobbering the
    # preferred-value tag and leaving the argument tag stale.
    if(attr(x, "argu") == current.tag)
      attr(x, "argu") <- new.tag
  }
  return(x)
}

# change some or all of the auxiliary text in an fv object
# All arguments except x are optional; only those supplied are changed.
#   new.ylab/new.yexp   y-axis label (string/expression)
#   new.fname           function name used to expand "%s" in labels
#   tags + new.desc/new.labl/new.tags
#                       per-column updates, matched by position in 'tags'
#   new.dotnames        columns plotted by default
#   new.preferred       name of recommended value column (must exist)
#   new.formula         default plot formula
rebadge.fv <- function(x, new.ylab, new.fname,
                       tags, new.desc, new.labl,
                       new.yexp=new.ylab, new.dotnames,
                       new.preferred, new.formula, new.tags) {
  if(!missing(new.ylab))
    attr(x, "ylab") <- new.ylab
  if(!missing(new.yexp) || !missing(new.ylab))
    attr(x, "yexp") <- new.yexp
  if(!missing(new.fname))
    attr(x, "fname") <- new.fname
  if(!missing(tags) && !(missing(new.desc) && missing(new.labl) && missing(new.tags))) {
    nama <- names(x)
    desc <- attr(x, "desc")
    labl <- attr(x, "labl")
    valu <- attr(x, "valu")
    for(i in seq_along(tags))
      if(!is.na(m <- match(tags[i], nama))) {
        if(!missing(new.desc)) desc[m] <- new.desc[i]
        if(!missing(new.labl)) labl[m] <- new.labl[i]
        if(!missing(new.tags)) {
          # rename the column, keeping the 'valu' pointer in sync
          names(x)[m] <- new.tags[i]
          if(tags[i] == valu) attr(x, "valu") <- new.tags[i]
        }
      }
    attr(x, "desc") <- desc
    attr(x, "labl") <- labl
  }
  if(!missing(new.dotnames))
    fvnames(x, ".") <- new.dotnames
  if(!missing(new.preferred)) {
    stopifnot(new.preferred %in% names(x))
    attr(x, "valu") <- new.preferred
  }
  if(!missing(new.formula))
    formula(x) <- new.formula
  return(x)
}

# subset extraction operator
"[.fv" <-
  function(x, i, j, ..., drop=FALSE) {
  igiven <- !missing(i)
  jgiven <- !missing(j)
  y <- as.data.frame(x)
  if(igiven && jgiven)
    z <- y[i, j, drop=drop]
  else if(igiven)
    z <- y[i, , drop=drop]
  else if(jgiven)
    z <- y[ , j, drop=drop]
  else z <- y
  # return only the selected values as a data frame or vector.
if(drop) return(z) if(!jgiven) selected <- seq_len(ncol(x)) else { nameindices <- seq_along(names(x)) names(nameindices) <- names(x) selected <- as.vector(nameindices[j]) } nama <- names(z) argu <- attr(x, "argu") if(!(argu %in% nama)) stop(paste("The function argument", sQuote(argu), "must not be removed")) valu <- attr(x, "valu") if(!(valu %in% nama)) stop(paste("The default column of function values", sQuote(valu), "must not be removed")) # If range of argument was implicitly changed, adjust "alim" alim <- attr(x, "alim") rang <- range(z[[argu]]) alim <- c(max(alim[1], rang[1]), min(alim[2], rang[2])) result <- fv(z, argu=attr(x, "argu"), ylab=attr(x, "ylab"), valu=attr(x, "valu"), fmla=attr(x, "fmla"), alim=alim, labl=attr(x, "labl")[selected], desc=attr(x, "desc")[selected], unitname=attr(x, "units"), fname=attr(x,"fname")) # carry over preferred names, if possible dotn <- fvnames(x, ".") fvnames(result, ".") <- dotn[dotn %in% colnames(result)] shad <- fvnames(x, ".s") if(!is.null(shad) && all(shad %in% colnames(result))) fvnames(result, ".s") <- shad return(result) } # method for 'formula' formula.fv <- function(x, ...) { attr(x, "fmla") } # method for 'formula<-' # (generic is defined in formulae.R) "formula<-.fv" <- function(x, ..., value) { if(is.null(value)) value <- paste(fvnames(x, ".y"), "~", fvnames(x, ".x")) else if(inherits(value, "formula")) { # convert formula to string value <- deparse(value) } else if(!is.character(value)) stop("Assignment value should be a formula or a string") attr(x, "fmla") <- value return(x) } # method for with() with.fv <- function(data, expr, ..., fun=NULL, enclos=NULL) { if(any(names(list(...)) == "drop")) stop("Outdated argument 'drop' used in with.fv") cl <- short.deparse(sys.call()) verifyclass(data, "fv") if(is.null(enclos)) enclos <- parent.frame() # convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) # convert syntactic expression to call elang <- substitute(expr) # map "." 
etc to names of columns of data datanames <- names(data) xname <- fvnames(data, ".x") yname <- fvnames(data, ".y") ux <- as.name(xname) uy <- as.name(yname) dnames <- datanames[datanames %in% fvnames(data, ".")] ud <- as.call(lapply(c("cbind", dnames), as.name)) if(!is.null(fvnames(data, ".s"))) { snames <- datanames[datanames %in% fvnames(data, ".s")] us <- as.call(lapply(c("cbind", snames), as.name)) } else us <- NULL expandelang <- eval(substitute(substitute(ee, list(.=ud, .x=ux, .y=uy, .s=us)), list(ee=elang))) evars <- all.vars(expandelang) used.dotnames <- evars[evars %in% dnames] # evaluate expression datadf <- as.data.frame(data) results <- eval(expandelang, as.list(datadf), enclos=enclos) # -------------------- # commanded to return numerical values only? if(!is.null(fun) && !fun) return(results) if(!is.matrix(results) && !is.data.frame(results)) { # result is a vector if(is.null(fun)) fun <- FALSE if(!fun || length(results) != nrow(datadf)) return(results) results <- matrix(results, ncol=1) } else { # result is a matrix or data frame if(is.null(fun)) fun <- TRUE if(!fun || nrow(results) != nrow(datadf)) return(results) } # result is a matrix or data frame of the right dimensions # make a new fv object # ensure columns of results have names if(is.null(colnames(results))) colnames(results) <- paste("col", seq_len(ncol(results)), sep="") resultnames <- colnames(results) # get values of function argument xvalues <- datadf[[xname]] # tack onto result matrix results <- cbind(xvalues, results) colnames(results) <- c(xname, resultnames) results <- data.frame(results) # check for alteration of column names oldnames <- resultnames resultnames <- colnames(results)[-1] if(any(resultnames != oldnames)) warning("some column names were illegal and have been changed") # determine mapping (if any) from columns of output to columns of input namemap <- match(colnames(results), names(datadf)) okmap <- !is.na(namemap) # Build up fv object # decide which of the columns should 
be the preferred value newyname <- if(yname %in% resultnames) yname else resultnames[1] # construct default plot formula fmla <- deparse(as.formula(paste(". ~", xname))) dotnames <- resultnames # construct description strings desc <- character(ncol(results)) desc[okmap] <- attr(data, "desc")[namemap[okmap]] desc[!okmap] <- paste("Computed value", resultnames[!okmap]) # function name fname <- deparse(cl) # construct mathematical expression for function (yexp) oldyexp <- attr(data, "yexp") if(is.null(oldyexp)) yexp <- substitute(f(xname), list(f=as.name(fname), xname=as.name(xname))) else { # map 'cbind(....)' to "." for name of function only cb <- paste("cbind(", paste(used.dotnames, collapse=", "), ")", sep="") compresselang <- gsub(cb, ".", deparse(expandelang), fixed=TRUE) compresselang <- as.formula(paste(compresselang, "~1"))[[2]] # construct mapping using original function name labmap <- fvlabelmap(data, dot=TRUE) labmap[["."]] <- oldyexp yexp <- eval(substitute(substitute(ee, ff), list(ee=compresselang, ff=labmap))) } # construct mathematical labels mathlabl <- as.character(fvlegend(data, expandelang)) labl <- colnames(results) mathmap <- match(labl, used.dotnames) okmath <- !is.na(mathmap) labl[okmath] <- mathlabl[mathmap[okmath]] # form fv object and return out <- fv(results, argu=xname, valu=newyname, labl=labl, desc=desc, alim=attr(data, "alim"), fmla=fmla, unitname=unitname(data), fname=fname, yexp=yexp, ylab=yexp) fvnames(out, ".") <- dotnames return(out) } # stieltjes integration for fv objects stieltjes <- function(f, M, ...) { # stieltjes integral of f(x) dM(x) if(!is.fv(M)) stop("M must be an object of class fv") if(!is.function(f)) stop("f must be a function") # integration variable argu <- attr(M, "argu") x <- M[[argu]] # values of integrand fx <- f(x, ...) 
# estimates of measure valuenames <- names(M) [names(M) != argu] Mother <- as.data.frame(M)[, valuenames] Mother <- as.matrix(Mother, nrow=nrow(M)) # increments of measure dM <- apply(Mother, 2, diff) dM <- rbind(dM, 0) dM[is.na(dM)] <- 0 # integrate f(x) dM(x) results <- apply(fx * dM, 2, sum) results <- as.list(results) names(results) <- valuenames return(results) } prefixfv <- function(x, tagprefix="", descprefix="", lablprefix=tagprefix, whichtags=fvnames(x, "*")) { # attach a prefix to fv information stopifnot(is.fv(x)) att <- attributes(x) relevant <- names(x) %in% whichtags oldtags <- names(x)[relevant] newtags <- paste(tagprefix, oldtags, sep="") newlabl <- paste(lablprefix, att$labl[relevant], sep="") newdesc <- paste(descprefix, att$desc[relevant], sep="") y <- rebadge.fv(x, tags=oldtags, new.desc=newdesc, new.labl=newlabl, new.tags=newtags) return(y) } reconcile.fv <- function(...) { # reconcile several fv objects by finding the columns they share in common z <- list(...) if(!all(unlist(lapply(z, is.fv)))) { if(length(z) == 1 && is.list(z[[1]]) && all(unlist(lapply(z[[1]], is.fv)))) z <- z[[1]] else stop("all arguments should be fv objects") } n <- length(z) if(n <= 1) return(z) # find columns that are common to all estimates keepcolumns <- names(z[[1]]) keepvalues <- fvnames(z[[1]], "*") for(i in 2:n) { keepcolumns <- intersect(keepcolumns, names(z[[i]])) keepvalues <- intersect(keepvalues, fvnames(z[[i]], "*")) } if(length(keepvalues) == 0) stop("cannot reconcile fv objects: they have no columns in common") # determine name of the 'preferred' column prefs <- unlist(lapply(z, fvnames, a=".y")) prefskeep <- prefs[prefs %in% keepvalues] if(length(prefskeep) > 0) { # pick the most popular chosen <- unique(prefskeep)[which.max(table(prefskeep))] } else { # drat - pick a value arbitrarily chosen <- keepvalues[1] } z <- lapply(z, rebadge.fv, new.preferred=chosen) z <- lapply(z, "[.fv", j=keepcolumns) # also clip to the same r values rmax <- 
min(unlist(lapply(z, function(x) { max(with(x, .x)) })))
  # clip every object to the common argument range [0, rmax]
  z <- lapply(z, function(x, rmax) { x[ with(x, .x) <= rmax, ] }, rmax=rmax)
  return(z)
}

# Convert an fv object to an ordinary R function of the argument,
# by linear interpolation of the tabulated values.
#   value       which column(s) to interpolate (name(s) or an fvnames
#               abbreviation such as ".y")
#   extrapolate if TRUE, extend constantly beyond the argument range
#               (approxfun rule=1 vs rule=2 — note rule=1 returns NA
#               outside the range, rule=2 extends; see ?approxfun)
# Returns a function of class "fvfun".
as.function.fv <- function(x, ..., value=".y", extrapolate=FALSE) {
  trap.extra.arguments(...)
  # extract function argument
  xx <- with(x, .x)
  # extract all function values
  yy <- as.data.frame(x)[, fvnames(x, "*"), drop=FALSE]
  # determine which value(s) to supply
  if(!is.character(value))
    stop("value should be a character string or vector specifying columns of x")
  if(!all(value %in% colnames(yy))) {
    # 'value' may be an abbreviation like ".y"; try to expand it
    expandvalue <- try(fvnames(x, value))
    if(!inherits(expandvalue, "try-error")) {
      value <- expandvalue
    } else stop("Unable to determine columns of x")
  }
  yy <- yy[,value]
  argname <- fvnames(x, ".x")
  endrule <- if(extrapolate) 1 else 2
  if(length(value) == 1) {
    # make a single 'approxfun' and return it
    f <- approxfun(xx, yy, rule=endrule)
    # magic: rewrite the formals/body of the closure so that its
    # argument is named after the fv argument (e.g. "r") rather than "v"
    names(formals(f))[1] <- argname
    body(f)[[4]] <- as.name(argname)
  } else {
    # make a list of 'approxfuns', one per selected column
    funs <- lapply(yy, function(z, u, endrule) {
      approxfun(x=u, y=z, rule=endrule)},
                   u=xx, endrule=endrule)
    # return a function which selects the appropriate 'approxfun' and executes
    f <- function(x, what=value) {
      what <- match.arg(what)
      funs[[what]](x)
    }
    # magic: install 'value' as the default for 'what' and rename the
    # first argument; body(f)[[3]][[2]] is the call argument position
    formals(f)[[2]] <- value
    names(formals(f))[1] <- argname
    body(f)[[3]][[2]] <- as.name(argname)
  }
  class(f) <- c("fvfun", class(f))
  attr(f, "fname") <- attr(x, "fname")
  attr(f, "yexp") <- attr(x, "yexp")
  return(f)
}

# print method for the interpolating functions made by as.function.fv:
# shows the argument list and a one-line description instead of the body
print.fvfun <- function(x, ...) {
  y <- args(x)
  yexp <- as.expression(attr(x, "yexp"))
  body(y) <- as.name(paste("Returns interpolated value of", yexp))
  print(y, ...)
return(invisible(NULL))
}

# recursive search through a parse tree to find calls to 'cbind'
# Returns a list of integer vectors; each vector is the nested index
# (suitable for root[[loc]]) of one cbind call, with integer(0)
# meaning the root itself is the cbind call. NULL if none found.
#   depth/maxdepth guard against runaway recursion on cyclic structures
findcbind <- function(root, depth=0, maxdepth=1000) {
  # recursive search through a parse tree to find calls to 'cbind'
  if(depth > maxdepth)
    stop("Reached maximum depth")
  if(length(root) == 1)
    return(NULL)
  if(identical(as.name(root[[1]]), as.name("cbind")))
    return(list(numeric(0)))
  out <- NULL
  for(i in 2:length(root)) {
    di <- findcbind(root[[i]], depth+1, maxdepth)
    if(!is.null(di))
      # prepend this child's position to each nested location
      out <- append(out, lapply(di, function(z,i){ c(i,z)}, i=i))
  }
  return(out)
}

# binary/unary operators whose arguments need parenthesising when
# substituted for a cbind(...) call below
.MathOpNames <- c("+", "-", "*", "/",
                  "^", "%%", "%/%",
                  "&", "|", "!",
                  "==", "!=", "<", "<=", ">=", ">")

distributecbind <- function(x) {
  # x is an expression involving a call to 'cbind'
  # return a vector of expressions, each obtained by replacing 'cbind(...)'
  # by one of its arguments in turn.
  stopifnot(typeof(x) == "expression")
  xlang <- x[[1]]
  locations <- findcbind(xlang)
  if(length(locations) == 0)
    return(x)
  # cbind might occur more than once
  # check that the number of arguments is the same each time
  narg <- unique(unlist(lapply(locations,
                               function(loc, y) {
                                 if(length(loc) > 0) length(y[[loc]]) else length(y)
                               },
                               y=xlang))) - 1
  if(length(narg) > 1)
    return(NULL)
  out <- NULL
  if(narg > 0) {
    for(i in 1:narg) {
      # make a version of the expression
      # in which cbind() is replaced by its i'th argument
      fakexlang <- xlang
      for(loc in locations) {
        if(length(loc) > 0) {
          # usual case: 'loc' is integer vector representing nested index
          cbindcall <- xlang[[loc]]
          # extract i-th argument
          argi <- cbindcall[[i+1]]
          # if argument is an expression, enclose it in parentheses
          if(length(argi) > 1 && paste(argi[[1]]) %in% .MathOpNames)
            argi <- substitute((x), list(x=argi))
          # replace cbind call by its i-th argument
          fakexlang[[loc]] <- argi
        } else {
          # special case: 'loc' = integer(0) representing xlang itself
          cbindcall <- xlang
          # extract i-th argument
          argi <- cbindcall[[i+1]]
          # replace cbind call by its i-th argument
          fakexlang <- cbindcall[[i+1]]
        }
      }
      # add to final expression
      out <- c(out, as.expression(fakexlang))
    }
} # end if(narg > 0)
  return(out)
}

# Form a new 'fv' object as a ratio
# Either supply the ratio directly as 'df' (with numer implied), or supply
# 'numer' (a data frame) and 'denom' (data frame or scalar) and the
# quotient is computed. The ... arguments are passed to fv() to label
# the result. If ratio=TRUE the result is a "rat" object carrying the
# numerator and denominator as attributes.
ratfv <- function(df, numer, denom, ..., ratio=TRUE) {
  # Determine y
  if(!missing(df)) {
    y <- fv(df, ...)
    num <- NULL
  } else {
    # Compute numer/denom
    # Numerator must be a data frame
    num <- fv(numer, ...)
    # Denominator may be a data frame or a constant
    force(denom)
    y <- eval.fv(num/denom)
    # relabel
    y <- fv(as.data.frame(y), ...)
  }
  if(!ratio)
    return(y)
  if(is.null(num)) {
    # Compute num = y * denom
    # Denominator may be a data frame or a constant
    force(denom)
    num <- eval.fv(y * denom)
    # ditch labels
    num <- fv(as.data.frame(num), ...)
  }
  # make denominator an fv object
  if(is.data.frame(denom)) {
    den <- fv(denom, ...)
  } else {
    # scalar
    check.1.real(denom, "Unless it is a data frame,")
    # replicate it in all the data columns
    dendf <- as.data.frame(num)
    valuecols <- (names(num) != fvnames(num, ".x"))
    dendf[, valuecols] <- denom
    den <- fv(dendf, ...)
  }
  # tweak the descriptions
  ok <- (names(y) != fvnames(y, ".x"))
  attr(num, "desc")[ok] <- paste("numerator of", attr(num, "desc")[ok])
  attr(den, "desc")[ok] <- paste("denominator of", attr(den, "desc")[ok])
  # form ratio object
  y <- rat(y, num, den, check=FALSE)
  return(y)
}

# Tack new column(s) onto a ratio fv object
# x must be a "rat" object (as made by ratfv); the new column is
# numerator/denominator and the component numerator/denominator fv
# objects are extended in parallel so the ratio structure stays valid.
bind.ratfv <- function(x, numerator, denominator,
                       labl = NULL, desc = NULL, preferred = NULL,
                       ratio=TRUE) {
  y <- bind.fv(x, numerator/denominator,
               labl=labl, desc=desc, preferred=preferred)
  if(!ratio)
    return(y)
  stopifnot(inherits(x, "rat"))
  num <- attr(x, "numerator")
  den <- attr(x, "denominator")
  # convert scalar denominator to data frame
  if(!is.data.frame(denominator)) {
    check.1.real(denominator, "Unless it is a data frame,")
    # replicate the scalar into a data frame shaped like the numerator
    dvalue <- denominator
    denominator <- numerator
    denominator[] <- dvalue
  }
  num <- bind.fv(num, numerator,
                 labl=labl, desc=paste("numerator of", desc),
                 preferred=preferred)
  den <- bind.fv(den, denominator,
                 labl=labl, desc=paste("denominator of", desc),
                 preferred=preferred)
  y <- rat(y, num, den, check=FALSE)
  return(y)
}

conform.ratfv <- function(x) {
# harmonise display properties in components of a ratio stopifnot(inherits(x, "rat"), is.fv(x)) num <- attr(x, "numerator") den <- attr(x, "denominator") formula(num) <- formula(den) <- formula(x) fvnames(num, ".") <- fvnames(den, ".") <- fvnames(x, ".") unitname(num) <- unitname(den) <- unitname(x) attr(x, "numerator") <- num attr(x, "denominator") <- den return(x) } spatstat/R/Kscaled.R0000755000176000001440000001136112237642727014060 0ustar ripleyusers# # Kscaled.R Estimation of K function for locally-scaled process # # $Revision: 1.7 $ $Date: 2013/02/07 09:58:14 $ # "Lscaled" <- function(...) { K <- Kscaled(...) L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object L <- rebadge.fv(L, substitute(Lscaled(r), NULL), "Lscaled") return(L) } "Kscaled"<- function (X, lambda=NULL, ..., r = NULL, breaks = NULL, correction=c("border", "isotropic", "translate"), sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") rfixed <- !missing(r) || !missing(breaks) # determine basic parameters W <- X$window npts <- X$n area <- area.owin(W) rmaxdefault <- rmax.rule("K", W, npts/area) * sqrt(npts/area) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # match corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) best.wanted <- ("best" %in% correction) correction <- implemented.for.K(correction, W$type, correction.given) ########################################################### # DETERMINE WEIGHTS AND VALIDATE # if(missing(lambda)) { # No intensity data provided # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- as.numeric(lambda) } else { # lambda values provided if(is.im(lambda)) lambda <- 
safelookup(lambda, X) else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npts) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) } # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # this will be the output data frame K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", substitute(Kscaled(r), NULL), "theo", , alim, c("r","%s[pois](r)"), desc, fname="Kscaled") # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax) I <- close$i J <- close$j # locally-scaled distances lamIJ <- (sqrt(lambda[I]) + sqrt(lambda[J]))/2 absDIJ <- close$d DIJ <- absDIJ * lamIJ XI <- ppp(close$xi, close$yi, window=W, check=FALSE) if(any(correction == "none")) { # uncorrected! For demonstration purposes only! wh <- whist(DIJ, breaks$val) # no weights Kun <- cumsum(wh)/npts K <- bind.fv(K, data.frame(un=Kun), "%s[un](r)", "uncorrected estimate of %s", "un") } if(any(correction == "border")) { # border method # Compute SCALED distances to boundary b <- bdist.points(X) * sqrt(lambda) I <- close$i bI <- b[I] # apply reduced sample algorithm RS <- Kount(DIJ, bI, b, breaks) if(any(correction == "border")) { Kb <- RS$numerator/RS$denom.count K <- bind.fv(K, data.frame(border=Kb), "%s[bord](r)", "border-corrected estimate of %s", "border") } } if(any(correction == "translate")) { # translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) wh <- whist(DIJ, breaks$val, edgewt) Ktrans <- cumsum(wh)/npts h <- diameter(W)/2 Ktrans[r >= h] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "%s[trans](r)", "translation-corrected estimate of %s", "trans") } if(any(correction == "isotropic")) { # Ripley isotropic correction (using UN-SCALED distances) edgewt <- edge.Ripley(XI, matrix(absDIJ, ncol=1)) wh <- whist(DIJ, breaks$val, edgewt) 
Kiso <- cumsum(wh)/npts h <- diameter(W)/2 Kiso[r >= h] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "%s[iso](r)", "Ripley isotropic correction estimate of %s", "iso") } # default plot will display all edge corrections formula(K) <- . ~ r nama <- rev(colnames(K)) fvnames(K, ".") <- nama[!(nama %in% c("r", "rip", "ls"))] # unitname(K) <- c("normalised unit", "normalised units") return(K) } spatstat/R/slrm.R0000755000176000001440000004477712237642727013510 0ustar ripleyusers# # slrm.R # # Spatial Logistic Regression # # $Revision: 1.24 $ $Date: 2013/09/05 07:44:08 $ # slrm <- function(formula, ..., data=NULL, offset=TRUE, link="logit", dataAtPoints=NULL, splitby=NULL) { # remember call CallInfo <- list(callstring = short.deparse(sys.call()), cl = match.call(), formula = formula, offset=offset, link=link, splitby=splitby, dotargs=list(...)) if(!(link %in% c("logit", "cloglog"))) stop(paste("Unrecognised link", dQuote(link))) ########### INTERPRET FORMULA ############################## if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) # check formula has LHS and RHS. 
Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- lhs <- formula[[2]] trend <- rhs <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") varnames <- unique(variablesinformula(trend)) specials <- c("x", "y", "logpixelarea") covnames <- varnames[!(varnames %in% specials)] # add 'splitby' to covariate names if(!is.null(splitby)) { if(!is.character(splitby) || length(splitby) != 1) stop("splitby should be a single character string") covnames <- unique(c(covnames, splitby)) } CallInfo$responsename <- Yname CallInfo$varnames <- varnames CallInfo$covnames <- covnames # Parent environment parenv <- environment(formula) ######## FIND DATA AND RESHAPE ####################### Data <- slr.prepare(CallInfo, parenv, data, dataAtPoints, splitby) W <- Data$W df <- Data$df ######## FIT MODEL ############################### dformula <- formula if(offset) { # insert offset term in formula rhs <- paste(as.character(rhs), collapse=" ") rhs <- paste(c(rhs, "offset(logpixelarea)"), collapse="+") dformula <- as.formula(paste(Yname, rhs)) } linkname <- link FIT <- glm(dformula, family=binomial(link=linkname), data=df, na.action=na.exclude) result <- list(call = CallInfo$cl, CallInfo = CallInfo, Data = Data, Fit = list(FIT=FIT, dformula=dformula), terms = terms(formula)) class(result) <- c("slrm", class(result)) return(result) } ################ UTILITY TO FIND AND RESHAPE DATA ################# slr.prepare <- function(CallInfo, envir, data, dataAtPoints=NULL, splitby=NULL, clip=TRUE) { # CallInfo is produced by slrm() # envir is parent environment of model formula # data is 'data' argument that takes precedence over 'envir' # 'clip' is TRUE if the data should be clipped to the domain of Y Yname <- CallInfo$responsename varnames <- CallInfo$varnames covnames <- 
CallInfo$covnames dotargs <- CallInfo$dotargs # getobj <- function(nama, env, dat) { if(!is.null(dat) && !is.null(x <- dat[[nama]])) return(x) else return(get(nama, envir=env)) } # Get the response point pattern Y Y <- getobj(Yname, envir, data) if(!is.ppp(Y)) stop(paste("The response", sQuote(Yname), "must be a point pattern")) # if(!is.null(dataAtPoints)) { dataAtPoints <- as.data.frame(dataAtPoints) if(nrow(dataAtPoints) != npoints(Y)) stop(paste("dataAtPoints should have one row for each point in", dQuote(Yname))) } # Find the covariates ncov <- length(covnames) covlist <- lapply(as.list(covnames), getobj, env = envir, dat=data) names(covlist) <- covnames # Each covariate should be an image, a window, a function, or a single number if(ncov == 0) { isim <- isowin <- ismask <- isfun <- isnum <- isspatial <- israster <- logical(0) } else { isim <- unlist(lapply(covlist, is.im)) isowin <- unlist(lapply(covlist, is.owin)) ismask <- unlist(lapply(covlist, is.mask)) isfun <- unlist(lapply(covlist, is.function)) isspatial <- isim | isowin | isfun israster <- isim | ismask isnum <- unlist(lapply(covlist, function(x) { is.numeric(x) && length(x) == 1} )) } if(!all(ok <- (isspatial | isnum))) { n <- sum(!ok) stop(paste(ngettext(n, "The argument", "Each of the arguments"), commasep(sQuote(covnames[!ok])), "should be either an image, a window, or a single number")) } # 'splitby' if(!is.null(splitby)) { splitwin <- covlist[[splitby]] if(!is.owin(splitwin)) stop("The splitting covariate must be a window") # ensure it is a polygonal window covlist[[splitby]] <- splitwin <- as.polygonal(splitwin) # delete splitting covariate from lists to be processed issplit <- (covnames == splitby) isspatial[issplit] <- FALSE israster[issplit] <- FALSE } # nnum <- sum(isnum) nspatial <- sum(isspatial) nraster <- sum(israster) # numlist <- covlist[isnum] spatiallist <- covlist[isspatial] rasterlist <- covlist[israster] # numnames <- names(numlist) spatialnames <- names(spatiallist) rasternames 
<- names(rasterlist) # ######## CONVERT TO RASTER DATA ############################### convert <- function(x,W) { if(is.im(x) || is.function(x)) return(as.im(x,W)) if(is.owin(x)) return(as.im(x, W, value=TRUE, na.replace=FALSE)) return(NULL) } # determine spatial domain & common resolution: convert all data to it if(length(dotargs) > 0 || nraster == 0) { # Pixel resolution is determined by explicit arguments if(clip) { # Window extent is determined by response point pattern D <- as.owin(Y) } else { # Window extent is union of domains of data domains <- lapply(append(spatiallist, list(Y)), as.owin) D <- do.call("union.owin", domains) } # Create template mask W <- do.call("as.mask", append(list(D), dotargs)) # Convert all spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } else { # Pixel resolution is determined implicitly by covariate data W <- do.call("commonGrid", rasterlist) if(clip) { # Restrict data to spatial extent of response point pattern W <- intersect.owin(W, as.owin(Y)) } # Adjust spatial objects to this resolution spatiallist <- lapply(spatiallist, convert, W=W) } # images containing coordinate values xcoordim <- as.im(function(x,y){x}, W=W) ycoordim <- as.im(function(x,y){y}, W=W) # # create a list of covariate images, with names as in formula covimages <- append(list(x=xcoordim, y=ycoordim), spatiallist) basepixelarea <- W$xstep * W$ystep ######## ASSEMBLE DATA FRAME ############################### if(is.null(splitby)) { df <- slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, basepixelarea) sumYloga <- Y$n * log(basepixelarea) serial <- attr(df, "serial") } else { # fractional pixel areas pixsplit <- pixellate(splitwin, W) splitpixelarea <- as.vector(as.matrix(pixsplit)) # determine which points of Y are inside/outside window ins <- inside.owin(Y$x, Y$y, splitwin) # split processing dfIN <- slrAssemblePixelData(Y[ins], Yname, W, covimages, dataAtPoints[ins, ], splitpixelarea) serialIN <- attr(dfIN, "serial") 
dfIN[[splitby]] <- TRUE dfOUT <- slrAssemblePixelData(Y[!ins], Yname, W, covimages, dataAtPoints[!ins, ], basepixelarea - splitpixelarea) serialOUT <- attr(dfOUT, "serial") dfOUT[[splitby]] <- FALSE df <- rbind(dfIN, dfOUT) serial <- c(serialIN, serialOUT) # sum of log pixel areas associated with points Ysplit <- pixsplit[Y] sumYloga <- sum(log(ifelseXY(ins, Ysplit, basepixelarea - Ysplit))) } # tack on any numeric values df <- do.call("cbind", append(list(df), numlist)) ### RETURN ALL Data <- list(response=Y, covariates=covlist, spatialnames=spatialnames, numnames=numnames, W=W, df=df, serial=serial, sumYloga=sumYloga, dataAtPoints=dataAtPoints) return(Data) } # slrAssemblePixelData <- function(Y, Yname, W, covimages, dataAtPoints, pixelarea) { # pixellate point pattern Z <- pixellate(Y, W=W) Z <- eval.im(as.integer(Z>0)) # overwrite pixel entries for data points using exact values # coordinates xcoordim <- covimages[["x"]] ycoordim <- covimages[["y"]] xcoordim[Y] <- Y$x ycoordim[Y] <- Y$y covimages[["x"]] <- xcoordim covimages[["y"]] <- ycoordim # overwrite pixel entries if(!is.null(dataAtPoints)) { enames <- colnames(dataAtPoints) relevant <- enames %in% names(covimages) for(v in enames[relevant]) { cova <- covimages[[v]] cova[Y] <- dataAtPoints[, v, drop=TRUE] covimages[[v]] <- cova } } # assemble list of all images Ylist <- list(Z) names(Ylist) <- Yname allimages <- append(Ylist, covimages) # extract pixel values of each image pixelvalues <- function(z) { v <- as.vector(as.matrix(z)) if(z$type != "factor") return(v) lev <- levels(z) return(factor(v, levels=seq_along(lev), labels=lev)) } pixdata <- lapply(allimages, pixelvalues) df <- as.data.frame(pixdata) serial <- seq_len(nrow(df)) # add log(pixel area) column if(length(pixelarea) == 1) { df <- cbind(df, logpixelarea=log(pixelarea)) } else { ok <- (pixelarea > 0) df <- cbind(df[ok, ], logpixelarea=log(pixelarea[ok])) serial <- serial[ok] } attr(df, "serial") <- serial return(df) } is.slrm <- function(x) { 
inherits(x, "slrm") } coef.slrm <- function(object, ...) { coef(object$Fit$FIT) } print.slrm <- function(x, ...) { lk <- x$CallInfo$link switch(lk, logit= { cat("Fitted spatial logistic regression model\n") }, cloglog= { cat("Fitted spatial regression model (complementary log-log) \n") }, { cat("Fitted spatial regression model\n") cat(paste("Link =", dQuote(lk), "\n")) }) cat("Formula:\t") print(x$CallInfo$formula) cat("Fitted coefficients:\n") print(coef(x)) return(invisible(NULL)) } logLik.slrm <- function(object, ..., adjust=TRUE) { FIT <- object$Fit$FIT ll <- -deviance(FIT)/2 if(adjust) { sumYloga <- object$Data$sumYloga ll <- ll - sumYloga } attr(ll, "df") <- length(coef(object)) class(ll) <- "logLik" return(ll) } fitted.slrm <- function(object, ...) { if(length(list(...)) > 0) warning("second argument (and any subsequent arguments) ignored") predict(object, type="probabilities") } predict.slrm <- function(object, ..., type="intensity", newdata=NULL, window=NULL) { type <- pickoption("type", type, c(probabilities="probabilities", link="link", intensity="intensity", lambda="intensity")) FIT <- object$Fit$FIT link <- object$CallInfo$link W <- object$Data$W df <- object$Data$df loga <- df$logpixelarea if(is.null(newdata) && is.null(window)) { # fitted values from existing fit switch(type, probabilities={ values <- fitted(FIT) }, link={ values <- predict(FIT, type="link") }, intensity={ # this calculation applies whether an offset was included or not if(link == "cloglog") { linkvalues <- predict(FIT, type="link") values <- exp(linkvalues - loga) } else { probs <- fitted(FIT) values <- -log(1-probs)/exp(loga) } } ) out <- im(values, xcol=W$xcol, yrow=W$yrow, unitname=unitname(W)) return(out) } else { # prediction using new values # update arguments that may affect pixel resolution CallInfo <- object$CallInfo CallInfo$dotargs <- resolve.defaults(list(...), CallInfo$dotargs) # if(!is.null(window)) { # insert fake response in new window if(is.null(newdata)) newdata <- 
list() window <- as.owin(window) newdata[[CallInfo$responsename]] <- ppp(numeric(0), numeric(0), window=window) } # process new data newData <- slr.prepare(CallInfo, environment(CallInfo$formula), newdata, clip=!is.null(window)) newdf <- newData$df newW <- newData$W newloga <- newdf$logpixelarea # avoid NA etc npixel <- nrow(newdf) ok <- complete.cases(newdf) if(!all(ok)) { newdf <- newdf[ok, , drop=FALSE] newloga <- newloga[ok] } # compute link values linkvalues <- predict(FIT, newdata=newdf, type="link") # transform to desired scale linkinv <- family(FIT)$linkinv switch(type, probabilities={ values <- linkinv(linkvalues) }, link={ values <- linkvalues }, intensity={ # this calculation applies whether an offset was included or not if(link == "cloglog") { values <- exp(linkvalues - newloga) } else { probs <- linkinv(linkvalues) values <- -log(1-probs)/exp(newloga) } } ) # form image v <- rep.int(NA_real_, npixel) v[ok] <- values out <- im(v, xcol=newW$xcol, yrow=newW$yrow, unitname=unitname(W)) return(out) } } plot.slrm <- function(x, ..., type="intensity") { xname <- short.deparse(substitute(x)) y <- predict(x, type=type) do.call("plot.im", resolve.defaults(list(x=y), list(...), list(main=xname))) } formula.slrm <- function(x, ...) { f <- x$CallInfo$formula return(f) } terms.slrm <- function(x, ...) { terms(formula(x), ...) } labels.slrm <- function(object, ...) { # extract fitted trend coefficients co <- coef(object) # model terms tt <- terms(object) lab <- attr(tt, "term.labels") if(length(lab) == 0) return(character(0)) # model matrix mm <- model.matrix(object) ass <- attr(mm, "assign") # 'ass' associates coefficients with model terms # except ass == 0 for the Intercept coef.ok <- is.finite(co) relevant <- (ass > 0) okterms <- unique(ass[coef.ok & relevant]) return(lab[okterms]) } extractAIC.slrm <- function (fit, scale = 0, k = 2, ...) 
{ edf <- length(coef(fit)) aic <- AIC(fit) c(edf, aic + (k - 2) * edf) } model.matrix.slrm <- function(object,..., keepNA=TRUE) { FIT <- object$Fit$FIT mm <- model.matrix(FIT, ...) if(!keepNA) return(mm) df <- object$Data$df comp <- complete.cases(df) if(all(comp)) return(mm) if(sum(comp) != nrow(mm)) stop("Internal error in patching NA's") mmplus <- matrix(NA, nrow(df), ncol(mm)) mmplus[comp, ] <- mm return(mmplus) } model.images.slrm <- function(object, ...) { mm <- model.matrix(object, ...) mm <- as.data.frame(mm) Data <- object$Data W <- Data$W serial <- Data$serial splitby <- object$CallInfo$splitby blank <- as.im(NA_real_, W) assignbyserial <- function(values, serial, template) { Z <- template Z$v[serial] <- values return(Z) } if(is.null(splitby)) { result <- lapply(as.list(mm), assignbyserial, serial=serial, template=blank) } else { df <- Data$df IN <- as.logical(df[[splitby]]) OUT <- !IN mmIN <- mm[IN, , drop=FALSE] mmOUT <- mm[OUT, , drop=FALSE] resultIN <- lapply(as.list(mmIN), assignbyserial, serial=serial[IN], template=blank) resultOUT <- lapply(as.list(mmOUT), assignbyserial, serial=serial[OUT], template=blank) names(resultIN) <- paste(names(resultIN), splitby, "TRUE", sep="") names(resultOUT) <- paste(names(resultOUT), splitby, "FALSE", sep="") result <- c(resultIN, resultOUT) } return(as.listof(result)) } update.slrm <- function(object, ..., evaluate=TRUE, env=parent.frame()) { e <- update.default(object, ..., evaluate=FALSE) if(evaluate) e <- eval(e, envir=env) return(e) } anova.slrm <- function(object, ..., test=NULL) { objex <- append(list(object), list(...)) if(!all(unlist(lapply(objex, is.slrm)))) stop("Some arguments are not of class slrm") fitz <- lapply(objex, function(z){z$Fit$FIT}) do.call("anova", append(fitz, list(test=test))) } vcov.slrm <- function(object, ..., what=c("vcov", "corr", "fisher", "Fisher")) { stopifnot(is.slrm(object)) what <- match.arg(what) vc <- vcov(object$Fit$FIT) result <- switch(what, vcov = vc, corr = { sd <- 
sqrt(diag(vc)) vc / outer(sd, sd, "*") }, fisher=, Fisher={ solve(vc) }) return(result) } unitname.slrm <- function(x) { return(unitname(x$Data$response)) } "unitname<-.slrm" <- function(x, value) { unitname(x$Data$response) <- value return(x) } is.stationary.slrm <- function(x) { fo <- formula(x) trend <- fo[c(1,3)] return(identical.formulae(trend, ~1)) } is.poisson.slrm <- function(x) { TRUE } simulate.slrm <- function(object, nsim=1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE) { # .... copied from simulate.lm .... if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv) else { R.seed <- get(".Random.seed", envir = .GlobalEnv) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv)) } # determine simulation window and compute intensity if(!is.null(window)) stopifnot(is.owin(window)) lambda <- predict(object, type="intensity", newdata=covariates, window=window) # max lambda (for efficiency) summ <- summary(lambda) lmax <- summ$max + 0.05 * diff(summ$range) # run out <- list() if(verbose && (nsim > 1)) cat(paste("Generating", nsim, "simulations... 
")) for(i in 1:nsim) { out[[i]] <- rpoispp(lambda, lmax=lmax) if(verbose) progressreport(i, nsim) } # pack up out <- as.listof(out) names(out) <- paste("Simulation", 1:nsim) attr(out, "seed") <- RNGstate return(out) } spatstat/R/centroid.R0000755000176000001440000000773412237642727014332 0ustar ripleyusers# # centroid.S Centroid of a window # and related operations # # $Revision: 1.2 $ $Date: 2013/05/01 05:42:54 $ # # Function names (followed by "xypolygon" or "owin") # # intX integral of x dx dy # intY integral of y dx dy # meanX mean of x dx dy # meanY mean of y dx dy # centroid (meanX, meanY) # #------------------------------------- intX.xypolygon <- function(polly) { # # polly: list(x,y) vertices of a single polygon (n joins to 1) # verify.xypolygon(polly) x <- polly$x y <- polly$y nedges <- length(x) # sic # place x axis below polygon y <- y - min(y) # join vertex n to vertex 1 xr <- c(x, x[1]) yr <- c(y, y[1]) # slope dx <- diff(xr) dy <- diff(yr) slope <- ifelseAX(dx == 0, 0, dy/dx) # integrate integrals <- x * y * dx + (y + slope * x) * (dx^2)/2 + slope * (dx^3)/3 -sum(integrals) } intX.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) answer <- width * height * mean(w$xrange) }, polygonal = { answer <- sum(unlist(lapply(w$bdry, intX.xypolygon))) }, mask = { pixelarea <- abs(w$xstep * w$ystep) npixels <- sum(w$m) area <- npixels * pixelarea x <- raster.x(w)[w$m] answer <- area * mean(x) }, stop("Unrecognised window type") ) return(answer) } meanX.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { answer <- mean(w$xrange) }, polygonal = { area <- sum(unlist(lapply(w$bdry, area.xypolygon))) integrated <- sum(unlist(lapply(w$bdry, intX.xypolygon))) answer <- integrated/area }, mask = { x <- raster.x(w)[w$m] answer <- mean(x) }, stop("Unrecognised window type") ) return(answer) } intY.xypolygon <- function(polly) { # # polly: list(x,y) vertices of a single 
polygon (n joins to 1) # verify.xypolygon(polly) x <- polly$x y <- polly$y nedges <- length(x) # sic # place x axis below polygon yadjust <- min(y) y <- y - yadjust # join vertex n to vertex 1 xr <- c(x, x[1]) yr <- c(y, y[1]) # slope dx <- diff(xr) dy <- diff(yr) slope <- ifelseAX(dx == 0, 0, dy/dx) # integrate integrals <- (1/2) * (dx * y^2 + slope * y * dx^2 + slope^2 * dx^3/3) total <- sum(integrals) - yadjust * area.xypolygon(polly) # change sign to adhere to anticlockwise convention -total } intY.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { width <- abs(diff(w$xrange)) height <- abs(diff(w$yrange)) answer <- width * height * mean(w$yrange) }, polygonal = { answer <- sum(unlist(lapply(w$bdry, intY.xypolygon))) }, mask = { pixelarea <- abs(w$xstep * w$ystep) npixels <- sum(w$m) area <- npixels * pixelarea y <- raster.y(w)[w$m] answer <- area * mean(y) }, stop("Unrecognised window type") ) return(answer) } meanY.owin <- function(w) { verifyclass(w, "owin") switch(w$type, rectangle = { answer <- mean(w$yrange) }, polygonal = { area <- sum(unlist(lapply(w$bdry, area.xypolygon))) integrated <- sum(unlist(lapply(w$bdry, intY.xypolygon))) answer <- integrated/area }, mask = { y <- raster.y(w)[w$m] answer <- mean(y) }, stop("Unrecognised window type") ) return(answer) } centroid.owin <- function(w) { verifyclass(w, "owin") return(list(x=meanX.owin(w), y=meanY.owin(w))) } spatstat/R/ord.R0000755000176000001440000000206712240721046013263 0ustar ripleyusers# # # ord.S # # $Revision: 1.4 $ $Date: 2007/01/11 03:36:02 $ # # Ord process with user-supplied potential # # Ord() create an instance of the Ord process # [an object of class 'interact'] # with user-supplied potential # # # ------------------------------------------------------------------- # Ord <- function(pot, name) { if(missing(name)) name <- "Ord process with user-defined potential" out <- list( name = name, creator = "Ord", family = ord.family, pot = pot, par = NULL, parnames = 
NULL, init = NULL, update = function(self, ...){ do.call(Ord, resolve.defaults(list(...), list(pot=self$pot, name=self$name))) } , print = function(self) { cat("Potential function:\n") print(self$pot) invisible() }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } spatstat/R/Fest.R0000755000176000001440000001057512237642727013421 0ustar ripleyusers# # Fest.S # # S function empty.space() # Computes estimates of the empty space function # # $Revision: 4.32 $ $Date: 2013/04/25 06:37:43 $ # "Fest" <- "empty.space" <- function(X, ..., eps = NULL, r=NULL, breaks=NULL, correction=c("rs", "km", "cs")) { verifyclass(X, "ppp") # Intensity estimate W <- X$window npts <- npoints(X) lambda <- npts/area.owin(W) # First discretise dwin <- as.mask(W, eps) dX <- ppp(X$x, X$y, window=dwin, check=FALSE) # # histogram breakpoints # rmaxdefault <- rmax.rule("F", dwin, lambda) breaks <- handle.r.b.args(r, breaks, dwin, eps, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) { correction <- c("rs", "km", "cs") } else correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", cs="cs", ChiuStoyan="cs", Hanisch="cs", han="cs", best="km"), multi=TRUE) # initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lambda * pi * rvals^2)) Z <- fv(df, "r", substitute(F(r), NULL), "theo", . ~ r, c(0,rmax), c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="F") nr <- length(rvals) zeroes <- numeric(nr) # # compute distances and censoring distances if(X$window$type == "rectangle") { # original data were in a rectangle # output of exactdt() is sufficient e <- exactdt(dX) dist <- e$d bdry <- e$b } else { # window is irregular.. 
# Distance transform & boundary distance for all pixels e <- exactdt(dX) b <- bdist.pixels(dX$window, style="matrix") # select only those pixels inside mask mm <- dwin$m dist <- e$d[mm] bdry <- b[mm] } # censoring indicators d <- (dist <= bdry) # observed distances o <- pmin.int(dist, bdry) ### start calculating estimates of F if("none" %in% correction) { # UNCORRECTED e.d.f. of empty space distances if(npts == 0) edf <- zeroes else { hh <- hist(dist[dist <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(dist) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s)[raw](r)", "uncorrected estimate of %s", "raw") } if("cs" %in% correction) { # Chiu-Stoyan correction if(npts == 0) cs <- zeroes else { # uncensored distances x <- dist[d] # weights a <- eroded.areas(W, rvals) # calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts H <- cumsum(h/a) cs <- H/max(H[is.finite(H)]) } # add to fv object Z <- bind.fv(Z, data.frame(cs=cs), "hat(%s)[cs](r)", "Chiu-Stoyan estimate of %s", "cs") } if(any(correction %in% c("rs", "km"))) { # calculate Kaplan-Meier and/or border corrected (Reduced Sample) estimators want.rs <- "rs" %in% correction want.km <- "km" %in% correction selection <- c(want.rs, want.km, want.km) tags <- c("rs", "km", "hazard")[selection] labels <- c("hat(%s)[bord](r)", "hat(%s)[km](r)", "hazard(r)")[selection] descr <- c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)")[selection] if(npts == 0) { result <- as.data.frame(matrix(0, nr, length(tags))) names(result) <- tags } else { result <- km.rs.opt(o, bdry, d, breaks, KM=want.km, RS=want.rs) result <- as.data.frame(result[tags]) } # add to fv object Z <- bind.fv(Z, result, labels, descr, if(want.km) "km" else "rs") } # wrap up unitname(Z) <- unitname(X) # remove 'hazard' from the dotnames nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) # determine recommended plot 
range attr(Z, "alim") <- with(Z, range(.x[is.finite(.y) & .y <= 0.9])) return(Z) } spatstat/R/density.ppp.R0000755000176000001440000004020712237642727014770 0ustar ripleyusers# # density.ppp.R # # Method for 'density' for point patterns # # + bandwidth selection rules bw.diggle, bw.scott # # $Revision: 1.61 $ $Date: 2013/08/24 15:07:06 $ # ksmooth.ppp <- function(x, sigma, ..., edge=TRUE) { .Deprecated("density.ppp", package="spatstat") density.ppp(x, sigma, ..., edge=edge) } density.ppp <- local({ density.ppp <- function(x, sigma=NULL, ..., weights=NULL, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE) { verifyclass(x, "ppp") output <- pickoption("output location type", at, c(pixels="pixels", points="points")) ker <- resolve.2D.kernel(..., sigma=sigma, varcov=varcov, x=x, adjust=adjust) sigma <- ker$sigma varcov <- ker$varcov if(output == "points") { # VALUES AT DATA POINTS ONLY result <- densitypointsEngine(x, sigma, varcov=varcov, weights=weights, edge=edge, leaveoneout=leaveoneout, diggle=diggle, ...) 
if(!is.null(uhoh <- attr(result, "warnings"))) { switch(uhoh, underflow=warning("underflow due to very small bandwidth"), warning(uhoh)) } return(result) } # VALUES AT PIXELS if(!edge) { # no edge correction edg <- NULL raw <- second.moment.calc(x, sigma, what="smooth", ..., weights=weights, varcov=varcov) raw <- divide.by.pixelarea(raw) smo <- raw } else if(!diggle) { # edge correction e(u) both <- second.moment.calc(x, sigma, what="smoothedge", ..., weights=weights, varcov=varcov) raw <- divide.by.pixelarea(both$smooth) edg <- both$edge smo <- if(is.im(raw)) eval.im(raw/edg) else lapply(raw, function(a,b) eval.im(a/b), b=edg) } else { # edge correction e(x_i) edg <- second.moment.calc(x, sigma, what="edge", ..., varcov=varcov) wi <- 1/safelookup(edg, x, warn=FALSE) wi[!is.finite(wi)] <- 0 # edge correction becomes weight attached to points if(is.null(weights)) { newweights <- wi } else if(is.matrix(weights) || is.data.frame(weights)) { stopifnot(nrow(weights) == npoints(x)) newweights <- weights * wi } else { stopifnot(length(weights) == npoints(x)) newweights <- weights * wi } raw <- second.moment.calc(x, sigma, what="smooth", ..., weights=newweights, varcov=varcov) raw <- divide.by.pixelarea(raw) smo <- raw } result <- if(is.im(smo)) smo[x$window, drop=FALSE] else as.listof(lapply(smo, "[", i=x$window, drop=FALSE)) # internal use only spill <- list(...)$spill if(!is.null(spill)) return(list(sigma=sigma, varcov=varcov, raw = raw, edg=edg)) # normal return attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov return(result) } divide.by.pixelarea <- function(x) { if(is.im(x)) { x$v <- x$v/(x$xstep * x$ystep) } else { for(i in seq_along(x)) x[[i]]$v <- with(x[[i]], v/(xstep * ystep)) } return(x) } density.ppp }) densitypointsEngine <- function(x, sigma, ..., weights=NULL, edge=TRUE, varcov=NULL, leaveoneout=TRUE, diggle=FALSE, sorted=FALSE) { if(is.null(varcov)) { const <- 1/(2 * pi * sigma^2) } else { detSigma <- det(varcov) Sinv <- solve(varcov) const 
<- 1/(2 * pi * sqrt(detSigma)) } # Leave-one-out computation # cutoff: contributions from pairs of distinct points # closer than 8 standard deviations sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) cutoff <- 8 * sd # nnd <- nndist(x) # nnrange <- range(nnd) # if(nnrange[1] > cutoff) { # npts <- npoints(x) # result <- if(leaveoneout) numeric(npts) else rep.int(const, npts) # attr(result, "sigma") <- sigma # attr(result, "varcov") <- varcov # attr(result, "warnings") <- "underflow" # return(result) # } if(leaveoneout) { # ensure each point has its closest neighbours within the cutoff nndmax <- max(nndist(x)) cutoff <- max(2 * nndmax, cutoff) } # validate weights if(is.null(weights)) { k <- 1 } else if(is.matrix(weights) || is.data.frame(weights)) { k <- ncol(weights) stopifnot(nrow(weights) == npoints(x)) weights <- as.data.frame(weights) weightnames <- colnames(weights) } else { k <- 1 stopifnot(length(weights) == npoints(x) || length(weights) == 1) } # evaluate edge correction weights at points if(edge) { win <- x$window if(is.null(varcov) && win$type == "rectangle") { # evaluate Gaussian probabilities directly xr <- win$xrange yr <- win$yrange xx <- x$x yy <- x$y xprob <- pnorm(xr[2], mean=xx, sd=sigma) - pnorm(xr[1], mean=xx, sd=sigma) yprob <- pnorm(yr[2], mean=yy, sd=sigma) - pnorm(yr[1], mean=yy, sd=sigma) edgeweight <- xprob * yprob } else { edg <- second.moment.calc(x, sigma=sigma, what="edge", varcov=varcov) edgeweight <- safelookup(edg, x, warn=FALSE) } if(diggle) { # Diggle edge correction # edgeweight is attached to each point if(is.null(weights)) { k <- 1 weights <- 1/edgeweight } else { weights <- weights/edgeweight } } } if(spatstat.options("densityC") || k > 1) { # .................. new C code ........................... 
npts <- npoints(x) result <- if(k == 1) numeric(npts) else matrix(, npts, k) # sort into increasing order of x coordinate (required by C code) if(sorted) { xx <- x$x yy <- x$y } else { oo <- fave.order(x$x) xx <- x$x[oo] yy <- x$y[oo] } DUP <- spatstat.options("dupC") if(is.null(varcov)) { # isotropic kernel if(is.null(weights)) { zz <- .C("denspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sd), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else if(k == 1) { wtsort <- if(sorted) weights else weights[oo] zz <- .C("wtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sd), weight = as.double(wtsort), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else { # matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C("wtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), sig = as.double(sd), weight = as.double(wtsort[,j]), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } } } else { # anisotropic kernel flatSinv <- as.vector(t(Sinv)) if(is.null(weights)) { zz <- .C("adenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else if(k == 1) { # vector of weights wtsort <- if(sorted) weights else weights[oo] zz <- .C("awtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), weight = 
as.double(wtsort), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result <- zz$result else result[oo] <- zz$result } else { # matrix of weights wtsort <- if(sorted) weights else weights[oo, ] for(j in 1:k) { zz <- .C("awtdenspt", nxy = as.integer(npts), x = as.double(xx), y = as.double(yy), rmaxi = as.double(cutoff), detsigma = as.double(detSigma), sinv = as.double(flatSinv), weight = as.double(wtsort[,j]), result = as.double(double(npts)), DUP = DUP) # PACKAGE = "spatstat") if(sorted) result[,j] <- zz$result else result[oo,j] <- zz$result } } } } else { # ..... interpreted code ......................................... close <- closepairs(x, cutoff) i <- close$i j <- close$j d <- close$d # evaluate contribution from each close pair (i,j) if(is.null(varcov)) { contrib <- const * exp(-d^2/(2 * sigma^2)) } else { # anisotropic kernel dx <- close$dx dy <- close$dy contrib <- const * exp(-(dx * (dx * Sinv[1,1] + dy * Sinv[1,2]) + dy * (dx * Sinv[2,1] + dy * Sinv[2,2]))/2) } # multiply by weights if(!is.null(weights)) contrib <- contrib * weights[j] # sum result <- tapply(contrib, factor(i, levels=1:(x$n)), sum) result[is.na(result)] <- 0 # } # ----- contribution from point itself ---------------- if(!leaveoneout) { # add contribution from point itself self <- const if(!is.null(weights)) self <- self * weights result <- result + self } # ........ Edge correction ........................................ if(edge && !diggle) result <- result/edgeweight # ............. validate ................................. 
npts <- npoints(x) if(k == 1) { result <- as.numeric(result) if(length(result) != npts) stop(paste("Internal error: incorrect number of lambda values", "in leave-one-out method:", "length(lambda) = ", length(result), "!=", npts, "= npoints")) if(any(is.na(result))) { nwrong <- sum(is.na(result)) stop(paste("Internal error:", nwrong, "NA or NaN", ngettext(nwrong, "value", "values"), "generated in leave-one-out method")) } } else { if(ncol(result) != k) stop(paste("Internal error: incorrect number of columns returned:", ncol(result), "!=", k)) colnames(result) <- weightnames if(nrow(result) != npts) stop(paste("Internal error: incorrect number of rows of lambda values", "in leave-one-out method:", "nrow(lambda) = ", nrow(result), "!=", npts, "= npoints")) if(any(is.na(result))) { nwrong <- sum(!complete.cases(result)) stop(paste("Internal error:", nwrong, ngettext(nwrong, "row", "rows"), "of NA values generated in leave-one-out method")) } } # tack on bandwidth attr(result, "sigma") <- sigma attr(result, "varcov") <- varcov # return(result) } resolve.2D.kernel <- function(..., sigma=NULL, varcov=NULL, x, mindist=NULL, adjust=1, bwfun=NULL, allow.zero=FALSE) { if(is.function(sigma)) { bwfun <- sigma sigma <- NULL } if(is.null(sigma) && is.null(varcov) && !is.null(bwfun)) { # call bandwidth selection function bw <- do.call.matched(bwfun, resolve.defaults(list(X=x), list(...))) # interpret the result as either sigma or varcov if(!is.numeric(bw)) stop("bandwidth selector returned a non-numeric result") if(length(bw) %in% c(1,2)) { sigma <- as.numeric(bw) if(!all(sigma > 0)) { gripe <- "bandwidth selector returned negative value(s)" if(allow.zero) warning(gripe) else stop(gripe) } } else if(is.matrix(bw) && nrow(bw) == 2 && ncol(bw) == 2) { varcov <- bw if(!all(eigen(varcov)$values > 0)) stop("bandwidth selector returned matrix with negative eigenvalues") } else stop("bandwidth selector did not return a matrix or numeric value") } sigma.given <- !is.null(sigma) 
varcov.given <- !is.null(varcov) if(sigma.given) { stopifnot(is.numeric(sigma)) stopifnot(length(sigma) %in% c(1,2)) if(!allow.zero) stopifnot(all(sigma > 0)) } if(varcov.given) stopifnot(is.matrix(varcov) && nrow(varcov) == 2 && ncol(varcov)==2 ) # reconcile ngiven <- varcov.given + sigma.given switch(ngiven+1, { # default w <- x$window sigma <- (1/8) * shortside(as.rectangle(w)) }, { if(sigma.given && length(sigma) == 2) varcov <- diag(sigma^2) if(!is.null(varcov)) sigma <- NULL }, { stop(paste("Give only one of the arguments", sQuote("sigma"), "and", sQuote("varcov"))) }) # apply adjustments if(!is.null(sigma)) sigma <- adjust * sigma if(!is.null(varcov)) varcov <- (adjust^2) * varcov # sd <- if(is.null(varcov)) sigma else sqrt(sum(diag(varcov))) cutoff <- 8 * sd uhoh <- if(!is.null(mindist) && cutoff < mindist) "underflow" else NULL result <- list(sigma=sigma, varcov=varcov, cutoff=cutoff, warnings=uhoh) return(result) } bw.diggle <- function(X, ...) { stopifnot(is.ppp(X)) # secret option for debugging mf <- function(..., method=c("C", "interpreted")) match.arg(method) method <- mf(...) 
# lambda <- npoints(X)/area.owin(X) K <- Kest(X, correction="good") yname <- fvnames(K, ".y") K <- K[, c("r", yname)] rvals <- K$r # evaluation of M(r) requires K(2r) rmax2 <- max(rvals)/2 if(!is.null(alim <- attr(K, "alim"))) rmax2 <- min(alim[2], rmax2) ok <- (rvals <= rmax2) switch(method, interpreted = { rvals <- rvals[ok] nr <- length(rvals) J <- numeric(nr) phi <- function(x,h) { if(h <= 0) return(numeric(length(x))) y <- pmax.int(0, pmin.int(1, x/(2 * h))) 4 * pi * h^2 * (acos(y) - y * sqrt(1 - y^2)) } for(i in 1:nr) J[i] <- stieltjes(phi, K, h=rvals[i])[[yname]]/(2 * pi) }, C = { nr <- length(rvals) nrmax <- sum(ok) dK <- diff(K[[yname]]) ndK <- length(dK) DUP <- spatstat.options("dupC") z <- .C("digberJ", r=as.double(rvals), dK=as.double(dK), nr=as.integer(nr), nrmax=as.integer(nrmax), ndK=as.integer(ndK), J=as.double(numeric(nrmax)), DUP=DUP) # PACKAGE="spatstat" J <- z$J rvals <- rvals[ok] }) pir2 <- pi * rvals^2 M <- (1/lambda - 2 * K[[yname]][ok])/pir2 + J/pir2^2 # This calculation was for the uniform kernel on B(0,h) # Convert to standard deviation of (one-dimensional marginal) kernel sigma <- rvals/2 result <- bw.optim(M, sigma, creator="bw.diggle", criterion="Berman-Diggle Cross-Validation", J=J, lambda=lambda) return(result) } bw.scott <- function(X) { stopifnot(is.ppp(X)) n <- npoints(X) sdx <- sqrt(var(X$x)) sdy <- sqrt(var(X$y)) return(c(sdx, sdy) * n^(-1/6)) } spatstat/R/lppm.R0000755000176000001440000001375612237642727013474 0ustar ripleyusers# # lppm.R # # Point process models on a linear network # # $Revision: 1.21 $ $Date: 2013/09/10 09:57:02 $ # lppm <- function(X, ..., eps=NULL, nd=1000) { Xname <- short.deparse(substitute(X)) nama <- names(list(...)) resv <- c("method", "forcefit") if(any(clash <- resv %in% nama)) warning(paste(ngettext(sum(clash), "Argument", "Arguments"), commasep(sQuote(resv[clash])), "must not be used")) stopifnot(inherits(X, "lpp")) Q <- linequad(X, eps=eps, nd=nd) fit <- ppm(Q, ..., method="mpl", forcefit=TRUE) 
# ---- tail of lppm() : the function header appears on an earlier line ----
if(!is.poisson.ppm(fit))
  warning("Non-Poisson models currently use Euclidean distance")
out <- list(X=X, fit=fit, Xname=Xname)
class(out) <- "lppm"
return(out)
}

# is.lppm: test whether x is a fitted point process model on a linear
# network, i.e. inherits from class "lppm".
is.lppm <- function(x) { inherits(x, "lppm") }

# predict.lppm: prediction method for class "lppm".
#   object    -- fitted model on a linear network (class "lppm")
#   type      -- "trend", "cif" or "lambda" ("lambda" is mapped to "cif")
#   locations -- optional prediction locations; if supplied, a vector of
#                predicted values is returned.  Otherwise pixel images
#                restricted to the network ("linim" objects) are built.
predict.lppm <- function(object, ..., type="trend", locations=NULL) {
  type <- pickoption("type", type,
                     c(trend="trend",
                       cif="cif",
                       lambda="cif"))
  X <- object$X
  fit <- object$fit
  L <- as.linnet(X)
  if(!is.null(locations)) {
    # locations given; return a vector of predicted values
    values <- predict(fit, locations=locations, type=type)
    return(values)
  }
  # locations not given; want a pixel image
  # pixellate the lines
  Llines <- as.psp(L)
  linemask <- as.mask.psp(Llines, ...)
  lineimage <- as.im(linemask)
  # extract pixel centres
  xx <- raster.x(linemask)
  yy <- raster.y(linemask)
  mm <- linemask$m
  xx <- as.vector(xx[mm])
  yy <- as.vector(yy[mm])
  pixelcentres <- ppp(xx, yy, window=as.rectangle(linemask), check=FALSE)
  pixdf <- data.frame(xc=xx, yc=yy)
  # project pixel centres onto lines
  p2s <- project2segment(pixelcentres, Llines)
  projloc <- as.data.frame(p2s$Xproj)
  projmap <- as.data.frame(p2s[c("mapXY", "tp")])
  projdata <- cbind(pixdf, projloc, projmap)
  # predict at the projected points
  if(!is.multitype(fit)) {
    values <- predict(fit, locations=projloc, type=type)
    # map to nearest pixels
    Z <- lineimage
    Z[pixelcentres] <- values
    # attach exact line position data
    df <- cbind(projdata, values)
    out <- linim(L, Z, df=df)
  } else {
    # predict for each type
    lev <- levels(marks(data.ppm(fit)))
    out <- list()
    for(k in seq(length(lev))) {
      markk <- factor(lev[k], levels=lev)
      locnk <- cbind(projloc, data.frame(marks=markk))
      values <- predict(fit, locations=locnk, type=type)
      Z <- lineimage
      Z[pixelcentres] <- values
      df <- cbind(projdata, values)
      out[[k]] <- linim(L, Z, df=df)
    }
    names(out) <- as.character(lev)
    class(out) <- c("listof", class(out))
  }
  return(out)
}

# coef.lppm: fitted coefficients, delegated to the underlying ppm fit.
coef.lppm <- function(object, ...) { coef(object$fit) }

# print.lppm: print method; shows the underlying ppm fit, the linear
# network, and the name of the original data pattern.
print.lppm <- function(x, ...)
{
  cat("Point process model on linear network\n")
  print(x$fit)
  cat("Linear network:\n")
  print(as.linnet(x))
  cat(paste("Original data:", x$Xname, "\n"))
  return(invisible(NULL))
}

# plot.lppm: plot the model's predicted trend (or cif, via 'type').
plot.lppm <- function(x, ..., type="trend") {
  xname <- short.deparse(substitute(x))
  y <- predict(x, type=type)
  do.call("plot", resolve.defaults(list(y),
                                   list(...),
                                   list(main=xname)))
}

# anova.lppm: analysis of deviance.  Each "lppm" argument is unwrapped
# to its underlying ppm fit, then everything is passed to anova.ppm.
anova.lppm <- function(object, ..., test=NULL, override=FALSE) {
  stuff <- list(object=object, ...)
  # extract ppm objects where appropriate
  stuff <- lapply(stuff,
                  function(z) { if(inherits(z, "lppm")) z$fit else z })
  # analysis of deviance for
  do.call("anova.ppm", append(stuff, list(test=test, override=override)))
}

# update.lppm: update the fitted model.  If exactly one "lpp" pattern is
# supplied among the arguments it becomes the new data (converted to a
# quadrature scheme via linequad); remaining arguments go to update.ppm.
update.lppm <- function(object, ...) {
  stopifnot(inherits(object, "lppm"))
  X <- object$X
  fit <- object$fit
  Xname <- object$Xname
  aargh <- list(...)
  islpp <- unlist(lapply(aargh, inherits, what="lpp"))
  if(!any(islpp)) {
    # pass arguments through to update.ppm
    newfit <- do.call("update.ppm", append(list(fit), aargh))
    newX <- X
  } else {
    # trap point pattern argument & convert to quadscheme
    if((npp <- sum(islpp)) > 1)
      stop(paste("Arguments not understood:",
                 npp, "lpp objects given"))
    newX <- aargh[[which(islpp)]]
    newQ <- linequad(newX)
    newfit <- do.call("update.ppm",
                      append(list(fit, newQ), aargh[!islpp]))
  }
  if(!is.poisson.ppm(newfit))
    warning("Non-Poisson models currently use Euclidean distance")
  out <- list(X=newX, fit=newfit, Xname=Xname)
  class(out) <- "lppm"
  return(out)
}

# Simple delegation methods: each forwards to the corresponding method
# for the underlying ppm fit stored in $fit.
terms.lppm <- function(x, ...) { terms(x$fit, ...) }

logLik.lppm <- function(object, ...) { logLik(object$fit, ...) }

formula.lppm <- function(x, ...) { formula(x$fit, ...) }

extractAIC.lppm <- function(fit, ...) { extractAIC(fit$fit, ...) }

# as.owin.lppm: spatial window of the model's linear network.
# (First argument is named W for consistency with the as.owin generic.)
as.owin.lppm <- function(W, ..., fatal=TRUE) {
  stopifnot(inherits(W, "lppm"))
  as.owin(as.linnet(W), ..., fatal=fatal)
}

# model.images.lppm: header only here -- the body begins on the next
# original line of the file.
model.images.lppm <- function(object, L=as.linnet(object), ...)
# ---- body of model.images.lppm() : its header ends the previous line ----
# Computes the canonical covariate images of the underlying ppm fit over
# the bounding rectangle of L, restricts them to the network, and
# converts each to a "linim" object.
{
  stopifnot(inherits(object, "lppm"))
  stopifnot(inherits(L, "linnet"))
  m <- model.images(object$fit, W=as.rectangle(L), ...)
  if(length(m) > 0) {
    # restrict images to L
    rasta <- as.mask(m[[1]])
    DL <- as.mask.psp(as.psp(L), xy=rasta)
    ZL <- as.im(DL)
    m <- lapply(m, function(x, Z) eval.im(x * Z), Z=ZL)
    # convert to linim
    m <- lapply(m, function(x, L) linim(L,x), L=L)
  }
  return(as.listof(m))
}

# model.matrix.lppm: design matrix of the underlying ppm fit.
model.matrix.lppm <- function(object, data=model.frame(object), ...,
                              keepNA=TRUE) {
  stopifnot(inherits(object, "lppm"))
  model.matrix(object$fit, data=data, ..., keepNA=keepNA)
}

# model.frame.lppm: model frame of the underlying ppm fit.
# (First argument is named 'formula' to match the model.frame generic.)
model.frame.lppm <- function(formula, ...) {
  stopifnot(inherits(formula, "lppm"))
  model.frame(formula$fit, ...)
}

# as.linnet.lppm: extract the linear network of the original data pattern.
as.linnet.lppm <- function(X, ...) {
  as.linnet(X$X, ...)
}

# nobs.lppm: number of points in the original data pattern.
nobs.lppm <- function(object, ...) { npoints(object$X) }

# Delegation methods: forward the query to the underlying ppm fit.
is.poisson.lppm <- function(x) { is.poisson(x$fit) }

is.stationary.lppm <- function(x) { is.stationary(x$fit) }

is.multitype.lppm <- function(X, ...) { is.multitype(X$fit) }

is.marked.lppm <- function(X, ...) { is.marked(X$fit) }

# vcov.lppm: variance-covariance matrix of the parameter estimates;
# only implemented for Poisson models.
vcov.lppm <- function(object, ...) {
  if(!is.poisson(object))
    # BUG FIX: error message previously read "vov.lppm"
    stop("vcov.lppm is only implemented for Poisson models")
  vcov(object$fit, ...)
}

spatstat/R/rat.R0000644000176000001440000000644512237642727013274 0ustar ripleyusers#
# rat.R
#
# Ratio objects
#
# Numerator and denominator are stored as attributes
#
# $Revision: 1.4 $ $Date: 2011/10/18 06:09:02 $
#

# rat: create a ratio ("rat") object.  The numerator and denominator
# are stored as attributes of 'ratio'; if check=TRUE the three
# components are verified to be mutually compatible first.
rat <- function(ratio, numerator, denominator, check=TRUE) {
  if(check) {
    stopifnot(compatible(numerator, denominator))
    stopifnot(compatible(ratio, denominator))
  }
  attr(ratio, "numerator") <- numerator
  attr(ratio, "denominator") <- denominator
  class(ratio) <- c("rat", class(ratio))
  return(ratio)
}

# print.rat: print the underlying object, then flag the ratio info.
print.rat <- function(x, ...) {
  NextMethod("print")
  cat("[Contains ratio information]\n")
  return(invisible(NULL))
}

# compatible.rat: defer to the next applicable compatible() method.
compatible.rat <- function(A, B, ...) {
  NextMethod("compatible")
}

# pool.rat: definition continues on the next original line.
pool.rat <- function(...) {
  argh <- list(...)
n <- narg <- length(argh) if(narg == 0) return(NULL) if(narg == 1) return(argh[[1]]) # israt <- unlist(lapply(argh, inherits, what="rat")) if(any(bad <- !israt)) { nbad <- sum(bad) stop(paste(ngettext(nbad, "Argument", "Arguments"), commasep(which(bad)), ngettext(nbad, "does not", "do not"), "contain ratio (numerator/denominator) information")) } isfv <- unlist(lapply(argh, is.fv)) if(!all(isfv)) stop("All arguments must be fv objects") # extract template <- vanilla.fv(argh[[1]]) Y <- lapply(argh, attr, which="numerator") X <- lapply(argh, attr, which="denominator") templateX <- vanilla.fv(X[[1]]) templateY <- vanilla.fv(Y[[1]]) # sum Add <- function(A,B){ force(A); force(B); eval.fv(A+B) } sumX <- Reduce(Add, X) sumY <- Reduce(Add, Y) attributes(sumX) <- attributes(templateX) attributes(sumY) <- attributes(templateY) # ratio-of-sums Ratio <- eval.fv(sumY/sumX) # variance calculation meanX <- eval.fv(sumX/n) meanY <- eval.fv(sumY/n) Square <- function(A) { force(A); eval.fv(A^2) } sumX2 <- Reduce(Add, lapply(X, Square)) sumY2 <- Reduce(Add, lapply(Y, Square)) varX <- eval.fv((sumX2 - n * meanX^2)/(n-1)) varY <- eval.fv((sumY2 - n * meanY^2)/(n-1)) Mul <- function(A,B){ force(A); force(B); eval.fv(A*B) } XY <- Map(Mul, X, Y) sumXY <- Reduce(Add, XY) covXY <- eval.fv((sumXY - n * meanX * meanY)/(n-1)) # variance by delta method relvar <- eval.fv(pmax.int(0, varY/meanY^2 + varX/meanX^2 - 2 * covXY/(meanX * meanY))) Variance <- eval.fv(Ratio^2 * relvar/n) # two sigma CI hiCI <- eval.fv(Ratio + 2 * sqrt(Variance)) loCI <- eval.fv(Ratio - 2 * sqrt(Variance)) # relabel attributes(Ratio) <- attributes(Variance) <- attributes(template) Ratio <- prefixfv(Ratio, tagprefix="pool", descprefix="pooled ", lablprefix="") Variance <- prefixfv(Variance, tagprefix="var", descprefix="delta-method variance estimate of ", lablprefix="bold(var)~") attributes(hiCI) <- attributes(loCI) <- attributes(template) hiCI <- prefixfv(hiCI, tagprefix="hi", descprefix="upper limit of two-sigma CI 
based on ", lablprefix="bold(hi)~") loCI <- prefixfv(loCI, tagprefix="lo", descprefix="lower limit of two-sigma CI based on ", lablprefix="bold(lo)~") # result <- Reduce(bind.fv, list(Ratio, Variance, hiCI, loCI)) return(result) } spatstat/R/Iest.R0000755000176000001440000000467312237642727013426 0ustar ripleyusers# Iest.R # # I function # # $Revision: 1.12 $ $Date: 2011/04/19 02:31:39 $ # # # Iest <- function(X, ..., eps=NULL, r = NULL, breaks = NULL, correction=NULL) { X <- as.ppp(X) if(!is.multitype(X)) stop("Only applicable to multitype point patterns") marx <- marks(X, dfok=FALSE) ntypes <- length(levels(marx)) Y <- unmark(split(X)) # relative proportions ni <- unlist(lapply(Y, function(Z) { Z$n })) fi <- ni/sum(ni) # J function of pattern regardless of type Jdotdot <- Jest(unmark(X), correction=correction) rvals <- Jdotdot$r # J function of subpattern of each type i Jii <- lapply(Y, Jest, r=rvals, correction=correction) nrvals <- unlist(lapply(Jii, function(x) { length(x$r) })) if(length(unique(nrvals)) != 1 || nrvals[1] != length(rvals)) stop("Internal error: J function objects have different lengths") # initialise fv object alim <- attr(Jdotdot, "alim") Z <- fv(data.frame(r=rvals, theo=0), "r", substitute(I(r), NULL), "theo", . 
~ r, alim, c("r", "%s[pois](r)"), c("distance argument r", "theoretical Poisson %s"), fname="I") # Estimates of each type extractit <- function(Z, what) { Z[[what]] } extract <- function(Zlist, what) { unlist(lapply(Zlist, extractit, what=what)) } namii <- unlist(lapply(Jii, names)) namdd <- names(Jdotdot) bothnames <- namii[namii %in% namdd] if("un" %in% bothnames) { Jun <- matrix(extract(Jii, "un"), nrow=ntypes, byrow=TRUE) Iun <- apply(fi * Jun, 2, sum) - Jdotdot$un Z <- bind.fv(Z, data.frame(un=Iun), "hat(%s)[un](r)", "uncorrected estimate of %s", "un") } if("rs" %in% bothnames) { Jrs <- matrix(extract(Jii, "rs"), nrow=ntypes, byrow=TRUE) Irs <- apply(fi * Jrs, 2, sum) - Jdotdot$rs Z <- bind.fv(Z, data.frame(rs=Irs), "hat(%s)[rs](r)", "border corrected estimate of %s", "rs") } if("han" %in% bothnames) { Jhan <- matrix(extract(Jii, "han"), nrow=ntypes, byrow=TRUE) Ihan <- apply(fi * Jhan, 2, sum) - Jdotdot$han Z <- bind.fv(Z, data.frame(han=Ihan), "hat(%s)[han](r)", "Hanisch-style estimate of %s", "han") } if("km" %in% bothnames) { Jkm <- matrix(extract(Jii, "km"), nrow=ntypes, byrow=TRUE) Ikm <- apply(fi * Jkm, 2, sum) - Jdotdot$km Z <- bind.fv(Z, data.frame(km=Ikm), "hat(%s)[km](r)", "Kaplan-Meier estimate of %s", "km") } unitname(Z) <- unitname(X) return(Z) } spatstat/R/rmh.ppm.R0000755000176000001440000001104512237642727014072 0ustar ripleyusers# # simulation of FITTED model # # $Revision: 1.28 $ $Date: 2013/07/26 05:39:36 $ # # rmh.ppm <- function(model, start = NULL, control = default.rmhcontrol(model), ..., project=TRUE, verbose=TRUE) { verifyclass(model, "ppm") argh <- list(...) if(is.null(control)) { control <- default.rmhcontrol(model) } else { control <- rmhcontrol(control) } # override if(length(list(...)) > 0) control <- update(control, ...) 
# convert fitted model object to list of parameters for rmh.default X <- rmhmodel(model, verbose=verbose, project=project, control=control) # set initial state if(is.null(start)) { datapattern <- data.ppm(model) start <- rmhstart(n.start=datapattern$n) } # call rmh.default # passing only arguments unrecognised by rmhcontrol known <- names(argh) %in% names(formals(rmhcontrol.default)) fargs <- argh[!known] Y <- do.call("rmh.default", append(list(model=X, start=start, control=control, verbose=verbose), fargs)) return(Y) } simulate.ppm <- function(object, nsim=1, ..., singlerun=FALSE, start = NULL, control = default.rmhcontrol(object), project=TRUE, verbose=FALSE, progress=(nsim > 1)) { verifyclass(object, "ppm") argh <- list(...) if(nsim == 0) return(list()) starttime = proc.time() # set up control parameters if(missing(control) || is.null(control)) { rcontr <- default.rmhcontrol(object) } else { rcontr <- rmhcontrol(control) } if(singlerun) { # allow nsave, nburn to determine nrep nsave <- resolve.1.default("nsave", list(...), as.list(rcontr), .MatchNull=FALSE) nburn <- resolve.1.default("nburn", list(...), as.list(rcontr), list(nburn=nsave), .MatchNull=FALSE) if(!is.null(nsave)) { nrep <- nburn + (nsim-1) * nsave rcontr <- update(rcontr, nrep=nrep, nsave=nsave, nburn=nburn) } } # other overrides if(length(list(...)) > 0) rcontr <- update(rcontr, ...) 
# Set up model parameters for rmh rmodel <- rmhmodel(object, verbose=FALSE, project=TRUE, control=rcontr) if(is.null(start)) { datapattern <- data.ppm(object) start <- rmhstart(n.start=datapattern$n) } rstart <- rmhstart(start) ######### if(singlerun && nsim > 1) { # ////////////////////////////////////////////////// # execute one long run and save every k-th iteration if(is.null(rcontr$nsave)) { # determine spacing between subsamples if(!is.null(rcontr$nburn)) { nsave <- max(1, with(rcontr, floor((nrep - nburn)/(nsim-1)))) } else { # assume nburn = 2 * nsave nsave <- max(1, with(rcontr, floor(nrep/(nsim+1)))) nburn <- 2 * nsave } rcontr <- update(rcontr, nsave=nsave, nburn=nburn) } # check nrep is enough nrepmin <- with(rcontr, nburn + (nsim-1) * nsave) if(rcontr$nrep < nrepmin) rcontr <- update(rcontr, nrep=nrepmin) # OK, run it if(progress) { cat(paste("Generating", nsim, "simulated patterns in a single run ... ")) flush.console() } Y <- rmh(rmodel, rstart, rcontr, verbose=verbose) if(progress) cat("Done.\n") # extract sampled states out <- attr(Y, "saved") if(length(out) != nsim) stop(paste("Internal error: wrong number of simulations generated:", length(out), "!=", nsim)) } else { # ////////////////////////////////////////////////// # execute 'nsim' independent runs out <- list() # pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=verbose) # go if(nsim > 0) { if(progress) { cat(paste("Generating", nsim, "simulated", ngettext(nsim, "pattern", "patterns"), "...")) flush.console() } # call rmh # passing only arguments unrecognised by rmhcontrol known <- names(argh) %in% names(formals(rmhcontrol.default)) fargs <- argh[!known] rmhargs <- append(list(InfoList=rmhinfolist, verbose=verbose), fargs) for(i in 1:nsim) { out[[i]] <- do.call("rmhEngine", rmhargs) if(progress) progressreport(i, nsim) } } } out <- as.listof(out) if(nsim > 0) names(out) <- paste("Simulation", 1:nsim) out <- timed(out, starttime=starttime) return(out) } 
spatstat/R/is.subset.owin.R0000755000176000001440000000413212237642727015402 0ustar ripleyusers#
# is.subset.owin.R
#
# $Revision: 1.10 $ $Date: 2013/11/01 06:49:39 $
#
# Determine whether a window is a subset of another window
#
# is.subset.owin()
#
# is.subset.owin(A, B): returns TRUE if window A is (judged to be) a
# subset of window B.  Tries cheap exact tests first (identity,
# rectangle/polygon vertex tests exploiting convexity of B), then
# boundary-crossing and area comparisons, and finally falls back to a
# discretised (pixel mask) test.
is.subset.owin <- local({

  is.subset.owin <- function(A, B) {
    A <- as.owin(A)
    B <- as.owin(B)
    if(identical(A, B))
      return(TRUE)
    A <- rescue.rectangle(A)
    B <- rescue.rectangle(B)
    if(is.rectangle(B)) {
      # Some cases can be resolved using convexity of B
      # (1) A is also a rectangle
      if(is.rectangle(A)) {
        # test the four corners of A
        xx <- A$xrange[c(1,2,2,1)]
        yy <- A$yrange[c(1,1,2,2)]
        ok <- inside.owin(xx, yy, B)
        return(all(ok))
      }
      # (2) A is polygonal
      # Then A is a subset of B iff,
      # for every constituent polygon of A with positive sign,
      # the vertices are all in B
      if(is.polygonal(A)) {
        ok <- unlist(lapply(A$bdry, okpolygon, B=B))
        return(all(ok))
      }
      # (3) Feeling lucky
      # Test whether the bounding box of A is a subset of B
      # Then a fortiori, A is a subset of B
      AA <- bounding.box(A)
      if(is.subset.owin(AA, B))
        return(TRUE)
    }
    if(!is.mask(A) && !is.mask(B)) {
      # rectangles or polygonal domains
      # (second argument of inside.owin deliberately missing:
      #  vertices(A) supplies both coordinates)
      if(!all(inside.owin(vertices(A), , B)))
        return(FALSE)
      # all vertices of A are inside B.
      if(is.convex(B)) return(TRUE)
      A <- as.polygonal(A)
      B <- as.polygonal(B)
      if(length(B$bdry) == 1 && length(A$bdry) == 1) {
        # two simply-connected sets
        # check for boundary crossings
        bx <- crossing.psp(as.psp(A), as.psp(B))
        return(npoints(bx) == 0)
      } else {
        # compare area of intersection with area of A
        return(overlap.owin(A,B) >= area.owin(A))
      }
    }
    # Discretise
    a <- as.mask(A)
    b <- as.mask(B)
    xx <- as.vector(raster.x(a)[a$m])
    yy <- as.vector(raster.y(a)[a$m])
    ok <- inside.owin(xx, yy, b)
    return(all(ok))
  }

  # okpolygon: for one constituent polygon 'a' of a polygonal window,
  # check all its vertices lie in B.  Holes (negative area) are skipped
  # -- they cannot invalidate the subset relation.
  okpolygon <- function(a, B) {
    if(area.xypolygon(a) < 0) return(TRUE)
    ok <- inside.owin(a$x, a$y, B)
    return(all(ok))
  }

  is.subset.owin
})

spatstat/R/scanstat.R0000644000176000001440000002274712237642727014331 0ustar ripleyusers#
# scanstat.R
#
# Spatial scan statistics
#
# $Revision: 1.8 $ $Date: 2013/04/25 06:37:43 $
#

# scanmeasure: S3 generic for the scan measure (count of points in a
# disc of radius r centred at each pixel).
scanmeasure <- function(X, ...){
  UseMethod("scanmeasure")
}

# scanmeasure.ppp: method for point patterns.
# NOTE: this definition continues on the next original line of the file.
scanmeasure.ppp <- function(X, r, ..., method=c("counts", "fft")) {
  method <- match.arg(method)
  # enclosing window
  W <- as.rectangle(as.owin(X))
  # expand domain to include all circles
  W <- grow.rectangle(W, r)
  # determine pixel resolution
  W <- as.mask(W, ...)
  #
  switch(method,
         counts = {
           # direct calculation using C code
           # get new dimensions
           dimyx <- W$dim
           xr <- W$xrange
           yr <- W$yrange
           nr <- dimyx[1]
           nc <- dimyx[2]
           #
           n <- npoints(X)
           DUP <- spatstat.options("dupC")
           zz <- .C("scantrans",
                    x=as.double(X$x),
                    y=as.double(X$y),
                    n=as.integer(n),
                    xmin=as.double(xr[1]),
                    ymin=as.double(yr[1]),
                    xmax=as.double(xr[2]),
                    ymax=as.double(yr[2]),
                    nr=as.integer(nr),
                    nc=as.integer(nc),
                    R=as.double(r),
                    counts=as.integer(numeric(prod(dimyx))),
                    DUP=DUP)
           #        PACKAGE="spatstat")
           zzz <- matrix(zz$counts, nrow=dimyx[1], ncol=dimyx[2], byrow=TRUE)
           Z <- im(zzz, xrange=xr, yrange=yr, unitname=unitname(X))
         },
         fft = {
           # Previous version of scanmeasure.ppp had
           # Y <- pixellate(X, ..., padzero=TRUE)
           # but this is liable to Gibbs phenomena.
# Instead, convolve with small Gaussian (sd = 1 pixel width) sigma <- with(W, unique(c(xstep, ystep))) Y <- density(X, ..., sigma=sigma) # invoke scanmeasure.im Z <- scanmeasure(Y, r) Z <- eval.im(as.integer(round(Z))) }) return(Z) } scanmeasure.im <- function(X, r, ...) { D <- disc(radius=r) eps <- with(X, c(xstep,ystep)) D <- as.im(as.mask(D, eps=eps)) Z <- imcov(X, D) return(Z) } scanPoisLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) ll <- nlogn(nZ, muZ) + nlogn(nZco, muZco) - nlogn(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanBinomLRTS <- function(nZ, nG, muZ, muG, alternative) { nZco <- nG - nZ muZco <- muG - muZ nlogn <- function(n, a) ifelse(n == 0, 0, n * log(n/a)) logbin <- function(k, n) { nlogn(k, n) + nlogn(n-k, n) } ll <- logbin(nZ, muZ) + logbin(nZco, muZco) - logbin(nG, muG) criterion <- (nZ * muZco - muZ * nZco) switch(alternative, less={ ll[criterion > 0] <- 0 }, greater={ ll[criterion < 0] <- 0 }, two.sided={}) return(2 * ll) } scanLRTS <- function(X, r, ..., method=c("poisson", "binomial"), baseline=NULL, case=2, alternative=c("greater", "less", "two.sided")) { stopifnot(is.ppp(X)) method <- match.arg(method) alternative <- match.arg(alternative) switch(method, poisson={ Y <- X Xmask <- as.mask(as.owin(X), ...) 
if(is.null(baseline)) { mu <- as.im(Xmask, value=1) } else if(is.ppm(baseline)) { if(is.marked(baseline)) stop("baseline is a marked point process: not supported") mu <- predict(baseline, locations=Xmask) } else if(is.im(baseline) || is.function(baseline)) { mu <- as.im(baseline, W=Xmask) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) }, binomial={ stopifnot(is.multitype(X)) lev <- levels(marks(X)) if(length(lev) != 2) warning("X should usually be a bivariate (2-type) point pattern") if(!is.null(baseline)) stop("baseline is not supported in the binomial case") if(is.character(case) && !(case %in% lev)) stop(paste("Unrecognised label for cases:", sQuote(case))) if(is.numeric(case) && !(case %in% seq_along(lev))) stop(paste("Undefined level:", case)) Y <- split(X)[[case]] mu <- unmark(X) }) nZ <- scanmeasure(Y, r, ...) muZ <- scanmeasure(mu, r) if(!compatible.im(nZ, muZ)) { ha <- harmonise.im(nZ, muZ) nZ <- ha[[1]] muZ <- ha[[2]] } nG <- npoints(Y) switch(method, poisson = { muG <- integral.im(mu) result <- eval.im(scanPoisLRTS(nZ, nG, muZ, muG, alternative)) }, binomial = { muG <- npoints(mu) result <- eval.im(scanBinomLRTS(nZ, nG, muZ, muG, alternative)) }, { result <- NULL }) return(result) } scan.test <- function(X, r, ..., method=c("poisson", "binomial"), nsim = 19, baseline=NULL, case = 2, alternative=c("greater", "less", "two.sided"), verbose=TRUE) { dataname <- short.deparse(substitute(X)) stopifnot(is.ppp(X)) method <- match.arg(method) alternative <- match.arg(alternative) check.1.real(r) check.1.real(nsim) if(!(round(nsim) == nsim && nsim > 1)) stop("nsim should be an integer > 1") regionname <- paste("circles of radius", r) # # compute observed loglikelihood function # This also validates the arguments. obsLRTS <- scanLRTS(X=X, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ...) 
obs <- max(obsLRTS) sim <- numeric(nsim) # determine how to simulate switch(method, binomial={ methodname <- c("Spatial scan test", "Null hypothesis: constant relative risk", paste("Candidate cluster regions:", regionname), "Likelihood: binomial", paste("Monte Carlo p-value based on", nsim, "simulations")) lev <- levels(marks(X)) names(lev) <- lev casename <- lev[case] counted <- paste("points with mark", sQuote(casename), "inside cluster region") simexpr <- expression(rlabel(X)) }, poisson={ counted <- paste("points inside cluster region") X <- unmark(X) Xwin <- as.owin(X) Xmask <- as.mask(Xwin, ...) if(is.null(baseline)) { nullname <- "Complete Spatial Randomness (CSR)" lambda <- summary(X)$intensity simexpr <- expression(runifpoispp(lambda, Xwin)) } else if(is.ppm(baseline)) { nullname <- baseline$callstring rmhstuff <- rmh(baseline, preponly=TRUE, verbose=FALSE) simexpr <- expression(rmhEngine(rmhstuff)) } else if(is.im(baseline) || is.function(baseline)) { nullname <- "Poisson process with intensity proportional to baseline" base <- as.im(baseline, W=Xmask) alpha <- npoints(X)/integral.im(base) lambda <- eval.im(alpha * base) simexpr <- expression(rpoispp(lambda)) } else stop(paste("baseline should be", "a pixel image, a function, or a fitted model")) methodname <- c("Spatial scan test", paste("Null hypothesis:", nullname), paste("Candidate cluster regions:", regionname), "Likelihood: Poisson", paste("Monte Carlo p-value based on", nsim, "simulations")) }) if(verbose) cat("Simulating...") for(i in 1:nsim) { if(verbose) progressreport(i, nsim) Xsim <- eval(simexpr) simLRTS <- scanLRTS(X=Xsim, r=r, method=method, alternative=alternative, baseline=baseline, case=case, ...) 
sim[i] <- max(simLRTS) } pval <- mean(c(sim,obs) >= obs, na.rm=TRUE) names(obs) <- "maxLRTS" nm.alternative <- switch(alternative, greater="Excess of", less="Deficit of", two.sided="Two-sided: excess or deficit of", stop("Unknown alternative")) nm.alternative <- paste(nm.alternative, counted) result <- list(statistic = obs, p.value = pval, alternative = nm.alternative, method = methodname, data.name = dataname) class(result) <- c("scan.test", "htest") attr(result, "obsLRTS") <- obsLRTS attr(result, "X") <- X return(result) } plot.scan.test <- function(x, ..., do.window=TRUE) { xname <- short.deparse(substitute(x)) Z <- as.im(x) do.call("plot", resolve.defaults(list(x=Z), list(...), list(main=xname))) if(do.window) { X <- attr(x, "X") plot(as.owin(X), add=TRUE) } invisible(NULL) } as.im.scan.test <- function(X, ...) { X <- attr(X, "obsLRTS") return(as.im(X, ...)) } spatstat/R/inforder.family.R0000755000176000001440000000653012237642727015604 0ustar ripleyusers# # # inforder.family.R # # $Revision: 1.2 $ $Date: 2010/07/10 10:22:09 $ # # Family of `infinite-order' point process models # # inforder.family: object of class 'isf' # # # ------------------------------------------------------------------- # inforder.family <- list( name = "inforder", print = function(self) { cat("Family of infinite-order interactions\n") }, plot = NULL, # ---------------------------------------------------- eval = function(X,U,EqualPairs,pot,pars,correction, ...) { # # This is the eval function for the `inforder' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `inforder' family of interactions with infinite order, # there are no structures common to all interactions. 
# So this function simply invokes the potential 'pot' directly # and expects 'pot' to return the values of the sufficient statistic S(u,X). # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pairpot list(......) # correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ########################################################################## # POTENTIAL: # In this case the potential function 'pot' should have arguments # pot(X, U, EqualPairs, pars, correction, ...) # # It must return a vector with length equal to the number of points in U, # or a matrix with as many rows as there are points in U. if(!is.ppp(U)) U <- ppp(U$x, U$y, window=X$window) POT <- pot(X, U, EqualPairs, pars, correction, ...) 
if(is.matrix(POT)) { if(nrow(POT) != U$n) stop("Internal error: the potential returned a matrix with the wrong number of rows") } else if(is.array(POT) && length(dim(POT)) > 2) stop("Internal error: the potential returned an array with more than 2 dimensions") else if(is.vector(POT)) { if(length(POT) != U$n) stop("Internal error: the potential returned a vector with the wrong length") POT <- matrix(POT, ncol=1) } else stop("Internal error: the return value from the potential is not understood") return(POT) }, ######### end of function $eval suffstat = NULL ######### end of function $suffstat ) ######### end of list class(inforder.family) <- "isf" spatstat/R/util.R0000755000176000001440000007316512246336030013464 0ustar ripleyusers# # util.S miscellaneous utilities # # $Revision: 1.125 $ $Date: 2013/11/29 01:50:26 $ # # matrowsum <- function(x) { x %*% rep.int(1, ncol(x)) } matcolsum <- function(x) { rep.int(1, nrow(x)) %*% x } matrowany <- function(x) { (matrowsum(x) > 0) } matrowall <- function(x) { (matrowsum(x) == ncol(x)) } matcolany <- function(x) { (matcolsum(x) > 0) } matcolall <- function(x) { (matcolsum(x) == nrow(x)) } ######## # hm, this is SLOWER apply23sum <- function(x) { dimx <- dim(x) if(length(dimx) != 3) stop("x is not a 3D array") result <- array(0, dimx[-1]) nz <- dimx[3] for(k in 1:nz) { result[,k] <- matcolsum(x[,,k]) } result } ####################### # # whist weighted histogram # whist <- function(x, breaks, weights=NULL) { N <- length(breaks) if(length(x) == 0) h <- numeric(N+1) else { # classify data into histogram cells (breaks need not span range of data) cell <- findInterval(x, breaks, rightmost.closed=TRUE) cell <- factor(cell, levels=0:N) # compute weighted histogram if(is.null(weights)) h <- table(cell) else h <- unlist(lapply(split(weights, cell), sum, na.rm=TRUE)) } h <- as.numeric(h) y <- h[2:N] attr(y, "low") <- h[1] attr(y, "high") <- h[N+1] return(y) } ###################### # # matrixsample subsample or supersample a matrix # 
matrixsample <- function(mat, newdim, phase=c(0,0), scale, na.value=NA) { # 'phase+1' is the position of the [1,1] corner of the new matrix # expressed in the coordinates of the old matrix. # 'scale' is the size of one step in the new matrix, # expressed in the coordinates of the old matrix. # Both 'phase' and 'scale' can take any real value. olddim <- dim(mat) if(missing(scale)) scale <- (olddim - 1)/(newdim - 1) scale <- ensure2vector(scale) newdim <- ensure2vector(newdim) newmat <- matrix(na.value, newdim[1], newdim[2]) newrow <- 1:newdim[1] newcol <- 1:newdim[2] oldrow <- round(1 + phase[1] + (newrow-1) * scale[1]) oldcol <- round(1 + phase[2] + (newcol-1) * scale[2]) oldrow.ok <- (oldrow >= 1) & (oldrow <= olddim[1]) oldcol.ok <- (oldcol >= 1) & (oldcol <= olddim[2]) newmat[oldrow.ok, oldcol.ok] <- mat[oldrow[oldrow.ok], oldcol[oldcol.ok]] return(newmat) } # common invocation of matrixsample rastersample <- function(X, Y) { stopifnot(is.im(X) || is.mask(X)) stopifnot(is.im(Y) || is.mask(Y)) phase <- c((Y$yrow[1] - X$yrow[1])/X$ystep, (Y$xcol[1] - X$xcol[1])/X$xstep) scale <- c(Y$ystep/X$ystep, Y$xstep/X$xstep) if(is.im(X)) { # resample an image if(!is.im(Y)) Y <- as.im(Y) Xtype <- X$type Xv <- X$v # handle factor-valued image as integer if(Xtype == "factor") Xv <- array(as.integer(Xv), dim=X$dim) # resample naval <- switch(Xtype, factor=, integer= NA_integer_, logical = as.logical(NA_integer_), real = NA_real_, complex = NA_complex_, character = NA_character_, NA) Y$v <- matrixsample(Xv, Y$dim, phase=phase, scale=scale, na.value=naval) # inherit pixel data type from X Y$type <- Xtype if(Xtype == "factor") { Y$v <- factor(Y$v, labels=levels(X)) dim(Y$v) <- Y$dim } } else { # resample a mask if(!is.mask(Y)) Y <- as.mask(Y) Y$m <- matrixsample(X$m, Y$dim, phase=phase, scale=scale, na.value=FALSE) } return(Y) } pointgrid <- function(W, ngrid) { W <- as.owin(W) masque <- as.mask(W, dimyx=ngrid) xx <- raster.x(masque) yy <- raster.y(masque) xx <- xx[masque$m] yy <- 
yy[masque$m] return(ppp(xx, yy, W)) } # text magic commasep <- function(x, join="and") { px <- paste(x) nx <- length(px) if(nx <= 1) return(px) commas <- c(rep(", ", length(px)-2), paste("", join, ""), "") return(paste(paste(px, commas, sep=""), collapse="")) } paren <- function(x, type="(") { switch(type, "(" = { out <- paste("(", x, ")", sep="") }, "[" = { out <- paste("[", x, "]", sep="") }, "{" = { out <- paste("{", x, "}", sep="") }, stop(paste("Unrecognised parenthesis type:", sQuote(type))) ) out } unparen <- function(x) { x <- as.character(x) firstchar <- substr(x, 1, 1) n <- nchar(x) lastchar <- substr(x, n, n) enclosed <- n > 2 & ( (firstchar == "(" & lastchar == ")") | (firstchar == "[" & lastchar == "]") | (firstchar == "{" & lastchar == "}") ) if(any(enclosed)) x[enclosed] <- substr(x[enclosed], 2, n-1) return(x) } fakecallstring <- function(fname, parlist) { cl <- do.call("call", append(list(name = fname), parlist)) return(format(cl)) } prange <- function(x) { stopifnot(length(x) == 2) paren(paste(x, collapse=", "), "[") } ordinal <- function(k) { last <- abs(k) %% 10 lasttwo <- abs(k) %% 100 isteen <- (lasttwo > 10 & lasttwo < 20) ending <- ifelse(isteen, "th", ifelse(last == 1, "st", ifelse(last == 2, "nd", ifelse(last == 3, "rd", "th")))) return(paste(k, ending, sep="")) } # equivalent to rev(cumsum(rev(x))) revcumsum <- function(x) { n <- length(x) if(identical(storage.mode(x), "integer")) { z <- .C("irevcumsum", x=as.integer(x), as.integer(n)) # PACKAGE="spatstat") return(z$x) } else { z <- .C("drevcumsum", x=as.double(x), as.integer(n)) # PACKAGE="spatstat") return(z$x) } } prolongseq <- function(x, newrange) { stopifnot(length(newrange) == 2 && newrange[1] < newrange[2]) stopifnot(length(x) >= 2) dx <- diff(x) if(any(dx <= 0)) stop("x must be an increasing sequence") if(diff(range(dx)) > 0.01 * abs(mean(dx))) stop("x must be evenly spaced") dx <- mean(dx) # add or trim data to left if(x[1] > newrange[1]) { leftbit <- seq(from=x[1], 
to=newrange[1], by= -dx) x <- c(rev(leftbit), x[-1]) } else x <- x[x >= newrange[1]] # add or trim data to right nx <- length(x) if(newrange[2] > x[nx]) { rightbit <- seq(from=x[nx], to=newrange[2], by= dx) x <- c(x[-nx], rightbit) } else x <- x[x <= newrange[2]] return(x) } intersect.ranges <- function(a, b, fatal=TRUE) { lo <- max(a[1],b[1]) hi <- min(a[2],b[2]) if(lo >= hi) { if(fatal) stop("Intersection is empty") else return(NULL) } return(c(lo, hi)) } inside.range <- function(x, r) { stopifnot(length(r) == 2 && r[1] < r[2]) return(x >= r[1] & x <= r[2]) } prettyinside <- function(x, ...) { r <- range(x, na.rm=TRUE) p <- pretty(x, ...) ok <- inside.range(p, r) return(p[ok]) } check.range <- function(x, fatal=TRUE) { xname <- deparse(substitute(x)) if(identical(x, range(x, na.rm=TRUE))) return(TRUE) if(fatal) stop(paste(xname, "should be a vector of length 2 giving (min, max)")) return(FALSE) } niceround <- function(x, m=c(1,2,5,10)) { expo <- 10^as.integer(floor(log10(x))) y <- m * expo z <- y[which.min(abs(y - x))] return(z) } assign(".Spatstat.ProgressBar", NULL, envir = .spEnv) assign(".Spatstat.ProgressData", NULL, envir = .spEnv) progressreport <- function(i, n, every=min(100,max(1, ceiling(n/100))), nperline=min(charsperline, every * ceiling(charsperline /(every+3))), charsperline=60, style=spatstat.options("progress")) { missevery <- missing(every) if(i > n) { warning(paste("progressreport called with i =", i, "> n =", n)) return(invisible(NULL)) } switch(style, txtbar={ if(i == 1) { # initialise text bar assign(".Spatstat.ProgressBar", txtProgressBar(1, n, 1, style=3), envir = .spEnv) } else { # get text bar pbar <- get(".Spatstat.ProgressBar", envir = .spEnv) # update setTxtProgressBar(pbar, i) if(i == n) { close(pbar) assign(".Spatstat.ProgressBar", NULL, envir = .spEnv) } } }, tty={ now <- proc.time() if(i == 1) { # Initialise stuff if(missevery && every > 1 && n > 10) { every <- niceround(every) nperline <- min(charsperline, every * 
ceiling(charsperline /(every+3))) } showtime <- FALSE showevery <- n assign(".Spatstat.ProgressData", list(every=every, nperline=nperline, starttime=now, showtime=FALSE, showevery=n), envir=.spEnv) } else { pd <- get(".Spatstat.ProgressData", envir=.spEnv) if(is.null(pd)) stop(paste("progressreport called with i =", i, "before i = 1")) every <- pd$every nperline <- pd$nperline showtime <- pd$showtime showevery <- pd$showevery if(i < n) { # estimate time remaining starttime <- pd$starttime elapsed <- now - starttime elapsed <- unname(elapsed[3]) rate <- elapsed/(i-1) remaining <- rate * (n-i) if(!showtime) { # show time remaining if.. if(rate > 20) { # .. rate is very slow showtime <- TRUE showevery <- 1 } else if(remaining > 180) { # ... more than 3 minutes remaining showtime <- TRUE showevery <- every aminute <- ceiling(60/rate) if(aminute < showevery) showevery <- min(niceround(aminute), showevery) } } assign(".Spatstat.ProgressData", list(every=every, nperline=nperline, starttime=starttime, showtime=showtime, showevery=showevery), envir=.spEnv) } } if(i == n) cat(paste(" ", n, ".\n", sep="")) else if(every == 1 || i <= 3) cat(paste(i, ",", if(i %% nperline == 0) "\n" else " ", sep="")) else { if(i %% every == 0) cat(i) else cat(".") if(i %% nperline == 0) cat("\n") } if(i < n && showtime && (i %% showevery == 0)) { st <- paste("etd", codetime(round(remaining))) st <- paren(st, "[") cat(paste("", st, "")) } flush.console() }, stop(paste("Unrecognised option for style:", dQuote(style))) ) return(invisible(NULL)) } numalign <- function(i, nmax, zero="0") { stopifnot(i <= nmax) nplaces <- as.integer(ceiling(log10(nmax+1))) out <- blank <- paste(rep(zero, nplaces), collapse="") istring <- paste(i) ilen <- nchar(istring) substr(out, nplaces-ilen+1, nplaces) <- istring return(out) } ensure2vector <- function(x) { xname <- deparse(substitute(x)) if(!is.numeric(x)) stop(paste(xname, "is not numeric")) n <- length(x) if(n == 0 || n > 2) stop(paste(xname, "should be of 
length 1 or 2")) if(n == 1) return(rep(x,2)) return(x) } ensure3Darray <- function(x) { nd <- length(dim(x)) if(nd == 0) { x <- array(x, dim=c(length(x), 1, 1)) } else if(nd == 2) { x <- array(x, dim=c(dim(x), 1)) } else if(nd > 3) { laterdims <- dim(x)[-(1:3)] if(any(laterdims != 1)) stop("Higher-dimensional array cannot be reduced to 3 dimensions") x <- array(x, dim=dim(x)[1:3]) } return(x) } check.nvector <- function(v, npoints, fatal=TRUE, things="data points", naok=FALSE) { # vector of numeric values for each point/thing vname <- sQuote(deparse(substitute(v))) whinge <- NULL if(!is.numeric(v)) whinge <- paste(vname, "is not numeric") else if(!is.atomic(v) || !is.null(dim(v))) # vector with attributes whinge <- paste(vname, "is not a vector") else if(length(v) != npoints) whinge <- paste("The length of", vname, "should equal the number of", things) else if(!naok && any(is.na(v))) whinge <- paste("Some values of", vname, "are NA or NaN") # if(!is.null(whinge)) { if(fatal) stop(whinge) else return(FALSE) } return(TRUE) } check.nmatrix <- function(m, npoints, fatal=TRUE, things="data points", naok=FALSE, squarematrix=TRUE, matchto="nrow") { # matrix of values for each thing or each pair of things mname <- sQuote(deparse(substitute(m))) whinge <- NULL if(!is.matrix(m)) whinge <- paste(mname, "should be a matrix") else if(squarematrix && (nrow(m) != ncol(m))) whinge <- paste(mname, "should be a square matrix") else if(!naok && any(is.na(m))) whinge <- paste("Some values of", mname, "are NA or NaN") else if(matchto=="nrow" && nrow(m) != npoints) whinge <- paste("Number of rows in", mname, "does not match number of", things) else if(matchto=="ncol" && ncol(m) != npoints) whinge <- paste("Number of columns in", mname, "does not match number of", things) # if(!is.null(whinge)) { if(fatal) stop(whinge) else return(FALSE) } return(TRUE) } check.named.vector <- function(x, nam, context="", namopt=character(0)) { xtitle <- deparse(substitute(x)) check.named.thing(x, nam, 
namopt, sQuote(xtitle), is.numeric(x), "vector", context) opt <- namopt %in% names(x) return(x[c(nam, namopt[opt])]) } check.named.list <- function(x, nam, context="", namopt=character(0)) { xtitle <- deparse(substitute(x)) check.named.thing(x, nam, namopt, sQuote(xtitle), is.list(x), "list", context) opt <- namopt %in% names(x) return(x[c(nam, namopt[opt])]) } check.named.thing <- function(x, nam, namopt=character(0), xtitle=NULL, valid=TRUE, type="object", context="", fatal=TRUE) { if(is.null(xtitle)) xtitle <- sQuote(deparse(substitute(x))) # check whether names(x) contains all obligatory names 'nam' # and possibly some of the optional names 'namopt' namesx <- names(x) omitted <- !(nam %in% namesx) foreign <- !(namesx %in% c(nam, namopt)) if(valid && !any(omitted) && !any(foreign)) return(character(0)) # some condition violated if(nzchar(context)) xtitle <- paste(context, xtitle) whinge <- paste(xtitle, "must be a named", paste(type, ",", sep=""), "with components", commasep(nam)) if(length(namopt) > 0) whinge <- paste(whinge, paren(paste("and optionally", commasep(namopt)))) if(any(omitted)) { grizzle <- paste(ngettext(sum(omitted), "parameter", "parameters"), commasep(nam[omitted]), "omitted") whinge <- paste(whinge, grizzle, sep="; ") } if(any(foreign)) { grizzle <- paste(ngettext(sum(foreign), "component", "components"), commasep(namesx[foreign]), "not recognised") whinge <- paste(whinge, grizzle, sep="; ") } if(fatal) stop(whinge, call.=FALSE) return(whinge) } forbidNA <- function(x, context="", xname, fatal=TRUE, usergiven=TRUE) { if(missing(xname)) xname <- sQuote(deparse(substitute(x))) if(any(is.na(x))) { if(usergiven) { # argument came from user offence <- ngettext(length(x), "be NA", "contain NA values") whinge <- paste(context, xname, "must not", offence) } else { # argument was computed internally violates <- ngettext(length(x), "is NA", "contains NA values") whinge <- paste(context, xname, violates) } if(fatal) stop(whinge, call.=FALSE) 
warning(whinge, call.=FALSE) return(FALSE) } return(TRUE) } check.finite <- function(x, context="", xname, fatal=TRUE, usergiven=TRUE) { if(missing(xname)) xname <- sQuote(deparse(substitute(x))) forbidNA(x, context, xname, fatal=fatal, usergiven=usergiven) if(any(!is.finite(x))) { if(usergiven) { # argument came from user oblige <- ngettext(length(x), "be a finite value", "contain finite values") whinge <- paste(context, xname, "must", oblige) } else { # argument was computed internally violates <- ngettext(length(x), "is not finite", "contains non-finite values") whinge <- paste(context, xname, violates) } if(fatal) stop(whinge, call.=FALSE) warning(whinge, call.=FALSE) return(FALSE) } return(TRUE) } evenly.spaced <- function(x, tol=1e-07) { # test whether x is evenly spaced and increasing dx <- diff(x) if(any(dx <= .Machine$double.eps)) return(FALSE) # The following test for equal spacing is used in hist.default if(diff(range(dx)) > tol * mean(dx)) return(FALSE) return(TRUE) } adjustthinrange <- function(ur,vstep,vr) { if(diff(ur) >= vstep) return(ur) ur <- mean(ur) + c(-1,1) * vstep/2 if(ur[1] < vr[1]) ur <- vr[1] + c(0,1)*vstep if(ur[2] > vr[2]) ur <- vr[2] - c(1,0)*vstep return(ur) } validposint <- function(n, caller, fatal=TRUE) { nname <- deparse(substitute(n)) if(length(n) != 1 || n != round(n) || n <=0) { if(!fatal) return(FALSE) prefix <- if(!missing(caller)) paste("In ", caller, ",", sep="") else NULL stop(paste(prefix, nname, "should be a single positive integer"), call.=FALSE) } return(TRUE) } # wrangle data.frames firstfactor <- function(x) { stopifnot(is.data.frame(x) || is.hyperframe(x)) isfac <- unlist(lapply(as.list(x), is.factor)) if(!any(isfac)) return(NULL) return(x[, min(which(isfac)), drop=TRUE]) } onecolumn <- function(m) { switch(markformat(m), none=stop("No marks provided"), vector=m, dataframe=m[,1, drop=TRUE], NA) } # errors and checks complaining <- function(whinge, fatal=FALSE, value=NULL) { if(fatal) stop(whinge, call.=FALSE) 
warning(whinge, call.=FALSE) return(value) } check.1.real <- function(x, context="", fatal=TRUE) { xname <- deparse(substitute(x)) if(!is.numeric(x) || length(x) != 1) { whinge <- paste(sQuote(xname), "should be a single number") if(nzchar(context)) whinge <- paste(context, whinge) return(complaining(whinge, fatal=fatal, value=FALSE)) } return(TRUE) } check.1.integer <- function(x, context="", fatal=TRUE) { xname <- deparse(substitute(x)) if(!is.numeric(x) || length(x) != 1 || !is.finite(x) || x %% 1 != 0) { whinge <- paste(sQuote(xname), "should be a single finite integer") if(nzchar(context)) whinge <- paste(context, whinge) return(complaining(whinge, fatal=fatal, value=FALSE)) } return(TRUE) } explain.ifnot <- function(expr, context="") { ex <- deparse(substitute(expr)) ans <- expr if(!(is.logical(ans) && length(ans) == 1 && ans)) stop(paste(context, "it must be TRUE that", sQuote(ex)), call.=FALSE) } warn.ignored.args <- function(..., context=NULL) { if((narg <- length(list(...))) > 0) { whinge <- paste(narg, "unrecognised", ngettext(narg, "argument was", "arguments were"), "ignored") if(!is.null(context)) whinge <- paste(context, whinge) warning(context) } } multiply.only.finite.entries <- function(x, a) { # In ppm a potential value that is -Inf must remain -Inf # and a potential value that is 0 multiplied by NA remains 0 y <- x ok <- is.finite(x) & (x != 0) y[ok] <- a * x[ok] return(y) } singlestring <- function(s, coll="") { s <- as.character(s) if(length(s) > 1) s <- paste(s, collapse=coll) return(s) } verbalogic <- function(x, op="and") { stopifnot(is.character(x)) istrue <- (x == "TRUE") isfalse <- (x == "FALSE") isvariable <- !istrue & !isfalse y <- x[isvariable] switch(op, and={ if(any(isfalse)) return("FALSE") if(all(istrue)) return("TRUE") return(paste(y, collapse=" and ")) }, or={ if(all(isfalse)) return("FALSE") if(any(istrue)) return("TRUE") return(paste(y, collapse=" or ")) }, not={ x[isfalse] <- "TRUE" x[istrue] <- "FALSE" x[isvariable] <- 
paste("not {", y, "}") }, stop(paste("Unrecognised operation", sQuote(op)))) } sensiblevarname <- function(guess, fallback, maxlen=12) { out <- if(is.character(guess) && length(guess) == 1 && make.names(guess) == guess) guess else fallback out <- substr(out, 1, maxlen) return(out) } short.deparse <- function(x, maxlen=60) { deparse(x, nlines=1, width.cutoff=maxlen, control="delayPromises") } good.names <- function(nama, defaults, suffices) { # ensure sensible, unique names stopifnot(is.character(defaults)) if(!missing(suffices)) defaults <- paste(defaults, suffices, sep="") result <- nama if(is.null(result)) result <- defaults else if(any(blank <- !nzchar(result))) result[blank] <- defaults[blank] if(any(duplicated(result))) result <- make.names(result, unique=TRUE) return(result) } cat.factor <- function (..., recursive=FALSE) { lll <- list(...) chk <- sapply(lll,is.factor) if(!all(chk)) stop("First argument is a factor and at least one other argument is not.\n") lll <- lapply(lll,as.data.frame,nm="v1") return(do.call(rbind,lll)[,1]) } nzpaste <- function(..., sep=" ", collapse=NULL) { # Paste only the non-empty strings v <- list(...) ok <- unlist(lapply(v, function(z) {any(nzchar(z))})) do.call("paste", append(v[ok], list(sep=sep, collapse=collapse))) } is.parseable <- function(x) { unlist(lapply(x, function(z) { !inherits(try(parse(text=z), silent=TRUE), "try-error") })) } make.parseable <- function(x) { if(all(is.parseable(x))) x else make.names(x) } # paste(expression(..)) seems to be broken paste.expr <- function(x) { unlist(lapply(x, function(z) { paste(deparse(z), collapse="") })) } # gsub(".", replacement, x) but only when "." appears as a variable gsubdot <- function(replacement, x) { x <- as.character(x) stopifnot(length(x) == 1) # find all positions of "." in x dotpos <- gregexpr("\\.", x)[[1]] if(all(dotpos == -1)) return(x) # find all positions of "." 
preceded or followed by alphanumeric dotbefore <- gregexpr("\\.[0-9A-Za-z]", x)[[1]] dotafter <- gregexpr("[0-9A-Za-z]\\.", x)[[1]] - 1 # exclude them dotpos <- setdiff(dotpos, union(dotbefore, dotafter)) # if(length(dotpos) == 0) return(x) lenrep <-length(replacement) while(length(dotpos) > 0) { dp <- dotpos[1] x <- paste0(substr(x, 0, dp-1), replacement, substr(x, dp+1, nchar(x))) dotpos <- dotpos[-1] + lenrep-1 } return(x) } badprobability <- function(x, NAvalue=NA) { ifelse(is.na(x), NAvalue, !is.finite(x) | x < 0 | x > 1) } # test for equivalence of two functions samefunction <- function(f, g) { identical(deparse(f), deparse(g)) } codetime <- local({ uname <- c("min", "hours", "days", "years", "thousand years", "million years", "billion years") u1name <- c("min", "hour", "day", "year", "thousand years", "million years", "billion years") multiple <- c(60, 60, 24, 365, 1e3, 1e3, 1e3) codehms <- function(x) { sgn <- if(x < 0) "-" else "" x <- round(abs(x)) hours <- x %/% 3600 mins <- (x %/% 60) %% 60 secs <- x %% 60 h <- if(hours > 0) paste(hours, ":", sep="") else "" started <- (hours > 0) m <- if(mins > 0) { paste(if(mins < 10 && started) "0" else "", mins, ":", sep="") } else if(started) "00:" else "" started <- started | (mins > 0) s <- if(secs > 0) { paste(if(secs < 10 && started) "0" else "", secs, sep="") } else if(started) "00" else "0" if(!started) s <- paste(s, "sec") paste(sgn, h, m, s, sep="") } codetime <- function(x, hms=TRUE, what=c("elapsed","user","system")) { if(inherits(x, "proc_time")) x <- summary(x)[[match.arg(what)]] if(!is.numeric(x) || length(x) != 1) stop("codetime: x must be a proc_time object or a single number") sgn <- if(x < 0) "-" else "" x <- abs(x) if(x < 60) return(paste(sgn, signif(x, 3), " sec", sep="")) # more than 1 minute: round to whole number of seconds x <- round(x) if(hms && (x < 60 * 60 * 24)) return(paste(sgn, codehms(x), sep="")) u <- u1 <- "sec" for(k in seq_along(multiple)) { if(x >= multiple[k]) { x <- 
x/multiple[k] u <- uname[k] u1 <- u1name[k] } else break } xx <- round(x, 1) ux <- if(xx == 1) u1 else u paste(sgn, xx, " ", ux, sep="") } codetime }) # defines the current favorite algorithm for 'order' fave.order <- function(x) { sort.list(x, method="quick", na.last=NA) } # convert any appropriate subset index for a point pattern # to a logical vector ppsubset <- function(X, I) { Iname <- deparse(substitute(I)) # I could be a function to be applied to X if(is.function(I)) { I <- I(X) if(!is.vector(I)) { warning(paste("Function", sQuote(Iname), "did not return a vector"), call.=FALSE) return(NULL) } } # I is now an index vector n <- npoints(X) i <- try(seq_len(n)[I]) if(inherits(i, "try-error") || any(is.na(i))) { warning(paste("Invalid subset index", sQuote(Iname)), call.=FALSE) return(NULL) } if(is.logical(I)) return(I) # convert to logical Z <- rep.int(FALSE, n) Z[I] <- TRUE return(Z) } trap.extra.arguments <- function(..., .Context="", .Fatal=FALSE) { z <- list(...) if((narg <- length(z)) == 0) return(FALSE) nama <- names(z) named <- nzchar(nama) whinge <- paste(.Context, ":", sep="") if(any(named)) { # some arguments are named: ensure all are named nama <- sQuote(nama) if(!all(named)) nama[!named] <- paste("[Arg", 1:length(nama), ,"]", sep="")[!named] whinge <- paste(whinge, "unrecognised", ngettext(narg, "argument", "arguments"), commasep(nama), ngettext(narg, "was", "were"), "ignored") } else { # all arguments unnamed whinge <- paste(whinge, narg, "unrecognised", ngettext(narg, "argument was", "arguments were"), "ignored") } if(.Fatal) stop(whinge, call.=FALSE) else warning(whinge, call.=FALSE) return(TRUE) } dotexpr.to.call <- function(expr, dot="funX", evaluator="eval.fv") { # convert an expression into a function call # replacing "." 
by the specified variable stopifnot(is.expression(expr)) aa <- substitute(substitute(ee, list(.=as.name(d))), list(ee=expr, d=dot)) bb <- eval(parse(text=deparse(aa))) cc <- as.call(bb) cc[[1]] <- as.name("eval.fv") return(cc) } # print names and version numbers of libraries loaded sessionLibs <- function() { a <- sessionInfo() b <- unlist(lapply(a$otherPkgs, getElement, name="Version")) g <- rbind(names(b), unname(b)) d <- apply(g, 2, paste, collapse=" ") if(length(d) > 0) { cat("Libraries loaded:\n") for(di in d) cat(paste("\t", di, "\n")) } else cat("Libraries loaded: None\n") return(invisible(d)) } dropifsingle <- function(x) if(length(x) == 1) x[[1]] else x # timed objects timed <- function(x, ..., starttime=NULL, timetaken=NULL) { if(is.null(starttime)) # time starts now. starttime <- proc.time() # evaluate expression if any object <- x timetaken <- proc.time() - starttime class(object) <- c("timed", class(object)) attr(object, "timetaken") <- timetaken return(object) } print.timed <- function(x, ...) { # strip the timing information and print the rest. taken <- attr(x, "timetaken") cx <- class(x) attr(x, "timetaken") <- NULL class(x) <- cx[cx != "timed"] NextMethod("print") # Now print the timing info cat(paste("\nTime taken:", codetime(taken), "\n")) return(invisible(NULL)) } # wrapper for computing weighted variance of a vector # Note: this includes a factor 1 - sum(v^2) in the denominator # where v = w/sum(w). 
See help(cov.wt) weighted.var <- function(x, w, na.rm=FALSE) { bad <- is.na(w) | is.na(x) if(any(bad)) { if(!na.rm) return(NA_real_) ok <- !bad x <- x[ok] w <- w[ok] } cov.wt(matrix(x, ncol=1),w)$cov[] } # efficient replacements for ifelse() # 'a' and 'b' are single values # 'x' and 'y' are vectors of the same length as 'test' # ifelse(test, a, b) ifelseAB <- function(test, a, b) { y <- rep.int(b, length(test)) y[test] <- a return(y) } # ifelse(test, a, x) ifelseAX <- function(test, a, x) { y <- x y[test] <- a return(y) } # ifelse(test, x, b) ifelseXB <- function(test, x, b) { y <- rep.int(b, length(test)) y[test] <- x[test] return(y) } # ifelse(test, x, y) ifelseXY <- function(test, x, y) { z <- y z[test] <- x[test] return(z) } #.... very special cases ...... # ifelse(test, 1, NA) ifelse1NA <- function(test) { y <- as.integer(test) y[!test] <- NA return(y) } # ifelse(test, 0, NA) ifelse0NA <- function(test) { nyet <- !test y <- as.integer(nyet) y[nyet] <- NA return(y) } # ifelse(test, -x, x) ifelseNegPos <- function(test, x) { y <- x y[test] <- -x[test] return(y) } # .................. 
"%orifnull%" <- function(a, b) { if(!is.null(a)) return(a) # b is evaluated only now return(b) } spatstat/R/lennard.R0000755000176000001440000000674512237642727014147 0ustar ripleyusers# # # lennard.R # # $Revision: 1.17 $ $Date: 2012/01/18 10:56:30 $ # # Lennard-Jones potential # # # ------------------------------------------------------------------- # LennardJones <- local({ BlankLJ <- list( name = "Lennard-Jones process", creator = "LennardJones", family = "pairwise.family", # evaluated later pot = function(d, par) { sig0 <- par$sigma0 if(is.na(sig0)) { d6 <- d^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) } else { # expand around sig0 and set large numbers to Inf drat <- d/sig0 d6 <- drat^{-6} p <- array(c(-d6^2,d6),dim=c(dim(d),2)) small <- (drat < 1/4) small <- array(c(small, small), dim=c(dim(d), 2)) p[small] <- -Inf big <- (drat > 4) big <- array(c(big, big), dim=c(dim(d), 2)) p[big] <- 0 } return(p) }, par = list(sigma0=NULL), # filled in later parnames = "Initial approximation to sigma", selfstart = function(X, self) { # self starter for Lennard Jones # attempt to set value of 'sigma0' if(!is.na(self$par$sigma0)) { # value fixed by user or previous invocation return(self) } if(npoints(X) < 2) { # not enough points return(self) } s0 <- min(nndist(X)) if(s0 == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Lennard-Jones model")) s0 <- mean(nndist(X)) if(s0 == 0) return(self) } LennardJones(s0) }, init = function(...){}, # do nothing update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta1 <- as.numeric(coeffs[1]) theta2 <- as.numeric(coeffs[2]) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 if(sign(theta1) * sign(theta2) == 1) { sigma <- sig0 * (theta1/theta2)^(1/6) epsilon <- (theta2^2)/(4 * theta1) } else { sigma <- NA epsilon <- NA } return(list(param=list(sigma=sigma, epsilon=epsilon), inames="interaction parameters", printable=round(c(sigma=sigma,epsilon=epsilon),4))) }, valid = 
function(coeffs, self) { p <- self$interpret(coeffs, self)$param return(all(is.finite(p) & (p > 0))) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { if(any(is.na(coeffs)) || epsilon == 0) return(Inf) sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 theta1 <- abs(coeffs[1]) theta2 <- abs(coeffs[2]) return(sig0 * max((theta1/epsilon)^(1/12), (theta2/epsilon)^(1/6))) }, version=NULL # filled in later ) class(BlankLJ) <- "interact" LennardJones <- function(sigma0=NA) { if(is.null(sigma0) || !is.finite(sigma0)) sigma0 <- NA instantiate.interact(BlankLJ, list(sigma0=sigma0)) } LennardJones }) spatstat/R/rPerfect.R0000755000176000001440000001236112237642727014265 0ustar ripleyusers# # Perfect Simulation # # $Revision: 1.14 $ $Date: 2012/10/13 05:49:28 $ # # rStrauss # rHardcore # rStraussHard # rDiggleGratton # rDGS rStrauss <- function(beta, gamma=1, R=0, W=owin()) { if(!missing(W)) { verifyclass(W, "owin") if(W$type != "rectangle") stop("W must be a rectangle") } check.1.real(beta) check.1.real(gamma) check.1.real(R) check.finite(beta) check.finite(gamma) check.finite(R) stopifnot(beta > 0) stopifnot(gamma >= 0) stopifnot(gamma <= 1) stopifnot(R >= 0) nothing <- runif(1) xrange <- W$xrange yrange <- W$yrange storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectStrauss", beta, gamma, R, xrange, yrange) # PACKAGE="spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] times <- c(start=z[[4]], end=z[[5]]) if(nout<0) stop("internal error: copying failed in PerfectStrauss") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=W, check=FALSE) attr(P, "times") <- times return(P) } # Perfect Simulation of Hardcore process rHardcore <- function(beta, R=0, W=owin()) { if(!missing(W)) { verifyclass(W, "owin") if(W$type != "rectangle") stop("W must be a rectangle") } 
check.1.real(beta) check.1.real(R) check.finite(beta) check.finite(R) stopifnot(beta > 0) stopifnot(R >= 0) nothing <- runif(1) xrange <- W$xrange yrange <- W$yrange storage.mode(beta) <- storage.mode(R) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectHardcore", beta, R, xrange, yrange) # PACKAGE="spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectHardcore") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=W, check=FALSE) return(P) } # # Perfect simulation of hybrid Strauss-Hardcore # provided gamma <= 1 # rStraussHard <- function(beta, gamma=1, R=0, H=0, W=owin()) { if(!missing(W)) { verifyclass(W, "owin") if(W$type != "rectangle") stop("W must be a rectangle") } check.1.real(beta) check.1.real(gamma) check.1.real(R) check.1.real(H) check.finite(beta) check.finite(gamma) check.finite(R) check.finite(H) stopifnot(beta > 0) stopifnot(gamma >= 0) if(gamma > 1) stop("Sorry, perfect simulation is only implemented for gamma <= 1") stopifnot(R >= 0) stopifnot(H >= 0) stopifnot(H <= R) nothing <- runif(1) xrange <- W$xrange yrange <- W$yrange storage.mode(beta) <- storage.mode(gamma) <- storage.mode(R) <- storage.mode(H) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectStraussHard", beta, gamma, R, H, xrange, yrange) # PACKAGE="spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectStraussHard") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=W, check=FALSE) return(P) } # # Perfect Simulation of Diggle-Gratton process # rDiggleGratton <- function(beta, delta, rho, kappa=1, W=owin()) { if(!missing(W)) { verifyclass(W, "owin") if(W$type != "rectangle") stop("W must be a rectangle") } check.1.real(beta) check.1.real(delta) check.1.real(rho) check.1.real(kappa) check.finite(beta) check.finite(delta) check.finite(rho) check.finite(kappa) stopifnot(beta > 0) stopifnot(delta >= 
0) stopifnot(rho >= 0) stopifnot(delta <= rho) stopifnot(kappa >= 0) nothing <- runif(1) xrange <- W$xrange yrange <- W$yrange storage.mode(beta) <- "double" storage.mode(delta) <- storage.mode(rho) <- storage.mode(kappa) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectDiggleGratton", beta, delta, rho, kappa, xrange, yrange) # PACKAGE="spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectDiggleGratton") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=W, check=FALSE) return(P) } # # Perfect Simulation of Diggle-Gates-Stibbard process # rDGS <- function(beta, rho, W=owin()) { if(!missing(W)) { verifyclass(W, "owin") if(W$type != "rectangle") stop("W must be a rectangle") } check.1.real(beta) check.1.real(rho) check.finite(beta) check.finite(rho) stopifnot(beta > 0) stopifnot(rho >= 0) nothing <- runif(1) xrange <- W$xrange yrange <- W$yrange storage.mode(beta) <- "double" storage.mode(rho) <- "double" storage.mode(xrange) <- storage.mode(yrange) <- "double" z <- .Call("PerfectDGS", beta, rho, xrange, yrange) # PACKAGE="spatstat") X <- z[[1]] Y <- z[[2]] nout <- z[[3]] if(nout<0) stop("internal error: copying failed in PerfectDGS") seqn <- seq_len(nout) P <- ppp(X[seqn], Y[seqn], window=W, check=FALSE) return(P) } spatstat/R/harmonic.R0000755000176000001440000000327612237642727014320 0ustar ripleyusers# # # harmonic.R # # $Revision: 1.2 $ $Date: 2004/01/07 08:57:39 $ # # harmonic() # Analogue of polynom() for harmonic functions only # # ------------------------------------------------------------------- # harmonic <- function(x,y,n) { if(missing(n)) stop("the order n must be specified") n <- as.integer(n) if(is.na(n) || n <= 0) stop("n must be a positive integer") if(n > 3) stop("Sorry, harmonic() is not implemented for degree > 3") namex <- deparse(substitute(x)) namey <- deparse(substitute(y)) if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") 
if(!is.name(substitute(y))) namey <- paste("(", namey, ")", sep="") switch(n, { result <- cbind(x, y) names <- c(namex, namey) }, { result <- cbind(x, y, x*y, x^2-y^2) names <- c(namex, namey, paste("(", namex, ".", namey, ")", sep=""), paste("(", namex, "^2-", namey, "^2)", sep="")) }, { result <- cbind(x, y, x * y, x^2-y^2, x^3 - 3 * x * y^2, y^3 - 3 * x^2 * y) names <- c(namex, namey, paste("(", namex, ".", namey, ")", sep=""), paste("(", namex, "^2-", namey, "^2)", sep=""), paste("(", namex, "^3-3", namex, ".", namey, "^2)", sep=""), paste("(", namey, "^3-3", namex, "^2.", namey, ")", sep="") ) } ) dimnames(result) <- list(NULL, names) return(result) } spatstat/R/rshift.R0000755000176000001440000001135112237642727014010 0ustar ripleyusers# # rshift.R # # random shift with optional toroidal boundary # # $Revision: 1.16 $ $Date: 2013/05/01 08:00:33 $ # # rshift <- function(X, ...) { UseMethod("rshift") } rshift.splitppp <- function(X, ..., which=seq_along(X)) { verifyclass(X, "splitppp") if("group" %in% names(list(...))) stop(paste("argument", sQuote("group"), "not implemented for splitppp objects")) if(is.null(which)) { iwhich <- which <- seq_along(X) } else { id <- seq_along(X) names(id) <- names(X) iwhich <- id[which] if(length(iwhich) == 0) stop(paste("Argument", sQuote("which"), "did not match any marks")) } # validate arguments and determine common clipping window arglist <- handle.rshift.args(X[[1]]$window, ..., edgedefault="torus") if(!is.null(clip <- arglist$clip)) { # clip the patterns that are not to be shifted if(length(iwhich) < length(X)) X[-iwhich] <- lapply(X[-iwhich], "[.ppp", i=clip) } # perform shift on selected patterns # (setting group = NULL ensures each pattern is not split further) shiftXsub <- do.call("lapply", append(list(X[iwhich], rshift.ppp, group=NULL), arglist)) # put back X[iwhich] <- shiftXsub return(X) } rshift.ppp <- function(X, ..., which=NULL, group) { verifyclass(X, "ppp") # validate arguments and determine common clipping 
window arglist <- handle.rshift.args(X$window, ..., edgedefault="torus") # default grouping # (NULL is not the default) # (NULL means all points shifted in parallel) if(missing(group)) group <- if(is.multitype(X)) marks(X) else NULL # if no grouping, use of `which' is undefined if(is.null(group) && !is.null(which)) stop(paste("Cannot apply argument", sQuote("which"), "; no grouping defined")) # if grouping, use split if(!is.null(group)) { Y <- split(X, group) split(X, group) <- do.call("rshift.splitppp", append(list(Y, which=which), arglist)) return(X) } # ungrouped point pattern # shift all points in parallel # recover arguments radius <- arglist$radius width <- arglist$width height <- arglist$height edge <- arglist$edge clip <- arglist$clip W <- X$window W <- rescue.rectangle(W) if(W$type != "rectangle" && edge=="torus") stop("Torus (periodic) boundary is only meaningful for rectangular windows") # generate random translation vector if(!is.null(radius)) jump <- runifdisc(1, radius=radius) else { jump <- list(x=runif(1, min=0, max=width), y=runif(1, min=0, max=height)) } # translate points x <- X$x + jump$x y <- X$y + jump$y # wrap points if(edge == "torus") { xr <- W$xrange yr <- W$yrange Wide <- diff(xr) High <- diff(yr) x <- xr[1] + (x - xr[1]) %% Wide y <- yr[1] + (y - yr[1]) %% High } # put back into point pattern X$x <- x X$y <- y # clip to window if(!is.null(clip)) X <- X[clip] return(X) } handle.rshift.args <- function(W, ..., radius=NULL, width=NULL, height=NULL, edge=NULL, clip=NULL, edgedefault) { verifyclass(W, "owin") W <- rescue.rectangle(W) if(length(aargh <- list(...)) > 0) stop(paste("Unrecognised arguments:", paste(names(aargh), collapse=","))) if(!is.null(radius)) { # radial generator if(!(is.null(width) && is.null(height))) stop(paste(sQuote("radius"), "is incompatible with", sQuote("width"), "and", sQuote("height"))) } else { # rectangular generator if(is.null(width) != is.null(height)) stop("Must specify both width and height, if one is 
specified") if(is.null(width)) width <- diff(W$xrange) if(is.null(height)) height <- diff(W$yrange) } if(is.null(edge)) edge <- edgedefault else if(!(edge %in% c("torus", "erode", "none"))) stop(paste("Unrecognised option erode=", sQuote(edge))) # determine whether clipping window is needed if(is.null(clip)) clip <- switch(edge, torus= NULL, none= W, erode={ if(!is.null(radius)) erosion.owin(W, radius) else if(W$type == "rectangle") trim.rectangle(W, width, height) else erosion.owin(W, max(width, height)) }) return(list(radius=radius, width=width, height=height, edge=edge, clip=clip)) } rtoro <- function(X, which=NULL, radius=NULL, width=NULL, height=NULL) { .Deprecated("rshift", package="spatstat") rshift(X, which=which, radius=radius, width=width, height=height) } spatstat/R/replace.ppp.R0000755000176000001440000000355112237642727014725 0ustar ripleyusers# # replace.ppp.R # "[<-.ppp" <- function(x, i, j, value) { verifyclass(x, "ppp") verifyclass(value, "ppp") if(missing(i) && missing(j)) return(value) if(missing(i)) { message("The use of argument j in [<-.ppp is deprecated; use argument i") # invoke code below x[j] <- value return(x) } xmf <- markformat(x) vmf <- markformat(value) if(xmf != vmf) { if(xmf == "none") stop("Replacement points are marked, but x is not marked") else if(vmf == "none") stop("Replacement points have no marks, but x is marked") else stop("Format of marks in replacement is incompatible with original") } if(inherits(i, "owin")) { win <- i vok <- inside.owin(value$x, value$y, win) if(!all(vok)) { warning("Replacement points outside the specified window were deleted") value <- value[vok] } # convert to vector index i <- inside.owin(x$x, x$y, win) } if(!is.vector(i)) stop("Unrecognised format for subset index i") # vector index # determine index subset n <- x$n SUB <- seq_len(n)[i] # anything to replace? 
if(length(SUB) == 0) return(x) # sanity checks if(any(is.na(SUB))) stop("Invalid subset: the resulting subscripts include NAs") # exact replacement of this subset? if(value$n == length(SUB)) { x$x[SUB] <- value$x x$y[SUB] <- value$y switch(xmf, none={}, list=, vector={ x$marks[SUB] <- value$marks }, dataframe={ x$marks[SUB,] <- value$marks }) } else x <- superimpose(x[-SUB], value, W=x$window) if(!missing(j)) { warning("The use of argument j in [<-.ppp is deprecated; use argument i") # invoke code above x[j] <- value } return(x) } spatstat/R/nnfunlpp.R0000644000176000001440000000125412237642727014347 0ustar ripleyusers# # nnfunlpp.R # # method for 'nnfun' for class 'lpp' # # $Revision: 1.2 $ $Date: 2012/10/18 10:55:16 $ # nnfun.lpp <- local({ nnfun.lpp <- function(X, ...) { stopifnot(inherits(X, "lpp")) force(X) L <- as.linnet(X) f <- function(x, y=NULL, seg=NULL, tp=NULL, ...) { # L is part of the environment Y <- as.lpp(x=x, y=y, seg=seg, tp=tp, L=L) i <- nncross.lpp(Y, X, what="which") return(i) } f <- linfun(f, L) attr(f, "explain") <- uitleggen return(f) } uitleggen <- function(x, ...) 
{ cat("Nearest neighbour function for lpp object\n") X <- get("X", envir=environment(x)) print(X) } nnfun.lpp }) spatstat/R/pairsat.family.R0000755000176000001440000002046712237642727015444 0ustar ripleyusers# # # pairsat.family.S # # $Revision: 1.42 $ $Date: 2013/09/26 03:47:38 $ # # The saturated pairwise interaction family of point process models # # (an extension of Geyer's saturation process to all pairwise interactions) # # pairsat.family: object of class 'isf' # defining saturated pairwise interaction # # # ------------------------------------------------------------------- # pairsat.family <- list( name = "saturated pairwise", print = function(self) { cat("Saturated pairwise interaction family\n") }, eval = function(X,U,EqualPairs,pairpot,potpars,correction, ..., Reach=NULL, precomputed=NULL, savecomputed=FALSE, halfway=FALSE) { # # This is the eval function for the `pairsat' family. # # This internal function is not meant to be called by the user. # It is called by mpl.prepare() during execution of ppm(). # # The eval functions perform all the manipulations that are common to # a given class of interactions. # # For the `pairsat' family of pairwise-interaction processes, # this eval function computes the distances between points, # invokes 'pairpot' to evaluate the potential between each pair of points, # applies edge corrections, and then sums the pair potential terms # applying the saturation threshold. # # ARGUMENTS: # All 'eval' functions have the following arguments # which are called in sequence (without formal names) # by mpl.prepare(): # # X data point pattern 'ppp' object # U points at which to evaluate potential list(x,y) suffices # EqualPairs two-column matrix of indices i, j such that X[i] == U[j] # (or NULL, meaning all comparisons are FALSE) # pot potential function # potpars auxiliary parameters for pot list(......) 
# correction edge correction type (string) # # VALUE: # All `eval' functions must return a # matrix of values of the total potential # induced by the pattern X at each location given in U. # The rows of this matrix correspond to the rows of U (the sample points); # the k columns are the coordinates of the k-dimensional potential. # ######################################################################## # # POTENTIAL: # The pair potential function 'pairpot' will be called as # pairpot(M, potpars) where M is a matrix of interpoint distances. # It must return a matrix with the same dimensions as M # or an array with its first two dimensions the same as the dimensions of M. # # NOTE: # Note the Geyer saturation threshold must be given in 'potpars$sat' ########################################################################## # coercion should be unnecessary, but this is useful for debugging X <- as.ppp(X) U <- as.ppp(U, X$window) # i.e. X$window is DEFAULT window # saturation parameter(s) saturate <- potpars$sat # interaction distance of corresponding pairwise interaction PairReach <- if(!is.null(Reach) && is.finite(Reach)) Reach/2 else NULL if(is.null(saturate)) { # pairwise interaction V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach, precomputed=precomputed, savecomputed=savecomputed) return(V) } # first ensure all data points are included in the quadrature points nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points originalrows <- seq_len(nU) nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=FALSE) # correspondingly augment the list of equal pairs newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, 
cbind(newXindex, newUindex)) nU <- nU + nmiss } # compute the pair potentials POT and the unsaturated potential sums V V <- pairwise.family$eval(X, U, EqualPairs, pairpot, potpars, correction, ..., Reach=PairReach) POT <- attr(V, "POT") computed <- attr(V, "computed") # could be NULL # # V is a matrix with rows = quadrature points, # columns = coordinates of potential # POT is an array with rows = data points # columns = quadrature points # planes = coordinates of potential ################################################################# ################## saturation part ############################## ################################################################# # check dimensions and ensure 'saturate' is a vector ns <- length(saturate) np <- ncol(V) if(ns == 1 && np > 1) saturate <- rep.int(saturate, np) else if(ns != np) stop("Length of vector of saturation parameters is incompatible with the pair potential", call.=FALSE) # replicate as a matrix and as an array saturate2 <- array(saturate[slice.index(V, 2)], dim=dim(V)) saturate3 <- array(saturate[slice.index(POT, 3)], dim=dim(POT)) # # (a) compute SATURATED potential sums V.sat <- pmin.int(V, saturate2) if(halfway) return(V.sat) # # (b) compute effect of addition/deletion of dummy/data point j # on the UNSATURATED potential sum of each data point i # # Identify data points is.data <- seq_len(npoints(U)) %in% EqualPairs[,2] # logical vector corresp. 
to rows of V # Extract potential sums for data points only V.data <- V[is.data, , drop=FALSE] # replicate them so that V.dat.rep[i,j,k] = V.data[i, k] V.dat.rep <- aperm(array(V.data, dim=c(dim(V.data), U$n)), c(1,3,2)) # make a logical array col.is.data[i,j,k] = is.data[j] col.is.data <- array(is.data[slice.index(POT, 2)], dim=dim(POT)) # compute value of unsaturated potential sum for each data point i # obtained after addition/deletion of each dummy/data point j V.after <- V.dat.rep + ifelseNegPos(col.is.data, POT) # The call to ifelseNegPos() is equivalent to # ifelse(col.is.data, -POT, POT) # # # (c) difference of SATURATED potential sums for each data point i # before & after increment/decrement of each dummy/data point j # # saturated values after increment/decrement V.after.sat <- array(pmin.int(saturate3, V.after), dim=dim(V.after)) # saturated values before V.dat.rep.sat <- array(pmin.int(saturate3, V.dat.rep), dim=dim(V.dat.rep)) # difference V.delta <- V.after.sat - V.dat.rep.sat V.delta <- ifelseNegPos(col.is.data, V.delta) # # (d) Sum (c) over all data points i V.delta.sum <- apply(V.delta, c(2,3), sum) # # (e) Result V <- V.sat + V.delta.sum ########################################## # remove rows corresponding to supplementary points if(somemissing) V <- V[originalrows, , drop=FALSE] ### tack on the saved computations from pairwise.family$eval if(savecomputed) attr(V, "computed") <- computed return(V) }, ######### end of function $eval suffstat = function(model, X=NULL, callstring="pairsat.family$suffstat") { # for saturated pairwise models only (possibly nonstationary) verifyclass(model, "ppm") if(!identical(model$interaction$family$name,"saturated pairwise")) stop("Model is not a saturated pairwise interaction process") if(is.null(X)) { X <- data.ppm(model) modelX <- model } else { verifyclass(X, "ppp") modelX <- update(model, X, method="mpl") } # find data points which do not contribute to pseudolikelihood mplsubset <- 
getglmdata(modelX)$.mpl.SUBSET mpldata <- is.data(quad.ppm(modelX)) contribute <- mplsubset[mpldata] Empty <- X[integer(0)] mom <- partialModelMatrix(X, Empty, model, "suffstat", halfway=TRUE) # halfway=TRUE is passed to pairsat.family$eval # and yields matrix of saturated potential sums # take only those terms that contribute to the pseudolikelihood mom <- mom[contribute, , drop=FALSE] result <- apply(mom, 2, sum) return(result) } ######### end of function $suffstat ) ######### end of list class(pairsat.family) <- "isf" spatstat/R/compareFit.R0000755000176000001440000000470112237642727014603 0ustar ripleyusers# # compareFit.R # # $Revision: 1.1 $ $Date: 2011/06/19 06:08:20 $ compareFit <- function(object, Fun, r=NULL, breaks=NULL, ..., trend=~1, interaction=Poisson(), rbord=NULL, modelnames=NULL, same=NULL, different=NULL) { dotargs <- list(...) h <- hyperframe(obj=object, tren=trend, inte=interaction) N <- nrow(h) if(N == 0) stop("No objects specified") # determine rbord for summary statistics if(is.null(rbord) && !is.null(interaction)) rbord <- max(with(h, reach(inte))) h$rbord <- rbord # try to get nice model names if(is.null(modelnames)) { if(inherits(trend, "formula") && is.interact(interaction) && inherits(object, "listof") && all(nzchar(names(object))) && length(names(object)) == nrow(h)) modelnames <- names(object) else if(inherits(trend, "listof") && all(nzchar(names(trend))) && length(names(trend)) == nrow(h)) modelnames <- names(trend) else if(inherits(interaction, "listof") && all(nzchar(names(interaction))) && length(names(interaction)) == nrow(h)) modelnames <- names(interaction) else modelnames <- row.names(h) } row.names(h) <- make.names(modelnames) # fix a common vector of r values if(is.null(r)) { # compute first function fun1 <- with(h[1,,drop=FALSE], do.call(Fun, append(list(object=obj, trend=tren, interaction=inte, rbord=rbord, r=NULL, breaks=breaks), dotargs))) # extract r values r <- with(fun1, .x) } # compute the subsequent functions if(N 
== 1) funs2toN <- NULL else funs2toN <- with(h[-1, , drop=FALSE], do.call(Fun, append(list(object=obj, trend=tren, interaction=inte, rbord=rbord, r=r), dotargs))) if(N == 2) funs2toN <- list(funs2toN) # collect all functions in a list funs <- as.listof(append(list(fun1), funs2toN)) names(funs) <- row.names(h) # collapse together out <- collapse.fv(funs, same=same, different=different) return(out) } spatstat/R/distbdry.R0000755000176000001440000001440712237642727014342 0ustar ripleyusers# # distbdry.S Distance to boundary # # $Revision: 4.36 $ $Date: 2013/04/25 06:37:43 $ # # -------- functions ---------------------------------------- # # bdist.points() # compute vector of distances # from each point of point pattern # to boundary of window # # bdist.pixels() # compute matrix of distances from each pixel # to boundary of window # # erodemask() erode the window mask by a distance r # [yields a new window] # # # "bdist.points"<- function(X) { verifyclass(X, "ppp") if(X$n == 0) return(numeric(0)) x <- X$x y <- X$y window <- X$window switch(window$type, rectangle = { xmin <- min(window$xrange) xmax <- max(window$xrange) ymin <- min(window$yrange) ymax <- max(window$yrange) result <- pmin.int(x - xmin, xmax - x, y - ymin, ymax - y) }, polygonal = { xy <- cbind(x,y) result <- rep.int(Inf, X$n) bdry <- window$bdry for(i in seq_along(bdry)) { polly <- bdry[[i]] px <- polly$x py <- polly$y nsegs <- length(px) for(j in seq_len(nsegs)) { j1 <- if(j < nsegs) j + 1 else 1 seg <- c(px[j], py[j], px[j1], py[j1]) result <- pmin.int(result, distppl(xy, seg)) } } }, mask = { b <- bdist.pixels(window, style="matrix") loc <- nearest.raster.point(x,y,window) result <- b[cbind(loc$row, loc$col)] }, stop("Unrecognised window type", window$type) ) return(result) } "bdist.pixels" <- function(w, ..., style="image") { verifyclass(w, "owin") masque <- as.mask(w, ...) 
switch(w$type, mask = { neg <- complement.owin(masque) m <- exactPdt(neg) b <- pmin.int(m$d,m$b) }, rectangle = { x <- raster.x(masque) y <- raster.y(masque) xmin <- w$xrange[1] xmax <- w$xrange[2] ymin <- w$yrange[1] ymax <- w$yrange[2] b <- pmin.int(x - xmin, xmax - x, y - ymin, ymax - y) }, polygonal = { # set up pixel raster x <- as.vector(raster.x(masque)) y <- as.vector(raster.y(masque)) b <- numeric(length(x)) # test each pixel in/out, analytically inside <- inside.owin(x, y, w) # compute distances for these pixels xy <- cbind(x[inside], y[inside]) dxy <- rep.int(Inf, sum(inside)) bdry <- w$bdry for(i in seq_along(bdry)) { polly <- bdry[[i]] nsegs <- length(polly$x) for(j in 1:nsegs) { j1 <- if(j < nsegs) j + 1 else 1 seg <- c(polly$x[j], polly$y[j], polly$x[j1], polly$y[j1]) dxy <- pmin.int(dxy, distppl(xy, seg)) } } b[inside] <- dxy }, stop("unrecognised window type", w$type) ) # reshape it b <- matrix(b, nrow=masque$dim[1], ncol=masque$dim[2]) switch(style, coords={ # format which can be plotted by image(), persp() etc return(list(x=masque$xcol, y=masque$yrow, z=t(b))) }, matrix={ # return matrix (for internal use by package) return(b) }, image={ bim <- im(b, xcol=masque$xcol, yrow=masque$yrow, unitname=unitname(masque)) return(bim) }, stop(paste("Unrecognised option for style:", style))) } erodemask <- function(w, r, strict=FALSE) { # erode a binary image mask without changing any other entries verifyclass(w, "owin") if(w$type != "mask") stop(paste("window w is not of type", sQuote("mask"))) if(!is.numeric(r) || length(r) != 1) stop("r must be a single number") if(r < 0) stop("r must be nonnegative") bb <- bdist.pixels(w, style="matrix") if(r > max(bb)) warning("eroded mask is empty") if(identical(strict, TRUE)) w$m <- (bb > r) else w$m <- (bb >= r) return(w) } rebound.owin <- function(x, rect) { w <- x verifyclass(rect, "owin") if(is.empty(w)) return(emptywindow(rect)) verifyclass(w, "owin") if(!is.subset.owin(as.rectangle(w), rect)) { bb <- 
bounding.box(w) if(!is.subset.owin(bb, rect)) stop(paste("The new rectangle", sQuote("rect"), "does not contain the window", sQuote("win"))) } xr <- rect$xrange yr <- rect$yrange switch(w$type, rectangle={ return(owin(xr, yr, poly=list(x=w$xrange[c(1,2,2,1)], y=w$yrange[c(1,1,2,2)]))) }, polygonal={ return(owin(xr, yr, poly=w$bdry, check=FALSE)) }, mask={ newseq <- function(oldseq, newrange, dstep) { oldends <- range(oldseq) nleft <- max(0, floor((oldends[1] - newrange[1])/dstep)) nright <- max(0, floor((newrange[2] - oldends[2])/dstep)) newstart <- max(oldends[1] - nleft * dstep, newrange[1]) newend <- min(oldends[2] + nright * dstep, newrange[2]) seq(from=newstart, by=dstep, to=newend) } xcol <- newseq(w$xcol, xr, mean(diff(w$xcol))) yrow <- newseq(w$yrow, yr, mean(diff(w$yrow))) newmask <- as.mask(xy=list(x=xcol, y=yrow)) xx <- raster.x(newmask) yy <- raster.y(newmask) newmask$m <- inside.owin(xx, yy, w) return(newmask) } ) } spatstat/R/pcfinhom.R0000755000176000001440000001326012251564230014301 0ustar ripleyusers# # pcfinhom.R # # $Revision: 1.13 $ $Date: 2013/12/10 10:05:15 $ # # inhomogeneous pair correlation function of point pattern # # pcfinhom <- function(X, lambda=NULL, ..., r=NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor=c("r","d"), renormalise=TRUE, normpower=1, reciplambda=NULL, sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") r.override <- !is.null(r) win <- X$window area <- area.owin(win) npts <- npoints(X) correction.given <- !missing(correction) correction <- pickoption("correction", correction, c(isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) correction <- implemented.for.K(correction, win$type, correction.given) divisor <- match.arg(divisor) if(is.null(bw) && kernel=="epanechnikov") { # Stoyan & Stoyan 1995, eq (15.16), page 285 h <- stoyan /sqrt(npts/area) hmax <- h # conversion to standard deviation bw <- 
h/sqrt(5) } else if(is.numeric(bw)) { # standard deviation of kernel specified # upper bound on half-width hmax <- 3 * bw } else { # data-dependent bandwidth selection: guess upper bound on half-width hmax <- 2 * stoyan /sqrt(npts/area) } ########## intensity values ######################### if(missing(lambda) && is.null(reciplambda)) { # No intensity data provided # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- as.numeric(lambda) reciplambda <- 1/lambda } else if(!is.null(reciplambda)) { # 1/lambda values provided if(is.im(reciplambda)) reciplambda <- safelookup(reciplambda, X) else if(is.function(reciplambda)) reciplambda <- reciplambda(X$x, X$y) else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda))) check.nvector(reciplambda, npts) else stop(paste(sQuote("reciplambda"), "should be a vector, a pixel image, or a function")) } else { # lambda values provided if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.ppm(lambda)) lambda <- predict(lambda, locations=X, type="trend") else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npts) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # evaluate reciprocal reciplambda <- 1/lambda } # renormalise if(renormalise) { check.1.real(normpower) stopifnot(normpower %in% 1:2) renorm.factor <- (area/sum(reciplambda))^normpower } ########## r values ############################ # handle arguments r and breaks rmaxdefault <- rmax.rule("K", win, lambda) breaks <- handle.r.b.args(r, NULL, win, rmaxdefault=rmaxdefault) if(!(breaks$even)) stop("r values must be evenly spaced") # extract r values r <- breaks$r rmax <- breaks$max # recommended range of r values for plotting alim <- c(0, min(rmax, rmaxdefault)) ########## smoothing parameters for pcf ############################ # arguments for 
'density' denargs <- resolve.defaults(list(kernel=kernel, bw=bw), list(...), list(n=length(r), from=0, to=rmax)) ################################################# # compute pairwise distances close <- closepairs(X, rmax+hmax) dIJ <- close$d I <- close$i J <- close$j XI <- ppp(close$xi, close$yi, window=win, check=FALSE) wIJ <- reciplambda[I] * reciplambda[J] # initialise fv object df <- data.frame(r=r, theo=rep.int(1,length(r))) out <- fv(df, "r", substitute(g[inhom](r), NULL), "theo", , alim, c("r","{%s^{Pois}}(r)"), c("distance argument r", "theoretical Poisson %s"), fname="g[inhom]") ###### compute ####### if(any(correction=="translate")) { # translation correction XJ <- ppp(close$xj, close$yj, window=win, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) gT <- sewpcf(dIJ, edgewt * wIJ, denargs, area, divisor)$g if(renormalise) gT <- gT * renorm.factor out <- bind.fv(out, data.frame(trans=gT), "hat(%s^{Trans})(r)", "translation-corrected estimate of %s", "trans") } if(any(correction=="isotropic")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) gR <- sewpcf(dIJ, edgewt * wIJ, denargs, area, divisor)$g if(renormalise) gR <- gR * renorm.factor out <- bind.fv(out, data.frame(iso=gR), "hat(%s^{Ripley})(r)", "isotropic-corrected estimate of %s", "iso") } # sanity check if(is.null(out)) { warning("Nothing computed - no edge corrections chosen") return(NULL) } # which corrections have been computed? 
nama2 <- names(out) corrxns <- rev(nama2[nama2 != "r"]) # default is to display them all formula(out) <- deparse(as.formula(paste( "cbind(", paste(corrxns, collapse=","), ") ~ r"))) unitname(out) <- unitname(X) return(out) } spatstat/R/Kinhom.R0000755000176000001440000002677212237642727013753 0ustar ripleyusers# # Kinhom.S Estimation of K function for inhomogeneous patterns # # $Revision: 1.63 $ $Date: 2013/02/07 09:58:14 $ # # Kinhom() compute estimate of K_inhom # # # Reference: # Non- and semiparametric estimation of interaction # in inhomogeneous point patterns # A.Baddeley, J.Moller, R.Waagepetersen # Statistica Neerlandica 54 (2000) 329--350. # # -------- functions ---------------------------------------- # Kinhom() compute estimate of K # using various edge corrections # # Kwtsum() internal routine for border correction # # -------- standard arguments ------------------------------ # X point pattern (of class 'ppp') # # r distance values at which to compute K # # lambda vector of intensity values for points of X # # -------- standard output ------------------------------ # A data frame (class "fv") with columns named # # r: same as input # # trans: K function estimated by translation correction # # iso: K function estimated by Ripley isotropic correction # # theo: K function for Poisson ( = pi * r ^2 ) # # border: K function estimated by border method # (denominator = sum of weights of points) # # bord.modif: K function estimated by border method # (denominator = area of eroded window) # # ------------------------------------------------------------------------ "Linhom" <- function(...) { K <- Kinhom(...) 
L <- eval.fv(sqrt(pmax.int(K,0)/pi)) # relabel the fv object L <- rebadge.fv(L, quote(Linhom(r)), "Linhom", names(K), new.labl=attr(K, "labl")) # return(L) } "Kinhom"<- function (X, lambda=NULL, ..., r = NULL, breaks = NULL, correction=c("border", "bord.modif", "isotropic", "translate"), renormalise=TRUE, normpower=1, nlarge = 1000, lambda2=NULL, reciplambda=NULL, reciplambda2=NULL, sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") nlarge.given <- !missing(nlarge) rfixed <- !missing(r) || !missing(breaks) # determine basic parameters W <- X$window npts <- npoints(X) area <- area.owin(W) rmaxdefault <- rmax.rule("K", W, npts/area) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # match corrections correction.given <- !missing(correction) && !is.null(correction) correction <- pickoption("correction", correction, c(none="none", border="border", "bord.modif"="bord.modif", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=TRUE) best.wanted <- ("best" %in% correction) correction <- implemented.for.K(correction, W$type, correction.given) ########################################################### # DETERMINE WEIGHTS AND VALIDATE # # The matrix 'lambda2' or 'reciplambda2' is sufficient information # unless we want the border correction. 
lambda2.given <- !is.null(lambda2) || !is.null(reciplambda2) lambda2.suffices <- !any(correction %in% c("bord", "bord.modif")) # Use matrix of weights if it was provided and if it is sufficient if(lambda2.suffices && lambda2.given) { if(!is.null(reciplambda2)) check.nmatrix(reciplambda2, npts) else { check.nmatrix(lambda2, npts) reciplambda2 <- 1/lambda2 } # renormalise if(renormalise) { check.1.real(normpower) stopifnot(normpower %in% 1:2) renorm.factor <- (area^2/sum(reciplambda2))^(normpower/2) } } else { # Vector lambda or reciplambda is required if(missing(lambda) && is.null(reciplambda)) { # No intensity data provided # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- as.numeric(lambda) reciplambda <- 1/lambda } else if(!is.null(reciplambda)) { # 1/lambda values provided if(is.im(reciplambda)) reciplambda <- safelookup(reciplambda, X) else if(is.function(reciplambda)) reciplambda <- reciplambda(X$x, X$y) else if(is.numeric(reciplambda) && is.vector(as.numeric(reciplambda))) check.nvector(reciplambda, npts) else stop(paste(sQuote("reciplambda"), "should be a vector, a pixel image, or a function")) } else { # lambda values provided if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.ppm(lambda)) lambda <- predict(lambda, locations=X, type="trend") else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npts) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) # evaluate reciprocal reciplambda <- 1/lambda } # renormalise if(renormalise) { check.1.real(normpower) stopifnot(normpower %in% 1:2) renorm.factor <- (area/sum(reciplambda))^normpower } } # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) ########################################### # Efficient code for border method # Usable only if r values are evenly spaced from 0 
to rmax # Invoked automatically if number of points is large can.do.fast <- breaks$even && missing(lambda2) borderonly <- all(correction == "border" | correction == "bord.modif") large.n <- (npts >= nlarge) demand.best <- correction.given && best.wanted large.n.trigger <- large.n && !demand.best will.do.fast <- can.do.fast && (borderonly || large.n.trigger) asked <- borderonly || (nlarge.given && large.n.trigger) if(will.do.fast && !asked) message(paste("number of data points exceeds", nlarge, "- computing border estimate only")) if(asked && !can.do.fast) { whynot <- if(!(breaks$even)) "r values not evenly spaced" else if(!missing(lambda)) "matrix lambda2 was given" else NULL warning(paste(c("cannot use efficient code", whynot), sep="; ")) } if(will.do.fast) { # restrict r values to recommended range, unless specifically requested if(!rfixed) r <- seq(from=0, to=alim[2], length.out=length(r)) K <- Kborder.engine(X, max(r), length(r), correction, reciplambda) # tweak labels K <- rebadge.fv(K, substitute(K[inhom](r), NULL), "K[inhom]") K <- tweak.fv.entry(K, "theo", new.labl="{%s^{pois}}(r)") K <- tweak.fv.entry(K, "border", new.labl="hat(%s^{bord})(r)") K <- tweak.fv.entry(K, "bord.modif", new.labl="hat(%s^{bordm})(r)") return(K) } ########################################### # Slower code ########################################### # this will be the output data frame K <- data.frame(r=r, theo= pi * r^2) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", substitute(K[inhom](r), NULL), "theo", , alim, c("r","{%s^{pois}}(r)"), desc, fname="K[inhom]") # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax) dIJ <- close$d # compute weights for these pairs I <- close$i J <- close$j wI <- reciplambda[I] wIJ <- if(is.null(lambda2)) reciplambda[I] * reciplambda[J] else reciplambda2[cbind(I,J)] # XI <- X[I] # compute edge corrected estimates if(any(correction == "border" | correction == "bord.modif")) { # border method # Compute 
distances to boundary b <- bdist.points(X) I <- close$i bI <- b[I] # apply reduced sample algorithm RS <- Kwtsum(dIJ, bI, wIJ, b, w=reciplambda, breaks) if(any(correction == "border")) { Kb <- RS$ratio if(renormalise) Kb <- Kb * renorm.factor K <- bind.fv(K, data.frame(border=Kb), "hat(%s^{bord})(r)", "border-corrected estimate of %s", "border") } if(any(correction == "bord.modif")) { Kbm <- RS$numerator/eroded.areas(W, r) if(renormalise) Kbm <- Kbm * renorm.factor K <- bind.fv(K, data.frame(bord.modif=Kbm), "hat(%s^{bordm})(r)", "modified border-corrected estimate of %s", "bord.modif") } } if(any(correction == "translate")) { # translation correction XJ <- X[J] edgewt <- edge.Trans(XI, XJ, paired=TRUE) allweight <- edgewt * wIJ wh <- whist(dIJ, breaks$val, allweight) Ktrans <- cumsum(wh)/area if(renormalise) Ktrans <- Ktrans * renorm.factor rmax <- diameter(W)/2 Ktrans[r >= rmax] <- NA K <- bind.fv(K, data.frame(trans=Ktrans), "hat(%s^{trans})(r)", "translation-correction estimate of %s", "trans") } if(any(correction == "isotropic" | correction == "Ripley")) { # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(dIJ, ncol=1)) allweight <- edgewt * wIJ wh <- whist(dIJ, breaks$val, allweight) Kiso <- cumsum(wh)/area if(renormalise) Kiso <- Kiso * renorm.factor rmax <- diameter(W)/2 Kiso[r >= rmax] <- NA K <- bind.fv(K, data.frame(iso=Kiso), "hat(%s^{iso})(r)", "Ripley isotropic correction estimate of %s", "iso") } # default is to display them all formula(K) <- . 
~ r unitname(K) <- unitname(X) return(K) } Kwtsum <- function(dIJ, bI, wIJ, b, w, breaks) { # # "internal" routine to compute border-correction estimates of Kinhom # # dIJ: vector containing pairwise distances for selected I,J pairs # bI: corresponding vector of boundary distances for I # wIJ: product weight for selected I, J pairs # # b: vector of ALL distances to window boundary # w: weights for ALL points # # breaks : breakpts object # stopifnot(length(dIJ) == length(bI)) stopifnot(length(bI) == length(wIJ)) stopifnot(length(w) == length(b)) if(!is.finite(sum(w, wIJ))) stop("Weights in K-function were infinite or NA") # determine which distances d_{ij} were observed without censoring uncen <- (dIJ <= bI) # # histogram of noncensored distances nco <- whist(dIJ[uncen], breaks$val, wIJ[uncen]) # histogram of censoring times for noncensored distances ncc <- whist(bI[uncen], breaks$val, wIJ[uncen]) # histogram of censoring times (yes, this is a different total size) cen <- whist(b, breaks$val, w) # total weight of censoring times beyond rightmost breakpoint uppercen <- sum(w[b > max(breaks$val)]) # go RS <- reduced.sample(nco, cen, ncc, show=TRUE, uppercen=uppercen) # extract results numerator <- RS$numerator denominator <- RS$denominator ratio <- RS$numerator/RS$denominator # check if(length(numerator) != breaks$ncells) stop("internal error: length(numerator) != breaks$ncells") if(length(denominator) != breaks$ncells) stop("internal error: length(denom.count) != breaks$ncells") return(list(numerator=numerator, denominator=denominator, ratio=ratio)) } spatstat/R/saturated.R0000755000176000001440000000263712240721046014476 0ustar ripleyusers# # # saturated.S # # $Revision: 1.6 $ $Date: 2007/01/11 03:36:02 $ # # Saturated pairwise process with user-supplied potential # # Saturated() create a saturated pairwise process # [an object of class 'interact'] # with user-supplied potential # # # ------------------------------------------------------------------- # Saturated <- 
function(pot, name) { if(missing(name)) name <- "Saturated process with user-defined potential" fop <- names(formals(pot)) if(!identical(all.equal(fop, c("d", "par")), TRUE) && !identical(all.equal(fop, c("d", "tx", "tu", "par")), TRUE)) stop(paste("Formal arguments of pair potential function", sQuote("pot"), "must be either (d, par) or (d, tx, tu, par)")) out <- list( name = name, creator = "Saturated", family = pairsat.family, pot = pot, par = NULL, parnames = NULL, init = NULL, update = function(self, ...){ do.call(Saturated, resolve.defaults(list(...), list(pot=self$pot, name=self$name))) } , print = function(self) { cat("Potential function:\n") print(self$pot) invisible() }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } spatstat/R/psstA.R0000755000176000001440000001127612237642727013611 0ustar ripleyusers# # psstA.R # # Pseudoscore residual for unnormalised F (area-interaction) # # $Revision: 1.5 $ $Date: 2013/04/25 06:37:43 $ # ################################################################################ # psstA <- function(object, r=NULL, breaks=NULL, ..., trend=~1, interaction=Poisson(), rbord=reach(interaction), ppmcorrection="border", correction="all", truecoef=NULL, hi.res=NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) { if(inherits(object, "ppm")) fit <- object else if(inherits(object, "ppp") || inherits(object, "quad")) { # convert to quadscheme if(inherits(object, "ppp")) object <- quadscheme(object, ...) 
# fit model if(ppmcorrection == "border") fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord) else fit <- ppm(object, trend=trend, interaction=interaction, correction=ppmcorrection) } else stop("object should be a fitted point process model or a point pattern") rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) bX <- bdist.points(X) USEDX <- (bX > rbord) } else { USED <- rep.int(TRUE, U$n) USEDX <- rep.int(TRUE, X$n) } # basic statistics Win <- X$window npoints <- X$n area <- area.owin(Win) lambda <- npoints/area # determine breakpoints for r values rmaxdefault <- rmax.rule("F", Win, lambda) if(rfixed) breaks <- handle.r.b.args(r, breaks, Win, rmaxdefault=rmaxdefault) else { # create fairly coarse 'r' values r <- seq(0, rmaxdefault, length=nr) breaks <- breakpts.from.r(r) } rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw", drop=FALSE, coefs=truecoef, quad=hi.res) # rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[A](r), NULL), "theo", . 
~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[A]") # # for efficiency, compute the largest value of distance transform Dmax <- 0 for(i in 1:npoints) { Di <- distmap(X[-i]) Dimax <- summary(Di)$max Dmax <- max(Dmax, Dimax) } Rmax <- min(max(rvals), Dmax * 1.1) nontrivial <- (rvals <= Rmax) trivialzeroes <- numeric(sum(!nontrivial)) # pseudosum Ax <- areaLoss.grid(X, rvals[nontrivial], subset=USEDX, ngrid=ngrid) C1 <- apply(Ax, 2, sum) C1 <- c(C1, trivialzeroes) # pseudocompensator OK <- USED & !Z Au <- areaGain.grid(U[OK], X, rvals[nontrivial], W=Win, ngrid=ngrid) lamu <- matrix(wc[OK], nrow=nrow(Au), ncol=ncol(Au)) C2 <- apply(lamu * Au, 2, sum) C2 <- c(C2, trivialzeroes) # pseudoscore residual Ctot <- C1 - C2 # tack on ans <- bind.fv(ans, data.frame(dat=C1, com=C2, res=Ctot), c("Sigma~Delta~V[A](r)", "bold(C)~Delta~V[A](r)", "%s(r)"), c("data pseudosum (contribution to %s)", "model pseudocompensator (contribution to %s)", "pseudoscore residual %s"), "res") # # pseudovariance # (skipped if called by envelope() etc) # if(correction == "all") { lamX <- matrix(wc[USED & Z], nrow=nrow(Ax), ncol=ncol(Ax)) Var <- apply(lamu * Au^2, 2, sum) + apply(lamX * Ax^2, 2, sum) Var <- c(Var, trivialzeroes) # two-sigma limits TwoSig <- 2 * sqrt(Var) # tack on ans <- bind.fv(ans, data.frame(var=Var, up=TwoSig, lo=-TwoSig), c("bold(C)^2~Delta~V[A](r)", "%s[up](r)", "%s[lo](r)"), c("pseudovariance of %s", "upper 2sigma critical limit for %s", "lower 2sigma critical limit for %s"), "res") fvnames(ans, ".") <- c("res", "up", "lo", "theo") } unitname(ans) <- unitname(fit) # return(ans) } spatstat/R/linequad.R0000755000176000001440000000656312237642727014324 0ustar ripleyusers# # linequad.R # # $Revision: 1.8 $ $Date: 2013/09/16 09:32:42 $ # # create quadscheme for a pattern of points lying *on* line segments linequad <- function(X, Y, ..., eps=NULL, nd=1000) { if(is.lpp(X)) { # extract local coordinates from lpp object coo <- coords(X) mapXY <- coo$seg tp <- 
coo$tp Xproj <- as.ppp(X) if(!missing(Y)) warning("Argument Y ignored when X is an lpp object") Y <- as.psp(X) } else if(is.ppp(X)) { # project data points onto segments stopifnot(is.psp(Y)) v <- project2segment(X, Y) Xproj <- v$Xproj mapXY <- v$mapXY tp <- v$tp } else stop("X should be an object of class lpp or ppp") # handle multitype ismulti <- is.multitype(X) if(is.marked(X) && !ismulti) stop("Not implemented for marked patterns") if(ismulti) { marx <- marks(X) flev <- factor(levels(marx)) } # win <- as.owin(Y) len <- lengths.psp(Y) nseg <- length(len) if(is.null(eps)) { stopifnot(is.numeric(nd) && length(nd) == 1 & is.finite(nd) && nd > 0) eps <- sum(len)/nd } else stopifnot(is.numeric(eps) && length(eps) == 1 && is.finite(eps) && eps > 0) # initialise quad scheme dat <- dum <- ppp(numeric(0), numeric(0), window=win) wdat <- wdum <- numeric(0) if(ismulti) marks(dat) <- marks(dum) <- marx[integer(0)] # consider each segment in turn YY <- as.data.frame(Y) for(i in 1:nseg) { # divide segment into pieces of length eps # with shorter bits at each end leni <- len[i] nwhole <- floor(leni/eps) if(leni/eps - nwhole < 0.5 && nwhole > 2) nwhole <- nwhole - 1 rump <- (leni - nwhole * eps)/2 brks <- c(0, rump + (0:nwhole) * eps, leni) nbrks <- length(brks) # dummy points at middle of each piece sdum <- (brks[-1] + brks[-nbrks])/2 x <- with(YY, x0[i] + (sdum/leni) * (x1[i]-x0[i])) y <- with(YY, y0[i] + (sdum/leni) * (y1[i]-y0[i])) newdum <- list(x=x, y=y) ndum <- length(sdum) IDdum <- 1:ndum # relevant data points relevant <- (mapXY == i) newdat <- Xproj[relevant] sdat <- leni * tp[relevant] IDdat <- findInterval(sdat, brks, rightmost.closed=TRUE, all.inside=TRUE) # determine weights w <- countingweights(id=c(IDdum, IDdat), areas=diff(brks)) wnewdum <- w[1:ndum] wnewdat <- w[-(1:ndum)] # if(!ismulti) { # unmarked pattern dat <- superimpose(dat, newdat, W=win, check=FALSE) dum <- superimpose(dum, newdum, W=win, check=FALSE) wdat <- c(wdat, wnewdat) wdum <- c(wdum, wnewdum) } 
else { # marked point pattern # attach correct marks to data points marks(newdat) <- marx[relevant] dat <- superimpose(dat, newdat, W=win, check=FALSE) wdat <- c(wdat, wnewdat) newdum <- as.ppp(newdum, W=win, check=FALSE) # replicate dummy points with each mark # also add points at data locations with other marks for(k in seq_len(length(flev))) { le <- flev[k] avoid <- (marks(newdat) != le) dum <- superimpose(dum, newdum %mark% le, newdat[avoid] %mark% le, W=win, check=FALSE) wdum <- c(wdum, wnewdum, wnewdat[avoid]) } } } # make quad scheme Qout <- quad(dat, dum, c(wdat, wdum)) # silently attach lines attr(Qout, "lines") <- Y return(Qout) } spatstat/R/logistic.R0000644000176000001440000002766012237642727014335 0ustar ripleyusers# # logistic.R # # $Revision: 1.8 $ $Date: 2013/06/19 09:45:35 $ # # Logistic likelihood method - under development # logi.engine <- function(Q, trend = ~1, interaction, ..., covariates=NULL, correction="border", rbord=reach(interaction), covfunargs=list(), allcovar=FALSE, vnamebase=c("Interaction", "Interact."), vnameprefix=NULL, justQ = FALSE, savecomputed = FALSE, precomputed = NULL ){ if(is.null(trend)) trend <- ~1 if(is.null(interaction)) interaction <- Poisson() want.trend <- !identical.formulae(trend, ~1) want.inter <- !is.poisson(interaction) # validate choice of edge correction correction <- pickoption("correction", correction, c(border="border", periodic="periodic", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", none="none")) # rbord applies only to border correction if(correction == "border") { check.1.real(rbord, "In ppm") explain.ifnot(rbord >= 0, "In ppm") } else rbord <- 0 # backdoor stuff if(!missing(vnamebase)) { if(length(vnamebase) == 1) vnamebase <- rep.int(vnamebase, 2) if(!is.character(vnamebase) || length(vnamebase) != 2) stop("Internal error: illegal format of vnamebase") } if(!is.null(vnameprefix)) { if(!is.character(vnameprefix) || length(vnameprefix) != 
1) stop("Internal error: illegal format of vnameprefix") } # create dummy points if(inherits(Q, "ppp")){ Xplus <- Q Q <- quadscheme.logi(Xplus, ...) D <- Q$dummy Dinfo <- Q$param } else if(checkfields(Q, c("data", "dummy"))) { Xplus <- Q$data D <- Q$dummy Dinfo <- Q$param if(is.null(Dinfo)){ Dinfo <- list(how="given", rho=npoints(D)/(area.owin(D)*markspace.integral(D))) } } else stop("Format of object Q is not understood") if (justQ) return(Q) ### Dirty way of recording arguments so that the model can be refitted later (should probably be done using call, eval, envir, etc.): extraargs <- list(covfunargs = covfunargs, allcovar = allcovar, vnamebase = vnamebase, vnameprefix = vnameprefix) extraargs <- append(extraargs, list(...)) ## Dummy intensity if(correction == "border" && Dinfo$how=="grid"){ Dbord <- D[bdist.points(D)>=rbord] Dinfo$rho <- npoints(Dbord)/(eroded.areas(as.owin(Dbord), rbord)*markspace.integral(Dbord)) } rho <- Dinfo$rho ##Setting the B from Barker dynamics (relative to dummy intensity) B <- list(...)$Barker if(is.null(B)) B <- 1 B <- B*rho Dinfo <- append(Dinfo, list(B=B)) Dinfo <- append(Dinfo, list(extraargs=extraargs)) # Wplus <- as.owin(Xplus) nXplus <- npoints(Xplus) U <- superimpose(Xplus, D, W=Wplus, check=FALSE) # E <- equalpairs(U, Xplus, marked = is.marked(Xplus)) E <- cbind(1:nXplus, 1:nXplus) # computed <- if (savecomputed) list(X = Xplus, Q = Q, U = U) else list() # assemble covariate data frame if(want.trend) { tvars <- variablesinformula(trend) wantxy <- c("x", "y") %in% tvars wantxy <- wantxy | rep.int(allcovar, 2) cvdf <- data.frame(x=U$x, y=U$y)[, wantxy, drop=FALSE] if(!is.null(covariates)) { df <- mpl.get.covariates(covariates, U, "quadrature points", covfunargs) cvdf <- cbind(cvdf, df) } wantmarks <- "marks" %in% tvars if(wantmarks) cvdf <- cbind(cvdf, marks = marks(U)) } else cvdf <- NULL # evaluate interaction sufficient statistics if (!is.null(ss <- interaction$selfstart)) interaction <- ss(Xplus, interaction) V <- 
evalInteraction(Xplus, U, E, interaction, correction, precomputed = precomputed, savecomputed = savecomputed) if(!is.matrix(V)) stop("evalInteraction did not return a matrix") if (savecomputed) computed <- append(computed, attr(V, "computed")) IsOffset <- attr(V, "IsOffset") if(is.null(IsOffset)) IsOffset <- rep.int(FALSE, ncol(V)) # determine names if(ncol(V) > 0) { Vnames <- colnames(V) if(is.null(Vnames)) { nc <- ncol(V) Vnames <- if(nc == 1) vnamebase[1] else paste(vnamebase[2], 1:nc, sep="") colnames(V) <- Vnames } else if(!is.null(vnameprefix)) { Vnames <- paste(vnameprefix, Vnames, sep="") colnames(V) <- Vnames } } else Vnames <- character(0) # combine all data glmdata <- as.data.frame(V) if(!is.null(cvdf)) glmdata <- cbind(glmdata, cvdf) # construct response and weights ok <- if(correction == "border") (bdist.points(U) >= rbord) else rep.int(TRUE, npoints(U)) # Keep only those quadrature points for which the # conditional intensity is nonzero. KEEP <- if(ncol(V)>0) matrowall(V != -Inf) else rep.int(TRUE, npoints(U)) ok <- ok & KEEP wei <- c(rep.int(1,npoints(Xplus)),rep.int(B/rho,npoints(D))) resp <- c(rep.int(1,npoints(Xplus)),rep.int(0,npoints(D))) # add offset, subset and weights to data frame # using reserved names beginning with ".logi." 
glmdata <- cbind(glmdata, .logi.Y = resp, .logi.B = B, .logi.w = wei, .logi.ok =ok) # build glm formula # (reserved names begin with ".logi.") trendpart <- paste(as.character(trend), collapse=" ") fmla <- paste(".logi.Y ", trendpart) # Interaction terms if(want.inter) { VN <- Vnames # enclose offset potentials in 'offset(.)' if(any(IsOffset)) VN[IsOffset] <- paste("offset(", VN[IsOffset], ")", sep="") fmla <- paste(c(fmla, VN), collapse="+") } # add offset intrinsic to logistic technique fmla <- paste(fmla, "offset(-log(.logi.B))", sep="+") fmla <- as.formula(fmla) # to satisfy package checker: .logi.B <- B .logi.w <- wei .logi.ok <- ok .logi.Y <- resp # go fit <- glm(fmla, data=glmdata, family=binomial(), subset = .logi.ok, weights = .logi.w) ## Fitted coeffs co <- coef(fit) fitin <- fii(interaction, co, Vnames, IsOffset) ## Max. value of log-likelihood: maxlogpl <- logLik(fit) + sum(ok*resp*log(B)) # Stamp with spatstat version number spv <- package_version(versionstring.spatstat()) the.version <- list(major=spv$major, minor=spv$minor, release=spv$patchlevel, date="$Date: 2013/06/19 09:45:35 $") ## Compile results fit <- list(method = "logi", fitter = "glm", projected = FALSE, coef = co, trend = trend, interaction = interaction, Q = Q, correction = correction, rbord = rbord, version = the.version, fitin = fitin, maxlogpl = maxlogpl, internal = list(Vnames = Vnames, IsOffset=IsOffset, glmdata = glmdata, glmfit = fit, logistic = Dinfo, computed = computed) ) class(fit) <- "ppm" return(fit) } forbid.logi <- function(object) { if(object$method == "logi") stop("Sorry, this is not implemented for method=\'logi\'") return(invisible(NULL)) } logi.dummy <- function(X, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, ...){ ## Resolving nd inspired by default.n.tiling if(is.null(nd)){ nd <- spatstat.options("ndummy.min") if(inherits(X, "ppp")) nd <- pmax(nd, 10 * ceiling(2 * sqrt(X$n)/10)) } nd <- ensure2vector(nd) marx <- is.multitype(X) if(marx) lev <- 
levels(marks(X)) if(marx && mark.repeat){ N <- length(lev) Dlist <- inDlist <- vector("list", N) } else{ N <- 1 } W <- as.owin(X) type <- match.arg(dummytype, c("stratrand", "binomial", "poisson", "grid", "transgrid")) B <- bounding.box(W) rho <- nd[1]*nd[2]/area.owin(B) Dinfo <- list(nd=nd, rho=rho, how=type) ## Repeating dummy process for each mark type 1:N (only once if unmarked or mark.repeat = FALSE) for(i in 1:N){ switch(type, stratrand={ D <- as.ppp(stratrand(B, nd[1], nd[2]), W = B) inD <- which(inside.owin(D, w = W)) D <- D[W] inD <- paste(i,inD,sep="_") }, binomial={ D <- runifpoint(nd[1]*nd[2], win=B) D <- D[W] }, poisson={ D <- rpoispp(rho, win = W) }, grid={ D <- as.ppp(gridcenters(B, nd[1], nd[2]), W = B) inD <- which(inside.owin(D, w = W)) D <- D[W] inD <- paste(i,inD,sep="_") }, transgrid={ D <- as.ppp(gridcenters(B, nd[1], nd[2]), W = B) dxy <- c(diff(D$window$xrange),diff(D$window$yrange))/(2*nd) coords(D) <- coords(D)+matrix(runif(2,-dxy,dxy),npoints(D),2,byrow=TRUE) inD <- which(inside.owin(D, w = W)) D <- D[W] inD <- paste(i,inD,sep="_") }, stop("unknown dummy type")) if(marx && mark.repeat){ marks(D) <- factor(lev[i], levels = lev) Dlist[[i]] <- D if(type %in% c("stratrand","grid","transgrid")) inDlist[[i]] <- inD } } if(marx && mark.repeat){ inD <- Reduce(append, inDlist) D <- Reduce(superimpose, Dlist) } if(type %in% c("stratrand","grid","transgrid")) Dinfo <- append(Dinfo, list(inD=inD)) if(marx && !mark.repeat){ marks(D) <- sample(factor(lev, levels=lev), npoints(D), replace = TRUE) Dinfo$rho <- Dinfo$rho/length(lev) } attr(D, "dummy.parameters") <- Dinfo return(D) } quadscheme.logi <- function(data, dummy, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, ...){ data <- as.ppp(data) ## If dummy is missing we generate dummy pattern with logi.dummy. if(missing(dummy)) dummy <- logi.dummy(data, dummytype, nd, mark.repeat, ...) 
Dinfo <- attr(dummy, "dummy.parameters") D <- as.ppp(dummy) if(is.null(Dinfo)) Dinfo <- list(how="given", rho=npoints(D)/(area.owin(D)*markspace.integral(D))) Q <- quad(data, D, param=Dinfo) class(Q) <- c("logiquad", class(Q)) return(Q) } summary.logiquad <- function(object, ..., checkdup=FALSE) { verifyclass(object, "logiquad") s <- list( data = summary.ppp(object$data, checkdup=checkdup), dummy = summary.ppp(object$dummy, checkdup=checkdup), param = object$param) class(s) <- "summary.logiquad" return(s) } print.summary.logiquad <- function(x, ..., dp=3) { cat("Quadrature scheme = data + dummy\n") Dinfo <- x$param if(is.null(Dinfo)) cat("created by an unknown function.\n") cat("Data pattern:\n") print(x$data, dp=dp) cat("\n\nDummy pattern:\n") # How they were computed switch(Dinfo$how, stratrand={ cat(paste("(Stratified random dummy points,", paste(Dinfo$nd, collapse=" x "), "grid of cells)\n")) }, binomial={ cat("(Binomial dummy points)\n") }, poisson={ cat("(Poisson dummy points)\n") }, grid={ cat(paste("(Fixed grid of dummy points,", paste(Dinfo$nd, collapse=" x "), "grid)\n")) }, transgrid={ cat(paste("(Random translation of fixed grid of dummy points,", paste(Dinfo$nd, collapse=" x "), "grid)\n")) }, given=cat("(Dummy points given by user)\n") ) # Description of them print(x$dummy, dp=dp) return(invisible(NULL)) } spatstat/R/ordthresh.R0000755000176000001440000000332612237642727014516 0ustar ripleyusers# # # ordthresh.S # # $Revision: 1.10 $ $Date: 2012/01/17 01:19:48 $ # # Ord process with threshold potential # # OrdThresh() create an instance of the Ord process # [an object of class 'interact'] # with threshold potential # # # ------------------------------------------------------------------- # OrdThresh <- function(r) { out <- list( name = "Ord process with threshold potential", creator = "OrdThresh", family = ord.family, pot = function(d, par) { (d <= par$r) }, par = list(r = r), parnames = "threshold distance", init = function(self) { r <- self$par$r 
if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("threshold distance r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=round(gamma,4))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) is.finite(loggamma) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(...) { return(Inf) }, version=versionstring.spatstat() ) class(out) <- "interact" out$init(out) return(out) } spatstat/R/fgk3.R0000755000176000001440000003756012237642727013355 0ustar ripleyusers# # $Revision: 1.18 $ $Date: 2013/05/01 05:48:28 $ # # Estimates of F, G and K for three-dimensional point patterns # # # ............ user interface ............................. # K3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("translation", "isotropic")) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(translation="translation", trans="translation", isotropic="isotropic", iso="isotropic", best="isotropic"), multi=TRUE) trap.extra.arguments(..., .Context="In K3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) # this will be the output data frame K <- data.frame(r=r, theo= (4/3) * pi * r^3) desc <- c("distance argument r", "theoretical Poisson %s") K <- fv(K, "r", substitute(K3(r), NULL), "theo", , c(0,rmax/2), c("r","%s[pois](r)"), desc, fname="K3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # extract coordinates coo <- coords(X) if(any(correction %in% "translation")) { u <- k3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="translation") Kt <- u$f K <- bind.fv(K, data.frame(trans=Kt), "%s[trans](r)", "translation-corrected estimate of %s", "trans") } 
if(any(correction %in% "isotropic")) { u <- k3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="isotropic") Ki <- u$f K <- bind.fv(K, data.frame(iso=Ki), "%s[iso](r)", "isotropic-corrected estimate of %s", "iso") } # default is to display them all formula(K) <- . ~ r unitname(K) <- unitname(X) return(K) } G3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("rs", "km", "Hanisch")) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(rs="rs", border="rs", km="km", KM="km", Hanisch="han", hanisch="han", best="km"), multi=TRUE) trap.extra.arguments(..., .Context="In G3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) coo <- coords(X) lambda <- nrow(coo)/volume(B) # this will be the output data frame G <- data.frame(r=r, theo= 1 - exp( - lambda * (4/3) * pi * r^3)) desc <- c("distance argument r", "theoretical Poisson %s") G <- fv(G, "r", substitute(G3(r), NULL), "theo", , c(0,rmax/2), c("r","%s[pois](r)"), desc, fname="G3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # collect four histograms for censored data u <- g3Cengine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval) if("rs" %in% correction) G <- bind.fv(G, data.frame(rs=u$rs), "%s[rs](r)", "reduced sample estimate of %s", "rs") if("km" %in% correction) G <- bind.fv(G, data.frame(km=u$km), "%s[km](r)", "Kaplan-Meier estimate of %s", "km") if("han" %in% correction) G <- bind.fv(G, data.frame(han=u$han), "%s[han](r)", "Normalised Hanisch estimate of %s", "han") # default is to display them all formula(G) <- . 
~ r unitname(G) <- unitname(X) return(G) } F3est <- function(X, ..., rmax=NULL, nrval=128, vside=NULL, correction=c("rs", "km", "cs"), sphere=c("fudge", "ideal", "digital")) { stopifnot(inherits(X, "pp3")) sphere <- match.arg(sphere) correction <- pickoption("correction", correction, c(rs="rs", border="rs", km="km", KM="km", Kaplan="km", cs="cs", CS="cs", best="km"), multi=TRUE) trap.extra.arguments(..., .Context="In F3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) coo <- coords(X) vol <- volume(B) lambda <- nrow(coo)/vol # determine voxel size if(missing(vside)) { voxvol <- vol/spatstat.options("nvoxel") vside <- voxvol^(1/3) # ensure the shortest side is a whole number of voxels s <- shortside(B) m <- ceiling(s/vside) vside <- s/m } # compute theoretical value switch(sphere, ideal = { volsph <- (4/3) * pi * r^3 spherename <- "ideal sphere" }, fudge = { volsph <- 0.78 * (4/3) * pi * r^3 spherename <- "approximate sphere" }, digital = { volsph <- digital.volume(c(0, rmax), nrval, vside) spherename <- "digital sphere" }) theo.desc <- paste("theoretical Poisson %s using", spherename) # this will be the output data frame FF <- data.frame(r = r, theo = 1 - exp( - lambda * volsph)) desc <- c("distance argument r", theo.desc) labl <- c("r","%s[pois](r)") FF <- fv(FF, "r", substitute(F3(r), NULL), "theo", , c(0,rmax/2), labl, desc, fname="F3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # go u <- f3Cengine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, vside=vside) if("rs" %in% correction) FF <- bind.fv(FF, data.frame(rs=u$rs), "%s[rs](r)", "reduced sample estimate of %s", "rs") if("km" %in% correction) FF <- bind.fv(FF, data.frame(km=u$km), "%s[km](r)", "Kaplan-Meier estimate of %s", "km") if("cs" %in% correction) FF <- bind.fv(FF, data.frame(cs=u$cs), "%s[cs](r)", "Chiu-Stoyan estimate of %s", "cs") # default is to display them all formula(FF) <- . 
~ r unitname(FF) <- unitname(X) return(FF) } pcf3est <- function(X, ..., rmax=NULL, nrval=128, correction=c("translation", "isotropic"), delta=NULL, adjust=1, biascorrect=TRUE) { stopifnot(inherits(X, "pp3")) correction <- pickoption("correction", correction, c(translation="translation", trans="translation", isotropic="isotropic", iso="isotropic", best="isotropic"), multi=TRUE) trap.extra.arguments(..., .Context="In pcf3est") B <- X$domain if(is.null(rmax)) rmax <- diameter(B)/2 r <- seq(from=0, to=rmax, length.out=nrval) if(is.null(delta)) { lambda <- npoints(X)/volume(B) delta <- adjust * 0.26/lambda^(1/3) } if(biascorrect) { # bias correction rondel <- r/delta biasbit <- ifelseAX(rondel > 1, 1, (3/4)*(rondel + 2/3 - (1/3)*rondel^3)) } # this will be the output data frame g <- data.frame(r=r, theo=rep.int(1, length(r))) desc <- c("distance argument r", "theoretical Poisson %s") g <- fv(g, "r", substitute(pcf3(r), NULL), "theo", , c(0,rmax/2), c("r","%s[pois](r)"), desc, fname="pcf3") # extract the x,y,z ranges as a vector of length 6 flatbox <- unlist(B[1:3]) # extract coordinates coo <- coords(X) if(any(correction %in% "translation")) { u <- pcf3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="translation", delta=delta) gt <- u$f if(biascorrect) gt <- gt/biasbit g <- bind.fv(g, data.frame(trans=gt), "%s[trans](r)", "translation-corrected estimate of %s", "trans") } if(any(correction %in% "isotropic")) { u <- pcf3engine(coo$x, coo$y, coo$z, flatbox, rmax=rmax, nrval=nrval, correction="isotropic", delta=delta) gi <- u$f if(biascorrect) gi <- gi/biasbit g <- bind.fv(g, data.frame(iso=gi), "%s[iso](r)", "isotropic-corrected estimate of %s", "iso") } # default is to display them all formula(g) <- . ~ r unitname(g) <- unitname(X) attr(g, "delta") <- delta return(g) } # ............ low level code .............................. 
# k3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=100, correction="translation") { code <- switch(correction, translation=0, isotropic=1) DUP <- spatstat.options("dupC") res <- .C("RcallK3", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1]), as.double(box[2]), as.double(box[3]), as.double(box[4]), as.double(box[5]), as.double(box[6]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), as.integer(code), DUP=DUP) # PACKAGE="spatstat") return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # # g3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=10, correction="Hanisch G3") { code <- switch(correction, "minus sampling"=1, "Hanisch G3"=3) DUP <- spatstat.options("dupC") res <- .C("RcallG3", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1]), as.double(box[2]), as.double(box[3]), as.double(box[4]), as.double(box[5]), as.double(box[6]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), as.integer(code), DUP=DUP) # PACKAGE="spatstat") return(list(range = range, f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # f3engine <- function(x, y, z, box=c(0,1,0,1,0,1), vside=0.05, range=c(0,1.414), nval=25, correction="minus sampling") { # code <- switch(correction, "minus sampling"=1, no=0) DUP <- spatstat.options("dupC") res <- .C("RcallF3", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1]), as.double(box[2]), as.double(box[3]), as.double(box[4]), as.double(box[5]), as.double(box[6]), as.double(vside), as.double(range[1]), as.double(range[2]), m=as.integer(nval), num = as.integer(integer(nval)), denom = as.integer(integer(nval)), as.integer(code), DUP=DUP) # PACKAGE="spatstat") r <- 
seq(from=range[1], to=range[2], length.out=nval) f <- with(res, ifelseXB(denom > 0, num/denom, 1)) return(list(r = r, f = f, num=res$num, denom=res$denom, correction=correction)) } f3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), vside=0.05, rmax=1, nrval=25) { # DUP <- spatstat.options("dupC") res <- .C("RcallF3cen", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1]), as.double(box[2]), as.double(box[3]), as.double(box[4]), as.double(box[5]), as.double(box[6]), as.double(vside), as.double(0), as.double(rmax), m=as.integer(nrval), obs = as.integer(integer(nrval)), nco = as.integer(integer(nrval)), cen = as.integer(integer(nrval)), ncc = as.integer(integer(nrval)), upperobs = as.integer(integer(1)), uppercen = as.integer(integer(1)), DUP=DUP) # PACKAGE="spatstat") r <- seq(from=0, to=rmax, length.out=nrval) # obs <- res$obs nco <- res$nco cen <- res$cen ncc <- res$ncc upperobs <- res$upperobs uppercen <- res$uppercen # breaks <- breakpts.from.r(r) km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # ero <- eroded.volumes(as.box3(box), r) H <- cumsum(nco/ero) cs <- H/max(H[is.finite(H)]) # return(list(rs=rs, km=km$km, hazard=km$lambda, cs=cs, r=r)) } g3Cengine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=25) { # DUP <- spatstat.options("dupC") res <- .C("RcallG3cen", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1]), as.double(box[2]), as.double(box[3]), as.double(box[4]), as.double(box[5]), as.double(box[6]), as.double(0), as.double(rmax), m=as.integer(nrval), obs = as.integer(integer(nrval)), nco = as.integer(integer(nrval)), cen = as.integer(integer(nrval)), ncc = as.integer(integer(nrval)), upperobs = as.integer(integer(1)), uppercen = as.integer(integer(1)), DUP=DUP) # PACKAGE="spatstat") r <- seq(from=0, to=rmax, length.out=nrval) # obs <- res$obs nco <- res$nco cen <- res$cen ncc <- res$ncc upperobs <- res$upperobs uppercen 
<- res$uppercen # breaks <- breakpts.from.r(r) km <- kaplan.meier(obs, nco, breaks, upperobs=upperobs) rs <- reduced.sample(nco, cen, ncc, uppercen=uppercen) # ero <- eroded.volumes(as.box3(box), r) H <- cumsum(nco/ero) han <- H/max(H[is.finite(H)]) return(list(rs=rs, km=km$km, hazard=km$lambda, han=han, r=r)) } pcf3engine <- function(x, y, z, box=c(0,1,0,1,0,1), rmax=1, nrval=100, correction="translation", delta=rmax/10) { code <- switch(correction, translation=0, isotropic=1) DUP <- spatstat.options("dupC") res <- .C("Rcallpcf3", as.double(x), as.double(y), as.double(z), as.integer(length(x)), as.double(box[1]), as.double(box[2]), as.double(box[3]), as.double(box[4]), as.double(box[5]), as.double(box[6]), as.double(0), as.double(rmax), as.integer(nrval), f = as.double(numeric(nrval)), num = as.double(numeric(nrval)), denom = as.double(numeric(nrval)), method=as.integer(code), delta=as.double(delta), DUP=DUP) # PACKAGE="spatstat") return(list(range = c(0,rmax), f = res$f, num=res$num, denom=res$denom, correction=correction)) } # # ------------------------------------------------------------ # volume of a sphere (exact and approximate) # sphere.volume <- function(range=c(0,1.414), nval=10) { rr <- seq(from=range[1], to=range[2], length.out=nval) return( (4/3) * pi * rr^3) } digital.volume <- function(range=c(0, 1.414), nval=25, vside= 0.05) { # Calculate number of points in digital sphere # by performing distance transform for a single point # in the middle of a suitably large box # # This takes EIGHT TIMES AS LONG as the corresponding empirical F-hat !!! 
# w <- 2 * range[2] + 2 * vside # DUP <- spatstat.options("dupC") dvol <- .C("RcallF3", as.double(w/2), as.double(w/2), as.double(w/2), as.integer(1), as.double(0), as.double(w), as.double(0), as.double(w), as.double(0), as.double(w), as.double(vside), as.double(range[1]), as.double(range[2]), as.integer(nval), num = as.integer(integer(nval)), denom = as.integer(integer(nval)), as.integer(0), DUP=DUP # PACKAGE="spatstat" )$num # (vside^3) * dvol } spatstat/R/mppm.R0000644000176000001440000004221312240201006013427 0ustar ripleyusers# # mppm.R # # $Revision: 1.61 $ $Date: 2012/09/06 04:39:13 $ # mppm <- function(formula, data, interaction=Poisson(), ..., iformula=NULL, use.gam=FALSE) { # remember call cl <- match.call() callstring <- paste(short.deparse(sys.call()), collapse="") # Validate arguments if(!inherits(formula, "formula")) stop(paste("Argument", dQuote("formula"), "should be a formula")) stopifnot(is.hyperframe(data)) data.sumry <- summary(data, brief=TRUE) npat <- data.sumry$ncases if(npat == 0) stop(paste("Hyperframe", sQuote("data"), "has zero rows")) if(!is.null(iformula) && !inherits(iformula, "formula")) stop(paste("Argument", sQuote("iformula"), "should be a formula or NULL")) if(! 
(is.interact(interaction) || is.hyperframe(interaction))) stop(paste("The argument", sQuote("interaction"), "should be a point process interaction object (class", dQuote("interact"), "), or a hyperframe containing such objects", sep="")) backdoor <- list(...)$backdoor if(is.null(backdoor) || !is.logical(backdoor)) backdoor <- FALSE ############## HANDLE FORMULAS ############################ checkvars <- function(f, b, extra=NULL, bname=short.deparse(substitute(b))){ fname <- short.deparse(substitute(f)) fvars <- variablesinformula(f) bvars <- if(is.character(b)) b else names(b) bvars <- c(bvars, extra) nbg <- !(fvars %in% bvars) if(any(nbg)) { nn <- sum(nbg) stop(paste(ngettext(nn, "Variable", "Variables"), commasep(dQuote(fvars[nbg])), "in", fname, ngettext(nn, "is not one of the", "are not"), "names in", bname)) } return(NULL) } #------ Trend Formula ------------------ # check all variables in trend formula are recognised checkvars(formula, data.sumry$col.names, extra=c("x","y","id"), bname="data") # check formula has LHS and RHS. 
Extract them if(length(formula) < 3) stop(paste("Argument", sQuote("formula"), "must have a left hand side")) Yname <- lhs <- formula[[2]] trend <- rhs <- formula[c(1,3)] if(!is.name(Yname)) stop("Left hand side of formula should be a single name") Yname <- paste(Yname) if(!inherits(trend, "formula")) stop("Internal error: failed to extract RHS of formula") allvars <- variablesinformula(trend) # --- Interaction formula ----- # names of interactions as they may appear in formulae itags <- if(is.hyperframe(interaction)) names(interaction) else "Interaction" ninteract <- length(itags) # ensure `iformula' is a formula without a LHS # and determine which columns of `interaction' are actually used if(is.null(iformula)) { if(ninteract > 1) stop(paste("interaction hyperframe has more than 1 column;", "you must specify the choice of interaction", "using argument", sQuote("iformula"))) iused <- TRUE iformula <- as.formula(paste("~", itags)) } else { if(length(iformula) > 2) stop(paste("The interaction formula", sQuote("iformula"), "should not have a left hand side")) # valid variables in `iformula' are interactions and data frame columns permitted <- paste(sQuote("interaction"), "or permitted name in", sQuote("data")) checkvars(iformula, itags, extra=c(data.sumry$dfnames, "id"), bname=permitted) ivars <- variablesinformula(iformula) # check which columns of `interaction' are actually used iused <- itags %in% ivars if(sum(iused) == 0) stop("No interaction specified in iformula") # OK allvars <- c(allvars, ivars) } # ---- variables required (on RHS of one of the above formulae) ----- allvars <- unique(allvars) ######## EXTRACT DATA ##################################### # Insert extra variable 'id' data <- cbind.hyperframe(data, id=factor(1:npat)) data.sumry <- summary(data, brief=TRUE) allvars <- unique(c(allvars, "id")) # Extract the list of responses (point pattern/quadscheme) Y <- data[, Yname, drop=TRUE] if(npat == 1) Y <- list(Y) Yclass <- data.sumry$classes[Yname] 
if(Yclass == "ppp") { # convert to quadrature schemes, for efficiency's sake Y <- lapply(Y, quadscheme) } else if(Yclass != "quad") stop(paste("Column", dQuote(Yname), "of data", "does not consist of point patterns (class ppp)", "nor of quadrature schemes (class quad)")) # Extract sub-hyperframe of data named in formulae datanames <- names(data) used.cov.names <- allvars[allvars %in% datanames] has.covar <- (length(used.cov.names) > 0) if(has.covar) { dfvar <- used.cov.names %in% data.sumry$dfnames imvar <- data.sumry$types[used.cov.names] == "im" if(any(nbg <- !(dfvar | imvar))) stop(paste("Inappropriate format for", ngettext(sum(nbg), "covariate", "covariates"), paste(sQuote(used.cov.names[nbg]), collapse=", "), ": should contain image objects or vector/factor")) covariates.hf <- data[, used.cov.names, drop=FALSE] has.design <- any(dfvar) dfvarnames <- used.cov.names[dfvar] datadf <- if(has.design) as.data.frame(covariates.hf, discard=TRUE, warn=FALSE) else NULL if(has.design) { # check for NA's in design covariates if(any(nbg <- apply(is.na(datadf), 2, any))) stop(paste("There are NA's in the", ngettext(sum(nbg), "covariate", "covariates"), commasep(dQuote(names(datadf)[nbg])))) } } else { has.design <- FALSE datadf <- NULL } ############### INTERACTION ################################### # ensure `interaction' is a hyperframe of `interact' objects # with the right number of rows. # All entries in a column must represent the same process # (possibly with different values of the irregular parameters). # Extract the names of the point processes. 
if(is.interact(interaction)) { ninteract <- 1 processes <- list(Interaction=interaction$name) interaction <- hyperframe(Interaction=interaction, id=1:npat)[,1] constant <- c(Interaction=TRUE) } else if(is.hyperframe(interaction)) { inter.sumry <- summary(interaction) ninteract <- inter.sumry$nvars # ensure it has the same number of rows as 'data' nr <- inter.sumry$ncases if(nr == 1 && npat > 1) { interaction <- cbind.hyperframe(id=1:npat, interaction)[,-1] inter.sumry <- summary(interaction) } else if(nr != npat) stop(paste("Number of rows in", sQuote("interaction"), "=", nr, "!=", npat, "=", "number of rows in", sQuote("data"))) # check all columns contain interaction objects ok <- (inter.sumry$classes == "interact") if(!all(ok)) { nbg <- names(interaction)[!ok] nn <- sum(!ok) stop(paste(ngettext(nn, "Column", "Columns"), paste(sQuote(nbg), collapse=", "), ngettext(nn, "does", "do"), "not consist of interaction objects")) } # all entries in a column must represent the same process type # (with possibly different values of the irregular parameters) consistentname <- function(x) { xnames <- unlist(lapply(x, function(y) { y$name })) return(length(unique(xnames)) == 1) } ok <- unlist(lapply(as.list(interaction), consistentname)) if(!all(ok)) { nbg <- names(interaction)[!ok] stop(paste("Different interactions may not appear in a single column.", "Violated by", paste(sQuote(nbg), collapse=", "))) } processes <- lapply(as.list(interaction), function(z) { z[[1]]$name }) # determine whether all entries in a column are EXACTLY the same # (=> have the same parameters) constant <- (inter.sumry$storage == "hyperatom") checkconstant <- function(x) { if(length(x) <= 1) return(TRUE) y <- x[[1]] all(unlist(lapply(x[-1], identical, y=y))) } if(any(!constant)) { others <- interaction[,!constant] constant[!constant] <- unlist(lapply(as.list(others), checkconstant)) } } # check for trivial (Poisson) interactions ispoisson <- function(x) all(unlist(lapply(x, is.poisson.interact))) 
trivial <- unlist(lapply(as.list(interaction), ispoisson)) # check that iformula does not combine two interactions on one row nondfnames <- datanames[!(datanames %in% data.sumry$dfnames)] ip <- impliedpresence(itags, iformula, datadf, nondfnames) if(any(apply(ip, 1, sum) > 1)) stop("iformula invokes more than one interaction on a single row") # #################### BERMAN-TURNER DEVICE ######################### # # set up list to contain the glm variable names for each interaction. Vnamelist <- rep(list(character(0)), ninteract) names(Vnamelist) <- itags # set up list to contain 'IsOffset' Isoffsetlist <- rep(list(logical(0)), ninteract) names(Isoffsetlist) <- itags # # ---------------- L O O P --------------------------------- for(i in 1:npat) { # extract responses and covariates for presentation to ppm() Yi <- Y[[i]] covariates <- if(has.covar) as.list(covariates.hf[i, , drop=FALSE]) else NULL if(has.design) { # convert each data frame value to an image covariates[dfvarnames] <- lapply(as.list(as.data.frame(covariates[dfvarnames])), as.im, W=Yi$data$window) } # Generate data frame and glm info for this point pattern # First the trend covariates prep0 <- bt.frame(Yi, trend, Poisson(), ..., covariates=covariates, allcovar=TRUE, use.gam=use.gam) glmdat <- prep0$glmdata # now the nontrivial interaction terms for(j in (1:ninteract)[iused & !trivial]) { inter <- interaction[i,j,drop=TRUE] prepj <- bt.frame(Yi, ~1, inter, ..., covariates=covariates, allcovar=TRUE, use.gam=use.gam, vnamebase=itags[j], vnameprefix=itags[j]) # store GLM variable names & check consistency vnameij <- prepj$Vnames if(i == 1) Vnamelist[[j]] <- vnameij else if(!identical(vnameij, Vnamelist[[j]])) stop("Internal error: Unexpected conflict in glm variable names") # store offset indicator vectors isoffset.ij <- prepj$IsOffset if(i == 1) Isoffsetlist[[j]] <- isoffset.ij else if(!identical(isoffset.ij, Isoffsetlist[[j]])) stop("Internal error: Unexpected conflict in offset indicators") # GLM data 
frame for this interaction glmdatj <- prepj$glmdata if(nrow(glmdatj) != nrow(glmdat)) stop("Internal error: differing numbers of rows in glm data frame") iterms.ij <- glmdatj[vnameij] subset.ij <- glmdatj$.mpl.SUBSET # tack on columns of interaction terms glmdat <- cbind(glmdat, iterms.ij) # update subset (quadrature points where cif is positive) glmdat$.mpl.SUBSET <- glmdat$.mpl.SUBSET & subset.ij } # assemble the Mother Of All Data Frames if(i == 1) moadf <- glmdat else { # There may be new or missing columns recognised <- names(glmdat) %in% names(moadf) if(any(!recognised)) { newnames <- names(glmdat)[!recognised] zeroes <- as.data.frame(matrix(0, nrow(moadf), length(newnames))) names(zeroes) <- newnames moadf <- cbind(moadf, zeroes) } provided <- names(moadf) %in% names(glmdat) if(any(!provided)) { absentnames <- names(moadf)[!provided] zeroes <- as.data.frame(matrix(0, nrow(glmdat), length(absentnames))) names(zeroes) <- absentnames glmdat <- cbind(glmdat, zeroes) } # Finally they are compatible moadf <- rbind(moadf, glmdat) } } # ---------------- E N D o f L O O P -------------------------- # # backdoor exit - Berman-Turner frame only - used by predict.mppm if(backdoor) return(moadf) # # # -------------------------------------------------------------------- # # Construct the glm formula for the Berman-Turner device # # Get trend part from the last-computed prep0 fmla <- prep0$trendfmla # Tack on the RHS of the interaction formula if(!all(trivial)) fmla <- paste(fmla, "+", as.character(iformula)[[2]]) # Make it a formula fmla <- as.formula(fmla) # Ensure that each interaction name is recognised. # # To the user, an interaction is identified by its `tag' name # (default tag: "Interaction") # # Internally, an interaction is fitted using its sufficient statistic # which may be 0, 1 or k-dimensional. # The column names of the sufficient statistic are the Vnames # returned from ppm. # The Poisson process is a special case: it is 0-dimensional (no Vnames). 
# # For k-dimensional sufficient statistics, we modify the formulae, # replacing the interaction name by (vname1 + vname2 + .... + vnamek) # for(j in (1:ninteract)[iused]) { vnames <- Vnamelist[[j]] tag <- itags[j] isoffset <- Isoffsetlist[[j]] if(any(isoffset)) { # enclose names of offset variables in 'offset()' vnames[isoffset] <- paste("offset(", vnames[isoffset], ")", sep="") } if(trivial[j]) # Poisson case: add a column of zeroes moadf[[tag]] <- 0 else if(!identical(vnames, tag)) { if(length(vnames) == 1) # tag to be replaced by vname vn <- paste("~", vnames[1]) else # tag to be replaced by (vname1 + vname2 + .... + vnamek) vn <- paste("~(", paste(vnames, collapse=" + "), ")") # pull out formula representation of RHS vnr <- as.formula(vn)[[2]] # make substitution rule: list(=) vnsub <- list(vnr) names(vnsub) <- tag # perform substitution in trend formula fmla <- eval(substitute(substitute(fom, vnsub), list(fom=fmla))) } } fmla <- as.formula(fmla) # Fix scoping problem assign("glmmsubset", moadf$.mpl.SUBSET, envir=environment(fmla)) # Satisfy package checker glmmsubset <- .mpl.SUBSET <- moadf$.mpl.SUBSET .mpl.W <- moadf$.mpl.W # ---------------- FIT THE MODEL ------------------------------------ want.trend <- prep0$info$want.trend if(want.trend && use.gam) { fitter <- "gam" FIT <- gam(fmla, family=quasi(link=log, variance=mu), weights=.mpl.W, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=gam.control(maxit=50)) deviants <- deviance(FIT) } else { fitter <- "glm" FIT <- glm(fmla, family=quasi(link=log, variance=mu), weights=.mpl.W, data=moadf, subset=(.mpl.SUBSET=="TRUE"), control=glm.control(maxit=50)) deviants <- deviance(FIT) } # maximised log-pseudolikelihood W <- moadf$.mpl.W SUBSET <- moadf$.mpl.SUBSET Z <- (moadf$.mpl.Y != 0) maxlogpl <- -(deviants/2 + sum(log(W[Z & SUBSET])) + sum(Z & SUBSET)) # # ---------------- PACK UP THE RESULT -------------------------------- # result <- list(Call = list(callstring=callstring, cl=cl), Info = list( 
has.covar=has.covar, has.design=has.design, Yname=Yname, used.cov.names=used.cov.names, allvars=allvars, names.data=names(data), is.df.column=(data.sumry$storage == "dfcolumn"), rownames=row.names(data), correction=prep0$info$correction, rbord=prep0$info$rbord), Fit= list(fitter=fitter, use.gam=use.gam, fmla=fmla, FIT=FIT, moadf=moadf, Vnamelist=Vnamelist), Inter = list(ninteract=ninteract, interaction=interaction, iformula=iformula, iused=iused, itags=itags, processes=processes, trivial=trivial, constant=constant), formula=formula, trend=trend, iformula=iformula, npat=npat, data=data, Y=Y, maxlogpl=maxlogpl, datadf=datadf) class(result) <- c("mppm", class(result)) return(result) } is.mppm <- function(x) { inherits(x, "mppm") } coef.mppm <- function(object, ...) { coef(object$Fit$FIT) } print.mppm <- function(x, ...) { print(summary(x, ..., brief=TRUE)) } is.poisson.mppm <- function(x) { trivial <- x$Inter$trivial iused <- x$Inter$iused all(trivial[iused]) } quad.mppm <- function(x) { x$Y } data.mppm <- function(x) { lapply(x$Y, function(z) { z$data }) } windows.mppm <- function(x) { lapply(x$Y, function(z) {z$data$window}) } logLik.mppm <- function(object, ...) { if(!is.poisson.mppm(object)) warning(paste("log likelihood is not available for non-Poisson model;", "log-pseudolikelihood returned")) ll <- object$maxlogpl attr(ll, "df") <- length(coef(object)) class(ll) <- "logLik" return(ll) } spatstat/R/softcore.R0000755000176000001440000000715712237642727014346 0ustar ripleyusers# # # softcore.S # # $Revision: 2.12 $ $Date: 2012/08/27 02:09:49 $ # # Soft core processes. 
# # Softcore() create an instance of a soft core process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # Softcore <- local({ BlankSoftcore <- list( name = "Soft core process", creator = "Softcore", family = "pairwise.family", # evaluated later pot = function(d, par) { sig0 <- par$sigma0 if(is.na(sig0)) { p <- -d^(-2/par$kappa) } else { # expand around sigma0 and set large negative numbers to -Inf drat <- d/sig0 p <- -drat^(-2/par$kappa) p[p < -25] <- -Inf } return(p) }, par = list(kappa = NULL, sigma0=NA), # filled in later parnames = c("Exponent kappa", "Initial approximation to sigma"), selfstart = function(X, self) { # self starter for Softcore if(npoints(X) < 2) { # not enough points to make any decisions return(self) } md <- min(nndist(X)) if(md == 0) { warning(paste("Pattern contains duplicated points:", "impossible under Softcore model")) return(self) } kappa <- self$par$kappa if(!is.na(sigma0 <- self$par$sigma0)) { # value fixed by user or previous invocation # check it if((md/sigma0)^(-2/kappa) > 25) warning(paste("Initial approximation sigma0 is too large;", "some data points will have zero probability")) return(self) } # take sigma0 = minimum interpoint distance Softcore(kappa=kappa, sigma0=md) }, init = function(self) { kappa <- self$par$kappa if(!is.numeric(kappa) || length(kappa) != 1 || kappa <= 0 || kappa >= 1) stop(paste("Exponent kappa must be a", "positive number less than 1")) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { theta <- as.numeric(coeffs[1]) sigma <- theta^(self$par$kappa/2) if(!is.na(sig0 <- self$par$sigma0)) sigma <- sigma * sig0 return(list(param=list(sigma=sigma), inames="interaction parameter sigma", printable=sigma)) }, valid = function(coeffs, self) { theta <- coeffs[1] return(is.finite(theta) && (theta >= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, 
irange = function(self, coeffs=NA, epsilon=0, ...) { # distance d beyond which log(interaction factor) <= epsilon if(any(is.na(coeffs)) || epsilon == 0) return(Inf) theta <- as.numeric(coeffs[1]) kappa <- self$par$kappa sig0 <- self$par$sigma0 if(is.na(sig0)) sig0 <- 1 return(sig0 * (theta/epsilon)^(kappa/2)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral kappa <- self$par$kappa sigma <- (self$interpret)(coeffs, self)$param$sigma return(pi * (sigma^2) * gamma(1 - kappa)) }, version=NULL # filled in later ) class(BlankSoftcore) <- "interact" Softcore <- function(kappa, sigma0=NA) { instantiate.interact(BlankSoftcore, list(kappa=kappa, sigma0=sigma0)) } Softcore }) spatstat/R/dist2dpath.R0000755000176000001440000000372512237642727014565 0ustar ripleyusers# # dist2dpath.R # # $Revision: 1.6 $ $Date: 2013/04/25 06:37:43 $ # # dist2dpath compute shortest path distances # dist2dpath <- function(dist, method="C") { # given a matrix of distances between adjacent vertices # (value = Inf if not adjacent) # compute the matrix of shortest path distances stopifnot(is.matrix(dist) && isSymmetric(dist)) stopifnot(all(diag(dist) == 0)) stopifnot(all(dist[is.finite(dist)] >= 0)) # n <- nrow(dist) cols <- col(dist) # shortest <- min(dist[is.finite(dist) & dist > 0]) tol <- shortest/max(n,1024) tol <- max(tol, .Machine$double.eps) # switch(method, interpreted={ dpathnew <- dpath <- dist changed <- TRUE while(changed) { for(j in 1:n) dpathnew[,j] <- apply(dpath + dist[j,][cols], 1, min) changed <- any(abs(dpathnew - dpath) > tol) dpath <- dpathnew } }, C={ adj <- is.finite(dist) diag(adj) <- TRUE d <- dist d[!adj] <- -1 DUP <- spatstat.options("dupC") z <- .C("Ddist2dpath", nv=as.integer(n), d=as.double(d), adj=as.integer(adj), dpath=as.double(numeric(n*n)), tol=as.double(tol), niter=as.integer(integer(1)), status=as.integer(integer(1)), DUP=DUP) # PACKAGE="spatstat") if(z$status == -1) warning(paste("C algorithm did not converge to tolerance", tol, "after", 
z$niter, "iterations", "on", n, "vertices and", sum(adj) - n, "edges")) dpath <- matrix(z$dpath, n, n) # value=-1 implies unreachable dpath[dpath < 0] <- Inf }, stop(paste("Unrecognised method", sQuote(method)))) return(dpath) } spatstat/R/eval.fasp.R0000755000176000001440000000516012237642727014371 0ustar ripleyusers# # eval.fasp.R # # # eval.fasp() Evaluate expressions involving fasp objects # # compatible.fasp() Check whether two fasp objects are compatible # # $Revision: 1.6 $ $Date: 2012/06/11 05:22:53 $ # eval.fasp <- function(expr, envir, dotonly=TRUE) { # convert syntactic expression to 'expression' object e <- as.expression(substitute(expr)) # convert syntactic expression to call elang <- substitute(expr) # find names of all variables in the expression varnames <- all.vars(e) if(length(varnames) == 0) stop("No variables in this expression") # get the actual variables if(missing(envir)) envir <- sys.parent() vars <- lapply(as.list(varnames), get, envir=envir) names(vars) <- varnames # find out which ones are fasp objects isfasp <- unlist(lapply(vars, inherits, what="fasp")) if(!any(isfasp)) stop("No fasp objects in this expression") fasps <- vars[isfasp] nfasps <- length(fasps) # test whether the fasp objects are compatible if(nfasps > 1 && !(ok <- do.call("compatible", unname(fasps)))) stop(paste(if(nfasps > 2) "some of" else NULL, "the objects", commasep(sQuote(names(fasps))), "are not compatible")) # copy first object as template result <- fasps[[1]] which <- result$which nr <- nrow(which) nc <- ncol(which) # create environment for evaluation fenv <- new.env() # for each [i,j] extract fv objects and evaluate expression for(i in seq_len(nr)) for(j in seq_len(nc)) { # extract fv objects at position [i,j] funs <- lapply(fasps, function(x, i, j) { as.fv(x[i,j]) }, i=i, j=j) # insert into list of argument values vars[isfasp] <- funs # assign them into the right environment for(k in seq_along(vars)) assign(varnames[k], vars[[k]], envir=fenv) # evaluate 
resultij <- eval(substitute(eval.fv(ee,ff,dd), list(ee=e, ff=fenv, dd=dotonly))) # insert back into fasp result$fns[[which[i,j] ]] <- resultij } result$title <- paste("Result of eval.fasp(", e, ")", sep="") return(result) } compatible.fasp <- function(A, B, ...) { verifyclass(A, "fasp") if(missing(B)) return(TRUE) verifyclass(B, "fasp") dimA <- dim(A$which) dimB <- dim(B$which) if(!all(dimA == dimB)) return(FALSE) for(i in seq_len(dimA[1])) for(j in seq_len(dimA[2])) { Aij <- as.fv(A[i,j]) Bij <- as.fv(B[i,j]) if(!compatible.fv(Aij, Bij)) return(FALSE) } # A and B agree if(length(list(...)) == 0) return(TRUE) # recursion return(compatible.fasp(B, ...)) } spatstat/R/geyer.R0000755000176000001440000002446512237642727013636 0ustar ripleyusers# # # geyer.S # # $Revision: 2.23 $ $Date: 2013/04/25 06:37:43 $ # # Geyer's saturation process # # Geyer() create an instance of Geyer's saturation process # [an object of class 'interact'] # # Geyer <- local({ # .......... template .......... BlankGeyer <- list( name = "Geyer saturation process", creator = "Geyer", family = "pairsat.family", # evaluated later pot = function(d, par) { (d <= par$r) # same as for Strauss }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction distance","saturation parameter"), init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("interaction distance r must be a positive number") if(!is.numeric(sat) || length(sat) != 1 || sat < 1) stop("saturation parameter sat must be a number >= 1") if(ceiling(sat) != floor(sat)) warning(paste("saturation parameter sat", "has a non-integer value")) }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=round(gamma,4))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) sat <- 
self$par$sat return(is.finite(loggamma) && (is.finite(sat) || loggamma <= 0)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r if(any(!is.na(coeffs))) { loggamma <- coeffs[1] if(!is.na(loggamma) && (abs(loggamma) <= epsilon)) return(0) } return(2 * r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ..., halfway=FALSE, check=TRUE) { # fast evaluator for Geyer interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for Geyer") r <- potpars$r sat <- potpars$sat # first ensure all data points are in U nX <- npoints(X) nU <- npoints(U) Xseq <- seq_len(nX) if(length(EqualPairs) == 0) { # no data points currently included missingdata <- rep.int(TRUE, nX) } else { Xused <- EqualPairs[,1] missingdata <- !(Xseq %in% Xused) } somemissing <- any(missingdata) if(somemissing) { # add the missing data points nmiss <- sum(missingdata) U <- superimpose(U, X[missingdata], W=X$window, check=check) # correspondingly augment the list of equal pairs originalrows <- seq_len(nU) newXindex <- Xseq[missingdata] newUindex <- nU + seq_len(nmiss) EqualPairs <- rbind(EqualPairs, cbind(newXindex, newUindex)) nU <- nU + nmiss } # determine saturated pair counts counts <- strausscounts(U, X, r, EqualPairs) satcounts <- pmin.int(sat, counts) satcounts <- matrix(satcounts, ncol=1) if(halfway) { # trapdoor used by suffstat() answer <- satcounts } else if(sat == Inf) { # no saturation: fast code answer <- 2 * satcounts } else { # extract counts for data points Uindex <- EqualPairs[,2] Xindex <- EqualPairs[,1] Xcounts <- integer(npoints(X)) Xcounts[Xindex] <- counts[Uindex] # evaluate change in 
saturated counts of other data points change <- geyercounts(U, X, r, sat, Xcounts, EqualPairs) answer <- satcounts + change answer <- matrix(answer, ncol=1) } if(somemissing) answer <- answer[originalrows, , drop=FALSE] return(answer) }, delta2 = function(X,inte,correction, ...) { # Sufficient statistic for second order conditional intensity # h(X[i] | X) - h(X[i] | X[-j]) # Geyer interaction r <- inte$par$r sat <- inte$par$sat result <- geyerdelta2(X,r,sat) return(result) } ) class(BlankGeyer) <- "interact" Geyer <- function(r, sat) { instantiate.interact(BlankGeyer, list(r = r, sat=sat)) } Geyer }) # ........... externally visible auxiliary functions ......... geyercounts <- function(U, X, r, sat, Xcounts, EqualPairs) { # evaluate effect of adding dummy point or deleting data point # on saturated counts of other data points stopifnot(is.numeric(r)) stopifnot(is.numeric(sat)) # for C calls we need finite numbers stopifnot(is.finite(r)) stopifnot(is.finite(sat)) # sort in increasing order of x coordinate oX <- fave.order(X$x) oU <- fave.order(U$x) Xsort <- X[oX] Usort <- U[oU] nX <- npoints(X) nU <- npoints(U) Xcountsort <- Xcounts[oX] # inverse: data point i has sorted position i' = rankX[i] rankX <- integer(nX) rankX[oX] <- seq_len(nX) rankU <- integer(nU) rankU[oU] <- seq_len(nU) # map from quadrature points to data points Uindex <- EqualPairs[,2] Xindex <- EqualPairs[,1] Xsortindex <- rankX[Xindex] Usortindex <- rankU[Uindex] Cmap <- rep.int(-1, nU) Cmap[Usortindex] <- Xsortindex - 1 # call C routine DUP <- spatstat.options("dupC") zz <- .C("Egeyer", nnquad = as.integer(nU), xquad = as.double(Usort$x), yquad = as.double(Usort$y), quadtodata = as.integer(Cmap), nndata = as.integer(nX), xdata = as.double(Xsort$x), ydata = as.double(Xsort$y), tdata = as.integer(Xcountsort), rrmax = as.double(r), ssat = as.double(sat), result = as.double(numeric(nU)), DUP=DUP) # PACKAGE="spatstat") result <- zz$result[rankU] return(result) } geyerdelta2 <- local({ geyerdelta2 <- 
function(X, r, sat) { # Sufficient statistic for second order conditional intensity # Geyer model stopifnot(is.numeric(sat) && length(sat) == 1 && sat >= 0) # X could be a ppp or quad. if(is.ppp(X)) { # evaluate \Delta_{x_i} \Delta_{x_j} S(x) for data points x_i, x_j # i.e. h(X[i]|X) - h(X[i]|X[-j]) where h is first order cif statistic return(geydelppp(X, r, sat)) } else if(inherits(X, "quad")) { # evaluate \Delta_{u_i} \Delta_{u_j} S(x) for quadrature points u_i, u_j return(geydelquad(X, r, sat)) } else stop("Internal error: X should be a ppp or quad object") } geydelppp <- function(X, r, sat) { # initialise nX <- npoints(X) result <- matrix(0, nX, nX) # identify all r-close pairs (ordered pairs i ~ j) a <- closepairs(X, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I,J) # count number of r-neighbours for each point # (consistently with the above) tvals <- table(factor(I, levels=1:nX)) # Compute direct part # (arising when i~j) tI <- tvals[I] tJ <- tvals[J] result[IJ] <- pmin(sat, tI) - pmin(sat, tI - 1) + pmin(sat, tJ) - pmin(sat, tJ - 1) # Compute indirect part # (arising when i~k and j~k for another point k) # First find all such triples ord <- (I < J) vees <- edges2vees(I[ord], J[ord], nX) # evaluate contribution of (k, i, j) KK <- vees$i II <- factor(vees$j, levels=1:nX) JJ <- factor(vees$k, levels=1:nX) tKK <- tvals[KK] contribKK <- pmin(sat, tKK) - 2 * pmin(sat, tKK-1) + pmin(sat, tKK-2) # for each (i, j), sum the contributions over k delta3 <- tapply(contribKK, list(I=II, J=JJ), sum) delta3[is.na(delta3)] <- 0 # symmetrise and combine result <- result + delta3 + t(delta3) # if X is a ppp, return now if(is.null(D)) return(result) } geydelquad <- function(Q, r, sat) { Z <- is.data(Q) U <- union.quad(Q) nU <- npoints(U) nX <- npoints(Q$data) result <- matrix(0, nU, nU) # identify all r-close pairs U[i], U[j] a <- closepairs(U, r, what="indices") I <- a$i J <- a$j IJ <- cbind(I, J) # tag which ones are data points zI <- Z[I] zJ <- Z[J] # count t(U[i], X) 
IzJ <- I[zJ] JzJ <- J[zJ] tvals <- table(factor(IzJ, levels=1:nU)) # Compute direct part # (arising when U[i]~U[j]) tI <- tvals[I] tJ <- tvals[J] tIJ <- tI - zJ tJI <- tJ - zI result[IJ] <- pmin(sat, tIJ + 1) - pmin(sat, tIJ) + pmin(sat, tJI + 1) - pmin(sat, tJI) # Compute indirect part # (arising when U[i]~X[k] and U[j]~X[k] for another point X[k]) # First find all such triples # Group close pairs X[k] ~ U[j] by index k spl <- split(IzJ, factor(JzJ, levels=1:nX)) grlen <- unlist(lapply(spl, length)) # Assemble list of triples U[i], X[k], U[j] # by expanding each pair U[i], X[k] JJ <- unlist(spl[JzJ]) II <- rep(IzJ, grlen[JzJ]) KK <- rep(JzJ, grlen[JzJ]) # remove identical pairs i = j ok <- II != JJ II <- II[ok] JJ <- JJ[ok] KK <- KK[ok] # evaluate contribution of each triple tKK <- tvals[KK] zII <- Z[II] zJJ <- Z[JJ] tKIJ <- tKK - zII - zJJ contribKK <- pmin(sat, tKIJ + 2) - 2 * pmin(sat, tKIJ + 1) + pmin(sat, tKIJ) # for each (i, j), sum the contributions over k II <- factor(II, levels=1:nU) JJ <- factor(JJ, levels=1:nU) delta4 <- tapply(contribKK, list(I=II, J=JJ), sum) delta4[is.na(delta4)] <- 0 # combine result <- result + delta4 return(result) } geyerdelta2 }) spatstat/R/rshift.psp.R0000755000176000001440000000270512237642727014614 0ustar ripleyusers# # rshift.psp.R # # $Revision: 1.6 $ $Date: 2011/05/18 09:10:12 $ # rshift.psp <- function(X, ..., group=NULL, which=NULL) { verifyclass(X, "psp") # process arguments W <- rescue.rectangle(X$window) arglist <- handle.rshift.args(W, ..., edgedefault="erode") radius <- arglist$radius width <- arglist$width height <- arglist$height edge <- arglist$edge clip <- arglist$clip if(W$type != "rectangle") stop("Not yet implemented for non-rectangular windows") if(edge != "erode") stop(paste("Only implemented for edge=", dQuote("erode"))) # split into groups if(is.null(group)) Y <- list(X) else { stopifnot(is.factor(group)) stopifnot(length(group) == X$n) Y <- lapply(levels(group), function(l, X, group) {X[group == l]}, 
X=X, group=group) } ############ loop ################ result <- psp(numeric(0), numeric(0), numeric(0), numeric(0), X$window) for(i in seq_along(Y)) { Z <- Y[[i]] # generate random translation vector if(!is.null(radius)) jump <- runifdisc(1, radius=radius) else { jump <- list(x=runif(1, min=0, max=width), y=runif(1, min=0, max=height)) } # translate segments Zsh <- shift(Z, c(jump$x, jump$y)) Zsh$window <- W # append to result result <- append.psp(result, Zsh) } # clip if(!is.null(clip)) result <- result[clip] return(result) } spatstat/R/periodify.R0000755000176000001440000000751112237642727014506 0ustar ripleyusers# # periodify.R # # replicate a pattern periodically # # $Revision: 1.3 $ $Date: 2011/04/17 05:52:50 $ # periodify <- function(X, ...) { UseMethod("periodify") } periodify.ppp <- function(X, nx=1, ny=1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) { # sanity checks if(!missing(nx) || !missing(ny)) { if(is.null(nx)) nx <- 1 if(is.null(ny)) ny <- 1 if(length(nx) != 1 || length(ny) != 1) stop("nx and ny should be single integers") if(nx != round(nx) || ny != round(ny)) stop("nx and ny should be integers") } force(ixy) W <- X$window isrect <- (W$type == "rectangle") if(warn && combine && !isrect) warning("X has a non-rectangular window") else isrect <- isrect && all(diff(nx) == 1) && all(diff(ny) == 1) width <- diff(W$xrange) height <- diff(W$yrange) shifts <- cbind(ixy[,1] * width, ixy[,2] * height) Xshift <- list() for(i in 1:nrow(shifts)) Xshift[[i]] <- shift(X, vec=as.numeric(shifts[i, ])) if(!combine) return(Xshift) Wnew <- if(isrect) { owin(range(range(W$xrange) + range(shifts[,1])), range(range(W$yrange) + range(shifts[,2]))) } else NULL Z <- do.call(superimpose, append(Xshift, list(W=Wnew, check=check))) return(Z) } periodify.psp <- function(X, nx=1, ny=1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) { # sanity checks if(!missing(nx) || 
!missing(ny)) { if(is.null(nx)) nx <- 1 if(is.null(ny)) ny <- 1 if(length(nx) != 1 || length(ny) != 1) stop("nx and ny should be single integers") if(nx != round(nx) || ny != round(ny)) stop("nx and ny should be integers") } force(ixy) W <- X$window isrect <- (W$type == "rectangle") if(warn && combine && !isrect) warning("X has a non-rectangular window") else isrect <- isrect && all(diff(nx) == 1) && all(diff(ny) == 1) width <- diff(W$xrange) height <- diff(W$yrange) shifts <- cbind(ixy[,1] * width, ixy[,2] * height) Xshift <- list() for(i in 1:nrow(shifts)) Xshift[[i]] <- shift(X, vec=as.numeric(shifts[i, ])) if(!combine) return(Xshift) Wnew <- if(isrect) { owin(range(range(W$xrange) + range(shifts[,1])), range(range(W$yrange) + range(shifts[,2]))) } else NULL Z <- do.call(superimpose, append(Xshift, list(W=Wnew, check=check))) return(Z) } periodify.owin <- function(X, nx=1, ny=1, ..., combine=TRUE, warn=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) { # sanity checks if(!missing(nx) || !missing(ny)) { if(is.null(nx)) nx <- 1 if(is.null(ny)) ny <- 1 if(length(nx) != 1 || length(ny) != 1) stop("nx and ny should be single integers") if(nx != round(nx) || ny != round(ny)) stop("nx and ny should be integers") } force(ixy) isrect <- (X$type == "rectangle") if(warn && combine && !isrect) warning("X is not rectangular") else isrect <- isrect && all(diff(nx) == 1) && all(diff(ny) == 1) width <- diff(X$xrange) height <- diff(X$yrange) shifts <- cbind(ixy[,1] * width, ixy[,2] * height) if(combine) { if(isrect) { # result is a rectangle Y <- owin(range(range(X$xrange) + range(shifts[,1])), range(range(X$yrange) + range(shifts[,2]))) } else { # result is another type of window for(i in 1:nrow(shifts)) { Xi <- shift(X, vec=as.numeric(shifts[i, ])) Y <- if(i == 1) Xi else union.owin(Y, Xi) } } } else { # result is a list Y <- list() for(i in 1:nrow(shifts)) Y[[i]] <- shift(X, vec=as.numeric(shifts[i, ])) } return(Y) } 
spatstat/R/idw.R0000755000176000001440000000454412237642727013302 0ustar ripleyusers# # idw.R # # Inverse-distance weighted smoothing # # $Revision: 1.4 $ $Date: 2011/07/25 06:25:10 $ idw <- function(X, power=2, at="pixels", ...) { stopifnot(is.ppp(X) && is.marked(X)) marx <- marks(X) if(is.data.frame(marx)) { if(ncol(marx) > 1) { # multiple columns of marks - process one-by-one out <- list() for(j in 1:ncol(marx)) out[[j]] <- idw(X %mark% marx[,j], power=power, at=at, ...) names(out) <- names(marx) switch(at, pixels = { out <- as.listof(out) }, points = { out <- as.data.frame(out) }) return(out) } else marx <- marx[,1] } if(!is.numeric(marx)) stop("Marks must be numeric") check.1.real(power) switch(at, pixels = { # create grid W <- as.mask(as.owin(X), ...) dim <- W$dim npixels <- prod(dim) # call C DUP <- spatstat.options("dupC") z <- .C("Cidw", x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npoints(X)), xstart = as.double(W$xcol[1]), xstep = as.double(W$xstep), nx = as.integer(dim[2]), ystart = as.double(W$yrow[1]), ystep = as.double(W$ystep), ny = as.integer(dim[1]), power = as.double(power), num = as.double(numeric(npixels)), den = as.double(numeric(npixels)), rat = as.double(numeric(npixels)), DUP = DUP) # PACKAGE = "spatstat") out <- as.im(matrix(z$rat, dim[1], dim[2]), W=W) }, points={ DUP <- spatstat.options("dupC") npts <- npoints(X) z <- .C("idwloo", x = as.double(X$x), y = as.double(X$y), v = as.double(marx), n = as.integer(npts), power = as.double(power), num = as.double(numeric(npts)), den = as.double(numeric(npts)), rat = as.double(numeric(npts)), DUP = DUP) # PACKAGE = "spatstat") out <- z$rat }) return(out) } spatstat/R/istat.R0000755000176000001440000001346212237642727013642 0ustar ripleyusers# # interactive analysis of point patterns # # $Revision: 1.15 $ $Date: 2013/04/25 06:37:43 $ # # istat <- function(x, xname) { if(missing(xname)) xname <- short.deparse(substitute(x)) verifyclass(x, "ppp") # generate simulations 
of CSR for use in envelopes simx <- envelope(x, fun=NULL, nsim=39, internal=list(csr=TRUE, eject="patterns")) # initial value of smoothing parameter sigma0 <- with(x$window, min(diff(xrange),diff(yrange)))/8 # create panel require(rpanel) p <- rp.control(paste("istat(", xname, ")", sep=""), x=x, # point pattern xname=xname, # name of point pattern simx=simx, # simulated realisations of CSR stat="data", envel="none", sigma=sigma0, size=c(600, 400)) # Split panel into two halves # Left half of panel: display # Right half of panel: controls rp.grid(p, "gdisplay", pos=list(row=0,column=0)) rp.grid(p, "gcontrols", pos=list(row=0,column=1)) #----- Display side ------------ # This line is to placate the package checker mytkr2 <- NULL rp.tkrplot(p, mytkr2, do.istat, pos=list(row=0,column=0,grid="gdisplay")) redraw <- function(panel) { rp.tkrreplot(panel, mytkr2) panel } #----- Control side ------------ nextrow <- 0 pozzie <- function(n=nextrow,s='w') list(row=n,column=0,grid="gcontrols",sticky=s) # choice of summary statistic ftable <- c(data="data", density="kernel smoothed", Kest="K-function", Lest="L-function", pcf="pair correlation", Kinhom="inhomogeneous K", Linhom="inhomogeneous L", Fest="empty space function F", Gest="nearest neighbour function G", Jest="J-function") fvals <- names(ftable) flabs <- as.character(ftable) stat <- NULL rp.radiogroup(p, stat, values=fvals, labels=flabs, title="statistic", action=redraw, pos=pozzie(0)) nextrow <- 1 # envelopes? 
envel <- NULL evals <- c("none", "pointwise", "simultaneous") elabs <- c("No simulation envelopes", "Pointwise envelopes under CSR", "Simultaneous envelopes under CSR") rp.radiogroup(p, envel, values=evals, labels=elabs, title="Simulation envelopes", action=redraw, pos=pozzie(nextrow)) nextrow <- nextrow + 1 # smoothing parameters sigma <- NULL rect <- as.rectangle(x$window) winwid <- min(abs(diff(rect$xrange)), abs(diff(rect$yrange))) rp.slider(p, sigma, winwid/80, winwid/2, action=redraw, title="sigma", initval=winwid/8, showvalue=TRUE, pos=pozzie(nextrow, '')) nextrow <- nextrow + 1 pcfbw <- pcfbwinit <- 0.15/sqrt(5 * x$n/area.owin(x$window)) rp.slider(p, pcfbw, pcfbwinit/10, 4 * pcfbwinit, action=redraw, title="bw", initval=pcfbwinit, showvalue=TRUE, pos=pozzie(nextrow, '')) nextrow <- nextrow + 1 # button to print a summary at console rp.button(p, title="Print summary information", action=function(panel) { print(summary(panel$x)); panel}, pos=pozzie(nextrow)) nextrow <- nextrow + 1 # quit button rp.button(p, title="Quit", quitbutton=TRUE, action= function(panel) { panel }, pos=pozzie(nextrow)) invisible(NULL) } # function that updates the plot when the control panel is operated do.istat <- function(panel) { x <- panel$x xname <- panel$xname envel <- panel$envel stat <- panel$stat sigma <- panel$sigma simx <- panel$simx if(stat=="data") { plot(x, main=xname) return(panel) } out <- switch(envel, none=switch(stat, density=density(x, sigma=sigma), Kest=Kest(x), Lest=Lest(x), pcf=pcf(x, bw=panel$pcfbw), Kinhom=Kinhom(x, sigma=sigma), Linhom=Linhom(x, sigma=sigma), Fest=Fest(x), Gest=Gest(x), Jest=Jest(x)), pointwise=switch(stat, density=density(x, sigma=sigma), Kest=envelope(x, Kest, nsim=39, simulate=simx), Lest=envelope(x, Lest, nsim=39, simulate=simx), pcf=envelope(x, pcf, bw=panel$pcfbw, nsim=39, simulate=simx), Kinhom=envelope(x, Kinhom, nsim=39, sigma=sigma, simulate=simx), Linhom=envelope(x, Linhom, nsim=39, sigma=sigma, simulate=simx), Fest=envelope(x, 
Fest, nsim=39, simulate=simx), Gest=envelope(x, Gest, nsim=39, simulate=simx), Jest=envelope(x, Jest, nsim=39, simulate=simx)), simultaneous=switch(stat, density=density(x, sigma=sigma), Kest=envelope(x, Kest, nsim=19, global=TRUE, simulate=simx), Lest=envelope(x, Lest, nsim=19, global=TRUE, simulate=simx), pcf=envelope(x, pcf, bw=panel$pcfbw, nsim=19, global=TRUE, simulate=simx), Kinhom=envelope(x, Kinhom, nsim=19, sigma=sigma, global=TRUE, simulate=simx), Linhom=envelope(x, Linhom, nsim=19, sigma=sigma, global=TRUE, simulate=simx), Fest=envelope(x, Fest, nsim=19, global=TRUE, simulate=simx), Gest=envelope(x, Gest, nsim=19, global=TRUE, simulate=simx), Jest=envelope(x, Jest, nsim=19, global=TRUE, simulate=simx)) ) # plot it if(stat %in% c("density", "Kinhom", "Linhom")) { plot(out, main=paste(stat, "(", xname, ", sigma)", sep="")) if(stat == "density") points(x) } else if(stat == "pcf") plot(out, main=paste("pcf(", xname, ", bw)", sep="")) else plot(out, main=paste(stat, "(", xname, ")", sep="")) return(panel) } spatstat/R/interactions.R0000644000176000001440000001556312237642727015221 0ustar ripleyusers# # # interactions.R # # Works out which interaction is in force for a given point pattern # # $Revision: 1.8 $ $Date: 2007/03/30 08:19:16 $ # # impliedpresence <- function(tags, formula, df, extranames=character(0)) { # Determines, for each row of the data frame df, # whether the variable called tags[j] is required in the formula stopifnot(is.data.frame(df)) stopifnot(inherits(formula, "formula")) stopifnot(is.character(tags)) stopifnot(is.character(extranames)) allvars <- variablesinformula(formula) if(any(tags %in% names(df))) stop(paste(sQuote("tags"), "conflicts with the name of a column of", sQuote("df"))) if(any(extranames %in% names(df))) stop(paste(sQuote("extranames"), "conflicts with the name of a column of", sQuote("df"))) # answer is a matrix nvars <- length(tags) nrows <- nrow(df) answer <- matrix(TRUE, nrows, nvars) # expand data frame with zeroes 
for each tags and extranames for(v in unique(c(tags, extranames))) df[ , v] <- 0 # loop for(i in seq(nrow(df))) { # make a fake data frame for the formula # using the data frame entries from row i # (includes 0 values for all other variables) pseudat <- df[i, , drop=FALSE] # use this to construct a fake model matrix mof0 <- model.frame(formula, pseudat) mom0 <- model.matrix(formula, mof0) for(j in seq(nvars)) { # Reset the variable called tags[j] to 1 pseudatj <- pseudat pseudatj[ , tags[j]] <- 1 # Now create the fake model matrix mofj <- model.frame(formula, pseudatj) momj <- model.matrix(formula, mofj) # Compare the two matrices answer[i,j] <- any(momj != mom0) } } return(answer) } active.interactions <- function(object) { stopifnot(inherits(object, "mppm")) interaction <- object$Inter$interaction iformula <- object$iformula environment(iformula) <- nenv <- new.env() ninter <- object$Inter$ninter itags <- object$Inter$itags iused <- object$Inter$iused trivial <- object$Inter$trivial # names of variables dat <- object$data datanames <- names(dat) dfnames <- summary(dat)$dfnames nondfnames <- datanames[!(datanames %in% dfnames)] # extract data-frame values dfdata <- as.data.frame(dat[, dfnames, drop=FALSE], warn=FALSE) # determine which interaction(s) are in force answer <- impliedpresence(itags, iformula, dfdata, nondfnames) colnames(answer) <- names(interaction) return(answer) } impliedcoefficients <- function(object, tag) { stopifnot(inherits(object, "mppm")) stopifnot(is.character(tag) && length(tag) == 1) fitobj <- object$Fit$FIT Vnamelist <- object$Fit$Vnamelist fitter <- object$Fit$fitter interaction <- object$Inter$interaction ninteract <- object$Inter$ninteract trivial <- object$Inter$trivial iused <- object$Inter$iused itags <- object$Inter$itags if(!(tag %in% itags)) stop(paste("Argument", dQuote("tag"), "is not one of the interaction names")) # (0) Set up # Identify the columns of the glm data frame # that are associated with this interpoint interaction 
vnames <- Vnamelist[[tag]] if(!is.character(vnames)) stop("Internal error - wrong format for vnames") # The answer is a matrix of coefficients, # with one row for each point pattern, # and one column for each vname answer <- matrix(, nrow=object$npat, ncol=length(vnames)) colnames(answer) <- vnames # (1) make a data frame of covariates # Names of all columns in glm data frame allnames <- names(object$Fit$moadf) # Extract the design covariates df <- as.data.frame(object$data, warn=FALSE) # Names of all covariates other than design covariates othernames <- allnames[!(allnames %in% names(df))] # Add columns in which all other covariates are set to 0 for(v in othernames) df[, v] <- 0 # (2) evaluate linear predictor opt <- options(warn= -1) eta0 <- predict(fitobj, newdata=df, type="link") options(opt) # (3) for each vname in turn, # set the value of the vname to 1 and predict again for(j in seq(vnames)) { df[[vnames[j] ]] <- 1 opt <- options(warn= -1) etaj <- predict(fitobj, newdata=df, type="link") options(opt) answer[ ,j] <- etaj - eta0 } return(answer) } illegal.iformula <- function(ifmla, itags, dfvarnames) { # THIS IS TOO STRINGENT! # Check validity of the interaction formula. # ifmla is the formula. # itags is the character vector of interaction names. # Check whether the occurrences of `itags' in `iformula' are valid: # e.g. no functions applied to `itags[i]'. 
# Returns NULL if legal, otherwise a character string stopifnot(inherits(ifmla, "formula")) stopifnot(is.character(itags)) # formula must not have a LHS if(length(ifmla) > 2) return("iformula must not have a left-hand side") # variables in formula must be interaction tags or data frame variables varsinf <- variablesinformula(ifmla) if(!all(ok <- varsinf %in% c(itags, dfvarnames))) return(paste( ngettext(sum(!ok), "variable", "variables"), paste(dQuote(varsinf[!ok]), collapse=", "), "not allowed in iformula")) # if formula uses no interaction tags, it's trivial if(!any(itags %in% variablesinformula(ifmla))) return(NULL) # create terms object tt <- attributes(terms(ifmla)) # extract all variables appearing in the formula vars <- as.list(tt$variables)[-1] nvars <- length(vars) varstrings <- sapply(vars, function(x) paste(as.expression(x))) # Each variable may be a name or an expression v.is.name <- sapply(vars, is.name) # a term may be an expression like sin(x), poly(x,y,degree=2) v.args <- lapply(vars, function(x) all.vars(as.expression(x))) v.n.args <- sapply(v.args, length) v.has.itag <- sapply(v.args, function(x,y) { any(y %in% x) }, y=itags) # interaction tags may only appear as names, not in functions if(any(nbg <- v.has.itag & !v.is.name)) return(paste("interaction tags may not appear inside a function:", paste(dQuote(varstrings[nbg]), collapse=", "))) # Interaction between two itags is not defined # Inspect the higher-order terms fax <- tt$factors if(prod(dim(fax)) == 0) return(NULL) # rows are first order terms, columns are terms of order >= 1 fvars <- rownames(fax) fterms <- colnames(fax) fv.args <- lapply(fvars, function(x) all.vars(as.expression(parse(text=x)))) ft.args <- lapply(fterms, function(tum, fax, fv.args) { basis <- (fax[, tum] != 0) unlist(fv.args[basis]) }, fax=fax, fv.args=fv.args) ft.itags <- lapply(ft.args, intersect, y=itags) if(any(sapply(ft.itags, length) > 1)) return("Interaction between itags is not defined") return(NULL) } 
spatstat/R/by.ppp.R0000755000176000001440000000060712237642727013723 0ustar ripleyusers# # by.ppp.R # # $Revision: 1.5 $ $Date: 2011/05/18 01:29:48 $ # by.ppp <- function(data, INDICES=marks(data), FUN, ...) { if(missing(INDICES)) INDICES <- marks(data, dfok=FALSE) if(missing(FUN)) stop("FUN is missing") y <- split(data, INDICES) z <- list() for(i in seq_along(y)) z[[i]] <- FUN(y[[i]], ...) names(z) <- names(y) z <- as.listof(z) return(z) } spatstat/R/diagnoseppm.R0000755000176000001440000003012312237642727015015 0ustar ripleyusers# # diagnoseppm.R # # Makes diagnostic plots based on residuals or energy weights # # $Revision: 1.34 $ $Date: 2013/08/29 03:55:04 $ # diagnose.ppm.engine <- function(object, ..., type="eem", typename, opt, sigma=NULL, rbord = reach(object), compute.sd=TRUE, compute.cts=TRUE, rv=NULL, oldstyle=FALSE) { if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") # quadrature points Q <- quad.ppm(object) U <- union.quad(Q) Qweights <- w.quad(Q) # -------------- Calculate residuals/weights ------------------- # Discretised residuals if(type == "eem") { residval <- if(!is.null(rv)) rv else eem(object, check=FALSE) residval <- as.numeric(residval) X <- data.ppm(object) Y <- X %mark% residval } else { if(!is.null(rv) && !inherits(rv, "msr")) stop("rv should be a measure (object of class msr)") residobj <- if(!is.null(rv)) rv else residuals.ppm(object, type=type, check=FALSE) residval <- with(residobj, "increment") if(ncol(as.matrix(residval)) > 1) stop("Not implemented for vector-valued residuals; use [.msr to split into separate components") Y <- U %mark% residval } # Atoms and density of measure Ymass <- NULL Ycts <- NULL Ydens <- NULL if(compute.cts) { if(type == "eem") { Ymass <- Y Ycts <- U %mark% (-1) Ydens <- as.im(-1, Y$window) } else { atoms <- with(residobj, "is.atom") masses <- with(residobj, "discrete") cts <- with(residobj, "density") if(!is.null(atoms) && !is.null(masses) && !is.null(cts)) { Ymass <- (U 
%mark% masses)[atoms] Ycts <- U %mark% cts # remove NAs (as opposed to zero cif points) if(!all(ok <- is.finite(cts))) { U <- U[ok] Ycts <- Ycts[ok] cts <- cts[ok] Qweights <- Qweights[ok] } # interpolate continuous part to yield an image for plotting if(type == "inverse" && all(cts > 0)) { Ydens <- as.im(-1, Y$window) } else if(is.stationary.ppm(object) && is.poisson.ppm(object)) { # all values of `cts' will be equal Ydens <- as.im(cts[1], Y$window) } else { smallsigma <- max(nndist(Ycts)) Ujitter <- U Ujitter$x <- U$x + runif(U$n, -smallsigma, smallsigma) Ujitter$y <- U$y + runif(U$n, -smallsigma, smallsigma) Ydens <- Smooth(Ujitter %mark% marks(Ycts), sigma=smallsigma, weights=Qweights, edge=TRUE, ...) } } } } #---------------- Erode window --------------------------------- # ## Compute windows W <- Y$window # Erode window if required clip <- (rbord > 0) if(clip) { Wclip <- erosion.owin(W, rbord) Yclip <- Y[Wclip] Qweightsclip <- Qweights[inside.owin(U, , Wclip)] if(!is.null(Ycts)) Ycts <- Ycts[Wclip] if(!is.null(Ydens)) Ydens <- Ydens[Wclip, drop=FALSE] } else { Wclip <- W Yclip <- Y } # ------------ start collecting results ------------------------- result <- list(type=type, clip=clip, Y=Y, W=W, Yclip=Yclip, Ymass=Ymass, Ycts=Ycts, Ydens=Ydens) # ------------- smoothed field ------------------------------ Z <- NULL if(opt$smooth | opt$xcumul | opt$ycumul | opt$xmargin | opt$ymargin) { if(is.null(sigma)) sigma <- 0.1 * diameter(Wclip) Z <- density.ppp(Yclip, sigma, weights=Yclip$marks, edge=TRUE, ...) 
} if(opt$smooth) result$smooth <- list(Z = Z, sigma=sigma) # -------------- marginals of smoothed field ------------------------ if(opt$xmargin) { xZ <- apply(Z$v, 2, sum, na.rm=TRUE) * Z$xstep if(type == "eem") ExZ <- apply(Z$v, 2, function(column) { sum(!is.na(column)) }) * Z$xstep else ExZ <- numeric(length(xZ)) result$xmargin <- list(x=Z$xcol, xZ=xZ, ExZ=ExZ) } if(opt$ymargin) { yZ <- apply(Z$v, 1, sum, na.rm=TRUE) * Z$ystep if(type == "eem") EyZ <- apply(Z$v, 1, function(roww) { sum(!is.na(roww)) }) * Z$ystep else EyZ <- numeric(length(yZ)) result$ymargin <- list(y=Z$yrow, yZ=yZ, EyZ=EyZ) } # -------------- cumulative (lurking variable) plots -------------- if(opt$xcumul) result$xcumul <- lurking(object, covariate=x.quad(Q), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, plot.it=FALSE, typename=typename, covname="x coordinate", oldstyle=oldstyle, check=FALSE, ...) if(opt$ycumul) result$ycumul <- lurking(object, covariate=y.quad(Q), type=type, clipwindow= if(clip) Wclip else NULL, rv=residval, plot.sd=compute.sd, plot.it=FALSE, typename=typename, covname="y coordinate", oldstyle=oldstyle, check=FALSE, ...) 
# -------------- summary numbers -------------- if(opt$sum) result$sum <- list(marksum=sum(Yclip$marks, na.rm=TRUE), area.Wclip=area.owin(Wclip), area.quad=if(clip) sum(Qweightsclip) else sum(Qweights), range=if(!is.null(Z)) range(Z) else NULL) return(invisible(result)) } ######################################################################## diagnose.ppm <- function(object, ..., type="raw", which="all", sigma=NULL, rbord = reach(object), cumulative=TRUE, plot.it = TRUE, rv = NULL, compute.sd=TRUE, compute.cts=TRUE, typename, check=TRUE, repair=TRUE, oldstyle=FALSE) { if(is.marked.ppm(object)) stop("Sorry, this is not yet implemented for marked models") if(check && damaged.ppm(object)) { if(!repair) stop("object format corrupted; try update(object, use.internal=TRUE)") message("object format corrupted; repairing it.") object <- update(object, use.internal=TRUE) } # ------------- Interpret arguments -------------------------- # edge effect avoidance if(!is.finite(rbord)) { if(missing(rbord)) stop(paste(sQuote("rbord"), "must be specified; the model has infinite range")) else stop(paste(sQuote("rbord"), "is infinite")) } # whether window should be clipped clip <- (rbord > 0) # match type argument type <- pickoption("type", type, c(eem="eem", raw="raw", inverse="inverse", pearson="pearson", Pearson="pearson")) if(missing(typename)) typename <- switch(type, eem="exponential energy weights", raw="raw residuals", inverse="inverse-lambda residuals", pearson="Pearson residuals") # 'which' is multiple choice with exact matching optionlist <- c("all", "marks", "smooth", "x", "y", "sum") if(!all(m <- which %in% optionlist)) stop(paste("Unrecognised choice(s) of", paste(sQuote("which"), ":", sep=""), paste(which[!m], collapse=", "))) opt <- list() opt$all <- "all" %in% which opt$marks <- ("marks" %in% which) | opt$all opt$smooth <- ("smooth" %in% which) | opt$all opt$xmargin <- (("x" %in% which) | opt$all) && !cumulative opt$ymargin <- (("y" %in% which) | opt$all) && 
!cumulative opt$xcumul <- (("x" %in% which) | opt$all) && cumulative opt$ycumul <- (("y" %in% which) | opt$all) && cumulative opt$sum <- ("sum" %in% which) | opt$all # compute and plot estimated standard deviations? # yes for Poisson, no for other models, unless overridden if(!missing(compute.sd)) plot.sd <- compute.sd else plot.sd <- list(...)$plot.sd if(is.null(plot.sd)) plot.sd <- is.poisson.ppm(object) if(missing(compute.sd)) compute.sd <- plot.sd # interpolate the density of the residual measure? if(missing(compute.cts)) { plot.neg <- resolve.defaults(list(...), formals(plot.diagppm)["plot.neg"])$plot.neg # only if it is needed for the mark plot compute.cts <- opt$marks && (plot.neg != "discrete") } # ------- DO THE CALCULATIONS ----------------------------------- RES <- diagnose.ppm.engine(object, type=type, typename=typename, opt=opt, sigma=sigma, rbord=rbord, compute.sd=compute.sd, compute.cts=compute.cts, rv=rv, oldstyle=oldstyle, ...) RES$typename <- typename RES$opt <- opt RES$compute.sd <- compute.sd RES$compute.cts <- compute.cts class(RES) <- "diagppm" # ------- PLOT -------------------------------------------------- if(plot.it) plot(RES, ...) 
return(RES) } plot.diagppm <- function(x, ..., which, plot.neg="image", plot.smooth="imagecontour", plot.sd=TRUE, spacing=0.1, srange=NULL, monochrome=FALSE, main=NULL) { opt <- x$opt if(!missing(which)) { oldopt <- opt newopt <- list() newopt$all <- "all" %in% which newopt$marks <- ("marks" %in% which) | newopt$all newopt$smooth <- ("smooth" %in% which) | newopt$all newopt$xmargin <- (("x" %in% which) | newopt$all) && oldopt$xmargin newopt$ymargin <- (("y" %in% which) | newopt$all) && oldopt$ymargin newopt$xcumul <- (("x" %in% which) | newopt$all) && oldopt$xcumul newopt$ycumul <- (("y" %in% which) | newopt$all) && oldopt$ycumul newopt$sum <- ("sum" %in% which) | newopt$all illegal <- (unlist(newopt) > unlist(oldopt)) if(any(illegal)) { offending <- paste(names(newopt)[illegal], collapse=", ") whinge <- paste("cannot display the following components;\n", "they were not computed: - \n", offending, "\n") stop(whinge) } opt <- newopt } if(!(x$compute.sd) && plot.sd) { if(!missing(plot.sd)) warning("can't plot standard deviations; they were not computed") plot.sd <- FALSE } if(!(x$compute.cts) && (plot.neg != "discrete") && (opt$marks || opt$all)) { if(!missing(plot.neg)) warning("can't plot continuous component of residuals; it was not computed") plot.neg <- "discrete" } if(opt$all) resid4plot(x, plot.neg, plot.smooth, spacing, srange,monochrome, main, ...) else resid1plot(x, opt, plot.neg, plot.smooth, srange, monochrome, main, ...) } print.diagppm <- function(x, ...) 
{ opt <- x$opt typename <- x$typename cat(paste("Model diagnostics (", typename, ")\n", sep="")) cat("Diagnostics available:\n") optkey <- list(all="four-panel plot", marks=paste("mark plot", if(!x$compute.cts) "(discrete representation only)" else NULL), smooth="smoothed residual field", xmargin="x marginal density", ymargin="y marginal density", xcumul="x cumulative residuals", ycumul="y cumulative residuals", sum="sum of all residuals") avail <- unlist(optkey[names(opt)[unlist(opt)]]) names(avail) <- NULL cat(paste("\t", paste(avail, collapse="\n\t"), "\n", sep="")) if(opt$sum) { xs <- x$sum windowname <- if(x$clip) "clipped window" else "entire window" cat(paste("sum of", typename, "in", windowname, "=", signif(sum(xs$marksum),4), "\n")) cat(paste("area of", windowname, "=", signif(xs$area.Wclip, 4), "\n")) cat(paste("quadrature area =", signif(xs$area.quad, 4), "\n")) } if(opt$smooth) cat(paste("range of smoothed field = [", paste(signif(range(x$smooth$Z$v, na.rm=TRUE),4), collapse=","), "]\n")) return(invisible(NULL)) } spatstat/R/edgeTrans.R0000755000176000001440000000660612237642727014434 0ustar ripleyusers# # edgeTrans.R # # $Revision: 1.11 $ $Date: 2011/05/18 01:51:52 $ # # Translation edge correction weights # # edge.Trans(X) compute translation correction weights # for each pair of points from point pattern X # # edge.Trans(X, Y, W) compute translation correction weights # for all pairs of points X[i] and Y[j] # (i.e. one point from X and one from Y) # in window W # # edge.Trans(X, Y, W, paired=TRUE) # compute translation correction weights # for each corresponding pair X[i], Y[i]. 
# # To estimate the K-function see the idiom in "Kest.S" # ####################################################################### edge.Trans <- function(X, Y=X, W=X$window, exact=FALSE, paired=FALSE, trim=spatstat.options("maxedgewt")) { X <- as.ppp(X, W) W <- X$window x <- X$x y <- X$y nX <- X$n Y <- as.ppp(Y, W) xx <- Y$x yy <- Y$y nY <- Y$n if(paired && (nX != nY)) stop("X and Y should have equal length when paired=TRUE") # For irregular polygons, exact evaluation is very slow; # so use pixel approximation, unless exact=TRUE if(W$type == "polygonal" && !exact) W <- as.mask(W) switch(W$type, rectangle={ # Fast code for this case wide <- diff(W$xrange) high <- diff(W$yrange) if(!paired) { DX <- abs(outer(x,xx,"-")) DY <- abs(outer(y,yy,"-")) } else { DX <- abs(xx - x) DY <- abs(yy - y) } weight <- wide * high / ((wide - DX) * (high - DY)) }, polygonal={ # This code is SLOW a <- area.owin(W) if(!paired) { weight <- matrix(, nrow=nX, ncol=nY) if(nX > 0 && nY > 0) { for(i in seq_len(nX)) { X.i <- c(x[i], y[i]) for(j in seq_len(nY)) { shiftvector <- X.i - c(xx[j],yy[j]) Wshift <- shift(W, shiftvector) b <- overlap.owin(W, Wshift) weight[i,j] <- a/b } } } } else { weight <- numeric(nX) if(nX > 0) { for(i in seq_len(nX)) { shiftvector <- c(x[i],y[i]) - c(xx[i],yy[i]) Wshift <- shift(W, shiftvector) b <- overlap.owin(W, Wshift) weight[i] <- a/b } } } }, mask={ # make difference vectors if(!paired) { DX <- outer(x,xx,"-") DY <- outer(y,yy,"-") } else { DX <- x - xx DY <- y - yy } # compute set covariance of window g <- setcov(W) # evaluate set covariance at these vectors gvalues <- lookup.im(g, as.vector(DX), as.vector(DY), naok=TRUE, strict=FALSE) if(!paired) # reshape gvalues <- matrix(gvalues, nrow=nX, ncol=nY) weight <- area.owin(W)/gvalues } ) # clip high values if(length(weight) > 0) weight <- pmin.int(weight, trim) if(!paired) weight <- matrix(weight, nrow=nX, ncol=nY) return(weight) } spatstat/R/applynbd.R0000755000176000001440000000477412237642727014335 0ustar 
ripleyusers# applynbd.R # # $Revision: 1.15 $ $Date: 2013/04/25 06:37:43 $ # # applynbd() # For each point, identify either # - all points within distance R # - the closest N points # - those points satisfying some constraint # and apply the function FUN to them # # markstat() # simple application of applynbd ################################################################# applynbd <- function(X, FUN, N=NULL, R=NULL, criterion=NULL, exclude=FALSE, ...) { if(is.null(N) && is.null(R) && is.null(criterion)) stop(paste("must specify at least one of the arguments", commasep(sQuote(c("N","R","criterion"))))) X <- as.ppp(X) npts <- npoints(X) # compute matrix of pairwise distances dist <- pairdist(X) # compute row ranks (avoid ties) rankit <- function(x) { u <- numeric(length(x)); u[fave.order(x)] <- seq_along(x); return(u) } drank <- t(apply(dist, 1, rankit)) - 1 included <- matrix(TRUE, npts, npts) if(!is.null(R)) { # select points closer than R included <- included & (dist <= R) } if(!is.null(N)) { # select N closest points if(N < 1) stop("Value of N must be at least 1") if(exclude) included <- included & (drank <= N) else included <- included & (drank <= N-1) } if(!is.null(criterion)) { # some funny criterion for(i in 1:npts) included[i,] <- included[i,] & criterion(dist[i,], drank[i,]) } if(exclude) diag(included) <- FALSE # bind into an array a <- array(c(included, dist, drank, row(included)), dim=c(npts,npts,4)) # what to do with a[i, , ] if(!is.marked(X)) go <- function(ai, Z, fun, ...) { which <- as.logical(ai[,1]) distances <- ai[,2] dranks <- ai[,3] here <- ai[1,4] fun(Y=Z[which], current=c(x=Z$x[here], y=Z$y[here]), dists=distances[which], dranks=dranks[which], ...) } else go <- function(ai, Z, fun, ...) { which <- as.logical(ai[,1]) distances <- ai[,2] dranks <- ai[,3] here <- ai[1,4] fun(Y=Z[which], current=Z[here], dists=distances[which], dranks=dranks[which], ...) } # do it result <- apply(a, 1, go, Z=X, fun=FUN, ...) 
return(result) } markstat <- function(X, fun, N=NULL, R=NULL, ...) { verifyclass(X, "ppp") stopifnot(is.function(fun)) statfun <- function(Y, current, dists, dranks, func, ...) { func(marks(Y, dfok=TRUE), ...) } applynbd(X, statfun, R=R, N=N, func=fun, ...) } spatstat/R/psstG.R0000755000176000001440000001314712237642727013616 0ustar ripleyusers# # psstG.R # # Pseudoscore residual for unnormalised G (saturation process) # # $Revision: 1.4 $ $Date: 2013/05/01 07:39:38 $ # ################################################################################ # psstG <- function(object, r=NULL, breaks=NULL, ..., trend=~1, interaction=Poisson(), rbord=reach(interaction), truecoef=NULL, hi.res=NULL) { if(inherits(object, "ppm")) fit <- object else if(inherits(object, "ppp")) fit <- ppm(quadscheme(object, ...), trend=trend, interaction=interaction, rbord=rbord) else if(inherits(object, "quad")) fit <- ppm(object, trend=trend, interaction=interaction, rbord=rbord) else stop("object should be a fitted point process model or a point pattern") rfixed <- !is.null(r) || !is.null(breaks) # Extract data and quadrature points Q <- quad.ppm(fit, drop=FALSE) X <- data.ppm(fit) U <- union.quad(Q) Z <- is.data(Q) # indicator data/dummy E <- equalsfun.quad(Q) WQ <- w.quad(Q) # quadrature weights # integrals will be restricted to quadrature points # that were actually used in the fit # USED <- getglmsubset(fit) if(fit$correction == "border") { rbord <- fit$rbord b <- bdist.points(U) USED <- (b > rbord) } else USED <- rep.int(TRUE, U$n) # basic statistics Win <- X$window npoints <- X$n area <- area.owin(Win) lambda <- npoints/area # adjustments to account for restricted domain of pseudolikelihood if(any(!USED)) { npoints.used <- sum(Z & USED) area.used <- sum(WQ[USED]) lambda.used <- npoints.used/area.used } else { npoints.used <- npoints area.used <- area lambda.used <- lambda } # determine breakpoints for r values rmaxdefault <- rmax.rule("G", Win, lambda) breaks <- handle.r.b.args(r, 
breaks, Win, rmaxdefault=rmaxdefault) rvals <- breaks$r rmax <- breaks$max # residuals res <- residuals(fit, type="raw",drop=FALSE, coefs=truecoef, quad=hi.res) resval <- with(res, "increment") rescts <- with(res, "continuous") # absolute weight for continuous integrals wc <- -rescts # initialise fv object df <- data.frame(r=rvals, theo=0) desc <- c("distance argument r", "value 0 corresponding to perfect fit") ans <- fv(df, "r", substitute(bold(R)~Delta~V[S](r), NULL), "theo", . ~ r, alim=c(0, rmax), c("r","%s[theo](r)"), desc, fname="bold(R)~Delta~V[S]") # First phase: ................................................. # nearest neighbours (quadrature point to data point) nn <- nncross(U, X, seq(U$n), seq(X$n)) # excludes identical pairs dIJ <- nn$dist I <- seq(U$n) J <- nn$which DD <- (I <= X$n) # TRUE for data points wcIJ <- wc okI <- USED[I] # histogram of nndist for data points only (without edge correction) Bsum <- cumsum(whist(dIJ[DD & okI], breaks$val)) # weighted histogram of nncross (without edge correction) Bint <- cumsum(whist(dIJ[okI], breaks$val, wcIJ[okI])) # residual Bres <- Bsum - Bint # tack on ans <- bind.fv(ans, data.frame(dat1=Bsum, com1=Bint, res1=Bres), c("%s[dat1](r)", "%s[com1](r)", "%s[res1](r)"), c("phase 1 pseudosum (contribution to %s)", "phase 1 pseudocompensator (contribution to %s)", "phase 1 pseudoresidual (contribution to %s)")) # Second phase: ................................................ 
# close pairs (quadrature point to data point) close <- crosspairs(U, X, rmax) dIJ <- close$d I <- close$i J <- close$j UI <- U[I] XJ <- X[J] EIJ <- E(I, J) # TRUE if points are identical, U[I[k]] == X[J[k]] ZI <- Z[I] # TRUE if U[I[k]] is a data point DD <- ZI & !EIJ # TRUE for pairs of distinct data points only nDD <- sum(DD) okI <- USED[I] # residual weights wIJ <- ifelseXY(EIJ, rescts[I], resval[I]) # absolute weight for continuous integrals wc <- -rescts wcIJ <- -rescts[I] # nearest and second-nearest neighbour distances in X nn1 <- nndist(X) nn2 <- nndist(X, k=2) nn1J <- nn1[J] nn2J <- nn2[J] # weird use of the reduced sample estimator # data sum: RSX <- Kount(dIJ[DD & okI], nn2J[DD & okI], nn2J[ZI & okI], breaks) Csum <- RSX$numerator # integral: if(spatstat.options("psstG.remove.zeroes")) okE <- okI & !EIJ else okE <- okI RSD <- Kwtsum(dIJ[okE], nn1J[okE], wcIJ[okE], nn1, rep.int(1, length(nn1)), breaks) Cint <- RSD$numerator # Cres <- Bres + Csum - Cint # tack on ans <- bind.fv(ans, data.frame(dat2=Csum, com2=Cint, res2=Cres, dat=Bsum+Csum, com=Bint+Cint, res=Bres+Cres), c("%s[dat2](r)", "%s[com2](r)", "%s[res2](r)", "Sigma~Delta~V[S](r)", "bold(C)~Delta~V[S](r)", "bold(R)~Delta~V[S](r)"), c("phase 2 pseudosum (contribution to %s)", "phase 2 pseudocompensator (contribution to %s)", "phase 2 pseudoresidual (contribution to %s)", "pseudosum (contribution to %s)", "pseudocompensator (contribution to %s)", "pseudoresidual function %s"), "res") # restrict choice of curves in default plot fvnames(ans, ".") <- c("dat", "com", "res", "theo") # return(ans) } spatstat/R/addvar.R0000755000176000001440000003066212237642727013760 0ustar ripleyusers# # addvar.R # # added variable plot # # $Revision: 1.2 $ $Date: 2013/05/01 05:39:46 $ # addvar <- function(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) { if(missing(covname)) covname <- 
sensiblevarname(deparse(substitute(covariate)), "X") callstring <- paste(deparse(sys.call()), collapse = "") if(is.null(adjust)) adjust <- 1 bw.input <- match.arg(bw.input) # validate model stopifnot(is.ppm(model)) if(is.null(getglmfit(model))) model <- update(model, forcefit=TRUE) modelcall <- model$callstring if(is.null(modelcall)) modelcall <- model$call # extract spatial locations Q <- quad.ppm(model) datapoints <- Q$data quadpoints <- union.quad(Q) Z <- is.data(Q) wts <- w.quad(Q) nQ <- n.quad(Q) # fitted intensity lam <- fitted(model, type="trend") # subset of quadrature points used to fit model subQset <- getglmsubset(model) if(is.null(subQset)) subQset <- rep.int(TRUE, nQ) # restriction to subregion insubregion <- if(!is.null(subregion)) { inside.owin(quadpoints, w=subregion) } else rep.int(TRUE, nQ) ################################################################ # Pearson residuals from point process model yr <- residuals(model, type="Pearson") yresid <- with(yr, "increment") # averaged (then sum with weight 'wts') yresid <- yresid/wts ################################################################# # Covariates # # covariate data frame df <- getglmdata(model) if(!all(c("x", "y") %in% names(df))) { xy <- as.data.frame(quadpoints) notxy <- !(colnames(df) %in% c("x", "y")) other <- df[, notxy] df <- cbind(xy, other) } # avail.covars <- names(df) # covariates used in model used.covars <- model.covariates(model) fitted.covars <- model.covariates(model, offset=FALSE) # ################################################################# # identify the covariate # if(!is.character(covariate)) { # Covariate is some kind of data, treated as external covariate if(covname %in% fitted.covars) stop(paste("covariate named", dQuote(covname), "is already used in model")) covvalues <- evalCovariate(covariate, quadpoints) # validate covvalues if(is.null(covvalues)) stop("Unable to extract covariate values") else if(length(covvalues) != npoints(quadpoints)) 
stop(paste("Internal error: number of covariate values =", length(covvalues), "!=", npoints(quadpoints), "= number of quadrature points")) # tack onto data frame covdf <- data.frame(covvalues) names(covdf) <- covname df <- cbind(df, covdf) } else { # Argument is name of covariate covname <- covariate if(length(covname) > 1) stop("Must specify only one covariate") # if(covname %in% fitted.covars) stop(paste("covariate", dQuote(covname), "already used in model")) # if(!(covname %in% avail.covars)) stop(paste("covariate", dQuote(covname), "not available")) # covvalues <- df[, covname] } ################################################################ # Pearson residuals from weighted linear regression of new covariate on others rhs <- formula(model) fo <- as.formula(paste(covname, paste(rhs, collapse=" "))) fit <- lm(fo, data=df, weights=lam * wts) xresid <- residuals(fit, type="pearson")/sqrt(wts) if(crosscheck) { message("Cross-checking...") X <- model.matrix(fo, data=df) V <- diag(lam * wts) sqrtV <- diag(sqrt(lam * wts)) Info <- t(X) %*% V %*% X H <- sqrtV %*% X %*% solve(Info) %*% t(X) %*% sqrtV nQ <- length(lam) Id <- diag(1, nQ, nQ) xresid.pearson <- (Id - H) %*% sqrtV %*% covvalues xresid.correct <- xresid.pearson/sqrt(wts) abserr <- max(abs(xresid - xresid.correct), na.rm=TRUE) relerr <- abserr/diff(range(xresid.correct, finite=TRUE)) if(is.finite(relerr) && relerr > 0.01) { warning("Large relative error in residual computation") } message("Done.") } # experiment suggests residuals(fit, "pearson") == xresid.correct # and residuals(fit) equivalent to # covvalues - X %*% solve(t(X) %*% V %*% X) %*% t(X) %*% V %*% covvalues ################################################################# # check for NA's etc # locations that must have finite values operative <- if(bw.restrict) insubregion & subQset else subQset nbg <- !is.finite(xresid) | !is.finite(yresid) if(any(offending <- nbg & operative)) { warning(paste(sum(offending), "out of", length(offending), 
"covariate values discarded because", ngettext(sum(offending), "it is", "they are"), "NA or infinite")) } ################################################################# # Restrict data to 'operative' points # with finite values ok <- !nbg & operative Q <- Q[ok] xresid <- xresid[ok] yresid <- yresid[ok] covvalues <- covvalues[ok] df <- df[ok, ] lam <- lam[ok] wts <- wts[ok] Z <- Z[ok] insubregion <- insubregion[ok] #################################################### # assemble data for smoothing xx <- xresid yy <- yresid ww <- wts if(makefrom <- is.null(from)) from <- min(xresid) if(maketo <- is.null(to)) to <- max(xresid) #################################################### # determine smoothing bandwidth # from 'operative' data switch(bw.input, quad = { # bandwidth selection from covariate values at all quadrature points numer <- unnormdensity(xx, weights=yy * ww, bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- numer$bw }, points= { # bandwidth selection from covariate values at data points fake <- unnormdensity(xx[Z], weights=1/lam[Z], bw=bw, adjust=adjust, n=n,from=from,to=to, ...) sigma <- fake$bw numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) }) #################################################### # Restrict data and recompute numerator if required if(!is.null(subregion) && !bw.restrict) { # Bandwidth was computed on all data # Restrict to subregion and recompute numerator xx <- xx[insubregion] yy <- yy[insubregion] ww <- ww[insubregion] lam <- lam[insubregion] Z <- Z[insubregion] if(makefrom) from <- min(xx) if(maketo) to <- max(xx) numer <- unnormdensity(xx, weights=yy * ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) } #################################################### # Compute denominator denom <- unnormdensity(xx,weights=ww, bw=sigma, adjust=1, n=n,from=from,to=to, ...) 
#################################################### # Determine recommended plot range xr <- range(xresid[Z], finite=TRUE) alim <- xr + 0.1 * diff(xr) * c(-1,1) alim <- intersect.ranges(alim, c(from, to)) #################################################### # Compute terms interpolate <- function(x,y) { if(inherits(x, "density") && missing(y)) approxfun(x$x, x$y, rule=2) else approxfun(x, y, rule=2) } numfun <- interpolate(numer) denfun <- interpolate(denom) xxx <- numer$x ratio <- function(y, x) { ifelseXB(x != 0, y/x, NA) } yyy <- ratio(numfun(xxx), denfun(xxx)) # Null variance estimation # smooth with weight 1 and smaller bandwidth tau <- sigma/sqrt(2) varnumer <- unnormdensity(xx,weights=ww, bw=tau,adjust=1, n=n,from=from,to=to, ...) varnumfun <- interpolate(varnumer) vvv <- ratio(varnumfun(xxx), 2 * sigma * sqrt(pi) * denfun(xxx)^2) safesqrt <- function(x) { ok <- is.finite(x) & (x >= 0) y <- rep.int(NA_real_, length(x)) y[ok] <- sqrt(x[ok]) return(y) } twosd <- 2 * safesqrt(vvv) # pack into fv object rslt <- data.frame(rcov=xxx, rpts=yyy, theo=0, var=vvv, hi=twosd, lo=-twosd) given.covars <- used.covars if(length(given.covars) == 0) given.covars <- "1" given <- paste("|", paste(given.covars, collapse=", ")) xlab <- paste("r", paren(paste(covname, given))) ylab <- paste("r", paren(paste("points", given))) desc <- c(paste("Pearson residual of covariate", covname, given), paste("Smoothed Pearson residual of point process", given), "Null expected value of point process residual", "Null variance of point process residual", "Upper limit of pointwise 5%% significance band", "Lower limit of pointwise 5%% significance band") rslt <- fv(rslt, argu="rcov", ylab=as.name(ylab), valu="rpts", fmla= (. 
~ rcov), alim=alim, labl=c(xlab, "%s", "0", "var[%s]", "hi", "lo"), desc=desc, fname=ylab) attr(rslt, "dotnames") <- c("rpts", "theo", "hi", "lo") # data associated with quadrature points reserved <- (substr(colnames(df), 1, 4) == ".mpl") isxy <- colnames(df) %in% c("x", "y") dfpublic <- cbind(df[, !(reserved | isxy)], data.frame(xresid, yresid)) attr(rslt, "spatial") <- union.quad(Q) %mark% dfpublic # auxiliary data attr(rslt, "stuff") <- list(covname = covname, xresid = xresid, yresid = yresid, covvalues = covvalues, wts = wts, bw = bw, adjust = adjust, sigma = sigma, used.covars = used.covars, modelcall = modelcall, callstring = callstring, xlim = c(from, to), xlab = xlab, ylab = ylab, lmcoef = coef(fit), bw.input = bw.input, bw.restrict = bw.restrict, restricted = !is.null(subregion)) # finish class(rslt) <- c("addvar", class(rslt)) return(rslt) } print.addvar <- function(x, ...) { cat("Added variable plot diagnostic (class addvar)\n") s <- attr(x, "stuff") mc <- paste(s$modelcall, collapse="") cat(paste("for the covariate", dQuote(s$covname), "for the fitted model:", if(nchar(mc) <= 30) "" else "\n\t", mc, "\n\n")) if(identical(s$restricted, TRUE)) cat("\t--Diagnostic computed for a subregion--\n") cat(paste("Call:", s$callstring, "\n")) cat(paste("Actual smoothing bandwidth sigma =", signif(s$sigma,5), "\n\n")) NextMethod("print") } plot.addvar <- function(x, ..., do.points=FALSE) { xname <- deparse(substitute(x)) s <- attr(x, "stuff") covname <- s$covname xresid <- s$xresid yresid <- s$yresid # check whether it's the default plot argh <- list(...) 
isfo <- unlist(lapply(argh, inherits, what="formula")) defaultplot <- !any(isfo) # set x label if it's the default plot xlab0 <- if(defaultplot) s$xlab else NULL # adjust y limits if intending to plot points as well ylimcover <- if(do.points) range(yresid, finite=TRUE) else NULL # do.call("plot.fv", resolve.defaults(list(x), list(...), list(main=xname, shade=c("hi", "lo"), xlab=xlab0, legend=FALSE, ylim.covers=ylimcover))) # plot points if(do.points) do.call(points, resolve.defaults(list(x=xresid, y=yresid), list(...), list(pch=3, cex=0.5))) return(invisible(x)) } spatstat/R/Gmulti.R0000755000176000001440000001572612237642727013764 0ustar ripleyusers# Gmulti.S # # Compute estimates of nearest neighbour distance distribution functions # for multitype point patterns # # S functions: # Gcross G_{ij} # Gdot G_{i\bullet} # Gmulti (generic) # # $Revision: 4.40 $ $Date: 2013/04/25 06:37:43 $ # ################################################################################ "Gcross" <- function(X, i, j, r=NULL, breaks=NULL, ..., correction=c("rs", "km", "han")) { # computes G_{ij} estimates # # X marked point pattern (of class 'ppp') # i,j the two mark values to be compared # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X, dfok=FALSE)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] # I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") if(i == j) result <- Gest(X[I], r=r, breaks=breaks, ...) 
else { J <- (marx == j) if(sum(J) == 0) stop("No points are of type j") result <- Gmulti(X, I, J, r=r, breaks=breaks, disjoint=FALSE, ..., correction=correction) } iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(G[i,j](r), list(i=iname, j=jname)), sprintf("G[list(%s,%s)]", iname, jname), new.yexp=substitute(G[list(i,j)](r), list(i=iname,j=jname))) return(result) } "Gdot" <- function(X, i, r=NULL, breaks=NULL, ..., correction=c("km","rs","han")) { # Computes estimate of # G_{i\bullet}(t) = # P( a further point of pattern in B(0,t)| a type i point at 0 ) # # X marked point pattern (of class ppp) # # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] I <- (marx == i) if(sum(I) == 0) stop("No points are of type i") J <- rep.int(TRUE, X$n) # i.e. all points # result <- Gmulti(X, I, J, r, breaks, disjoint=FALSE, ..., correction=correction) iname <- make.parseable(paste(i)) result <- rebadge.fv(result, substitute(G[i ~ dot](r), list(i=iname)), paste("G[", iname, "~ symbol(\"\\267\")]"), new.yexp=substitute(G[i ~ symbol("\267")](r), list(i=iname))) return(result) } ########## "Gmulti" <- function(X, I, J, r=NULL, breaks=NULL, ..., disjoint=NULL, correction=c("rs", "km", "han")) { # # engine for computing the estimate of G_{ij} or G_{i\bullet} # depending on selection of I, J # # X marked point pattern (of class ppp) # # I,J logical vectors of length equal to the number of points # and identifying the two subsets of points to be # compared. 
# # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # verifyclass(X, "ppp") W <- X$window npts <- npoints(X) area <- area.owin(W) # check I and J I <- ppsubset(X, I) J <- ppsubset(X, J) if(is.null(I) || is.null(J)) stop("I and J must be valid subset indices") nI <- sum(I) nJ <- sum(J) if(nI == 0) stop("No points satisfy condition I") if(nJ == 0) stop("No points satisfy condition J") if(is.null(disjoint)) disjoint <- !any(I & J) # choose correction(s) correction.given <- !missing(correction) && !is.null(correction) if(is.null(correction)) correction <- c("rs", "km", "han") correction <- pickoption("correction", correction, c(none="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) # determine breakpoints for r values lamJ <- nJ/area rmaxdefault <- rmax.rule("G", W, lamJ) breaks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) brks <- breaks$val rmax <- breaks$max rvals <- breaks$r zeroes <- numeric(length(rvals)) # initialise fv object df <- data.frame(r=rvals, theo=1-exp(-lamJ * pi * rvals^2)) Z <- fv(df, "r", substitute(G[multi](r), NULL), "theo", . ~ r, c(0,rmax), c("r", "{%s^{pois}}(r)"), c("distance argument r", "theoretical Poisson %s"), fname="Gmulti") # "type I to type J" nearest neighbour distances XI <- X[I] XJ <- X[J] if(disjoint) nnd <- nncross(XI, XJ, what="dist") else { seqnp <- seq_len(npts) iX <- seqnp[I] iY <- seqnp[J] nnd <- nncross(XI, XJ, iX, iY, what="dist") } # distance to boundary from each type i point bdry <- bdist.points(XI) # observations o <- pmin.int(nnd,bdry) # censoring indicators d <- (nnd <= bdry) # # calculate estimates if("none" %in% correction) { # UNCORRECTED e.d.f. 
of nearest neighbour distances: use with care if(npts == 0) edf <- zeroes else { hh <- hist(nnd[nnd <= rmax],breaks=breaks$val,plot=FALSE)$counts edf <- cumsum(hh)/length(nnd) } Z <- bind.fv(Z, data.frame(raw=edf), "hat(%s^{raw})(r)", "uncorrected estimate of %s", "raw") } if("han" %in% correction) { # Hanisch style estimator if(npts == 0) G <- zeroes else { # uncensored distances x <- nnd[d] # weights a <- eroded.areas(W, rvals) # calculate Hanisch estimator h <- hist(x[x <= rmax], breaks=breaks$val, plot=FALSE)$counts G <- cumsum(h/a) G <- G/max(G[is.finite(G)]) } # add to fv object Z <- bind.fv(Z, data.frame(han=G), "hat(%s^{han})(r)", "Hanisch estimate of %s", "han") # modify recommended plot range attr(Z, "alim") <- range(rvals[G <= 0.9]) } if(any(correction %in% c("rs", "km"))) { # calculate Kaplan-Meier and border correction (Reduced Sample) estimators if(npts == 0) result <- data.frame(rs=zeroes, km=zeroes, hazard=zeroes) else { result <- km.rs(o, bdry, d, breaks) result <- as.data.frame(result[c("rs", "km", "hazard")]) } # add to fv object Z <- bind.fv(Z, result, c("hat(%s^{bord})(r)", "hat(%s^{km})(r)", "hazard(r)"), c("border corrected estimate of %s", "Kaplan-Meier estimate of %s", "Kaplan-Meier estimate of hazard function lambda(r)"), "km") # modify recommended plot range attr(Z, "alim") <- range(rvals[result$km <= 0.9]) } nama <- names(Z) fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))]) unitname(Z) <- unitname(X) return(Z) } spatstat/R/satpiece.R0000755000176000001440000001106712237642727014312 0ustar ripleyusers# # # satpiece.S # # $Revision: 1.14 $ $Date: 2012/01/18 11:04:54 $ # # Saturated pairwise interaction process with piecewise constant potential # # SatPiece() create an instance of the process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # SatPiece <- local({ # ..... auxiliary functions ...... 
delSP <- function(i, r, sat) { r <- r[-i] sat <- sat[-i] nr <- length(r) if(nr == 0) return(Poisson()) if(nr == 1) return(Geyer(r, sat)) return(SatPiece(r, sat)) } # ....... template object .......... BlankSatPiece <- list( name = "piecewise constant Saturated pairwise interaction process", creator = "SatPiece", family = "pairsat.family", # evaluated later pot = function(d, par) { r <- par$r nr <- length(r) out <- array(FALSE, dim=c(dim(d), nr)) out[,,1] <- (d < r[1]) if(nr > 1) { for(i in 2:nr) out[,,i] <- (d >= r[i-1]) & (d < r[i]) } out }, par = list(r = NULL, sat=NULL), # filled in later parnames = c("interaction thresholds", "saturation parameters"), init = function(self) { r <- self$par$r sat <- self$par$sat if(!is.numeric(r) || !all(r > 0)) stop("interaction thresholds r must be positive numbers") if(length(r) > 1 && !all(diff(r) > 0)) stop("interaction thresholds r must be strictly increasing") if(!is.numeric(sat) || any(sat < 0)) stop("saturation parameters must be nonnegative numbers") if(any(ceiling(sat) != floor(sat))) warning("saturation parameter has a non-integer value") if(length(sat) != length(r) && length(sat) != 1) stop("vectors r and sat must have equal length") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { r <- self$par$r npiece <- length(r) # extract coefficients gammas <- exp(as.numeric(coeffs)) # name them gn <- gammas names(gn) <- paste("[", c(0,r[-npiece]),",", r, ")", sep="") # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_i", printable=round(gn,4))) }, valid = function(coeffs, self) { # interaction parameters gamma must be # non-NA # finite, if sat > 0 # less than 1, if sat = Inf gamma <- (self$interpret)(coeffs, self)$param$gammas sat <- self$par$sat if(any(is.na(gamma))) return(FALSE) return(all((is.finite(gamma) | sat == 0) & (gamma <= 1 | sat != Inf))) }, project = function(coeffs, self){ loggammas <- as.numeric(coeffs) sat <- self$par$sat r <- self$par$r 
ok <- is.finite(loggammas) & (is.finite(sat) | loggammas <= 0) if(all(ok)) return(NULL) if(!any(ok)) return(Poisson()) bad <- !ok if(spatstat.options("project.fast") || sum(bad) == 1) { # remove smallest threshold with an unidentifiable parameter firstbad <- min(which(bad)) return(delSP(firstbad, r, sat)) } else { # consider all candidate submodels subs <- lapply(which(bad), delSP, r=r, sat=sat) return(subs) } }, irange = function(self, coeffs=NA, epsilon=0, ...) { r <- self$par$r sat <- self$par$sat if(all(is.na(coeffs))) return(2 * max(r)) gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- (abs(log(gamma)) > epsilon) & (sat > 0) if(!any(active)) return(0) else return(2 * max(r[active])) }, version=NULL # added later ) class(BlankSatPiece) <- "interact" SatPiece <- function(r, sat) { instantiate.interact(BlankSatPiece, list(r=r, sat=sat)) } SatPiece }) spatstat/R/pspcross.R0000755000176000001440000001374412237642727014375 0ustar ripleyusers# # pspcross.R # # Intersections of line segments # # $Revision: 1.12 $ $Date: 2013/04/25 06:37:43 $ # # crossing.psp <- function(A,B,fatal=TRUE) { verifyclass(A, "psp") verifyclass(B, "psp") # first check for intersection of windows ABW <- intersect.owin(A$window, B$window, fatal=fatal) if(is.null(ABW)) return(NULL) eps <- .Machine$double.eps na <- A$n eA <- A$ends x0a <- eA$x0 y0a <- eA$y0 dxa <- eA$x1 - eA$x0 dya <- eA$y1 - eA$y0 nb <- B$n eB <- B$ends x0b <- eB$x0 y0b <- eB$y0 dxb <- eB$x1 - eB$x0 dyb <- eB$y1 - eB$y0 useCall <- spatstat.options("crossing.psp.useCall") if(!useCall) { # old C routine DUP <- spatstat.options("dupC") out <- .C("xysegint", na=as.integer(na), x0a=as.double(x0a), y0a=as.double(y0a), dxa=as.double(dxa), dya=as.double(dya), nb=as.integer(nb), x0b=as.double(x0b), y0b=as.double(y0b), dxb=as.double(dxb), dyb=as.double(dyb), eps=as.double(eps), xx=as.double(numeric(na * nb)), yy=as.double(numeric(na * nb)), ta=as.double(numeric(na * nb)), tb=as.double(numeric(na * 
nb)), ok=as.integer(integer(na * nb)), DUP=DUP) # PACKAGE="spatstat") ok <- (matrix(out$ok, na, nb) != 0) xx <- matrix(out$xx, na, nb) yy <- matrix(out$yy, na, nb) xx <- as.vector(xx[ok]) yy <- as.vector(yy[ok]) } else { # new storage.mode(x0a) <- storage.mode(y0a) <- "double" storage.mode(dxa) <- storage.mode(dya) <- "double" storage.mode(x0b) <- storage.mode(y0b) <- "double" storage.mode(dxb) <- storage.mode(dyb) <- "double" storage.mode(eps) <- "double" out <- .Call("Cxysegint", x0a, y0a, dxa, dya, x0b, y0b, dxb, dyb, eps) # PACKAGE="spatstat") xx <- out[[5]] yy <- out[[6]] } result <- ppp(xx, yy, window=ABW, check=FALSE) return(result) } test.crossing.psp <- function(A,B) { # return logical matrix specifying whether A[i] and B[j] cross verifyclass(A, "psp") verifyclass(B, "psp") eps <- .Machine$double.eps na <- A$n eA <- A$ends x0a <- eA$x0 y0a <- eA$y0 dxa <- eA$x1 - eA$x0 dya <- eA$y1 - eA$y0 nb <- B$n eB <- B$ends x0b <- eB$x0 y0b <- eB$y0 dxb <- eB$x1 - eB$x0 dyb <- eB$y1 - eB$y0 DUP <- spatstat.options("dupC") out <- .C("xysi", na=as.integer(na), x0a=as.double(x0a), y0a=as.double(y0a), dxa=as.double(dxa), dya=as.double(dya), nb=as.integer(nb), x0b=as.double(x0b), y0b=as.double(y0b), dxb=as.double(dxb), dyb=as.double(dyb), eps=as.double(eps), ok=as.integer(integer(na * nb)), DUP=DUP) # PACKAGE="spatstat") hit <- (matrix(out$ok, na, nb) != 0) return(hit) } anycrossing.psp <- function(A,B) { # equivalent to: any(test.crossing.psp(A,B)) # Test whether two psp objects have at least one crossing point verifyclass(A, "psp") verifyclass(B, "psp") eps <- .Machine$double.eps na <- A$n eA <- A$ends x0a <- eA$x0 y0a <- eA$y0 dxa <- eA$x1 - eA$x0 dya <- eA$y1 - eA$y0 nb <- B$n eB <- B$ends x0b <- eB$x0 y0b <- eB$y0 dxb <- eB$x1 - eB$x0 dyb <- eB$y1 - eB$y0 DUP <- spatstat.options("dupC") out <- .C("xysiANY", na=as.integer(na), x0a=as.double(x0a), y0a=as.double(y0a), dxa=as.double(dxa), dya=as.double(dya), nb=as.integer(nb), x0b=as.double(x0b), y0b=as.double(y0b), 
dxb=as.double(dxb), dyb=as.double(dyb), eps=as.double(eps), ok=as.integer(integer(1)), DUP=DUP) # PACKAGE="spatstat") hit <- (out$ok != 0) return(hit) } selfcrossing.psp <- function(A) { verifyclass(A, "psp") eps <- .Machine$double.eps n <- A$n eA <- A$ends x0 <- eA$x0 y0 <- eA$y0 dx <- eA$x1 - eA$x0 dy <- eA$y1 - eA$y0 useCall <- spatstat.options("selfcrossing.psp.useCall") if(!useCall) { # old C routine DUP <- spatstat.options("dupC") out <- .C("xysegXint", n=as.integer(n), x0=as.double(x0), y0=as.double(y0), dx=as.double(dx), dy=as.double(dy), eps=as.double(eps), xx=as.double(numeric(n^2)), yy=as.double(numeric(n^2)), ti=as.double(numeric(n^2)), tj=as.double(numeric(n^2)), ok=as.integer(integer(n^2)), DUP=DUP) # PACKAGE="spatstat") ok <- (matrix(out$ok, n, n) != 0) xx <- matrix(out$xx, n, n) yy <- matrix(out$yy, n, n) xx <- as.vector(xx[ok]) yy <- as.vector(yy[ok]) } else { # new storage.mode(x0) <- storage.mode(y0) <- "double" storage.mode(dx) <- storage.mode(dy) <- "double" storage.mode(eps) <- "double" out <- .Call("CxysegXint", x0, y0, dx, dy, eps) # PACKAGE="spatstat") xx <- out[[5]] yy <- out[[6]] } result <- ppp(xx, yy, window=A$window, check=FALSE) return(result) } test.selfcrossing.psp <- function(A) { verifyclass(A, "psp") eps <- .Machine$double.eps n <- A$n eA <- A$ends x0 <- eA$x0 y0 <- eA$y0 dx <- eA$x1 - eA$x0 dy <- eA$y1 - eA$y0 DUP <- spatstat.options("dupC") out <- .C("xysxi", na=as.integer(n), x0=as.double(x0), y0=as.double(y0), dx=as.double(dx), dy=as.double(dy), eps=as.double(eps), ok=as.integer(integer(n*n)), DUP=DUP) # PACKAGE="spatstat") hit <- (matrix(out$ok, n, n) != 0) return(hit) } spatstat/R/distcdf.R0000644000176000001440000000556612237642727014141 0ustar ripleyusers# # distcdf.R # # cdf of |X1-X2| when X1,X2 are iid uniform in W, etc # # $Revision: 1.6 $ $Date: 2013/08/26 09:51:51 $ # distcdf <- function(W, V=W, ..., dW=1, dV=dW, nr=1024) { reflexive <- missing(V) && missing(dV) diffuse <- is.owin(W) && is.owin(V) uniformW <- 
identical(dW, 1) uniformV <- identical(dV, 1) uniform <- uniformW && uniformV if(is.owin(W)) { W <- as.mask(as.owin(W), ...) dW <- as.im(dW, W=W) } else if(is.ppp(W)) { if(uniformW) { # discrete uniform distribution on W dW <- pixellate(W, ...) } else { # dW should be a weight or vector of weights if(!is.vector(dW) || !is.numeric(dW)) stop("If W is a point pattern, dW should be a vector of weights") if(length(dW) == 1) { dW <- rep(dW, npoints(W)) } else stopifnot(length(dW) == npoints(W)) dW <- pixellate(W, weights=dW, ...) } } else stop("W should be a point pattern or a window") if(is.owin(V)) { V <- as.mask(as.owin(V), ...) dV <- as.im(dV, W=V) } else if(is.ppp(V)) { if(uniformV) { # discrete uniform distribution on V dV <- pixellate(V, ...) } else { # dV should be a weight or vector of weights if(!is.vector(dV) || !is.numeric(dV)) stop("If V is a point pattern, dV should be a vector of weights") if(length(dV) == 1) { dV <- rep(dV, npoints(V)) } else stopifnot(length(dV) == npoints(V)) dV <- pixellate(V, weights=dV, ...) } } else stop("V should be a point pattern or a window") # compute if(diffuse && uniform) { # uniform distributions on windows g <- if(reflexive) setcov(W, ...) else setcov(W, V, ...) } else { g <- if(reflexive) imcov(dW) else imcov(dW, dV) } r <- as.im(function(x,y) { sqrt(x^2 + y^2) }, g) rvals <- as.vector(as.matrix(r)) gvals <- as.vector(as.matrix(g)) rgrid <- seq(0, max(rvals), length=nr) h <- whist(rvals, breaks=rgrid, weights=gvals/sum(gvals)) ch <- c(0,cumsum(h)) result <- fv(data.frame(r=rgrid, f=ch), "r", quote(CDF(r)), "f", , range(rvals), c("r","%s(r)"), c("Interpoint distance","Cumulative probability"), fname="CDF") return(result) } bw.frac <- function(X, ..., f=1/4) { X <- as.owin(X) g <- distcdf(X, ...) r <- with(g, .x) Fr <- with(g, .y) iopt <- min(which(Fr >= f)) ropt <- r[iopt] attr(ropt, "f") <- f attr(ropt, "g") <- g class(ropt) <- c("bw.frac", class(ropt)) return(ropt) } print.bw.frac <- function(x, ...) 
{ print(as.numeric(x), ...) } plot.bw.frac <- function(x, ...) { xname <- short.deparse(substitute(x)) g <- attr(x, "g") f <- attr(x, "f") ropt <- as.numeric(x) do.call("plot", resolve.defaults(list(g), list(...), list(main=xname))) abline(v=ropt, lty=3) abline(h=f, lty=3) invisible(NULL) } spatstat/R/morishita.R0000755000176000001440000000224112237642727014506 0ustar ripleyusers# # morishita.R # # $Revision: 1.5 $ $Date: 2013/04/25 06:37:43 $ # miplot <- function(X, ...) { Xname <- short.deparse(substitute(X)) X <- as.ppp(X) W <- X$window N <- X$n if(W$type != "rectangle") stop("Window of X is not a rectangle - Morishita index undefined") a <- min(diff(W$xrange), diff(W$yrange)) maxnquad <- floor(a/mean(nndist(X))) if(maxnquad <= 1) stop("Not enough points for a Morishita plot") mindex <- numeric(maxnquad) for(nquad in 1:maxnquad) { qq <- quadratcount(X, nquad, nquad) tt <- as.vector(as.table(qq)) mindex[nquad] <- length(tt) * sum(tt * (tt-1))/(N*(N-1)) } quadsize <- diameter(W)/(1:maxnquad) unitinfo <- summary(unitname(W))$axis do.call("plot.default", resolve.defaults(list(quadsize, mindex), list(...), list(xlim=c(0,a), ylim=c(0,max(mindex)), xlab=paste("Diameter of quadrat", unitinfo), ylab="Morishita index", main=paste("Morishita plot for", Xname)))) abline(h=1, lty=2) return(invisible(NULL)) } spatstat/R/summary.ppm.R0000755000176000001440000004005012240455270014764 0ustar ripleyusers# # summary.ppm.R # # summary() method for class "ppm" # # $Revision: 1.65 $ $Date: 2013/11/12 16:31:54 $ # # summary.ppm() # print.summary.ppm() # summary.ppm <- local({ covtype <- function(x) { if(is.im(x)) "im" else if(is.function(x)) "function" else if(is.owin(x)) "owin" else if(is.numeric(x) && length(x) == 1) "number" else if(is.factor(x)) "factor" else if(is.integer(x)) "integer" else if(is.numeric(x)) "numeric" else storage.mode(x) } xargs <- function(f) { ar <- names(formals(f))[-(1:2)] return(ar[ar != "..."]) } summary.ppm <- function(object, ..., quick=FALSE) { 
verifyclass(object, "ppm") x <- object y <- list() ####### Extract main data components ######################### QUAD <- object$Q DATA <- QUAD$data TREND <- x$trend INTERACT <- x$interaction if(is.null(INTERACT)) INTERACT <- Poisson() ####### Check version ######################### mpl.ver <- versionstring.ppm(object) int.ver <- versionstring.interact(INTERACT) current <- versionstring.spatstat() virgin <- min(package_version(c(mpl.ver, int.ver))) y$antiquated <- antiquated <- (virgin <= package_version("1.5")) y$old <- old <- (virgin < majorminorversion(current)) y$version <- as.character(virgin) ####### Determine type of model ############################ y$entries <- list() y$no.trend <- identical.formulae(TREND, NULL) || identical.formulae(TREND, ~1) y$trendvar <- trendvar <- variablesinformula(TREND) y$stationary <- y$no.trend || all(trendvar == "marks") y$poisson <- is.poisson.interact(INTERACT) y$marked <- is.marked.ppp(DATA) y$multitype <- is.multitype.ppp(DATA) y$marktype <- if(y$multitype) "multitype" else if(y$marked) "marked" else "unmarked" if(y$marked) y$entries$marks <- marks(DATA) y$name <- paste(if(y$stationary) "Stationary " else "Nonstationary ", if(y$poisson) { if(y$multitype) "multitype " else if(y$marked) "marked " else "" }, INTERACT$name, sep="") ###### Fitting algorithm ######################################## y$method <- x$method y$problems <- x$problems y$fitter <- if(!is.null(x$fitter)) x$fitter else "unknown" if(y$fitter %in% c("glm", "gam")) y$converged <- x$internal$glmfit$converged ###### Coefficients were changed after fit? 
##################### y$projected <- identical(x$projected, TRUE) y$changedcoef <- y$projected || !is.null(x$coef.orig) ###### Extract fitted model coefficients ######################### y$entries$coef <- COEFS <- x$coef y$coef.orig <- x$coef.orig y$entries$Vnames <- Vnames <- x$internal$Vnames y$entries$IsOffset <- x$internal$IsOffset ###### Extract fitted interaction and summarise ################# FITIN <- fitin(x) y$interaction <- summary(FITIN) # Exit here if quick=TRUE if(identical(quick, TRUE)) { class(y) <- "summary.ppm" return(y) } ###### Does it have external covariates? #################### # defaults y <- append(y, list(has.covars = FALSE, covnames = character(0), covars.used = character(0), uses.covars = FALSE, covars.are.df = FALSE, expandable = TRUE, covar.type = character(0), covar.descrip = character(0), has.funcs = FALSE, covfunargs = NULL, has.xargs = FALSE, xargmap = NULL)) if(!antiquated) { covars <- x$covariates y$has.covars <- hc <- !is.null(covars) && (length(covars) > 0) if(hc) { y$covnames <- names(covars) used <- (y$trendvar %in% names(covars)) y$covars.used <- y$trendvar[used] y$uses.covars <- any(used) y$covars.are.df <- is.data.frame(covars) # describe covariates ctype <- unlist(lapply(covars, covtype)) y$expandable <- all(ctype[used] %in%c("function", "number")) names(ctype) <- names(covars) y$covar.type <- ctype y$covar.descrip <- ctype # are there any functions? y$has.funcs <- any(isfun <- (ctype == "function")) # do covariates depend on additional arguments? 
if(y$has.funcs) { y$covfunargs <- x$covfunargs funs <- covars[isfun] fdescrip <- function(f) { if(inherits(f, "distfun")) return("distfun") alist <- paste(names(formals(f)), collapse=", ") paste("function(", alist, ")", sep="") } y$covar.descrip[isfun] <- unlist(lapply(funs, fdescrip)) # find any extra arguments (after args 1 & 2) explicitly named fargs <- lapply(funs, xargs) nxargs <- unlist(lapply(fargs, length)) y$has.xargs <- any(nxargs > 0) if(y$has.xargs) { # identify which function arguments are fixed in the call fmap <- data.frame(Covariate=rep.int(names(funs), nxargs), Argument=unlist(fargs)) fmap$Given <- (fmap$Argument %in% names(y$covfunargs)) y$xargmap <- fmap } } } } else { # Antiquated format # Interpret the function call instead callexpr <- parse(text=x$call) callargs <- names(as.list(callexpr[[1]])) # Data frame of covariates was called 'data' in versions up to 1.4-x y$has.covars <- !is.null(callargs) && !is.na(pmatch("data", callargs)) # conservative guess y$uses.covars <- y$has.covars y$covfunargs <- NULL } ###### Arguments in call #################################### y$args <- x[c("call", "correction", "rbord")] ####### Main data components ######################### y$entries <- append(list(quad=QUAD, data=DATA, interaction=INTERACT), y$entries) if(is.character(quick) && (quick == "entries")) return(y) ####### Summarise data ############################ y$data <- summary(DATA, checkdup=FALSE) y$quad <- summary(QUAD, checkdup=FALSE) if(is.character(quick) && (quick == "no prediction")) return(y) ###### Trend component ######################### y$trend <- list() y$trend$name <- if(y$poisson) "Intensity" else "Trend" y$trend$formula <- if(y$no.trend) NULL else TREND if(y$poisson && y$no.trend) { # uniform Poisson process y$trend$value <- lambda <- exp(COEFS[[1]]) y$trend$label <- switch(y$marktype, unmarked="Uniform intensity", multitype="Uniform intensity for each mark level", marked="Uniform intensity in product space", "") } else 
if(y$stationary) { # stationary switch(y$marktype, unmarked={ # stationary non-poisson non-marked y$trend$label <- "First order term" y$trend$value <- c(beta=exp(COEFS[[1]])) }, multitype={ # stationary, multitype mrk <- marks(DATA) y$trend$label <- if(y$poisson) "Intensities" else "First order terms" # Use predict.ppm to evaluate the fitted intensities lev <- factor(levels(mrk), levels=levels(mrk)) nlev <- length(lev) marx <- list(x=rep.int(0, nlev), y=rep.int(0, nlev), marks=lev) betas <- predict(x, locations=marx, type="trend") names(betas) <- paste("beta_", as.character(lev), sep="") y$trend$value <- betas }, marked={ # stationary, marked y$trend$label <- "Fitted intensity coefficients" y$trend$value <- blankcoefnames(COEFS) }) } else { # not stationary y$trend$label <- "Fitted coefficients for trend formula" # extract trend terms without trying to understand them much if(is.null(Vnames)) trendbits <- COEFS else { agree <- outer(names(COEFS), Vnames, "==") whichbits <- apply(!agree, 1, all) trendbits <- COEFS[whichbits] } y$trend$value <- blankcoefnames(trendbits) } # ----- parameters with SE -------------------------- if(is.character(quick) && (quick == "no variances")) return(y) if(length(COEFS) > 0) { # compute standard errors se <- x$internal$se if(is.null(se)) { vc <- vcov(x, matrix.action="warn") if(!is.null(vc)) { se <- if(is.matrix(vc)) sqrt(diag(vc)) else if(length(vc) == 1) sqrt(vc) else NULL } } if(!is.null(se)) { two <- qnorm(0.975) lo <- COEFS - two * se hi <- COEFS + two * se pval <- 2 * pnorm(abs(COEFS)/se, lower.tail=FALSE) psig <- cut(pval, c(0,0.001, 0.01, 0.05, 1, Inf), labels=c("***", "**", "*", " ", "na"), include.lowest=TRUE) notapplic <- names(COEFS) %in% c("(Intercept)", "log(lambda)") psig[notapplic] <- "na" # table of coefficient estimates with SE and 95% CI y$coefs.SE.CI <- data.frame(Estimate=COEFS, S.E.=se, Ztest=psig, CI95.lo=lo, CI95.hi=hi) } } class(y) <- "summary.ppm" return(y) } summary.ppm }) coef.summary.ppm <- 
function(object, ...) { object$coefs.SE.CI } print.summary.ppm <- function(x, ...) { if(x$old) warning("Model was fitted by an older version of spatstat") if(is.null(x$args)) { # this is the quick version cat(paste(x$name, "\n")) return(invisible(NULL)) } # otherwise - full details cat("Point process model\n") fitter <- if(!is.null(x$fitter)) x$fitter else "unknown" methodchosen <- if(is.null(x$method)) "unspecified method" else if(fitter == "exact") "maximum likelihood" else switch(x$method, mpl={ if(x$poisson) { # Poisson process "maximum likelihood (Berman-Turner approximation)" } else { "maximum pseudolikelihood (Berman-Turner approximation)" } }, logi={ if(!x$poisson) { "maximum pseudolikelihood (logistic regression approximation)" } else { # Poisson process "maximum likelihood (logistic regression approximation)" } }, ho="Huang-Ogata method (approximate maximum likelihood)", paste("unrecognised method", sQuote(x$method))) cat(paste("Fitting method:", methodchosen, "\n")) howfitted <- switch(fitter, exact= "analytically", gam = "using gam()", glm = "using glm()", ho = NULL, paste("using unrecognised fitter", sQuote(fitter))) if(!is.null(howfitted)) cat(paste("Model was fitted", howfitted, "\n")) if(fitter %in% c("glm", "gam")) { if(x$converged) cat("Algorithm converged\n") else cat("*** Algorithm did not converge ***\n") } if(x$projected) cat("Fit was projected to obtain a valid point process model\n") cat("Call:\n") print(x$args$call) if(x$old) cat(paste("** Executed by old spatstat version", x$version, " **\n")) cat(paste("Edge correction:", dQuote(x$args$correction), "\n")) if(x$args$correction == "border") cat(paste("\t[border correction distance r =", x$args$rbord,"]\n")) cat("\n----------------------------------------------------\n") # print summary of quadrature scheme print(x$quad) cat("\n----------------------------------------------------\n") cat("FITTED MODEL:\n\n") # This bit is currently identical to print.ppm() # except for a bit more fanfare # 
and the inclusion of the 'gory details' bit notrend <- x$no.trend stationary <- x$stationary poisson <- x$poisson markeddata <- x$marked multitype <- x$multitype markedpoisson <- poisson && markeddata # ----------- Print model type ------------------- cat(x$name) cat("\n") if(markeddata) mrk <- x$entries$marks if(multitype) { cat("Possible marks: \n") cat(paste(levels(mrk))) } # ----- trend -------------------------- cat(paste("\n\n ---- ", x$trend$name, ": ----\n\n", sep="")) if(!notrend) { cat("Trend formula: ") print(x$trend$formula) if(x$uses.covars) cat(paste("Model depends on external", ngettext(length(x$covars.used), "covariate", "covariates"), commasep(sQuote(x$covars.used)), "\n")) } if(x$has.covars) { if(notrend || !x$uses.covars) cat("Model object contains external covariates\n") isdf <- identical(x$covars.are.df, TRUE) if(!is.null(cd <- x$covar.descrip)) { # print description of each covariate cat(paste("\nCovariates provided", if(isdf) " (in data frame)" else NULL, ":\n", sep="")) namescd <- names(cd) for(i in seq_along(cd)) cat(paste("\t", namescd[i], ": ", cd[i], "\n", sep="")) } if(!is.null(cfa <- x$covfunargs) && length(cfa) > 0) { cat("Covariate function arguments (covfunargs) provided:\n") namescfa <- names(cfa) for(i in seq_along(cfa)) { cat(paste(namescfa[i], "= ")) cfai <- cfa[[i]] if(is.numeric(cfai) && length(cfai) == 1) { cat(paste(cfai, "\n")) } else print(cfa[[i]]) } } } cat(paste("\n", x$trend$label, ":\n", sep="")) tv <- x$trend$value if(!is.list(tv)) print(tv) else for(i in seq_along(tv)) print(tv[[i]]) # table of coefficient estimates with SE and 95% CI if(!is.null(cose <- x$coefs.SE.CI)) { cat("\n") print(cose) } # ---- Interaction ---------------------------- if(!poisson) { cat("\n\n ---- Interaction: -----\n\n") print(x$interaction) } ####### Gory details ################################### cat("\n\n----------- gory details -----\n") COEFS <- x$entries$coef cat("\nFitted regular parameters (theta): \n") print(COEFS) cat("\nFitted 
exp(theta): \n") print(exp(unlist(COEFS))) ##### Warnings issued ####### probs <- x$problems if(!is.null(probs) && is.list(probs) && (length(probs) > 0)) lapply(probs, function(a) { if(is.list(a) && !is.null(p <- a$print)) cat(paste("Problem:\n", p, "\n\n")) }) return(invisible(NULL)) } no.trend.ppm <- function(x) { summary.ppm(x, quick=TRUE)$no.trend } is.stationary <- function(x) { UseMethod("is.stationary") } is.poisson <- function(x) { UseMethod("is.poisson") } is.stationary.ppm <- function(x) { TREND <- x$trend if(is.null(TREND) || identical.formulae(TREND, ~1)) return(TRUE) if(all(variablesinformula(TREND) == "marks")) return(TRUE) return(FALSE) } is.poisson.ppm <- function(x) { stopifnot(is.ppm(x)) y <- x$interaction if(is.null(y)) y <- Poisson() is.poisson.interact(y) } is.marked.ppm <- function(X, ...) { summary.ppm(X, quick=TRUE)$marked } is.multitype.ppm <- function(X, ...) { summary.ppm(X, quick=TRUE)$multitype } is.expandable.ppm <- function(x) { return(identical(summary(x, quick="entries")$expandable, TRUE)) } blankcoefnames <- function(x) { # remove name labels from ppm coefficients # First decide whether there are 'labels within labels' unlabelled <- unlist(lapply(x, function(z) { is.null(names(z)) } )) if(all(unlabelled)) value <- unlist(x) else { value <- list() for(i in seq_along(x)) value[[i]] <- if(unlabelled[i]) unlist(x[i]) else x[[i]] } return(value) } spatstat/R/hyperframe.R0000755000176000001440000003665112252274531014654 0ustar ripleyusers# # hyperframe.R # # $Revision: 1.46 $ $Date: 2013/04/25 06:37:43 $ # hyperframe <- function(..., row.names=NULL, check.rows=FALSE, check.names=TRUE, stringsAsFactors=default.stringsAsFactors()) { aarg <- list(...) 
nama <- names(aarg) # number of columns (= variables) nvars <- length(aarg) if(nvars == 0) { # zero columns - return result <- list(nvars=0, ncases=0, vname=character(0), vtype=factor(, levels=c("dfcolumn","hypercolumn","hyperatom")), vclass=character(0), df=data.frame(), hyperatoms=list(), hypercolumns=list()) class(result) <- c("hyperframe", class(result)) return(result) } # check column names if(is.null(nama)) nama <- paste("V", 1:nvars, sep="") else if(any(unnamed <- (nama == ""))) nama[unnamed] <- paste("V", seq_len(sum(unnamed)), sep="") nama <- make.names(nama, unique=TRUE) names(aarg) <- nama # Each argument must be either # - a vector suitable as a column in a data frame # - a list of objects of the same class # - a single object of some class is.dfcolumn <- function(x) { is.atomic(x) && (is.vector(x) || is.factor(x)) } is.hypercolumn <- function(x) { if(!is.list(x)) return(FALSE) if(length(x) <= 1) return(TRUE) cla <- class(x[[1]]) all(sapply(x, function(xi,cla) { identical(class(xi), cla) }, cla=cla)) } dfcolumns <- sapply(aarg, is.dfcolumn) hypercolumns <- sapply(aarg, is.hypercolumn) hyperatoms <- !(dfcolumns | hypercolumns) # Determine number of rows (= cases) columns <- dfcolumns | hypercolumns if(!any(columns)) ncases <- 1 else { heights <- rep.int(1, nvars) heights[columns] <- unlist(lapply(aarg[columns], length)) u <- unique(heights) if(length(u) > 1) { u <- u[u != 1] if(length(u) > 1) stop(paste("Column lengths are inconsistent:", paste(u, collapse=","))) } ncases <- u if(ncases > 1 && all(heights[dfcolumns] == 1)) # force the data frame to have 'ncases' rows aarg[dfcolumns] <- lapply(aarg[dfcolumns], rep, ncases) } # Collect the data frame columns into a data frame if(!any(dfcolumns)) df <- as.data.frame(matrix(, ncases, 0), row.names=row.names) else { df <- do.call("data.frame", append(aarg[dfcolumns], list(row.names=row.names, check.rows=check.rows, check.names=check.names, stringsAsFactors=stringsAsFactors))) names(df) <- nama[dfcolumns] } # 
Storage type of each variable vtype <- character(nvars) vtype[dfcolumns] <- "dfcolumn" vtype[hypercolumns] <- "hypercolumn" vtype[hyperatoms] <- "hyperatom" vtype=factor(vtype, levels=c("dfcolumn","hypercolumn","hyperatom")) # Class of each variable class1 <- function(x) { class(x)[1] } vclass <- character(nvars) if(any(dfcolumns)) vclass[dfcolumns] <- unlist(lapply(as.list(df), class1)) if(any(hyperatoms)) vclass[hyperatoms] <- unlist(lapply(aarg[hyperatoms], class1)) if(any(hypercolumns)) vclass[hypercolumns] <- unlist(lapply(aarg[hypercolumns], function(x) { class1(x[[1]]) })) # Put the result together result <- list(nvars=nvars, ncases=ncases, vname=nama, vtype=vtype, vclass=vclass, df=df, hyperatoms=aarg[hyperatoms], hypercolumns=aarg[hypercolumns]) class(result) <- c("hyperframe", class(result)) return(result) } is.hyperframe <- function(x) inherits(x, "hyperframe") print.hyperframe <- function(x, ...) { ux <- unclass(x) nvars <- ux$nvars ncases <- ux$ncases if(nvars * ncases == 0) { cat(paste("NULL hyperframe with", ncases, ngettext(ncases, "row (=case)", "rows (=cases)"), "and", nvars, ngettext(nvars, "column (=variable)", "columns (=variables)"), "\n")) } else { cat("Hyperframe:\n") print(as.data.frame(x, discard=FALSE), ...) } return(invisible(NULL)) } dim.hyperframe <- function(x) { with(unclass(x), c(ncases, nvars)) } summary.hyperframe <- function(object, ..., brief=FALSE) { x <- unclass(object) y <- list( nvars = x$nvars, ncases = x$ncases, dim = c(x$ncases, x$nvars), typeframe = data.frame(VariableName=x$vname, Class=x$vclass), storage = x$vtype, col.names = x$vname) classes <- x$vclass names(classes) <- x$vname y$classes <- classes # Ordinary data frame columns df <- x$df y$dfnames <- names(df) y$df <- if(length(df) > 0 && !brief) summary(df) else NULL y$row.names <- row.names(df) class(y) <- c("summary.hyperframe", class(y)) return(y) } print.summary.hyperframe <- function(x, ...) 
{ nvars <- x$nvars ncases <- x$ncases cat(paste(if(nvars * ncases == 0) "NULL" else NULL, "hyperframe with", ncases, ngettext(ncases, "row (=case)", "rows (=cases)"), "and", nvars, ngettext(nvars, "column (=variable)", "columns (=variables)"), "\n")) if(nvars == 0) return(invisible(NULL)) # Variable names and types print(x$typeframe) # Ordinary data frame columns if(!is.null(x$df)) { cat("Summary of data frame columns:\n") print(x$df, ...) } return(invisible(NULL)) } names.hyperframe <- function(x) { unclass(x)$vname } "names<-.hyperframe" <- function(x, value) { x <- unclass(x) stopifnot(is.character(value)) value <- make.names(value) if(length(value) != x$nvars) stop("Incorrect length for vector of names") x$vname <- value names(x$df) <- value[x$vtype == "dfcolumn"] class(x) <- c("hyperframe", class(x)) return(x) } row.names.hyperframe <- function(x) { return(row.names(unclass(x)$df)) } "row.names<-.hyperframe" <- function(x, value) { y <- unclass(x) df <- y$df row.names(df) <- value y$df <- df class(y) <- c("hyperframe", class(y)) return(y) } ## conversion to hyperframe as.hyperframe <- function(x, ...) { UseMethod("as.hyperframe") } as.hyperframe.hyperframe <- function(x, ...) { return(x) } as.hyperframe.data.frame <- function(x, ..., stringsAsFactors=FALSE) { xlist <- if(missing(x)) NULL else as.list(x) do.call("hyperframe", resolve.defaults( xlist, list(...), list(row.names=rownames(x), stringsAsFactors=stringsAsFactors), .StripNull=TRUE)) } as.hyperframe.listof <- function(x, ...) { if(!missing(x)) { xname <- sensiblevarname(short.deparse(substitute(x)), "x") xlist <- list(x) names(xlist) <- xname } else xlist <- NULL do.call("hyperframe", resolve.defaults( xlist, list(...), list(row.names=rownames(x)), .StripNull=TRUE)) } as.hyperframe.default <- function(x, ...) 
{ as.hyperframe(as.data.frame(x, ...)) } #### conversion to other types as.data.frame.hyperframe <- function(x, row.names = NULL, optional = FALSE, ..., discard=TRUE, warn=TRUE) { ux <- unclass(x) if(is.null(row.names)) row.names <- row.names(ux$df) vtype <- ux$vtype vclass <- ux$vclass dfcol <- (vtype == "dfcolumn") if(discard) { nhyper <- sum(!dfcol) if(nhyper > 0 && warn) warning(paste(nhyper, ngettext(nhyper, "variable", "variables"), "discarded in conversion to data frame")) df <- as.data.frame(ux$df, row.names=row.names, optional=optional, ...) } else { lx <- as.list(x) nrows <- ux$ncases vclassstring <- paste("(", vclass, ")", sep="") if(any(!dfcol)) lx[!dfcol] <- lapply(as.list(vclassstring[!dfcol]), function(x,n) { rep.int(x,n)}, n=nrows) df <- do.call("data.frame", append(lx, list(row.names=row.names))) } return(df) } as.list.hyperframe <- function(x, ...) { ux <- unclass(x) nama <- ux$vname names(nama) <- nama out <- lapply(nama, function(nam, x) { x[, nam, drop=TRUE] }, x=x) return(out) } # evaluation eval.hyper <- function(e, h, simplify=TRUE, ee=NULL) { .Deprecated("with.hyperframe", package="spatstat") if(is.null(ee)) ee <- as.expression(substitute(e)) with.hyperframe(h, simplify=simplify, ee=ee) } with.hyperframe <- function(data, expr, ..., simplify=TRUE, ee=NULL, enclos=NULL) { if(!inherits(data, "hyperframe")) stop("data must be a hyperframe") if(is.null(ee)) ee <- as.expression(substitute(expr)) if(is.null(enclos)) enclos <- parent.frame() n <- nrow(data) out <- vector(mode="list", length=n) for(i in 1:n) { rowi <- data[i,, drop=FALSE] outi <- eval(ee, as.list(rowi), enclos) if(!is.null(outi)) out[[i]] <- outi } names(out) <- row.names(data) if(simplify && all(unlist(lapply(out, is.vector)))) { # if all results are atomic vectors of equal length, # return a matrix or vector. 
lenfs <- unlist(lapply(out, length)) if(all(unlist(lapply(out, is.atomic))) && length(unique(lenfs)) == 1) { out <- t(as.matrix(as.data.frame(out))) row.names(out) <- row.names(data) out <- out[,,drop=TRUE] return(out) } } out <- hyperframe(result=out, row.names=row.names(data))$result return(out) } cbind.hyperframe <- function(...) { aarg <- list(...) narg <- length(aarg) if(narg == 0) return(hyperframe()) namarg <- names(aarg) if(is.null(namarg)) namarg <- rep.int("", narg) ishyper <- unlist(lapply(aarg, inherits, what="hyperframe")) isdf <- unlist(lapply(aarg, inherits, what="data.frame")) columns <- list() for(i in 1:narg) { if(ishyper[i] || isdf[i]){ if(ncol(aarg[[i]]) > 0) { newcolumns <- as.list(aarg[[i]]) if(namarg[i] != "") names(newcolumns) <- paste(namarg[i], ".", names(newcolumns), sep="") columns <- append(columns, newcolumns) } } else { nextcolumn <- list(aarg[[i]]) names(nextcolumn) <- namarg[i] columns <- append(columns, nextcolumn) } } result <- do.call("hyperframe", columns) return(result) } rbind.hyperframe <- function(...) { argh <- list(...) 
if(length(argh) == 0) return(NULL) # convert them all to hyperframes argh <- lapply(argh, as.hyperframe) # nargh <- length(argh) if(nargh == 1) return(argh[[1]]) # check for compatibility of dimensions & names dfs <- lapply(argh, as.data.frame, discard=FALSE) dfall <- do.call(rbind, dfs) # check that data frame columns also match dfs0 <- lapply(argh, as.data.frame, discard=TRUE, warn=FALSE) df0all <- do.call(rbind, dfs0) # assemble data rslt <- list() nam <- names(dfall) nam0 <- names(df0all) for(k in seq_along(nam)) { nama <- nam[k] if(nama %in% nam0) { # data frame column: already made rslt[[k]] <- dfall[,k] } else { # hypercolumns or hyperatoms: extract them hdata <- lapply(argh, function(x,nama) { x[, nama, drop=TRUE] }, nama=nama) # append them hh <- hdata[[1]] for(j in 2:nargh) { hh <- append(hh, hdata[[j]]) } rslt[[k]] <- hh } } # make hyperframe names(rslt) <- nam out <- do.call(hyperframe, append(rslt, list(stringsAsFactors=FALSE))) return(out) } plot.hyperframe <- function(x, e, ..., main, arrange=TRUE, nrows=NULL, ncols=NULL, parargs=list(mar=c(1,1,3,1) * marsize), marsize=0.1) { xname <- short.deparse(substitute(x)) main <- if(!missing(main)) main else xname if(missing(e)) { # default: plot first column that contains objects ok <- (summary(x)$storage %in% c("hypercolumn", "hyperatom")) if(any(ok)) { j <- min(which(ok)) x <- x[,j, drop=TRUE] x <- as.listof(x) plot(x, ..., main=main, arrange=arrange, nrows=nrows, ncols=ncols) return(invisible(NULL)) } else { # hyperframe does not contain any objects # invoke plot.data.frame x <- as.data.frame(x) plot(x, ..., main=main) return(invisible(NULL)) } } if(!is.language(e)) stop(paste("Argument e should be a call or an expression;", "use quote(...) 
or expression(...)")) ee <- as.expression(e) if(!arrange) { # No arrangement specified: just evaluate the plot expression 'nr' times with(x, ee=ee) return(invisible(NULL)) } # Arrangement # Decide whether to plot a main header banner <- (sum(nchar(as.character(main))) > 0) if(length(main) > 1) main <- paste(main, collapse="\n") nlines <- if(!is.character(main)) 1 else length(unlist(strsplit(main, "\n"))) # determine arrangement of plots # arrange like mfrow(nrows, ncols) plus a banner at the top n <- summary(x)$ncases if(is.null(nrows) && is.null(ncols)) { nrows <- as.integer(floor(sqrt(n))) ncols <- as.integer(ceiling(n/nrows)) } else if(!is.null(nrows) && is.null(ncols)) ncols <- as.integer(ceiling(n/nrows)) else if(is.null(nrows) && !is.null(ncols)) nrows <- as.integer(ceiling(n/ncols)) else stopifnot(nrows * ncols >= length(x)) nblank <- ncols * nrows - n # declare layout mat <- matrix(c(seq_len(n), numeric(nblank)), byrow=TRUE, ncol=ncols, nrow=nrows) heights <- rep.int(1, nrows) if(banner) { # Increment existing panel numbers # New panel 1 is the banner panels <- (mat > 0) mat[panels] <- mat[panels] + 1 mat <- rbind(rep.int(1,ncols), mat) heights <- c(0.1 * (1 + nlines), heights) } # initialise plot layout(mat, heights=heights) # plot banner if(banner) { opa <- par(mar=rep.int(0,4), xpd=TRUE) plot(numeric(0),numeric(0),type="n",ann=FALSE,axes=FALSE, xlim=c(-1,1),ylim=c(-1,1)) cex <- resolve.defaults(list(...), list(cex.title=2))$cex.title text(0,0,main, cex=cex) } # plot panels npa <- do.call("par", parargs) if(!banner) opa <- npa with(x, ee=ee) # revert layout(1) par(opa) return(invisible(NULL)) } str.hyperframe <- function(object, ...) 
{ d <- dim(object) x <- unclass(object) argh <- resolve.defaults(list(...), list(nest.lev=0, indent.str=" ..")) cat(paste("'hyperframe':\t", d[1], ngettext(d[1], "row", "rows"), "and", d[2], ngettext(d[2], "column", "columns"), "\n")) nr <- d[1] nc <- d[2] if(nc > 0) { vname <- x$vname vclass <- x$vclass vtype <- as.character(x$vtype) indentstring <- with(argh, paste(rep.int(indent.str, nest.lev), collapse="")) for(j in 1:nc) { tag <- paste("$", vname[j]) switch(vtype[j], dfcolumn={ desc <- vclass[j] if(nr > 0) { vals <- object[1:min(nr,3),j,drop=TRUE] vals <- paste(paste(format(vals), collapse=" "), "...") } else vals <- "" }, hypercolumn=, hyperatom={ desc <- "objects of class" vals <- vclass[j] }) cat(paste(paste(indentstring, tag, sep=""), ":", desc, vals, "\n")) } } return(invisible(NULL)) } spatstat/R/fryplot.R0000755000176000001440000000354512237642727014216 0ustar ripleyusers# # fryplot.R # # $Revision: 1.6 $ $Date: 2013/04/25 06:37:43 $ # fryplot <- function(X, ..., width=NULL, from=NULL, to=NULL) { Xname <- short.deparse(substitute(X)) X <- as.ppp(X) n <- npoints(X) ismarked <- is.marked(X) seqn <- seq_len(n) from <- if(is.null(from)) seqn else seqn[from] to <- if(is.null(to)) seqn else seqn[to] b <- as.rectangle(X) halfspan <- with(b, c(diff(xrange), diff(yrange)))/2 if(!is.null(width)) { halfwidth <- ensure2vector(width)/2 halfspan <- pmin.int(halfspan, halfwidth) } bb <- owin(c(-1,1) * halfspan[1], c(-1,1) * halfspan[2]) do.call("plot.owin", resolve.defaults(list(bb), list(...), list(invert=TRUE), list(main=paste("Fry plot of", Xname)))) xx <- X$x[to] yy <- X$y[to] if(ismarked) { marx <- as.data.frame(marks(X)) marx <- marx[to, ,drop=FALSE] } for(i in from) { noti <- (to != i) dxi <- xx[noti] - xx[i] dyi <- yy[noti] - yy[i] oki <- (abs(dxi) < halfspan[1]) & (abs(dyi) < halfspan[2]) if(any(oki)) { mki <- if(ismarked) marx[noti, , drop=FALSE] else NULL dXi <- ppp(x=dxi[oki], y=dyi[oki], window=bb, marks=mki[oki,], check=FALSE) plot(dXi, add=TRUE, ...) 
} } return(invisible(NULL)) } frypoints <- function(X) { X <- as.ppp(X) b <- as.rectangle(X) bb <- owin(c(-1,1) * diff(b$xrange), c(-1,1) * diff(b$yrange)) n <- X$n xx <- X$x yy <- X$y dx <- outer(xx, xx, "-") dy <- outer(yy, yy, "-") nondiag <- matrix(TRUE, n, n) diag(nondiag) <- FALSE DX <- as.vector(dx[nondiag]) DY <- as.vector(dy[nondiag]) Fry <- ppp(DX, DY, window=bb, check=FALSE) if(is.marked(X)) { marx <- as.data.frame(marks(X)) rowind <- row(nondiag)[nondiag] marks(Fry) <- marx[rowind, ] } return(Fry) } spatstat/R/nndist.R0000644000176000001440000002521512252313136013773 0ustar ripleyusers# # nndist.R # # nearest neighbour distances (nndist) and identifiers (nnwhich) # # $Revision: 1.5 $ $Date: 2013/12/12 10:55:33 $ # nndist <- function(X, ...) { UseMethod("nndist") } nndist.ppp <- local({ nndist.ppp <- function(X, ..., k=1, by=NULL, method="C") { verifyclass(X, "ppp") trap.extra.arguments(..., .Context="In nndist.ppp") if(is.null(by)) # usual case return(nndist.default(X$x, X$y, k=k, by=by, method=method)) return(nndistby(X, k=k, by=by)) } nndistby <- function(X, k, by) { # split by factor idX <- seq_len(npoints(X)) Y <- split(X %mark% idX, f=by, un=FALSE) distY <- lapply(Y, nndistsub, XX=X, iX=idX, k=k) result <- do.call("cbind", distY) return(result) } nndistsub <- function(Z, XX, iX, k) { nncross(XX, Z, iX=iX, iY=marks(Z), k=k, what="dist") } nndist.ppp }) nndist.default <- function(X, Y=NULL, ..., k=1, by=NULL, method="C") { # computes the vector of nearest-neighbour distances # for the pattern of points (x[i],y[i]) # xy <- xy.coords(X,Y)[c("x","y")] x <- xy$x y <- xy$y # validate n <- length(x) if(length(y) != n) stop("lengths of x and y do not match") # other arguments ignored trap.extra.arguments(..., .Context="In nndist.default") # split by factor ? 
if(!is.null(by)) { X <- as.ppp(xy, W=bounding.box.xy) return(nndist(X, by=by, k=k)) } # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # trivial cases if(n <= 1) { # empty pattern => return numeric(0) # or pattern with only 1 point => return Inf nnd <- matrix(Inf, nrow=n, ncol=kmax) nnd <- nnd[,k, drop=TRUE] return(nnd) } # number of neighbours that are well-defined kmaxcalc <- min(n-1, kmax) # calculate k-nn distances for k <= kmaxcalc if(kmaxcalc == 1) { # calculate nearest neighbour distance only switch(method, interpreted={ # matrix of squared distances between all pairs of points sq <- function(a, b) { (a-b)^2 } squd <- outer(x, x, sq) + outer(y, y, sq) # reset diagonal to a large value so it is excluded from minimum diag(squd) <- Inf # nearest neighbour distances nnd <- sqrt(apply(squd,1,min)) }, C={ nnd<-numeric(n) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) DUP <- spatstat.options("dupC") z<- .C("nndistsort", n= as.integer(n), x= as.double(x[o]), y= as.double(y[o]), nnd= as.double(nnd), as.double(big), DUP=DUP) nnd[o] <- z$nnd }, stop(paste("Unrecognised method", sQuote(method))) ) } else { # case kmaxcalc > 1 switch(method, interpreted={ if(n <= 1000) { # form n x n matrix of squared distances D2 <- pairdist.default(x, y, method=method, squared=TRUE) # find k'th smallest squared distance diag(D2) <- Inf NND2 <- t(apply(D2, 1, sort))[, 1:kmaxcalc] nnd <- sqrt(NND2) } else { # avoid creating huge matrix # handle one row of D at a time NND2 <- matrix(numeric(n * kmaxcalc), nrow=n, ncol=kmaxcalc) for(i in seq_len(n)) { D2i <- (x - x[i])^2 + (y - y[i])^2 D2i[i] <- Inf NND2[i,] <- sort(D2i)[1:kmaxcalc] } nnd <- sqrt(NND2) } }, C={ nnd<-numeric(n * 
kmaxcalc) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) DUP <- spatstat.options("dupC") z<- .C("knndsort", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), nnd = as.double(nnd), huge = as.double(big), DUP=DUP) nnd <- matrix(nnd, nrow=n, ncol=kmaxcalc) nnd[o, ] <- matrix(z$nnd, nrow=n, ncol=kmaxcalc, byrow=TRUE) }, stop(paste("Unrecognised method", sQuote(method))) ) } # post-processing if(kmax > kmaxcalc) { # add columns of Inf infs <- matrix(Inf, nrow=n, ncol=kmax-kmaxcalc) nnd <- cbind(nnd, infs) } if(kmax > 1) colnames(nnd) <- paste0("dist.", 1:kmax) if(length(k) < kmax) { # select only the specified columns nnd <- nnd[, k, drop=TRUE] } return(nnd) } nnwhich <- function(X, ...) { UseMethod("nnwhich") } nnwhich.ppp <- local({ nnwhich.ppp <- function(X, ..., k=1, by=NULL, method="C") { verifyclass(X, "ppp") trap.extra.arguments(..., .Context="In nnwhich.ppp") if(is.null(by)) return(nnwhich.default(X$x, X$y, k=k, method=method)) return(nnwhichby(X, k=k, by=by)) } nnwhichby <- function(X, k, by) { # split by factor idX <- seq_len(npoints(X)) Y <- split(X %mark% idX, f=by, un=FALSE) whichY <- lapply(Y, nnwhichsub, XX=X, iX=idX, k=k) result <- do.call("cbind", whichY) return(result) } nnwhichsub <- function(Z, XX, iX, k) { # marks(Z) gives original serial numbers of subset Z iY <- marks(Z) Zid <- nncross(XX, Z, iX=iX, iY=iY, k=k, what="which") nk <- length(k) if(nk == 1) { Yid <- iY[Zid] } else { Zid <- as.vector(as.matrix(Zid)) Yid <- iY[Zid] Yid <- data.frame(which=matrix(Yid, ncol=nk)) } return(Yid) } nnwhich.ppp }) nnwhich.default <- function(X, Y=NULL, ..., k=1, by=NULL, method="C") { # identifies nearest neighbour of each point in # the pattern of points (x[i],y[i]) # xy <- xy.coords(X,Y)[c("x","y")] x <- xy$x y <- xy$y # validate n <- length(x) if(length(y) != n) stop("lengths of x and y do not match") # other arguments ignored trap.extra.arguments(..., .Context="In nnwhich.default") # split by factor ? 
if(!is.null(by)) { X <- as.ppp(xy, W=bounding.box.xy) return(nnwhich(X, by=by, k=k)) } # k can be a single integer or an integer vector if(length(k) == 0) stop("k is an empty vector") else if(length(k) == 1) { if(k != round(k) || k <= 0) stop("k is not a positive integer") } else { if(any(k != round(k)) || any(k <= 0)) stop(paste("some entries of the vector", sQuote("k"), "are not positive integers")) } k <- as.integer(k) kmax <- max(k) # special cases if(n <= 1) { # empty pattern => return integer(0) # or pattern with only 1 point => return NA nnw <- matrix(as.integer(NA), nrow=n, ncol=kmax) nnw <- nnw[,k, drop=TRUE] return(nnw) } # number of neighbours that are well-defined kmaxcalc <- min(n-1, kmax) # identify k-nn for k <= kmaxcalc if(kmaxcalc == 1) { # identify nearest neighbour only switch(method, interpreted={ # matrix of squared distances between all pairs of points sq <- function(a, b) { (a-b)^2 } squd <- outer(x, x, sq) + outer(y, y, sq) # reset diagonal to a large value so it is excluded from minimum diag(squd) <- Inf # nearest neighbours nnw <- apply(squd,1,which.min) }, C={ nnw <- integer(n) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) DUP <- spatstat.options("dupC") z<- .C("nnwhichsort", n = as.integer(n), x = as.double(x[o]), y = as.double(y[o]), nnwhich = as.integer(nnw), huge = as.double(big), DUP=DUP) witch <- z$nnwhich # sic if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") nnw[o] <- o[witch] }, stop(paste("Unrecognised method", sQuote(method))) ) } else { # case kmaxcalc > 1 switch(method, interpreted={ if(n <= 1000) { # form n x n matrix of squared distances D2 <- pairdist.default(x, y, method=method, squared=TRUE) # find k'th smallest squared distance diag(D2) <- Inf nnw <- t(apply(D2, 1, fave.order))[, 1:kmaxcalc] } else { # avoid creating huge matrix # handle one row of D at a time nnw <- matrix(as.integer(NA), nrow=n, 
ncol=kmaxcalc) for(i in seq_len(n)) { D2i <- (x - x[i])^2 + (y - y[i])^2 D2i[i] <- Inf nnw[i,] <- fave.order(D2i)[1:kmaxcalc] } } }, C={ nnw <- matrix(integer(n * kmaxcalc), nrow=n, ncol=kmaxcalc) o <- fave.order(y) big <- sqrt(.Machine$double.xmax) DUP <- spatstat.options("dupC") z<- .C("knnsort", n = as.integer(n), kmax = as.integer(kmaxcalc), x = as.double(x[o]), y = as.double(y[o]), nnd = as.double(numeric(n * kmaxcalc)), nnwhich = as.integer(nnw), huge = as.double(big), DUP=DUP) witch <- z$nnwhich # sic witch <- matrix(witch, nrow=n, ncol=kmaxcalc, byrow=TRUE) if(any(witch <= 0)) stop("Internal error: non-positive index returned from C code") if(any(witch > n)) stop("Internal error: index returned from C code exceeds n") # convert back to original ordering nnw[o,] <- matrix(o[witch], nrow=n, ncol=kmaxcalc) }, stop(paste("Unrecognised method", sQuote(method))) ) } # post-processing if(kmax > kmaxcalc) { # add columns of NA's nas <- matrix(as.numeric(NA), nrow=n, ncol=kmax-kmaxcalc) nnw <- cbind(nnw, nas) } if(kmax > 1) colnames(nnw) <- paste0("which.", 1:kmax) if(length(k) < kmax) { # select only the specified columns nnw <- nnw[, k, drop=TRUE] } return(nnw) } spatstat/R/measures.R0000755000176000001440000002043712240173061014322 0ustar ripleyusers# # measures.R # # signed/vector valued measures with atomic and diffuse components # # $Revision: 1.30 $ $Date: 2013/11/11 15:19:52 $ # msr <- function(qscheme, discrete, density, check=TRUE) { if(!inherits(qscheme, "quad")) stop("qscheme should be a quadrature scheme") nquad <- n.quad(qscheme) U <- union.quad(qscheme) W <- w.quad(qscheme) Z <- is.data(qscheme) ndata <- sum(Z) # ensure conformable vectors/matrices if(is.vector(discrete) && is.vector(density)) { # handle constants if(length(discrete) == 1) discrete <- rep.int(discrete, ndata) if(length(density) == 1) density <- rep.int(density, nquad) # check lengths if(check) { check.nvector(discrete, ndata, things="data points", naok=TRUE) check.nvector(density, 
nquad, things="quadrature points", naok=TRUE) } discretepad <- numeric(nquad) discretepad[Z] <- discrete } else { if(length(discrete) == 1 && is.matrix(density)) { # replicate constant 'discrete' component to matrix of correct size discrete <- matrix(discrete, ndata, ncol(density)) } else if(length(density) == 1 && is.matrix(discrete)) { # replicate constant 'density' to matrix of correct size density <- matrix(density, nquad, ncol(discrete)) } else { discrete <- as.matrix(discrete) density <- as.matrix(density) } if(check) { # check numbers of rows check.nmatrix(discrete, ndata, things="data points", naok=TRUE, squarematrix=FALSE) check.nmatrix(density, nquad, things="quadrature points", naok=TRUE, squarematrix=FALSE) } nd <- ncol(discrete) nc <- ncol(density) if(nd != nc) { if(nd == 1) { # replicate columns of discrete component discrete <- matrix(rep.int(discrete, nc), ndata, nc) colnames(discrete) <- colnames(density) } else if(nc == 1) { # replicate columns of density component density <- matrix(rep.int(density, nd), nquad, nd) colnames(density) <- colnames(discrete) } else stop(paste("Incompatible numbers of columns in", sQuote("discrete"), paren(nd), "and", sQuote("density"), paren(nc))) } discretepad <- matrix(0, nquad, max(nd, nc)) discretepad[Z, ] <- discrete colnames(discretepad) <- colnames(density) } # # # Discretised measure (value of measure for each quadrature tile) val <- discretepad + W * density if(is.matrix(density)) colnames(val) <- colnames(density) # out <- list(loc = U, val = val, atoms = Z, discrete = discretepad, density = density, wt = W) class(out) <- "msr" return(out) } # Translation table for usage of measures # # e.g. res <- residuals(fit, ...) 
# # OLD NEW # res[ ] res$val[ ] with(res, "increment") # attr(res, "atoms") res$atoms with(res, "is.atom") # attr(res, "discrete") res$discrete with(res, "discrete") # attr(res, "continuous") res$density with(res, "density") # w.quad(quad.ppm(fit)) res$wt with(res, "qweights") # union.quad(quad.ppm(fit)) res$loc with(res, "qlocations") # ................................................. with.msr <- function(data, expr, ...) { stopifnot(inherits(data, "msr")) stopifnot(is.character(expr)) y <- switch(expr, increment = { data$val }, is.atom = { data$atoms }, discrete = { data$discrete }, density = { data$density }, continuous = { data$density * data$wt }, qweights = { data$wt }, qlocations = { data$loc }, stop("Unrecognised option in entry.msr", call.=FALSE)) return(y) } print.msr <- function(x, ...) { n <- npoints(x$loc) d <- ncol(as.matrix(x$val)) descrip <- if(d == 1) "Scalar" else paste(d, "dimensional vector", sep="-") cat(paste(descrip, "-valued measure\n", sep="")) if(d > 1 && !is.null(cn <- colnames(x$val))) cat(paste("vector components:", commasep(sQuote(cn)), "\n")) cat(paste("Approximated by", n, "quadrature points\n")) print(as.owin(x$loc)) cat(paste(sum(x$atoms), "atoms\n")) cat(paste("Total mass:\n")) if(d == 1) { cat(paste("discrete =", signif(sum(with(x, "discrete")), 5), "\tcontinuous =", signif(sum(with(x, "continuous")), 5), "\ttotal =", signif(sum(with(x, "increment")), 5), "\n")) } else { if(is.null(cn)) cn <- paste("component", 1:d) for(j in 1:d) { cat(paste(cn[j], ":\t", "discrete =", signif(sum(with(x, "discrete")[,j]), 5), "\tcontinuous =", signif(sum(with(x, "continuous")[,j]), 5), "\ttotal =", signif(sum(with(x, "increment")[,j]), 5), "\n")) } } return(invisible(NULL)) } integral.msr <- function(x, ...) { stopifnot(inherits(x, "msr")) y <- with(x, "increment") if(is.matrix(y)) apply(y, 2, sum) else sum(y) } plot.msr <- function(x, ...) 
{ xname <- short.deparse(substitute(x)) d <- ncol(as.matrix(x$val)) if(d == 1) { # smooth the density unless it is flat if(diff(range(x$density)) > sqrt(.Machine$double.eps) || "sigma" %in% names(list(...))) { sigma0 <- max(nndist(x$loc)) smo <- do.call("Smooth", resolve.defaults(list(X=x$loc %mark% x$density), list(...), list(sigma=sigma0))) } else { smo <- as.im(mean(x$density), W=as.owin(x$loc)) } xtra <- unique(c(names(formals(plot.default)), names(formals(image.default)), "box")) do.call.matched("plot.im", resolve.defaults(list(x=smo), list(...), list(main=xname)), extrargs=xtra) xtra <- unique(c(names(formals(plot.owin)), names(formals(points)), names(formals(symbols)))) xtra <- setdiff(xtra, "box") do.call.matched("plot.ppp", resolve.defaults(list(x=x$loc %mark% x$discrete), list(add=TRUE), list(...)), extrargs=xtra) } else { # split into a list of real-valued measures lis <- list() for(j in 1:d) lis[[j]] <- x[,j] lis <- as.listof(lis) if(!is.null(cn <- colnames(x$val))) names(lis) <- cn do.call("plot.listof", resolve.defaults(list(lis), list(...), list(main=xname))) } return(invisible(NULL)) } "[.msr" <- function(x, i, j, ...) { valu <- as.matrix(x$val) disc <- as.matrix(x$discrete) dens <- as.matrix(x$density) wt <- x$wt atoms <- x$atoms # if(!missing(j)) { valu <- valu[, j] disc <- disc[, j] dens <- dens[, j] } loc <- x$loc if(!missing(i)) { # use [.ppp to identify which points are retained locn <- loc %mark% seq_len(npoints(loc)) loci <- locn[i] loc <- unmark(loci) id <- marks(loci) # extract valu <- valu[id, ] disc <- disc[id, ] dens <- dens[id, ] wt <- wt[id] atoms <- atoms[id] } out <- list(loc=loc, val=valu, atoms=atoms, discrete=disc, density=dens, wt=wt) class(out) <- "msr" return(out) } dim.msr <- function(x) { dim(as.matrix(x$val)) } dimnames.msr <- function(x) { list(NULL, colnames(x$val)) } smooth.msr <- function(X, ...) 
{ message("smooth.msr will soon be deprecated: use the generic Smooth with a capital S") # .Deprecated("Smooth.msr", package="spatstat", # msg="smooth.msr is deprecated: use the generic Smooth with a capital S") Smooth(X, ...) } Smooth.msr <- function(X, ...) { verifyclass(X, "msr") loc <- X$loc val <- X$val d <- ncol(as.matrix(val)) if(d == 1) { result <- density(loc, weights=val, ...) } else { result <- list() for(j in 1:d) result[[j]] <- density(loc, weights=val[,j], ...) result <- as.listof(result) names(result) <- colnames(X) } return(result) } spatstat/R/setcov.R0000755000176000001440000000611512237642727014016 0ustar ripleyusers# # # setcov.R # # $Revision: 1.11 $ $Date: 2012/10/10 06:48:16 $ # # Compute the set covariance function of a window # or the (noncentred) spatial covariance function of an image # setcov <- function(W, V=W, ...) { W <- as.owin(W) # pixel approximation mW <- as.mask(W, ...) Z <- as.im(mW, na.replace=0) if(missing(V)) return(imcov(Z)) # cross-covariance V <- as.owin(V) mV <- as.mask(V, ...) 
Z2 <- as.im(mV, na.replace=0) imcov(Z, Z2) } imcov <- function(X, Y=X) { if(missing(Y)) Y <- NULL convolve.im(X, Y, reflectX = FALSE, reflectY=TRUE) } convolve.im <- function(X, Y=X, ..., reflectX=FALSE, reflectY=FALSE) { stopifnot(is.im(X)) have.Y <- !missing(Y) && !is.null(Y) crosscov <- have.Y || reflectX || reflectY trap.extra.arguments(..., .Context="In convolve.im") # if(have.Y) { # cross-covariance stopifnot(is.im(Y)) Xbox <- as.rectangle(X) Ybox <- as.rectangle(Y) # first shift images to common midpoint, to reduce storage Xmid <- centroid.owin(Xbox) Ymid <- centroid.owin(Ybox) svec <- as.numeric(Xmid) - as.numeric(Ymid) Y <- shift(Y, svec) # ensure images are compatible XY <- harmonise.im(X=X, Y=Y) X <- XY$X Y <- XY$Y } else { # Y is missing or NULL Y <- X Xbox <- Ybox <- as.rectangle(X) } M <- X$v M[is.na(M)] <- 0 xstep <- X$xstep ystep <- X$ystep # pad with zeroes nr <- nrow(M) nc <- ncol(M) Mpad <- matrix(0, ncol=2*nc, nrow=2*nr) Mpad[1:nr, 1:nc] <- M lengthMpad <- 4 * nc * nr fM <- fft(Mpad) if(!crosscov) { # compute convolution square G <- fft(fM^2, inverse=TRUE)/lengthMpad } else { # compute set cross-covariance or convolution by fft N <- Y$v N[is.na(N)] <- 0 Npad <- matrix(0, ncol=2*nc, nrow=2*nr) Npad[1:nr, 1:nc] <- N fN <- fft(Npad) if(reflectY) fN <- Conj(fN) if(reflectX) fM <- Conj(fM) G <- fft(fM * fN, inverse=TRUE)/lengthMpad } # cat(paste("maximum imaginary part=", max(Im(G)), "\n")) G <- Mod(G) * xstep * ystep if(reflectX != reflectY) { # Currently G[i,j] corresponds to a vector shift of # dy = (i-1) mod nr, dx = (j-1) mod nc. 
# Rearrange this periodic function so that # the origin of translations (0,0) is at matrix position (nr,nc) # NB this introduces an extra row and column G <- G[ ((-nr):nr) %% (2 * nr) + 1, (-nc):nc %% (2*nc) + 1] } # Determine spatial domain of full raster image XB <- as.rectangle(X) YB <- as.rectangle(Y) # undo shift if(have.Y) YB <- shift(YB, -svec) # reflect if(reflectX) XB <- reflect(XB) if(reflectY) YB <- reflect(YB) # Minkowski sum of covering boxes xran <- XB$xrange + YB$xrange yran <- XB$yrange + YB$yrange # Declare spatial domain out <- im(G, xrange = xran, yrange=yran) if(crosscov) { # restrict to actual spatial domain of function if(reflectX) Xbox <- reflect(Xbox) if(reflectY) Ybox <- reflect(Ybox) # Minkowski sum xran <- Xbox$xrange + Ybox$xrange yran <- Xbox$yrange + Ybox$yrange XYbox <- owin(xran, yran) out <- out[XYbox, rescue=TRUE] } return(out) } spatstat/R/multistrauss.R0000755000176000001440000001754512240724402015264 0ustar ripleyusers# # # multistrauss.S # # $Revision: 2.20 $ $Date: 2012/08/30 02:09:41 $ # # The multitype Strauss process # # MultiStrauss() create an instance of the multitype Strauss process # [an object of class 'interact'] # # ------------------------------------------------------------------- # MultiStrauss <- local({ # ......... 
define interaction potential MSpotential <- function(d, tx, tu, par) { # arguments: # d[i,j] distance between points X[i] and U[j] # tx[i] type (mark) of point X[i] # tu[j] type (mark) of point U[j] # # get matrix of interaction radii r[ , ] r <- par$radii # # get possible marks and validate if(!is.factor(tx) || !is.factor(tu)) stop("marks of data and dummy points must be factor variables") lx <- levels(tx) lu <- levels(tu) if(length(lx) != length(lu) || any(lx != lu)) stop("marks of data and dummy points do not have same possible levels") if(!identical(lx, par$types)) stop("data and model do not have the same possible levels of marks") if(!identical(lu, par$types)) stop("dummy points and model do not have the same possible levels of marks") # ensure factor levels are acceptable for column names (etc) lxname <- make.names(lx, unique=TRUE) # list all UNORDERED pairs of types to be checked # (the interaction must be symmetric in type, and scored as such) uptri <- (row(r) <= col(r)) & !is.na(r) mark1 <- (lx[row(r)])[uptri] mark2 <- (lx[col(r)])[uptri] # corresponding names mark1name <- (lxname[row(r)])[uptri] mark2name <- (lxname[col(r)])[uptri] vname <- apply(cbind(mark1name,mark2name), 1, paste, collapse="x") vname <- paste("mark", vname, sep="") npairs <- length(vname) # list all ORDERED pairs of types to be checked # (to save writing the same code twice) different <- mark1 != mark2 mark1o <- c(mark1, mark2[different]) mark2o <- c(mark2, mark1[different]) nordpairs <- length(mark1o) # unordered pair corresponding to each ordered pair ucode <- c(1:npairs, (1:npairs)[different]) # # create logical array for result z <- array(FALSE, dim=c(dim(d), npairs), dimnames=list(character(0), character(0), vname)) # go.... 
if(length(z) > 0) { # assemble the relevant interaction distance for each pair of points rxu <- r[ tx, tu ] # apply relevant threshold to each pair of points str <- (d <= rxu) # assign str[i,j] -> z[i,j,k] where k is relevant interaction code for(i in 1:nordpairs) { # data points with mark m1 Xsub <- (tx == mark1o[i]) # quadrature points with mark m2 Qsub <- (tu == mark2o[i]) # assign z[Xsub, Qsub, ucode[i]] <- str[Xsub, Qsub] } } return(z) } #### end of 'pot' function #### # ........ auxiliary functions .............. delMS <- function(which, types, radii) { radii[which] <- NA if(all(is.na(radii))) return(Poisson()) return(MultiStrauss(types, radii)) } # Set up basic object except for family and parameters BlankMSobject <- list( name = "Multitype Strauss process", creator = "MultiStrauss", family = "pairwise.family", # evaluated later pot = MSpotential, par = list(types=NULL, radii = NULL), # to be filled in later parnames = c("possible types", "interaction distances"), selfstart = function(X, self) { if(!is.null(self$par$types)) return(self) types <- levels(marks(X)) MultiStrauss(types=types,radii=self$par$radii) }, init = function(self) { types <- self$par$types if(!is.null(types)) { radii <- self$par$radii nt <- length(types) MultiPair.checkmatrix(radii, nt, sQuote("radii")) if(length(types) == 0) stop(paste("The", sQuote("types"),"argument should be", "either NULL or a vector of all possible types")) if(any(is.na(types))) stop("NA's not allowed in types") if(is.factor(types)) { types <- levels(types) } else { types <- levels(factor(types, levels=types)) } } }, update = NULL, # default OK print = function(self) { radii <- self$par$radii types <- self$par$types cat(paste(nrow(radii), "types of points\n")) if(!is.null(types)) { cat("Possible types: \n") print(types) } else cat("Possible types:\t not yet determined\n") cat("Interaction radii:\n") print(self$par$radii) invisible() }, interpret = function(coeffs, self) { # get possible types typ <- self$par$types 
ntypes <- length(typ) # get matrix of Strauss interaction radii r <- self$par$radii # list all unordered pairs of types uptri <- (row(r) <= col(r)) & (!is.na(r)) index1 <- (row(r))[uptri] index2 <- (col(r))[uptri] npairs <- length(index1) # extract canonical parameters; shape them into a matrix gammas <- matrix(, ntypes, ntypes) dimnames(gammas) <- list(typ, typ) expcoef <- exp(coeffs) gammas[ cbind(index1, index2) ] <- expcoef gammas[ cbind(index2, index1) ] <- expcoef # return(list(param=list(gammas=gammas), inames="interaction parameters gamma_ij", printable=round(gammas,4))) }, valid = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii radii <- self$par$radii # parameters to estimate required <- !is.na(radii) gr <- gamma[required] return(all(is.finite(gr) & gr <= 1)) }, project = function(coeffs, self) { # interaction parameters gamma[i,j] gamma <- (self$interpret)(coeffs, self)$param$gammas # interaction radii and types radii <- self$par$radii types <- self$par$types # problems? required <- !is.na(radii) okgamma <- is.finite(gamma) & (gamma <= 1) naughty <- required & !okgamma # if(!any(naughty)) return(NULL) if(spatstat.options("project.fast")) { # remove ALL naughty terms simultaneously return(delMS(naughty, types, radii)) } else { # present a list of candidates rn <- row(naughty) cn <- col(naughty) uptri <- (rn <= cn) upn <- uptri & naughty rowidx <- as.vector(rn[upn]) colidx <- as.vector(cn[upn]) matindex <- function(v) { matrix(c(v, rev(v)), ncol=2, byrow=TRUE) } mats <- lapply(as.data.frame(rbind(rowidx, colidx)), matindex) inters <- lapply(mats, delMS, types=types, radii=radii) return(inters) } }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$radii active <- !is.na(r) if(any(!is.na(coeffs))) { gamma <- (self$interpret)(coeffs, self)$param$gammas gamma[is.na(gamma)] <- 1 active <- active & (abs(log(gamma)) > epsilon) } if(any(active)) return(max(r[active])) else return(0) }, version=NULL # to be added ) class(BlankMSobject) <- "interact" # finally create main function MultiStrauss <- function(types=NULL, radii) { out <- instantiate.interact(BlankMSobject, list(types=types, radii = radii)) if(!is.null(types)) dimnames(out$par$radii) <- list(types, types) return(out) } MultiStrauss }) spatstat/R/morphology.R0000755000176000001440000002351112243077200014672 0ustar ripleyusers# # morphology.R # # dilation, erosion, opening, closing # # generic functions # and methods for owin, psp, ppp # # $Revision: 1.23 $ $Date: 2013/11/20 09:18:50 $ # # ............ generic ............................ erosion <- function(w, r, ...) { UseMethod("erosion") } dilation <- function(w, r, ...) { UseMethod("dilation") } closing <- function(w, r, ...) { UseMethod("closing") } opening <- function(w, r, ...) { UseMethod("opening") } # ............ methods for class 'owin' ............................ erode.owin <- function(...) { .Deprecated("erosion.owin", package="spatstat") erosion.owin(...) 
} erosion.owin <- function(w, r, shrink.frame=TRUE, ..., strict=FALSE, polygonal=NULL) { verifyclass(w, "owin") validradius(r, "erosion") if(r == 0 && !strict) return(w) xr <- w$xrange yr <- w$yrange if(2 * r >= max(diff(xr), diff(yr))) stop("erosion distance r too large for frame of window") # compute the dimensions of the eroded frame exr <- xr + c(r, -r) eyr <- yr + c(r, -r) ebox <- list(x=exr[c(1,2,2,1)], y=eyr[c(1,1,2,2)]) ismask <- is.mask(w) if(is.empty(w)) return(emptywindow(ebox)) # determine type of computation if(is.null(polygonal)) polygonal <- !ismask else { stopifnot(is.logical(polygonal)) if(polygonal && ismask) { # try to convert w <- as.polygonal(w) if(is.mask(w)) polygonal <- FALSE } } if(is.rectangle(w) && polygonal) { # result is a smaller rectangle if(shrink.frame) { return(owin(exr, eyr)) # type 'rectangle' } else { return(owin(xr, yr, poly=ebox, check=FALSE)) # type 'polygonal' } } if(polygonal) { # compute polygonal region using polyclip package pnew <- polyoffset(w$bdry, -r, jointype="round") # ensure correct polarity totarea <- sum(unlist(lapply(pnew, area.xypolygon))) if(totarea < 0) pnew <- lapply(pnew, reverse.xypolygon) if(shrink.frame) { return(owin(poly=pnew, check=FALSE)) } else { return(owin( xr, yr, poly=pnew, check=FALSE)) } } # otherwise erode the window in pixel image form if(w$type == "mask") wnew <- erodemask(w, r, strict=strict) else { D <- distmap(w, invert=TRUE, ...) wnew <- levelset(D, r, if(strict) ">" else ">=") } if(shrink.frame) { # trim off some rows & columns of pixel raster keepcol <- (wnew$xcol >= exr[1] & wnew$xcol <= exr[2]) keeprow <- (wnew$yrow >= eyr[1] & wnew$yrow <= eyr[2]) wnew$xcol <- wnew$xcol[keepcol] wnew$yrow <- wnew$yrow[keeprow] wnew$dim <- c(sum(keeprow), sum(keepcol)) wnew$m <- wnew$m[keeprow, keepcol] wnew$xrange <- exr wnew$yrange <- eyr } return(wnew) } dilate.owin <- function(...) { .Deprecated("dilation.owin", package="spatstat") dilation.owin(...) 
} dilation.owin <- function(w, r, ..., polygonal=NULL, tight=TRUE) { verifyclass(w, "owin") validradius(r, "dilation") if(r == 0) return(w) ismask <- is.mask(w) if(is.empty(w)) return(w) # determine type of computation if(is.null(polygonal)) { polygonal <- !ismask } else stopifnot(is.logical(polygonal)) if(polygonal) { # convert to polygonal w <- as.polygonal(w) if(!is.polygonal(w)) polygonal <- FALSE } # bounding frame bb <- if(tight) bounding.box(w) else as.rectangle(w) newbox <- grow.rectangle(bb, r) # compute dilation if(!polygonal) { # compute pixel approximation epsilon <- sqrt(w$xstep^2 + w$ystep^2) r <- max(r, epsilon) w <- rebound.owin(w, newbox) distant <- distmap(w, ...) dil <- levelset(distant, r, "<=") return(dil) } else { # compute polygonal region using polyclip package pnew <- polyoffset(w$bdry, r, jointype="round") # ensure correct polarity totarea <- sum(unlist(lapply(pnew, area.xypolygon))) if(totarea < 0) pnew <- lapply(pnew, reverse.xypolygon) # determine bounding frame, convert to owin if(tight) { out <- owin(poly=pnew, check=FALSE) } else { out <- owin(newbox$xrange, newbox$yrange, poly=pnew, check=FALSE) } return(out) } } closing.owin <- function(w, r, ..., polygonal=NULL) { if(missing(r)) stop("r is required") validradius(r, "closing") wplus <- dilation.owin(w, r, ..., polygonal=polygonal, tight=FALSE) if(is.empty(wplus)) return(wplus) wclose <- erosion.owin(wplus, r, strict=TRUE) wclose <- rebound.owin(wclose, as.rectangle(w)) return(wclose) } opening.owin <- function(w, r, ..., polygonal=NULL) { if(missing(r)) stop("r is required") validradius(r, "opening") wminus <- erosion.owin(w, r, ..., polygonal=polygonal, shrink.frame=FALSE) if(is.empty(wminus)) return(wminus) wopen <- dilation.owin(wminus, r, tight=FALSE) wopen <- rebound.owin(wopen, as.rectangle(w)) return(wopen) } border <- function(w, r, outside=FALSE, ...) { w <- as.owin(w) if(!outside) { e <- erosion(w, r, ...) b <- setminus.owin(w, e) } else { d <- dilation(w, r, ...) 
b <- setminus.owin(d, w) } return(b) } # ............ methods for class 'psp' ............................ dilation.psp <- function(w, r, ..., polygonal=TRUE, tight=TRUE) { verifyclass(w, "psp") x <- w validradius(r, "dilation") if(r == 0) return(w) if(is.empty(x)) return(emptywindow(as.owin(w))) # bounding frame bb <- if(tight) bounding.box(x) else as.rectangle(x) newbox <- grow.rectangle(bb, r) # compute dilation if(!polygonal) { x <- rebound.psp(x, newbox) distant <- distmap(x, ...) dil <- levelset(distant, r, "<=") return(dil) } else if(spatstat.options("old.morpho.psp")) { # old code for polygonal case ends <- x$ends angles <- angles.psp(x, directed=TRUE) lengths <- lengths.psp(x) out <- NULL # dilate individual segments halfcircle <- seq(from=0, to=pi, length.out=128)[-c(1,128)] for(i in seq_len(x$n)) { seg <- ends[i,] co <- cos(angles[i]) si <- sin(angles[i]) # draw sausage around i-th segment xx <- c(seg$x0, seg$x1) + r * si yy <- c(seg$y0, seg$y1) - r * co rightcircle <- angles[i] - pi/2 + halfcircle xx <- c(xx, seg$x1 + r * cos(rightcircle)) yy <- c(yy, seg$y1 + r * sin(rightcircle)) xx <- c(xx, c(seg$x1, seg$x0) - r * si) yy <- c(yy, c(seg$y1, seg$y0) + r * co) leftcircle <- angles[i] + pi/2 + halfcircle xx <- c(xx, seg$x0 + r * cos(leftcircle)) yy <- c(yy, seg$y0 + r * sin(leftcircle)) sausage <- owin(newbox$xrange, newbox$yrange, poly=list(x=xx, y=yy), check=FALSE) # add to set out <- union.owin(out, sausage, ...) 
} return(out) } else { # new code using 'polyclip' package # convert to list of list(x,y) ends <- as.matrix(x$ends) n <- nrow(ends) plines <- vector(mode="list", length=n) for(i in 1:n) plines[[i]] <- list(x=ends[i, c("x0","x1")], y=ends[i, c("y0","y1")]) # call pnew <- polylineoffset(plines, r, jointype="round", endtype="round") # ensure correct polarity totarea <- sum(unlist(lapply(pnew, area.xypolygon))) if(totarea < 0) pnew <- lapply(pnew, reverse.xypolygon) # convert to owin object out <- if(tight) owin(poly=pnew, check=FALSE) else owin(newbox$xrange, newbox$yrange, poly=pnew, check=FALSE) return(out) } } closing.psp <- function(w, r, ..., polygonal=TRUE) { if(missing(r)) stop("r is required") validradius(r, "closing") wplus <- dilation.psp(w, r, ..., polygonal=polygonal, tight=FALSE) if(is.empty(wplus)) return(emptywindow(as.owin(w))) wclose <- erosion.owin(wplus, r, strict=TRUE) wclose <- rebound.owin(wclose, as.rectangle(w)) return(wclose) } erosion.psp <- function(w, r, ...) { idorempty(w, r, "erosion") } opening.psp <- function(w, r, ...) { idorempty(w, r,"opening") } # ............ methods for class 'ppp' ............................ dilation.ppp <- function(w, r, ..., polygonal=TRUE, tight=TRUE) { verifyclass(w, "ppp") validradius(r, "dilation") x <- w if(r == 0) return(x) if(is.empty(w)) return(emptywindow(as.owin(w))) # bounding frame bb <- if(tight) bounding.box(x) else as.rectangle(x) newbox <- grow.rectangle(bb, r) # compute dilation if(!polygonal) { # compute pixel approximation x <- rebound.ppp(x, newbox) distant <- distmap(x, ...) 
dil <- levelset(distant, r, "<=") return(dil) } else { # compute polygonal approximation # generate discs out <- NULL for(i in seq_len(x$n)) { balli <- disc(r, c(x$x[i], x$y[i])) out <- union.owin(out, balli) } return(out) } } closing.ppp <- function(w, r, ..., polygonal=TRUE) { if(missing(r)) stop("r is required") validradius(r, "closing") if(is.empty(w) || w$n <= 3) return(emptywindow(as.owin(w))) # remove `isolated' points ok <- (nndist(w) <= 2 * r) if(sum(ok) <= 3) return(emptywindow(as.owin(w))) w <- w[ok] # dilate wplus <- dilation.ppp(w, r, ..., polygonal=polygonal, tight=FALSE) wclose <- erosion.owin(wplus, r, strict=TRUE) wclose <- rebound.owin(wclose, as.rectangle(w)) return(wclose) } erosion.ppp <- function(w, r, ...) { idorempty(w, r, "erosion") } opening.ppp <- function(w, r, ...) { idorempty(w, r,"opening") } # ............ utilities ............................ validradius <- function(r, caller="morphological operator") { rname <- short.deparse(substitute(r)) groan <- function(whinge, caller) { stop(paste("for", paste(caller, ",", sep=""), whinge), call.=FALSE) } if(!is.numeric(r) || length(r) != 1) groan("radius r must be a single number", caller) if(r < 0) groan("radius r must be nonnegative", caller) return(TRUE) } idorempty <- function(w, r, caller="morphological operator") { validradius(r, caller) if(r == 0) return(w) else return(emptywindow(w)) } spatstat/R/ppmclass.R0000755000176000001440000004767512237642727014355 0ustar ripleyusers# # ppmclass.R # # Class 'ppm' representing fitted point process models. 
# # # $Revision: 2.89 $ $Date: 2013/09/02 10:25:16 $ # # An object of class 'ppm' contains the following: # # $method model-fitting method (currently "mpl") # # $coef vector of fitted regular parameters # as given by coef(glm(....)) # # $trend the trend formula # or NULL # # $interaction the interaction family # (an object of class 'interact') or NULL # # $Q the quadrature scheme used # # $maxlogpl the maximised value of log pseudolikelihood # # $internal list of internal calculation results # # $correction name of edge correction method used # $rbord erosion distance for border correction (or NULL) # # $the.call the originating call to ppm() # # $the.version version of mpl() which yielded the fit # # #------------------------------------------------------------------------ is.ppm <- function(x) { inherits(x, "ppm") } print.ppm <- function(x, ..., what=c("all", "model", "trend", "interaction", "se", "errors")) { verifyclass(x, "ppm") misswhat <- missing(what) opts <- c("model", "trend", "interaction", "se", "errors") what <- match.arg(what, c("all", opts), several.ok=TRUE) if("all" %in% what) what <- opts # If SE was explicitly requested, calculate it. 
# Otherwise, do it only if the model is Poisson (by default) do.SE <- if(!misswhat) ("se" %in% what) else switch(spatstat.options("print.ppm.SE"), always = TRUE, never = FALSE, poisson = { is.poisson(x) && !is.null(x$fitter) && (x$fitter != "gam") }) s <- summary.ppm(x, quick=if(do.SE) FALSE else "no variances") notrend <- s$no.trend stationary <- s$stationary poisson <- s$poisson markeddata <- s$marked multitype <- s$multitype markedpoisson <- poisson && markeddata # ----------- Print model type ------------------- if("model" %in% what) { cat(s$name) cat("\n") if(markeddata) mrk <- s$entries$marks if(multitype) { cat("Possible marks: \n") cat(paste(levels(mrk))) cat("\n") } } # ----- trend -------------------------- if("trend" %in% what) { # cat(paste("\n", s$trend$name, ":\n", sep="")) if(!notrend) { cat("\nTrend formula: ") print(s$trend$formula, showEnv=FALSE) } tv <- s$trend$value if(length(tv) == 0) cat("\n[No trend coefficients]\n") else { cat(paste("\n", s$trend$label, ":", sep="")) if(is.list(tv)) { cat("\n") for(i in seq_along(tv)) print(tv[[i]]) } else if(is.numeric(tv) && length(tv) == 1 && is.null(names(tv))) { # single number: append to end of current line cat("\t", paste(tv), "\n") } else { # some other format cat("\n") print(tv) } } cat("\n") if(!is.null(cfa <- s$covfunargs) && length(cfa) > 0) { cat("Covariate function arguments (covfunargs) provided:\n") for(i in seq_along(cfa)) { cat(paste(names(cfa)[i], "= ")) cfai <- cfa[[i]] if(is.numeric(cfai) && length(cfai) == 1) { cat(paste(cfai, "\n")) } else print(cfa[[i]]) } } } # ---- Interaction ---------------------------- if("interaction" %in% what) { if(!poisson) { print(s$interaction, family=FALSE) cat("\n") } } # ----- parameter estimates with SE and 95% CI -------------------- if("se" %in% what) { if(!is.null(cose <- s$coefs.SE.CI)) { print(cose) } else if(do.SE) { # standard error calculation failed cat("Standard errors unavailable; variance-covariance matrix is singular") } else { # standard 
error was voluntarily omitted cat("For standard errors, type coef(summary(x))\n") } } # ---- Warnings issued in mpl.prepare --------------------- if("errors" %in% what) { probs <- s$problems if(!is.null(probs) && is.list(probs) && (length(probs) > 0)) lapply(probs, function(x) { if(is.list(x) && !is.null(p <- x$print)) cat(paste("Problem:\n", p, "\n\n")) }) if(s$old) warning(paste("Model fitted by old spatstat version", s$version)) # ---- Algorithm status ---------------------------- fitter <- s$fitter converged <- s$converged if(!is.null(fitter) && fitter %in% c("glm", "gam") && !converged) cat(paste("*** Fitting algorithm for", sQuote(fitter), "did not converge ***\n")) } if(s$projected) cat("Fit was projected to obtain a valid point process model\n") return(invisible(NULL)) } quad.ppm <- function(object, drop=FALSE) { if(!is.ppm(object)) { if(inherits(object, "kppm")) object <- object$po else stop("object is not of class ppm") } Q <- object$Q if(!drop || is.null(Q)) return(Q) ok <- getglmsubset(object) if(is.null(ok)) return(Q) return(Q[ok]) } data.ppm <- function(object) { verifyclass(object, "ppm") object$Q$data } dummy.ppm <- function(object, drop=FALSE) { return(quad.ppm(object, drop=drop)$dummy) } # method for 'coef' coef.ppm <- function(object, ...) 
{ verifyclass(object, "ppm") object$coef } getglmfit <- function(object) { verifyclass(object, "ppm") glmfit <- object$internal$glmfit if(is.null(glmfit)) return(NULL) if(object$method != "mpl") glmfit$coefficients <- object$coef return(glmfit) } getglmdata <- function(object, drop=FALSE) { verifyclass(object, "ppm") gd <- object$internal$glmdata if(!drop) return(gd) return(gd[getglmsubset(object), , drop=FALSE]) } getglmsubset <- function(object) { gd <- object$internal$glmdata if(object$method=="logi") return(gd$.logi.ok) return(gd$.mpl.SUBSET) } getppmdatasubset <- function(object) { # Equivalent to getglmsubset(object)[is.data(quad.ppm(object))] # but also works for models fitted exactly, etc # if(object$method %in% c("mpl", "ho")) { sub <- getglmsubset(object) if(!is.null(sub)) { Z <- is.data(quad.ppm(object)) return(sub[Z]) } } X <- data.ppm(object) sub <- if(object$correction == "border") { (bdist.points(X) >= object$rbord) } else rep(TRUE, npoints(X)) return(sub) } # ??? method for 'effects' ??? valid.ppm <- function(object) { verifyclass(object, "ppm") coeffs <- coef(object) # ensure all coefficients are fitted, and finite if(!all(is.finite(coeffs))) return(FALSE) # inspect interaction inte <- object$interaction if(is.null(inte)) return(TRUE) # Poisson process # extract fitted interaction coefficients Vnames <- object$internal$Vnames IsOffset <- object$internal$IsOffset Icoeffs <- coeffs[Vnames[!IsOffset]] # check interaction checker <- inte$valid if(is.null(checker) || !newstyle.coeff.handling(inte)) { warning("Internal error: unable to check validity of model") return(NA) } answer <- checker(Icoeffs, inte) return(answer) } project.ppm <- local({ tracemessage <- function(depth, ...) 
{ if(depth == 0) return(NULL) spacer <- paste(rep.int(" ", depth), collapse="") marker <- ngettext(depth, "trace", paste("trace", depth)) marker <- paren(marker, "[") cat(paste(spacer, marker, " ", paste(...), "\n", sep="")) } leaving <- function(depth) { tracemessage(depth, ngettext(depth, "Returning.", "Exiting level.")) } project.ppm <- function(object, ..., fatal=FALSE, trace=FALSE) { verifyclass(object, "ppm") fast <- spatstat.options("project.fast") # user specifies 'trace' as logical # but 'trace' can also be integer representing trace depth td <- as.integer(trace) trace <- (td > 0) tdnext <- if(trace) td+1 else 0 if(valid.ppm(object)) { tracemessage(td, "Model is valid.") leaving(td) return(object) } # First ensure trend coefficients are all finite coeffs <- coef(object) # Which coefficients are trend coefficients coefnames <- names(coeffs) internames <- object$internal$Vnames trendnames <- coefnames[!(coefnames %in% internames)] # Trend terms in trend formula trendterms <- attr(terms(object), "term.labels") # Mapping from coefficients to terms of GLM coef2term <- attr(model.matrix(object), "assign") istrend <- (coef2term > 0) & (coefnames %in% trendnames) # Identify non-finite trend coefficients bad <- istrend & !is.finite(coeffs) if(!any(bad)) { tracemessage(td, "Trend terms are valid.") } else { nbad <- sum(bad) tracemessage(td, "Non-finite ", ngettext(nbad, "coefficient for term ", "coefficients for terms "), commasep(sQuote(trendterms[coef2term[bad]]))) if(fast) { # remove first illegal term firstbad <- min(which(bad)) badterm <- trendterms[coef2term[firstbad]] # remove this term from model tracemessage(td, "Removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . 
- ", badterm), env=object$callframe) newobject <- update(object, removebad) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- project.ppm(newobject, fatal=fatal, trace=tdnext) # return leaving(td) return(newobject) } else { # consider all illegal terms bestobject <- NULL for(i in which(bad)) { badterm <- trendterms[coef2term[i]] # remove this term from model tracemessage(td, "Considering removing term ", sQuote(badterm)) removebad <- as.formula(paste("~ . - ", badterm), env=object$callframe) object.i <- update(object, removebad) if(trace) { tracemessage(td, "Considering updated model:") print(object.i) } # recurse object.i <- project.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } # return leaving(td) return(bestobject) } } # Now handle interaction inte <- object$interaction if(is.null(inte)) { tracemessage(td, "No interaction to check.") leaving(td) return(object) } tracemessage(td, "Inspecting interaction terms.") proj <- inte$project if(is.null(proj)) { whinge <- "Internal error: interaction has no projection operator" if(fatal) stop(whinge) warning(whinge) leaving(td) return(object) } # ensure the same edge correction is used! 
correction <- object$correction rbord <- object$rbord # apply projection coef.orig <- coeffs <- coef(object) Vnames <- object$internal$Vnames Icoeffs <- coeffs[Vnames] change <- proj(Icoeffs, inte) if(is.null(change)) { tracemessage(td, "Interaction does not need updating.") leaving(td) return(object) } tracemessage(td, "Interaction is not valid.") if(is.numeric(change)) { tracemessage(td, "Interaction coefficients updated without re-fitting.") # old style: 'project' returned a vector of updated coefficients Icoeffs <- change # tweak interaction coefficients object$coef[Vnames] <- Icoeffs # recompute fitted interaction object$fitin <- NULL object$fitin <- fitin(object) } else if(is.interact(change)) { # new style: 'project' returns an interaction if(trace) { tracemessage(td, "Interaction changed to:") print(change) } # refit the whole model # (using the same edge correction) # (and the same quadrature scheme) newobject <- update(object, interaction=change, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Updated model:") print(newobject) } # recurse newobject <- project.ppm(newobject, fatal=fatal, trace=tdnext) object <- newobject } else if(is.list(change) && all(unlist(lapply(change, is.interact)))) { # new style: 'project' returns a list of candidate interactions nchange <- length(change) tracemessage(td, "Considering", nchange, ngettext(nchange, "submodel", "submodels")) bestobject <- NULL for(i in seq_len(nchange)) { change.i <- change[[i]] if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate submodel, with interaction:") print(change.i) } # refit the whole model object.i <- update(object, interaction=change.i, correction=correction, rbord=rbord, forcefit=TRUE, envir=object$callframe) if(trace) { tracemessage(td, "Considering", ordinal(i), "candidate updated model:") print(object.i) } # recurse object.i <- project.ppm(object.i, fatal=fatal, trace=tdnext) # evaluate logPL logPL.i <- 
logLik(object.i, warn=FALSE) tracemessage(td, "max log pseudolikelihood = ", logPL.i) # optimise if(is.null(bestobject) || (logLik(bestobject, warn=FALSE) < logPL.i)) bestobject <- object.i } # end loop through submodels if(trace) { tracemessage(td, "Best submodel:") print(bestobject) } object <- bestobject } else stop("Internal error: unrecognised format of update") object$projected <- TRUE object$coef.orig <- coef.orig leaving(td) return(object) } project.ppm }) # more methods logLik.ppm <- function(object, ..., warn=TRUE) { if(!is.poisson.ppm(object) && warn) warning(paste("log likelihood is not available for non-Poisson model;", "log-pseudolikelihood returned")) method <- object$method switch(method, mpl={ ll <- object$maxlogpl }, ho={ # evaluate the log pseudolikelihood Q <- quad.ppm(object, drop=TRUE) Z <- is.data(Q) w <- w.quad(Q) cif <- fitted(object, type="cif", drop=TRUE) cifdata <- cif[Z] ll <- sum(log(cifdata[cifdata > 0])) - sum(w * cif) }, logi={ ll <- object$maxlogpl }, stop(paste("Internal error: unrecognised ppm method:", dQuote(method))) ) attr(ll, "df") <- length(coef(object)) class(ll) <- "logLik" return(ll) } formula.ppm <- function(x, ...) { f <- x$trend if(is.null(f)) f <- ~1 return(f) } terms.ppm <- function(x, ...) { terms(formula(x), ...) } labels.ppm <- function(object, ...) { # extract fitted trend coefficients co <- coef(object) Vnames <- object$internal$Vnames is.trend <- !(names(co) %in% Vnames) # model terms tt <- terms(object) lab <- attr(tt, "term.labels") if(length(lab) == 0) return(character(0)) # model matrix mm <- model.matrix(object) ass <- attr(mm, "assign") # 'ass' associates coefficients with model terms # except ass == 0 for the Intercept coef.ok <- is.finite(co) relevant <- (ass > 0) & is.trend okterms <- unique(ass[coef.ok & relevant]) return(lab[okterms]) } extractAIC.ppm <- function (fit, scale = 0, k = 2, ...) 
{ edf <- length(coef(fit)) aic <- AIC(fit) c(edf, aic + (k - 2) * edf) } # # method for model.frame model.frame.ppm <- function(formula, ...) { object <- formula gf <- getglmfit(object) if(is.null(gf)) { warning("Model re-fitted with forcefit=TRUE") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) } # gd <- getglmdata(object) # model.frame(gf, data=gd, ...) model.frame(gf, ...) } # # method for model.matrix model.matrix.ppm <- function(object, data=model.frame(object), ..., keepNA=TRUE) { data.given <- !missing(data) gf <- getglmfit(object) if(is.null(gf)) { warning("Model re-fitted with forcefit=TRUE") object <- update(object, forcefit=TRUE) gf <- getglmfit(object) if(is.null(gf)) stop("internal error: unable to extract a glm fit") } if(data.given) { # new data. Must contain the Berman-Turner variables as well. bt <- list(.mpl.Y=1, .mpl.W=1, .mpl.SUBSET=TRUE) if(any(forgot <- !(names(bt) %in% names(data)))) data <- do.call("cbind", append(list(data), bt[forgot])) mm <- model.matrix(gf, data=data, ...) return(mm) } if(!keepNA) { # extract model matrix of glm fit object # restricting to its 'subset' mm <- model.matrix(gf, data=data, ...) return(mm) } # extract model matrix for all cases mm <- model.matrix(gf, data, ..., subset=NULL) cn <- colnames(mm) gd <- getglmdata(object, drop=FALSE) if(nrow(mm) != nrow(gd)) { # can occur if covariates include NA's or interaction is -Inf insubset <- getglmsubset(object) isna <- is.na(insubset) | !insubset if(sum(isna) + nrow(mm) == nrow(gd)) { # insert rows of NA's mmplus <- matrix( , nrow(gd), ncol(mm)) mmplus[isna, ] <- NA mmplus[!isna, ] <- mm mm <- mmplus } else stop("internal error: model matrix does not match glm data frame") } colnames(mm) <- cn return(mm) } model.images <- function(object, ...) { UseMethod("model.images") } model.images.ppm <- function(object, W=as.owin(object), ...) 
{ X <- data.ppm(object) # make a quadscheme with a dummy point at every pixel Q <- pixelquad(X, W) # construct Berman-Turner frame needed <- c("trend", "interaction", "covariates", "correction", "rbord") bt <- do.call("bt.frame", append(list(Q), object[needed])) # compute model matrix mf <- model.frame(bt$fmla, bt$glmdata, ...) mm <- model.matrix(bt$fmla, mf, ...) # retain only the entries for dummy points (pixels) mm <- mm[!is.data(Q), , drop=FALSE] # create template image Z <- as.im(attr(Q, "M")) # make images imagenames <- colnames(mm) result <- lapply(imagenames, function(nama, Z, mm) { values <- mm[, nama] im(values, xcol=Z$xcol, yrow=Z$yrow, unitname=unitname(Z)) }, Z=Z, mm=mm) result <- as.listof(result) names(result) <- imagenames return(result) } unitname.ppm <- function(x) { return(unitname(x$Q)) } "unitname<-.ppm" <- function(x, value) { unitname(x$Q) <- value return(x) } nobs.ppm <- function(object, ...) { npoints(data.ppm(object)) } as.interact.ppm <- function(object) { verifyclass(object, "ppm") inte <- object$interaction if(is.null(inte)) inte <- Poisson() return(inte) } as.ppm <- function(object) { UseMethod("as.ppm") } as.ppm.ppm <- function(object) { object } # method for as.owin as.owin.ppm <- function(W, ..., from=c("points", "covariates"), fatal=TRUE) { if(!verifyclass(W, "ppm", fatal=fatal)) return(NULL) from <- match.arg(from) datawin <- as.owin(data.ppm(W)) if(from == "points") return(datawin) covs <- W$covariates isim <- unlist(lapply(covs, is.im)) if(!any(isim)) return(datawin) cwins <- lapply(covs[isim], as.owin) covwin <- do.call("intersect.owin", unname(cwins)) result <- intersect.owin(covwin, datawin) return(result) } spatstat/R/randomtess.R0000755000176000001440000000265112237642727014673 0ustar ripleyusers# # randomtess.R # # Random tessellations # # $Revision: 1.6 $ $Date: 2011/05/18 09:00:01 $ # # Poisson line tessellation rpoislinetess <- function(lambda, win=owin()) { win <- as.owin(win) if(win$type == "mask") stop("Not 
implemented for masks") # determine circumcircle xr <- win$xrange yr <- win$yrange xmid <- mean(xr) ymid <- mean(yr) width <- diff(xr) height <- diff(yr) rmax <- sqrt(width^2 + height^2)/2 boundbox <- owin(xmid + c(-1,1) * rmax, ymid + c(-1,1) * rmax) # generate poisson lines through circumcircle n <- rpois(1, lambda * 2 * pi * rmax) if(n == 0) return(tess(tiles=list(win))) theta <- runif(n, max= 2 * pi) p <- runif(n, max=rmax) Y <- infline(p=p, theta=theta) # form the induced tessellation in bounding box Z <- chop.tess(boundbox, Y) # clip to window Z <- intersect.tess(Z, win) return(Z) } rMosaicSet <- function(X, p=0.5) { stopifnot(is.tess(X)) Y <- tiles(X) Y <- Y[runif(length(Y)) < p] if(length(Y) == 0) return(NULL) Z <- NULL for(i in seq_along(Y)) Z <- union.owin(Z, Y[[i]]) return(Z) } rMosaicField <- function(X, rgen=function(n) { sample(0:1, n, replace=TRUE)}, ..., rgenargs=NULL ) { stopifnot(is.tess(X)) Y <- as.im(X, ...) ntiles <- length(levels(Y)) values <- do.call(rgen, append(list(ntiles),rgenargs)) Z <- eval.im(values[as.integer(Y)]) return(Z) } spatstat/R/rmhResolveTypes.R0000755000176000001440000000613412237642727015667 0ustar ripleyusers# # # rmhResolveTypes.R # # $Revision: 1.9 $ $Date: 2009/10/31 01:52:54 $ # # rmhResolveTypes <- function(model, start, control) { # Decide whether a multitype point process is to be simulated. # If so, determine the vector of types. 
verifyclass(model, "rmhmodel") verifyclass(start, "rmhstart") verifyclass(control, "rmhcontrol") # Different ways of specifying types directly types.model <- model$types types.start <- if(start$given=="x" && is.marked(x.start <- start$x.start)) levels(marks(x.start, dfok=FALSE)) else NULL # Check for inconsistencies if(!is.null(types.model) && !is.null(types.start)) if(!identical(all.equal(types.model, types.start), TRUE)) stop("marks in start$x.start do not match model$types") types.given <- if(!is.null(types.model)) types.model else types.start types.given.source <- if(!is.null(types.model)) "model$types" else "marks of x.start" # Different ways of implying the number of types ntypes.beta <- length(model$par[["beta"]]) ntypes.ptypes <- length(control$ptypes) ntypes.nstart <- if(start$given == "n") length(start$n.start) else 0 mot <- model$trend ntypes.trend <- if(is.null(mot)) 0 else if(is.im(mot)) 1 else if(is.list(mot) && all(unlist(lapply(mot, is.im)))) length(mot) else 0 # Check for inconsistencies in implied number of types (only for numbers > 1) nty <- c(ntypes.beta, ntypes.ptypes, ntypes.nstart, ntypes.trend) nam <- c("model$par$beta", "control$ptypes", "start$n.start", "model$trend") implied <- (nty > 1) if(!any(implied)) ntypes.implied <- 1 else { if(length(unique(nty[implied])) > 1) stop(paste("Mismatch in numbers of types implied by", commasep(sQuote(nam[implied])))) ntypes.implied <- unique(nty[implied]) ntypes.implied.source <- (nam[implied])[1] } # Check consistency between types.given and ntypes.implied if(!is.null(types.given) && ntypes.implied > 1) if(length(types.given) != ntypes.implied) stop(paste("Mismatch between number of types in", types.given.source, "and length of", ntypes.implied.source)) # Finally determine the types if(model$multitype.interact) { # There MUST be a types vector types <- if(!is.null(types.given)) types.given else if(ntypes.implied > 1) 1:ntypes.implied else stop("Cannot determine types for multitype process") } else { 
types <- if(!is.null(types.given)) types.given else if(ntypes.implied > 1) 1:ntypes.implied else 1 } ntypes <- length(types) # If we are conditioning on the number of points of each type, # make sure the starting state is appropriate if(control$fixing == "n.each.type") { if(start$given == "n" && ntypes.nstart != ntypes) stop("Length of start$n.start not equal to number of types.\n") else if(start$given == "x" && length(types.given) != ntypes) stop("Marks of start$x.start do not match number of types.\n") } return(types) } spatstat/R/quadratcount.R0000755000176000001440000001310612237642727015223 0ustar ripleyusers# # quadratcount.R # # $Revision: 1.33 $ $Date: 2012/09/05 08:17:55 $ # quadratcount <- function(X, ...) { UseMethod("quadratcount") } quadratcount.splitppp <- function(X, ...) { as.listof(lapply(X, quadratcount, ...)) } quadratcount.ppp <- function(X, nx=5, ny=nx, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL) { verifyclass(X, "ppp") W <- X$window if(is.null(tess)) { # rectangular boundaries if(!is.numeric(nx)) stop("nx should be numeric") # start with rectangular tessellation tess <- quadrats(as.rectangle(W), nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks) # fast code for counting points in rectangular grid Xcount <- rectquadrat.countEngine(X$x, X$y, tess$xgrid, tess$ygrid) # if(W$type != "rectangle") { # intersections of rectangles with window including empty intersections tess <- quadrats(X, nx=nx, ny=ny, xbreaks=xbreaks, ybreaks=ybreaks, keepempty=TRUE) # now delete the empty quadrats and the corresponding counts nonempty <- !unlist(lapply(tiles(tess), is.empty)) if(!any(nonempty)) stop("All tiles are empty") if(!all(nonempty)) { ntiles <- sum(nonempty) tess <- tess[nonempty] Xcount <- t(Xcount)[nonempty] # matrices and tables are in row-major order, # tiles in a rectangular tessellation are in column-major order Xcount <- array(Xcount, dimnames=list(tile=tilenames(tess))) class(Xcount) <- "table" } } } else { # user-supplied tessellation 
if(!inherits(tess, "tess")) stop("The argument tess should be a tessellation", call.=FALSE) if(tess$type == "rect") { # fast code for counting points in rectangular grid Xcount <- rectquadrat.countEngine(X$x, X$y, tess$xgrid, tess$ygrid) } else { # quadrats are another type of tessellation Y <- cut(X, tess) if(any(is.na(marks(Y)))) warning("Tessellation does not contain all the points of X") Xcount <- table(tile=marks(Y)) } } attr(Xcount, "tess") <- tess class(Xcount) <- c("quadratcount", class(Xcount)) return(Xcount) } plot.quadratcount <- function(x, ..., add=FALSE, entries=as.vector(t(as.table(x))), dx=0, dy=0, show.tiles=TRUE) { xname <- short.deparse(substitute(x)) tess <- attr(x, "tess") # add=FALSE, show.tiles=TRUE => plot tiles + numbers # add=FALSE, show.tiles=FALSE => plot window (add=FALSE) + numbers # add=TRUE, show.tiles=TRUE => plot tiles (add=TRUE) + numbers # add=TRUE, show.tiles=FALSE => plot numbers if(show.tiles || !add) { context <- if(show.tiles) tess else as.owin(tess) do.call("plot", resolve.defaults(list(context, add=add), list(...), list(main=xname), .StripNull=TRUE)) } if(!is.null(entries)) { labels <- paste(as.vector(entries)) til <- tiles(tess) incircles <- lapply(til, incircle) x0 <- unlist(lapply(incircles, function(z) { z$x })) y0 <- unlist(lapply(incircles, function(z) { z$y })) ra <- unlist(lapply(incircles, function(z) { z$r })) do.call.matched("text.default", resolve.defaults(list(x=x0 + dx * ra, y = y0 + dy * ra), list(labels=labels), list(...))) } return(invisible(NULL)) } rectquadrat.breaks <- function(xr, yr, nx=5, ny=nx, xbreaks=NULL, ybreaks=NULL) { if(is.null(xbreaks)) xbreaks <- seq(from=xr[1], to=xr[2], length.out=nx+1) else if(min(xbreaks) > xr[1] || max(xbreaks) < xr[2]) stop("xbreaks do not span the range of x coordinates in the window") if(is.null(ybreaks)) ybreaks <- seq(from=yr[1], to=yr[2], length.out=ny+1) else if(min(ybreaks) > yr[1] || max(ybreaks) < yr[2]) stop("ybreaks do not span the range of y coordinates in 
the window") return(list(xbreaks=xbreaks, ybreaks=ybreaks)) } rectquadrat.countEngine <- function(x, y, xbreaks, ybreaks, weights) { if(length(x) > 0) { # check validity of breaks if(min(x) < min(xbreaks) || max(x) > max(xbreaks)) stop("xbreaks do not span the actual range of x coordinates in data") if(min(y) < min(ybreaks) || max(y) > max(ybreaks)) stop("ybreaks do not span the actual range of y coordinates in data") } xg <- cut(x, breaks=xbreaks, include.lowest=TRUE) yg <- cut(y, breaks=ybreaks, include.lowest=TRUE) if(missing(weights)) sumz <- table(list(y=yg, x=xg)) else { sumz <- tapply(weights, list(y=yg, x=xg), sum) if(any(nbg <- is.na(sumz))) sumz[nbg] <- 0 } # reverse order of y sumz <- sumz[rev(seq_len(nrow(sumz))), ] sumz <- as.table(sumz) # attr(sumz, "xbreaks") <- xbreaks attr(sumz, "ybreaks") <- ybreaks return(sumz) } quadrats <- function(X, nx=5, ny=nx, xbreaks = NULL, ybreaks = NULL, keepempty=FALSE) { W <- as.owin(X) xr <- W$xrange yr <- W$yrange b <- rectquadrat.breaks(xr, yr, nx, ny, xbreaks, ybreaks) # rectangular tiles Z <- tess(xgrid=b$xbreaks, ygrid=b$ybreaks) if(W$type != "rectangle") { # intersect rectangular tiles with window W if(!keepempty) { Z <- intersect.tess(Z, W) } else { til <- tiles(Z) for(i in seq_along(til)) til[[i]] <- intersect.owin(til[[i]], W) Z <- tess(tiles=til, window=W, keepempty=TRUE) } } return(Z) } as.tess.quadratcount <- function(X) { return(attr(X, "tess")) } spatstat/R/strausshard.R0000755000176000001440000001076512237642727015064 0ustar ripleyusers# # # strausshard.S # # $Revision: 2.19 $ $Date: 2013/07/19 02:53:00 $ # # The Strauss/hard core process # # StraussHard() create an instance of the Strauss-hardcore process # [an object of class 'interact'] # # # ------------------------------------------------------------------- # StraussHard <- local({ BlankStraussHard <- list( name = "Strauss - hard core process", creator = "StraussHard", family = "pairwise.family", # evaluated later pot = function(d, par) { v <- 1 * 
(d <= par$r) v[ d <= par$hc ] <- (-Inf) v }, par = list(r = NULL, hc = NULL), # filled in later parnames = c("interaction distance", "hard core distance"), selfstart = function(X, self) { # self starter for StraussHard nX <- npoints(X) if(nX < 2) { # not enough points to make any decisions return(self) } r <- self$par$r md <- min(nndist(X)) if(md == 0) { warning(paste("Pattern contains duplicated points:", "hard core must be zero")) return(StraussHard(r=r, hc=0)) } if(!is.na(hc <- self$par$hc)) { # value fixed by user or previous invocation # check it if(md < hc) warning(paste("Hard core distance is too large;", "some data points will have zero probability")) return(self) } # take hc = minimum interpoint distance * n/(n+1) hcX <- md * nX/(nX+1) StraussHard(r=r, hc = hcX) }, init = function(self) { r <- self$par$r hc <- self$par$hc if(length(hc) != 1) stop("hard core distance must be a single value") if(!is.na(hc)) { if(!is.numeric(hc) || hc <= 0) stop("hard core distance hc must be a positive number, or NA") if(!is.numeric(r) || length(r) != 1 || r <= hc) stop("interaction distance r must be a number greater than hc") } }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) gamma <- exp(loggamma) return(list(param=list(gamma=gamma), inames="interaction parameter gamma", printable=round(gamma,4))) }, valid = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) return(is.finite(loggamma)) }, project = function(coeffs, self) { loggamma <- as.numeric(coeffs[1]) if(is.finite(loggamma)) return(NULL) hc <- self$par$hc if(hc > 0) return(Hardcore(hc)) else return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) 
{ r <- self$par$r hc <- self$par$hc if(any(is.na(coeffs))) return(r) loggamma <- coeffs[1] if(abs(loggamma) <= epsilon) return(hc) else return(r) }, version=NULL, # evaluated later # fast evaluation is available for the border correction only can.do.fast=function(X,correction,par) { return(all(correction %in% c("border", "none"))) }, fasteval=function(X,U,EqualPairs,pairpot,potpars,correction, ...) { # fast evaluator for StraussHard interaction if(!all(correction %in% c("border", "none"))) return(NULL) if(spatstat.options("fasteval") == "test") message("Using fast eval for StraussHard") r <- potpars$r hc <- potpars$hc hclose <- strausscounts(U, X, hc, EqualPairs) rclose <- strausscounts(U, X, r, EqualPairs) answer <- ifelseXB(hclose == 0, rclose, -Inf) return(matrix(answer, ncol=1)) }, Mayer=function(coeffs, self) { # second Mayer cluster integral gamma <- exp(as.numeric(coeffs[1])) r <- self$par$r hc <- self$par$hc return(pi * (hc^2 + (1-gamma) * (r^2 - hc^2))) } ) class(BlankStraussHard) <- "interact" StraussHard <- function(r, hc=NA) { instantiate.interact(BlankStraussHard, list(r=r, hc=hc)) } StraussHard }) spatstat/R/summary.mppm.R0000644000176000001440000001266512240737337015160 0ustar ripleyusers# # summary.mppm.R # # $Revision: 1.8 $ $Date: 2013/11/13 18:02:37 $ # summary.mppm <- function(object, ..., brief=FALSE) { # y will be the summary y <- object[c("Call", "Info", "Inter", "trend", "iformula", "npat", "maxlogpl")] y$brief <- brief Info <- object$Info Inter <- object$Inter FIT <- object$Fit$FIT moadf <- object$Fit$moadf y$Fit <- object$Fit[c("fitter", "use.gam", "fmla", "Vnamelist")] y$Fit$FIT <- summary(FIT) y$Fit$moadf <- list(nrow=nrow(moadf), colnames=colnames(moadf)) ninteract <- Inter$ninteract interaction <- Inter$interaction iused <- Inter$iused itags <- Inter$itags processnames <- Inter$processes constant <- Inter$constant trivial <- Inter$trivial npat <- y$npat iformula <- y$iformula Vnamelist <- y$Fit$Vnamelist allVnames <- unlist(Vnamelist) 
poistags <- itags[trivial] rownames <- y$Info$rownames switch(y$Fit$fitter, gam=, glm={ y$coef <- co <- coef(FIT) systematic <- !(names(co) %in% c(allVnames, poistags)) y$coef.syst <- co[systematic] }) # model depends on covariates y$depends.covar <- Info$has.covar && (length(Info$used.cov.names) > 0) ### Interactions # model is Poisson y$poisson <- all(trivial[iused]) # Determine how complicated the interactions are: # (1) is the interaction formula of the form ~ tag + tag + ... + tag isimple <- identical(sort(variablesinformula(iformula)), sort(termsinformula(iformula))) # (2) is it of the form ~tag trivialformula <- (isimple && ninteract == 1) # (3) is it of the form ~tag where the interaction is the same in each row fixedinteraction <- trivialformula && constant ### Determine printing of interactions, accordingly ### iprint <- list() if(fixedinteraction) { # exactly the same interaction for all patterns interaction <- interaction[1,1,drop=TRUE] fi.all <- fii(interaction, co, Vnamelist[[1]]) iprint <- list("Interaction for all patterns"=fi.all) printeachrow <- FALSE toohard <- FALSE } else if(trivialformula) { # same type of process for all patterns pname <- unlist(processnames)[iused] iprint <- list("Interaction for each pattern" = pname) printeachrow <- TRUE toohard <- FALSE } else if(isimple && all(constant)) { # several interactions involved, each of which is the same for all patterns iprint <- list("Interaction formula"=iformula, "Interactions defined for each pattern"=NULL) for(j in (1:ninteract)[iused]) { name.j <- paste("Interaction", sQuote(itags[j])) int.j <- Inter$interaction[1,j,drop=TRUE] Vnames.j <- Vnamelist[[j]] fii.j <- fii(int.j, co, Vnames.j) extra.j <- list(fii.j) names(extra.j) <- name.j iprint <- append(iprint, extra.j) } printeachrow <- FALSE toohard <- FALSE } else { # general case # determine which interaction(s) are active on each row active <- active.interactions(object) if(ninteract > 1 || !all(active)) iprint <- list("Active 
interactions"=active) printeachrow <- TRUE toohard <- any(rowSums(active) > 1) } y$ikind <- list( isimple =isimple, trivialformula =trivialformula, fixedinteraction =fixedinteraction, toohard =toohard, printeachrow =printeachrow) if(toohard) iprint <- append(iprint, list("(Sorry, cannot interpret fitted interactions)")) else if(printeachrow) { subs <- subfits(object, what="interactions") names(subs) <- paste("Interaction", 1:npat) iprint <- append(iprint, subs) } y$iprint <- iprint class(y) <- c("summary.mppm", class(list)) return(y) } print.summary.mppm <- function(x, ..., brief=x$brief) { # NB: x is an object of class "summary.mppm" npat <- x$npat Inter <- x$Inter ninteract <- Inter$ninteract interaction <- Inter$interaction iused <- Inter$iused itags <- Inter$itags processnames <- Inter$processes constant <- Inter$constant trivial <- Inter$trivial iformula <- x$iformula FIT <- x$Fit$FIT Vnamelist <- x$Fit$Vnamelist allVnames <- unlist(Vnamelist) poistags <- itags[trivial] rownames <- x$Info$rownames cat(paste("Point process model fitted to", x$npat, "point patterns\n")) cat(paste("Call:\n", x$Call$callstring, "\n")) cat(paste("Trend formula:", paste(x$trend, collapse=""), "\n")) switch(x$Fit$fitter, gam=, glm={ cat("Fitted trend coefficients:\n") print(x$coef.syst) co <- coef(FIT) }) if(!brief) { cat("All fitted coefficients:\n") print(co) } cat("\n") ### Print interaction information ### iprint <- x$iprint nama <- names(iprint) for(i in seq(length(iprint))) { nami <- nama[i] vali <- iprint[[i]] newlin <- !(inherits(vali, "formula") || is.character(vali)) if(nami != "") cat(paste(nami, ":", if(newlin) "\n" else "\t", sep="")) if(!is.null(vali)) { if(inherits(vali, "fii")) { print(vali, tiny=brief) } else if(is.character(vali)) { cat(vali) } else print(vali) } cat("\n") } if(!brief) { cat("--- Gory details: ---\n") cat(paste("Combined data frame has", x$Fit$moadf$nrow, "rows\n")) print(FIT) } invisible(NULL) } 
spatstat/R/predictmppm.R0000644000176000001440000002473412237642730015035 0ustar ripleyusers# # predictmppm.R # # $Revision: 1.6 $ $Date: 2007/05/18 16:39:51 $ # # # ------------------------------------------------------------------- predict.mppm <- function(object, ..., newdata=NULL, type=c("trend", "cif"), ngrid=40, locations=NULL, verbose=FALSE) { # # 'object' is the output of mppm() # model <- object verifyclass(model, "mppm") # # # 'type' type <- pickoption("type", type, c(trend="trend", lambda="cif", cif="cif"), multi=TRUE) want.trend <- "trend" %in% type want.cif <- "cif" %in% type selfcheck <- resolve.defaults(list(...), list(selfcheck=FALSE))$selfcheck # # if(verbose) cat("Inspecting arguments...") # # 'newdata' use.olddata <- is.null(newdata) if(use.olddata) { newdata <- model$data newdataname <- "Original data" } else { stopifnot(is.data.frame(newdata) || is.hyperframe(newdata)) newdataname <- sQuote("newdata") } # # # Locations for prediction if(is.hyperframe(locations)) locations <- locations[,1,drop=TRUE] if(is.list(locations)) cls <- unique(sapply(locations, class)) loctype <- if(is.null(locations)) "null" else if(is.data.frame(locations)) "data.frame" else if(is.list(locations)) { if(any(c("ppp", "quad") %in% cls)) "points" else if("owin" %in% cls) { if(all(sapply(locations, function(w) { w$type == "mask"}))) "mask" else "window" } else "unknown" } else "unknown" need.grid <- switch(loctype, null =TRUE, data.frame=FALSE, points =FALSE, mask =FALSE, window =TRUE, unknown =stop("Unrecognised format for locations")) make.image <- need.grid || (loctype == "mask") # locationvars <- c("x", "y", "id") # # if(verbose) cat("done.\nDetermining locations for prediction...") if(need.grid) { # prediction on a grid is required if(is.data.frame(newdata)) stop(paste("Cannot predict model on a grid;", newdataname, "are a data frame")) } else { # prediction at `locations' is required if(is.hyperframe(newdata)) { # check consistency between locations and newdata nloc 
<- length(locations) nnew <- summary(newdata)$ncases if(nloc != nnew) stop(paste("Length of argument", sQuote("locations"), paren(nloc), "does not match number of rows in", newdataname, paren(nnew))) } else { # newdata is a data frame if(!is.data.frame(locations)) stop(paste(newdataname, "is a data frame; locations must be a data frame")) else { stopifnot(nrow(locations) == nrow(newdata)) dup <- names(newdata) %in% names(locations) if(any(dup)) for(nam in names(newdata)[dup]) if(!all.equal(newdata[,nam], locations[,nam])) stop(paste("The data frames newdata and locations", "both have a column called", sQuote(nam), "but the entries differ")) nbg <- !(locationvars %in% c(names(newdata),names(locations))) if(any(nbg)) stop(paste(ngettext(sum(nbg), "Variable", "Variables"), commasep(locationvars[nbg]), "not provided")) # merge the two data frames newdata <- cbind(newdata[,!dup], locations) locations <- NULL } } } if(verbose) cat("done.\n Constructing data for prediction...") # # # extract fitted glm/gam/glmm object FIT <- model$Fit$FIT # extract names of interaction variables Vnamelist <- model$Fit$Vnamelist vnames <- unlist(Vnamelist) # # # newdata is data frame if(is.data.frame(newdata)) { if(verbose) cat("(data frame)...") if(need.grid) stop("Cannot predict model on a grid; newdata is a data frame") # use newdata as covariates nbg <- !(locationvars %in% names(newdata)) if(any(nbg)) stop(paste(ngettext(sum(nbg), "variable", "variables"), commasep(locationvars[nbg]), "not provided")) # create output data frame answer <- as.data.frame(matrix(, nrow=nrow(newdata), ncol=0), row.names=row.names(newdata)) if(want.trend) { # add interaction components, set to zero (if any) if(length(vnames) > 0) newdata[, vnames] <- 0 # compute fitted values answer$trend <- predict(FIT, newdata=newdata, type="response") } if(want.cif) { warning("Not yet implemented (computation of cif in data frame case)") # split data frame by 'id' # compute interaction components using existing point 
patterns # compute fitted values } return(answer) } # newdata is a hyperframe if(verbose) cat("(hyperframe)...") sumry <- summary(newdata) npat.new <- sumry$ncases # name of response point pattern in model Yname <- model$Info$Yname # # Determine response point patterns if known. # Extract from newdata if available # Otherwise from the original data if appropriate if(verbose) cat("(responses)...") Y <- if(Yname %in% sumry$col.names) newdata[, Yname, drop=TRUE] else if(npat.new == model$npat) data[, Yname, drop=TRUE] else NULL # if(want.cif && is.null(Y)) stop(paste("Cannot compute cif:", "newdata does not contain column", dQuote(Yname), "of response point patterns")) # # Determine windows for prediction if(verbose) cat("(windows)...") Wins <- if(!need.grid) lapply(locations, as.owin, fatal=FALSE) else if(!is.null(Y)) lapply(Y, as.owin, fatal=FALSE) else NULL if(is.null(Wins) || any(sapply(Wins, is.null))) stop("Cannot determine windows where predictions should be made") # # if(is.null(Y)) { # only want trend; empty patterns will do emptypattern <- function(w) { ppp(numeric(0), numeric(0), window=w) } Y <- lapply(Wins, emptypattern) } # ensure Y contains data points only if(inherits(Y[[1]], "quad")) Y <- lapply(Y, function(z) { z$data }) # Determine locations for prediction if(need.grid) { # Generate grids of dummy locations if(verbose) cat("(grids)...") gridsample <- function(W, ngrid) { masque <- as.mask(W, dimyx=ngrid) xx <- raster.x(masque) yy <- raster.y(masque) xpredict <- xx[masque$m] ypredict <- yy[masque$m] Dummy <- ppp(xpredict, ypredict, window=W) Image <- as.im(masque) return(list(D=Dummy, I=Image)) } Gridded <- lapply(Wins, gridsample, ngrid=ngrid) Dummies <- lapply(Gridded, function(z) { z$D }) Templates <- lapply(Gridded, function(z) { z$I }) } else { # locations are given somehow if(verbose) cat("(locations)...") if(loctype == "points") Dummies <- locations else if(loctype == "mask") { punctify <- function(M) { xx <- raster.x(M) yy <- raster.y(M) 
xpredict <- xx[M$m] ypredict <- yy[M$m] return(ppp(xpredict, ypredict, window=M)) } Dummies <- lapply(locations, punctify) Templates <- lapply(locations, as.im) } else stop("Internal error: illegal loctype") } # Pack into quadschemes if(verbose) cat("(quadschemes)...") Quads <- list() for(i in seq(npat.new)) Quads[[i]] <- quad(data=Y[[i]], dummy=Dummies[[i]]) # Insert quadschemes into newdata newdata[, Yname] <- Quads # compute the Berman-Turner frame if(verbose) cat("done.\nStarting prediction...(Berman-Turner frame)...") moadf <- mppm(formula = model$formula, data = newdata, interaction = model$Inter$interaction, iformula = model$iformula, use.gam = model$Fit$use.gam, correction = model$Info$correction, rbord = model$Info$rbord, backdoor = TRUE) # compute fitted values if(verbose) cat("(glm prediction)...") values <- moadf[, c("x", "y", "id")] if(want.cif) values$cif <- predict(FIT, newdata=moadf, type="response") if(want.trend) { if(length(vnames) == 0) # Poisson model: trend = cif values$trend <- if(want.cif) values$cif else predict(FIT, newdata=moadf, type="response") else { # zero the interaction components moadf[, vnames] <- 0 # compute fitted values values$trend <- predict(FIT, newdata=moadf, type="response") } } if(verbose) cat("done.\nReshaping results...") # # Reshape results # separate answers for each image values <- split(values, values$id) # Trends <- list() Lambdas <- list() if(!make.image) { if(verbose) cat("(marked point patterns)...") # values become marks attached to locations for(i in seq(npat.new)) { Val <- values[[i]] Loc <- Dummies[[i]] isdum <- !is.data(Quads[[i]]) if(selfcheck) if(length(isdum) != length(Val$trend)) stop("Internal error: mismatch between data frame and locations") if(want.trend) Trends[[i]] <- Loc %mark% (Val$trend[isdum]) if(want.cif) Lambdas[[i]] <- Loc %mark% (Val$cif[isdum]) } } else { if(verbose) cat("(pixel images)...") # assign values to pixel images for(i in seq(npat.new)) { values.i <- values[[i]] Q.i <- 
Quads[[i]] values.i <- values.i[!is.data(Q.i), ] Template.i <- Templates[[i]] ok.i <- !is.na(Template.i$v) if(sum(ok.i) != nrow(values.i)) stop("Internal error: mismatch between data frame and image") if(selfcheck) { dx <- rasterx.im(Template.i)[ok.i] - values.i$x dy <- rastery.im(Template.i)[ok.i] - values.i$y cat(paste("i=", i, "range(dx) =", paste(range(dx), collapse=", "), "range(dy) =", paste(range(dy), collapse=", "), "\n")) } if(want.trend) { Trend.i <- Template.i Trend.i$v[ok.i] <- values.i$trend Trends[[i]] <- Trend.i } if(want.cif) { Lambda.i <- Template.i Lambda.i$v[ok.i] <- values.i$cif Lambdas[[i]] <- Lambda.i } } } if(verbose) cat("done.\n") # answer is a hyperframe Answer <- hyperframe(id=factor(levels(moadf$id)), row.names=sumry$row.names) if(want.trend) Answer$trend <- Trends if(want.cif) Answer$cif <- Lambdas return(Answer) } spatstat/R/rmhmodel.ppm.R0000755000176000001440000003203312237642727015113 0ustar ripleyusers# # rmhmodel.ppm.R # # convert ppm object into format palatable to rmh.default # # $Revision: 2.57 $ $Date: 2013/04/25 06:37:43 $ # # .Spatstat.rmhinfo # rmhmodel.ppm() # .Spatstat.Rmhinfo <- list( "Multitype Hardcore process" = function(coeffs, inte) { # hard core radii r[i,j] hradii <- inte$par$hradii return(list(cif='multihard', par=list(hradii=hradii), ntypes=ncol(hradii))) }, "Lennard-Jones process" = function(coeffs, inte) { sigma <- inte$par$sigma epsilon <- inte$par$epsilon return(list(cif='lennard', par=list(sigma=sigma, epsilon=epsilon), ntypes=1)) }, "Fiksel process" = function(coeffs, inte) { hc <- inte$par$hc r <- inte$par$r kappa <- inte$par$kappa a <- inte$interpret(coeffs,inte)$param$a return(list(cif='fiksel', par=list(r=r,hc=hc,kappa=kappa,a=a), ntypes=1)) }, "Diggle-Gates-Stibbard process" = function(coeffs, inte) { rho <- inte$par$rho return(list(cif='dgs', par=list(rho=rho), ntypes=1)) }, "Diggle-Gratton process" = function(coeffs, inte) { kappa <- inte$interpret(coeffs,inte)$param$kappa delta <- inte$par$delta 
rho <- inte$par$rho return(list(cif='diggra', par=list(kappa=kappa,delta=delta,rho=rho), ntypes=1)) }, "Hard core process" = function(coeffs, inte) { hc <- inte$par$hc return(list(cif='hardcore', par=list(hc=hc), ntypes=1)) }, "Geyer saturation process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par$r sat <- inte$par$sat return(list(cif='geyer', par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Soft core process" = function(coeffs, inte) { kappa <- inte$par$kappa sigma <- inte$interpret(coeffs,inte)$param$sigma return(list(cif="sftcr", par=list(sigma=sigma,kappa=kappa), ntypes=1)) }, "Strauss process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par$r return(list(cif = "strauss", par = list(gamma = gamma, r = r), ntypes=1)) }, "Strauss - hard core process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par$r hc <- inte$par$hc return(list(cif='straush', par=list(gamma=gamma,r=r,hc=hc), ntypes=1)) }, "Triplets process" = function(coeffs, inte) { gamma <- inte$interpret(coeffs,inte)$param$gamma r <- inte$par$r return(list(cif = "triplets", par = list(gamma = gamma, r = r), ntypes=1)) }, "Multitype Strauss process" = function(coeffs, inte) { # interaction radii r[i,j] radii <- inte$par$radii # interaction parameters gamma[i,j] gamma <- (inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straussm', par=list(gamma=gamma,radii=radii), ntypes=ncol(radii))) }, "Multitype Strauss Hardcore process" = function(coeffs, inte) { # interaction radii r[i,j] iradii <- inte$par$iradii # hard core radii r[i,j] hradii <- inte$par$hradii # interaction parameters gamma[i,j] gamma <- (inte$interpret)(coeffs, inte)$param$gammas return(list(cif='straushm', par=list(gamma=gamma,iradii=iradii,hradii=hradii), ntypes=ncol(iradii))) }, "Piecewise constant pairwise interaction process" = function(coeffs, inte) { r <- inte$par$r gamma <- (inte$interpret)(coeffs, 
inte)$param$gammas h <- stepfun(r, c(gamma, 1)) return(list(cif='lookup', par=list(h=h), ntypes=1)) }, "Area-interaction process" = function(coeffs, inte) { r <- inte$par$r eta <- (inte$interpret)(coeffs, inte)$param$eta return(list(cif='areaint', par=list(eta=eta,r=r), ntypes=1)) }, "hybrid Geyer process" = function(coeffs, inte) { r <- inte$par$r sat <- inte$par$sat gamma <- (inte$interpret)(coeffs,inte)$param$gammas return(list(cif='badgey',par=list(gamma=gamma,r=r,sat=sat), ntypes=1)) }, "Hybrid interaction"= function(coeffs, inte){ # for hybrids, $par is a list of the component interactions interlist <- inte$par # check for Poisson components ispois <- unlist(lapply(interlist, is.poisson)) if(all(ispois)) { # reduces to Poisson Z <- list(cif='poisson', par=list()) return(Z) } else if(any(ispois)) { # remove Poisson components interlist <- interlist[!ispois] } # N <- length(interlist) cifs <- character(N) pars <- vector(mode="list", length=N) ntyp <- integer(N) for(i in 1:N) { interI <- interlist[[i]] # forbid hybrids-of-hybrids - these should not occur anyway if(interI$name == "Hybrid interaction") stop("Simulation of a hybrid-of-hybrid interaction is not implemented") # get RMH mapping for I-th component siminfoI <- .Spatstat.Rmhinfo[[interI$name]] if(is.null(siminfoI)) stop(paste("Simulation of a fitted", sQuote(interI$name), "has not yet been implemented"), call.=FALSE) # nameI is the tag that identifies I-th component in hybrid nameI <- names(interlist)[[i]] nameI. <- paste(nameI, ".", sep="") # find coefficients with prefix that exactly matches nameI. Cname <- names(coeffs) prefixlength <- nchar(nameI.) Cprefix <- substr(Cname, 1, prefixlength) relevant <- (Cprefix == nameI.) 
# extract coefficients # (there may be none, if this interaction is an 'offset') coeffsI <- coeffs[relevant] # remove the prefix so the coefficients are recognisable to 'siminfoI' if(any(relevant)) names(coeffsI) <- substr(Cname[relevant], prefixlength+1, max(nchar(Cname))) # compute RMH info ZI <- siminfoI(coeffsI, interI) cifs[i] <- ZI$cif pars[[i]] <- ZI$par ntyp[i] <- ZI$ntypes } nt <- unique(ntyp[ntyp != 1]) if(length(nt) > 1) stop(paste("Hybrid components have different numbers of types:", commasep(nt))) if(N == 1) { # single cif: revert to original format: par is a list of parameters Z <- list(cif=cifs[1], par=pars[[1]], ntypes=ntyp) } else { # hybrid cif: par is a list of lists of parameters Z <- list(cif=cifs, par=pars, ntypes=ntyp) } return(Z) } ) # OTHER MODELS not yet implemented: # # # interaction object rmh.default # ------------------ ----------- # # OrdThresh # rmhmodel.ppm <- function(model, win, ..., verbose=TRUE, project=TRUE, control=rmhcontrol()) { # converts ppm object `model' into format palatable to rmh.default verifyclass(model, "ppm") # Ensure the fitted model is valid # (i.e. 
exists mathematically as a point process) if(!valid.ppm(model)) { if(project) { if(verbose) cat("Model is invalid - projecting it\n") model <- project.ppm(model, fatal=TRUE) } else stop("The fitted model is not a valid point process") } X <- model if(verbose) cat("Extracting model information...") # Extract essential information Y <- summary(X, quick="no variances") if(Y$marked && !Y$multitype) stop("Not implemented for marked point processes other than multitype") if(Y$uses.covars && is.data.frame(X$covariates)) stop(paste("This model cannot be simulated, because the", "covariate values were given as a data frame.")) # enforce defaults for `control' control <- rmhcontrol(control) # adjust to peculiarities of model control <- rmhResolveControl(control, X) ######## Interpoint interaction if(Y$poisson) { Z <- list(cif="poisson", par=list()) # par is filled in later } else { # First check version number of ppm object if(Y$antiquated) stop(paste("This model was fitted by a very old version", "of the package: spatstat", Y$version, "; simulation is not possible.", "Re-fit the model using your original code")) else if(Y$old) warning(paste("This model was fitted by an old version", "of the package: spatstat", Y$version, ". 
Re-fit the model using update.ppm", "or your original code")) # Extract the interpoint interaction object inte <- Y$entries$interaction # Determine whether the model can be simulated using rmh siminfo <- .Spatstat.Rmhinfo[[inte$name]] if(is.null(siminfo)) stop(paste("Simulation of a fitted", sQuote(inte$name), "has not yet been implemented")) # Get fitted model's canonical coefficients coeffs <- Y$entries$coef if(newstyle.coeff.handling(inte)) { # extract only the interaction coefficients Vnames <- Y$entries$Vnames IsOffset <- Y$entries$IsOffset coeffs <- coeffs[Vnames[!IsOffset]] } # Translate the model to the format required by rmh.default Z <- siminfo(coeffs, inte) if(is.null(Z)) stop("The model cannot be simulated") else if(is.null(Z$cif)) stop(paste("Internal error: no cif returned from .Spatstat.Rmhinfo")) } # Don't forget the types if(Y$multitype && is.null(Z$types)) Z$types <- levels(Y$entries$marks) ######## Window for result if(missing(win)) win <- Y$entries$data$window Z$w <- win ######## Expanded window for simulation? 
covims <- if(Y$uses.covars) X$covariates[Y$covars.used] else NULL wsim <- rmhResolveExpansion(win, control, covims, "covariate")$wsim ###### Trend or Intensity ############ if(verbose) cat("Evaluating trend...") if(Y$stationary) { # first order terms (beta or beta[i]) are carried in Z$par beta <- as.numeric(Y$trend$value) Z$trend <- NULL } else { # trend terms present # all first order effects are subsumed in Z$trend beta <- if(!Y$marked) 1 else rep.int(1, length(Z$types)) # predict on window possibly larger than original data window Z$trend <- if(wsim$type == "mask") predict(X, window=wsim, type="trend", locations=wsim) else predict(X, window=wsim, type="trend") } Ncif <- length(Z$cif) if(Ncif == 1) { # single interaction Z$par[["beta"]] <- beta } else { # hybrid interaction if(all(Z$ntypes == 1)) { # unmarked model: scalar 'beta' is absorbed in first cif absorb <- 1 } else { # multitype model: vector 'beta' is absorbed in a multitype cif absorb <- min(which(Z$ntypes > 1)) } Z$par[[absorb]]$beta <- beta # other cifs have par$beta = 1 for(i in (1:Ncif)[-absorb]) Z$par[[i]]$beta <- rep.int(1, Z$ntypes[i]) } if(verbose) cat("done.\n") Z <- rmhmodel(Z, ...) return(Z) } rmhResolveExpansion <- function(win, control, imagelist, itype="covariate") { # Determine expansion window for simulation ex <- control$expand # The following is redundant because it is implied by !will.expand(ex) # if(ex$force.noexp) { # # Expansion prohibited # return(list(wsim=win, expanded=FALSE)) # } # Is expansion contemplated? 
if(!will.expand(ex)) return(list(wsim=win, expanded=FALSE)) # Proposed expansion window wexp <- expand.owin(win, ex) # Check feasibility isim <- unlist(lapply(imagelist, is.im)) imagelist <- imagelist[isim] if(length(imagelist) == 0) { # Unlimited expansion is feasible return(list(wsim=wexp, expanded=TRUE)) } # Expansion is limited to domain of image data # Determine maximum possible expansion window wins <- lapply(imagelist, as.owin) cwin <- do.call("intersect.owin", unname(wins)) if(!is.subset.owin(wexp, cwin)) { # Cannot expand to proposed window if(ex$force.exp) stop(paste("Cannot expand the simulation window,", "because the", itype, "images do not cover", "the expanded window"), call.=FALSE) # Take largest possible window wexp <- intersect.owin(wexp, cwin) } return(list(wsim=wexp, expanded=TRUE)) } spatstat/R/linearK.R0000755000176000001440000001150512237642727014077 0ustar ripleyusers# # linearK # # $Revision: 1.30 $ $Date: 2013/04/25 06:37:43 $ # # K function for point pattern on linear network # # linearK <- function(X, r=NULL, ..., correction="Ang") { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) # extract info about pattern sX <- summary(X) np <- sX$npoints lengthL <- sX$totlength # compute K denom <- np * (np - 1)/lengthL K <- linearKengine(X, r=r, denom=denom, correction=correction, ...) 
# set appropriate y axis label switch(correction, Ang = { ylab <- quote(K[L](r)) fname <- "K[L]" }, none = { ylab <- quote(K[net](r)) fname <- "K[net]" }) K <- rebadge.fv(K, new.ylab=ylab, new.fname=fname) return(K) } linearKinhom <- function(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE) { stopifnot(inherits(X, "lpp")) correction <- pickoption("correction", correction, c(none="none", Ang="Ang", best="Ang"), multi=FALSE) if(is.null(lambda)) linearK(X, r=r, ..., correction=correction) # extract info about pattern sX <- summary(X) np <- sX$npoints lengthL <- sX$totlength # lambdaX <- getlambda.lpp(lambda, X, ...) # invlam <- 1/lambdaX invlam2 <- outer(invlam, invlam, "*") denom <- if(!normalise) lengthL else sum(invlam) K <- linearKengine(X, ..., r=r, reweight=invlam2, denom=denom, correction=correction) # set appropriate y axis label switch(correction, Ang = { ylab <- quote(K[LI](r)) fname <- "K[LI]" }, none = { ylab <- quote(K[netI](r)) fname <- "K[netI]" }) K <- rebadge.fv(K, new.fname=fname, new.ylab=ylab) return(K) } getlambda.lpp <- function(lambda, X, ...) { lambdaname <- deparse(substitute(lambda)) XX <- as.ppp(X) lambdaX <- if(is.vector(lambda)) lambda else if(is.function(lambda)) lambda(XX$x, XX$y, ...) 
else if(is.im(lambda)) safelookup(lambda, XX) else if(inherits(lambda, "linim")) safelookup(as.im(lambda), XX) else if(is.ppm(lambda) || inherits(lambda, "lppm")) predict(lambda, locations=as.data.frame(XX)) else stop(paste(lambdaname, "should be", "a numeric vector, function, pixel image, or fitted model")) if(!is.numeric(lambdaX)) stop(paste("Values of", lambdaname, "are not numeric")) if((nv <- length(lambdaX)) != (np <- npoints(X))) stop(paste("Obtained", nv, "values of", lambdaname, "but point pattern contains", np, "points")) if(any(lambdaX < 0)) stop(paste("Negative values of", lambdaname, "obtained")) if(any(lambdaX == 0)) stop(paste("Zero values of", lambdaname, "obtained")) return(lambdaX) } linearKengine <- function(X, ..., r=NULL, reweight=NULL, denom=1, correction="Ang", showworking=FALSE) { # extract info about pattern sX <- summary(X) np <- sX$npoints lengthL <- sX$totlength # extract linear network L <- X$domain # extract points Y <- as.ppp(X) W <- Y$window # determine r values rmaxdefault <- 0.98 * circumradius(L) breaks <- handle.r.b.args(r, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max # if(np < 2) { # no pairs to count: return zero function zeroes <- numeric(length(r)) df <- data.frame(r = r, est = zeroes) K <- fv(df, "r", substitute(linearK(r), NULL), "est", . 
~ r, c(0, rmax), c("r", "%s(r)"), c("distance argument r", "estimated %s"), fname = "linearK") return(K) } # compute pairwise distances D <- pairdist(X) #--- compile into K function --- if(correction == "none" && is.null(reweight)) { # no weights (Okabe-Yamada) K <- compileK(D, r, denom=denom) unitname(K) <- unitname(X) return(K) } if(correction == "none") edgewt <- 1 else { # inverse m weights (Wei's correction) # compute m[i,j] m <- matrix(1, np, np) for(j in 1:np) m[ -j, j] <- countends(L, Y[-j], D[-j,j]) if(any(uhoh <- (m == 0))) { warning("Internal error: disc boundary count equal to zero") m[uhoh] <- 1 } edgewt <- 1/m } # compute K wt <- if(!is.null(reweight)) edgewt * reweight else edgewt K <- compileK(D, r, weights=wt, denom=denom) # tack on theoretical value K <- bind.fv(K, data.frame(theo=r), "%s[theo](r)", "theoretical Poisson %s") unitname(K) <- unitname(X) fvnames(K, ".") <- rev(fvnames(K, ".")) # show working if(showworking) attr(K, "working") <- list(D=D, wt=wt) return(K) } spatstat/R/plot.mppm.R0000644000176000001440000000142712237642730014431 0ustar ripleyusers# # plot.mppm.R # # $Revision: 1.1 $ $Date: 2007/03/26 01:31:40 $ # # plot.mppm <- function(x, ..., trend=TRUE, cif=FALSE, how="image") { xname <- deparse(substitute(x)) if(length(how) > 1) stop(paste("Multiple plotting styles cannot be selected;", "argument", dQuote("how"), "must have length 1")) if(!missing(trend) && missing(cif)) cif <- !trend else if(missing(trend) && !missing(cif)) trend <- !cif else if(trend + cif != 1) stop(paste("Exactly one of", dQuote("trend"), "and", dQuote("cif"), "should be TRUE")) subs <- subfits(x) arglist <- resolve.defaults(list(x=subs,how=how, trend=trend, cif=cif), list(...), list(main=xname)) do.call("plot", arglist) } spatstat/R/localK.R0000755000176000001440000001543312237642727013723 0ustar ripleyusers# # localK.R Getis-Franklin neighbourhood density function # # $Revision: 1.19 $ $Date: 2013/02/07 09:58:14 $ # # "localL" <- function(X, ..., 
correction="Ripley", verbose=TRUE, rvalue=NULL) { localK(X, wantL=TRUE, correction=correction, verbose=verbose, rvalue=rvalue) } "localLinhom" <- function(X, lambda=NULL, ..., correction="Ripley", verbose=TRUE, rvalue=NULL, sigma=NULL, varcov=NULL) { localKinhom(X, lambda=lambda, wantL=TRUE, ..., correction=correction, verbose=verbose, rvalue=rvalue, sigma=sigma, varcov=varcov) } "localK" <- function(X, ..., correction="Ripley", verbose=TRUE, rvalue=NULL) { verifyclass(X, "ppp") localKengine(X, ..., correction=correction, verbose=verbose, rvalue=rvalue) } "localKinhom" <- function(X, lambda=NULL, ..., correction="Ripley", verbose=TRUE, rvalue=NULL, sigma=NULL, varcov=NULL) { verifyclass(X, "ppp") if(is.null(lambda)) { # No intensity data provided # Estimate density by leave-one-out kernel smoothing lambda <- density(X, ..., sigma=sigma, varcov=varcov, at="points", leaveoneout=TRUE) lambda <- as.numeric(lambda) } else { # validate if(is.im(lambda)) lambda <- safelookup(lambda, X) else if(is.ppm(lambda)) lambda <- predict(lambda, locations=X, type="trend") else if(is.function(lambda)) lambda <- lambda(X$x, X$y) else if(is.numeric(lambda) && is.vector(as.numeric(lambda))) check.nvector(lambda, npoints(X)) else stop(paste(sQuote("lambda"), "should be a vector, a pixel image, or a function")) } localKengine(X, lambda=lambda, ..., correction=correction, verbose=verbose, rvalue=rvalue) } "localKengine" <- function(X, ..., wantL=FALSE, lambda=NULL, correction="Ripley", verbose=TRUE, rvalue=NULL) { npts <- npoints(X) W <- X$window area <- area.owin(W) lambda.ave <- npts/area lambda1.ave <- (npts - 1)/area weighted <- !is.null(lambda) if(is.null(rvalue)) rmaxdefault <- rmax.rule("K", W, lambda.ave) else { stopifnot(is.numeric(rvalue)) stopifnot(length(rvalue) == 1) stopifnot(rvalue >= 0) rmaxdefault <- rvalue } breaks <- handle.r.b.args(NULL, NULL, W, rmaxdefault=rmaxdefault) r <- breaks$r rmax <- breaks$max correction.given <- !missing(correction) correction <- 
pickoption("correction", correction, c(none="none", isotropic="isotropic", Ripley="isotropic", trans="translate", translate="translate", translation="translate", best="best"), multi=FALSE) correction <- implemented.for.K(correction, W$type, correction.given) # recommended range of r values alim <- c(0, min(rmax, rmaxdefault)) # identify all close pairs rmax <- max(r) close <- closepairs(X, rmax) DIJ <- close$d XI <- ppp(close$xi, close$yi, window=W, check=FALSE) I <- close$i if(weighted) { J <- close$j lambdaJ <- lambda[J] weightJ <- 1/lambdaJ } # initialise df <- as.data.frame(matrix(NA, length(r), npts)) labl <- desc <- character(npts) bkt <- function(x) { paste("[", x, "]", sep="") } switch(correction, none={ # uncorrected! For demonstration purposes only! for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, if(weighted) weightJ[ii] else NULL) # no edge weights df[,i] <- cumsum(wh) icode <- numalign(i, npts) names(df)[i] <- paste("un", icode, sep="") labl[i] <- paste("%s", bkt(icode), "(r)", sep="") desc[i] <- paste("uncorrected estimate of %s", "for point", icode) if(verbose) progressreport(i, npts) } if(!weighted) df <- df/lambda1.ave }, translate={ # Translation correction XJ <- ppp(close$xj, close$yj, window=W, check=FALSE) edgewt <- edge.Trans(XI, XJ, paired=TRUE) if(weighted) edgewt <- edgewt * weightJ for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Ktrans <- cumsum(wh) df[,i] <- Ktrans icode <- numalign(i, npts) names(df)[i] <- paste("trans", icode, sep="") labl[i] <- paste("%s", bkt(icode), "(r)", sep="") desc[i] <- paste("translation-corrected estimate of %s", "for point", icode) if(verbose) progressreport(i, npts) } if(!weighted) df <- df/lambda1.ave h <- diameter(W)/2 df[r >= h, ] <- NA }, isotropic={ # Ripley isotropic correction edgewt <- edge.Ripley(XI, matrix(DIJ, ncol=1)) if(weighted) edgewt <- edgewt * weightJ for(i in 1:npts) { ii <- (I == i) wh <- whist(DIJ[ii], breaks$val, edgewt[ii]) Kiso <- 
cumsum(wh) df[,i] <- Kiso icode <- numalign(i, npts) names(df)[i] <- paste("iso", icode, sep="") labl[i] <- paste("%s", bkt(icode), "(r)", sep="") desc[i] <- paste("Ripley isotropic correction estimate of %s", "for point", icode) if(verbose) progressreport(i, npts) } if(!weighted) df <- df/lambda1.ave h <- diameter(W)/2 df[r >= h, ] <- NA }) # transform values if L required if(wantL) df <- sqrt(df/pi) # return vector of values at r=rvalue, if desired if(!is.null(rvalue)) { nr <- length(r) if(r[nr] != rvalue) stop("Internal error - rvalue not attained") return(as.numeric(df[nr,])) } # function value table required # add r and theo if(!wantL) { df <- cbind(df, data.frame(r=r, theo=pi * r^2)) if(!weighted) { ylab <- quote(K[loc](r)) fnam <- "K[loc][',']" } else { ylab <- quote(Kinhom[loc](r)) fnam <- "Kinhom[loc][',']" } } else { df <- cbind(df, data.frame(r=r, theo=r)) if(!weighted) { ylab <- quote(L[loc](r)) fnam <- "L[loc][',']" } else { ylab <- quote(Linhom[loc](r)) fnam <- "Linhom[loc][',']" } } desc <- c(desc, c("distance argument r", "theoretical Poisson %s")) labl <- c(labl, c("r", "%s[pois](r)")) # create fv object K <- fv(df, "r", ylab, "theo", , alim, labl, desc, fname=fnam) # default is to display them all formula(K) <- . 
~ r unitname(K) <- unitname(X) attr(K, "correction") <- correction return(K) } spatstat/R/primefactors.R0000755000176000001440000000544212237642727015213 0ustar ripleyusers# # primefactors.R # # $Revision: 1.4 $ $Date: 2012/04/18 09:16:32 $ # primesbelow <- local({ # all primes below 1000 p1000 <- c( 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997) primesbelow <- function(nmax) { if(nmax <= 1000) return(p1000[p1000 <= nmax]) eratosthenes(nmax, c(p1000, 1001:nmax)) } primesbelow }) eratosthenes <- function(nmax, startset=2:nmax) { # The Sieve of Eratosthenes if(nmax < 2) return(numeric(0)) numbers <- startset prime <- 2 repeat{ retain <- (numbers <= prime) | (numbers %% prime != 0) numbers <- numbers[retain] remaining <- (numbers > prime) if(!any(remaining)) break prime <- min(numbers[remaining]) } return(numbers) } primefactors <- function(n, prmax) { if(missing(prmax)) prmax <- floor(sqrt(n)) primes <- primesbelow(prmax) divides.n <- (n %% primes == 0) if(!any(divides.n)) return(n) else { divisors <- primes[divides.n] prmax <- max(divisors) m <- n/prod(divisors) if(m == 1) return(divisors) else { mfactors <- primefactors(m, prmax=prmax) return(sort(c(divisors, mfactors))) } } } is.prime <- function(n) { 
length(primefactors(n)) == 1 } least.common.multiple <- function(n, m) { nf <- primefactors(n) mf <- primefactors(m) p <- sort(unique(c(nf,mf))) nfac <- table(factor(nf, levels=p)) mfac <- table(factor(mf, levels=p)) prod(p^pmax.int(nfac,mfac)) } greatest.common.divisor <- function(n, m) { nf <- primefactors(n) mf <- primefactors(m) p <- sort(unique(c(nf,mf))) nfac <- table(factor(nf, levels=p)) mfac <- table(factor(mf, levels=p)) prod(p^pmin.int(nfac,mfac)) } divisors <- function(n) { p <- primefactors(n) up <- sort(unique(p)) k <- table(factor(p, levels=up)) rest <- function(kk, uu) { powers <- uu[1]^(0:(kk[1])) if(length(uu) == 1) return(powers) rr <- rest(kk[-1], uu[-1]) products <- as.vector(outer(powers, rr, "*")) return(sort(unique(products))) } return(rest(k, up)) } spatstat/R/poisson.R0000755000176000001440000000170112237642727014201 0ustar ripleyusers# # # poisson.S # # $Revision: 1.7 $ $Date: 2012/01/16 08:26:08 $ # # The Poisson process # # Poisson() create an object of class 'interact' describing # the (null) interpoint interaction structure # of the Poisson process. # # # ------------------------------------------------------------------- # Poisson <- function() { out <- list( name = "Poisson process", creator = "Poisson", family = NULL, pot = NULL, par = NULL, parnames = NULL, init = function(...) { }, update = function(...) { }, print = function(self) { cat("Poisson process\n") invisible() }, valid = function(...) { TRUE }, project = function(...) NULL, irange = function(...) 
{ 0 }, version=versionstring.spatstat() ) class(out) <- "interact" return(out) } spatstat/R/Hest.R0000755000176000001440000000445412237642727013422 0ustar ripleyusers# # Hest.R # # Contact distribution for a random set # # Hest <- function(X, r=NULL, breaks=NULL, ..., correction=c("km", "rs", "han"), conditional=TRUE) { if(!(is.ppp(X) || is.psp(X) || is.owin(X))) stop("X should be an object of class ppp, psp or owin") # handle corrections if(is.null(correction)) correction <- c("rs", "km", "cs") correction <- pickoption("correction", correction, c(none="none", raw="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) corxtable <- c("km", "rs", "han", "none") corx <- as.list(corxtable %in% correction) names(corx) <- corxtable # compute distance map D <- distmap(X, ...) B <- attr(D, "bdry") W <- as.owin(D) # histogram breakpoints dmax <- summary(D)$max breaks <- handle.r.b.args(r, breaks, W, NULL, rmaxdefault=dmax) rval <- breaks$r # extract distances and censoring distances dist <- as.vector(as.matrix(D)) bdry <- as.vector(as.matrix(B)) ok <- !is.na(dist) && !is.na(bdry) dist <- dist[ok] bdry <- bdry[ok] # delete zero distances if(conditional && is.owin(X)) { pos <- (dist > 0) dist <- dist[pos] bdry <- bdry[pos] } # censoring indicators d <- (dist <= bdry) # observed distances o <- pmin.int(dist, bdry) # calculate estimates Z <- censtimeCDFest(o, bdry, d, breaks, KM=corx$km, RS=corx$rs, HAN=corx$han, RAW=corx$none, han.denom=if(corx$han) eroded.areas(W, rval) else NULL, tt=dist) # conditional on d > 0 ? 
if(conditional && is.owin(X)) { zeroadj <- function(x) { (x - x[1])/(1-x[1]) } if(corx$km) Z$km <- zeroadj(Z$km) if(corx$rs) Z$rs <- zeroadj(Z$rs) if(corx$han) Z$han <- zeroadj(Z$han) if(corx$none) Z$raw <- zeroadj(Z$raw) } # relabel Z <- rebadge.fv(Z, substitute(H(r), NULL), "H") unitname(Z) <- unitname(X) return(Z) } spatstat/R/interp.im.R0000755000176000001440000000341312237642727014416 0ustar ripleyusers# # interp.im.R # # $Revision: 1.2 $ $Date: 2007/05/17 16:41:13 $ # interp.im <- function(Z, x, y) { stopifnot(is.im(Z)) stopifnot(length(x) == length(y)) if(!is.null(levels(Z))) stop("Interpolation is undefined for factor-valued images") ok <- inside.owin(x,y, as.owin(Z)) # get default lookup values (for boundary cases) fallback <- Z[ppp(x[ok], y[ok], window=as.rectangle(Z), check=FALSE)] # Transform to grid coordinates # so that pixel centres are at integer points, # bottom left of image is (0,0) xx <- (x[ok] - Z$xcol[1])/Z$xstep yy <- (y[ok] - Z$yrow[1])/Z$ystep # find grid point to left and below # (may transgress boundary) xlower <- floor(xx) ylower <- floor(yy) cc <- as.integer(xlower) + 1 rr <- as.integer(ylower) + 1 # determine whether (x,y) is above or below antidiagonal in square dx <- xx - xlower dy <- yy - ylower below <- (dx + dy <= 1) # if below, interpolate Z(x,y) = (1-x-y)Z(0,0) + xZ(1,0) + yZ(0,1) # if above, interpolate Z(x,y) = (x+y-1)Z(1,1) + (1-x)Z(0,1) + (1-y)Z(1,0) V <- Z$v lukimyu <- function(ccc, rrr, mat, defaults) { dimm <- dim(mat) within <- (rrr >= 1 & rrr <= dimm[1] & ccc >= 1 & ccc <= dimm[2]) result <- defaults result[within] <- mat[cbind(rrr[within], ccc[within])] result } values <- ifelse(below, ( (1-dx-dy)*lukimyu(cc,rr,V,fallback) + dx*lukimyu(cc+1,rr,V,fallback) + dy*lukimyu(cc,rr+1,V,fallback) ), ( (dx+dy-1)*lukimyu(cc+1,rr+1,V,fallback) + (1-dx)*lukimyu(cc,rr+1,V,fallback) + (1-dy)*lukimyu(cc+1,rr,V,fallback) )) result <- numeric(length(x)) result[ok] <- values result[!ok] <- NA return(result) } 
spatstat/R/Jmulti.R0000755000176000001440000001245112237642727013757 0ustar ripleyusers# Jmulti.S # # Usual invocations to compute multitype J function(s) # if F and G are not required # # $Revision: 4.36 $ $Date: 2013/04/25 06:37:43 $ # # # "Jcross" <- function(X, i, j, eps=NULL, r=NULL, breaks=NULL, ..., correction=NULL) { # # multitype J function J_{ij}(r) # # X: point pattern (an object of class 'ppp') # i, j: types for which J_{i,j}(r) is calculated # eps: raster grid mesh size for distance transform # (unless specified by X$window) # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] if(missing(j)) j <- levels(marx)[2] # I <- (marx == i) if(sum(I) == 0) stop(paste("No points have mark = ", i)) # if(i == j) result <- Jest(X[I], eps=eps, r=r, breaks=breaks, correction=correction) else { J <- (marx == j) result <- Jmulti(X, I, J, eps=eps, r=r, breaks=breaks, disjoint=TRUE, correction=correction) } iname <- make.parseable(paste(i)) jname <- make.parseable(paste(j)) result <- rebadge.fv(result, substitute(J[i,j](r), list(i=iname,j=jname)), sprintf("J[list(%s,%s)]", iname, jname), new.yexp=substitute(J[list(i,j)](r), list(i=iname,j=jname))) return(result) } "Jdot" <- function(X, i, eps=NULL, r=NULL, breaks=NULL, ..., correction=NULL) { # # multitype J function J_{i\dot}(r) # # X: point pattern (an object of class 'ppp') # i: mark i for which we calculate J_{i\cdot}(r) # eps: raster grid mesh size for distance transform # (unless specified by X$window) # r: (optional) values of argument r # breaks: (optional) breakpoints for argument r # X <- as.ppp(X) if(!is.marked(X)) stop(paste("point pattern has no", sQuote("marks"))) stopifnot(is.multitype(X)) # marx <- marks(X, dfok=FALSE) if(missing(i)) i <- levels(marx)[1] # I <- (marx == i) if(sum(I) == 0) 
"Jmulti" <-
function(X, I, J, eps=NULL, r=NULL, breaks=NULL, ...,
         disjoint=NULL, correction=NULL) {
  # multitype J function (generic engine)
  #
  # Computes J_{IJ}(r) as a ratio of survival functions,
  # (1 - G_{IJ}(r)) / (1 - F_J(r)), for each available edge correction.
  #
  # X     marked point pattern (of class ppp)
  #
  # I,J   logical vectors of length equal to the number of points
  #       and identifying the two subsets of points to be
  #       compared.
  #
  # eps:  raster grid mesh size for distance transform
  #       (unless specified by X$window)
  #
  # r:      (optional) values of argument r
  # breaks: (optional) breakpoints for argument r
  # disjoint: whether I and J are known to be disjoint (passed to Gmulti)
  #
  X <- as.ppp(X)
  W <- X$window
  rmaxdefault <- rmax.rule("J", W)
  brks <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault)$val
  I <- ppsubset(X, I)
  J <- ppsubset(X, J)
  if(is.null(I) || is.null(J))
    stop("I and J must be valid subset indices")
  # empty-space function of the J points
  FJ <- Fest(X[J], eps, breaks=brks, correction=correction)
  # nearest-neighbour distance distribution from I points to J points
  GIJ <- Gmulti(X, I, J, breaks=brks, disjoint=disjoint,
                correction=correction)
  rvals <- FJ$r
  Fnames <- names(FJ)
  Gnames <- names(GIJ)
  # estimates present in both F and G can be combined into a J estimate
  bothnames <- Fnames[Fnames %in% Gnames]
  # initialise fv object
  alim <- attr(FJ, "alim")
  Z <- fv(data.frame(r=rvals, theo=1),
          "r", substitute(Jmulti(r), NULL), "theo", . ~ r,
          alim,
          c("r", "{%s^{pois}}(r)"),
          c("distance argument r", "theoretical Poisson %s"),
          fname="J[multi]")
  # add pieces manually
  # elementwise ratio with the convention x/0 -> NA
  ratio <- function(a, b) {
    result <- a/b
    result[ b == 0 ] <- NA
    result
  }
  if("raw" %in% bothnames) {
    # uncorrected estimate
    Jun <- ratio(1-GIJ$raw, 1-FJ$raw)
    Z <- bind.fv(Z, data.frame(un=Jun),
                 "hat(%s^{un})(r)",
                 "uncorrected estimate of %s",
                 "un")
  }
  if("rs" %in% bothnames) {
    # border (reduced-sample) corrected estimate
    Jrs <- ratio(1-GIJ$rs, 1-FJ$rs)
    Z <- bind.fv(Z, data.frame(rs=Jrs),
                 "hat(%s^{rs})(r)",
                 "border corrected estimate of %s",
                 "rs")
  }
  if("han" %in% Gnames && "cs" %in% Fnames) {
    # Hanisch-style estimate: G Hanisch estimate over F Chiu-Stoyan estimate
    Jhan <- ratio(1-GIJ$han, 1-FJ$cs)
    Z <- bind.fv(Z, data.frame(han=Jhan),
                 "hat(%s^{han})(r)",
                 "Hanisch-style estimate of %s",
                 "han")
  }
  if("km" %in% bothnames) {
    # Kaplan-Meier estimate
    Jkm <- ratio(1-GIJ$km, 1-FJ$km)
    Z <- bind.fv(Z, data.frame(km=Jkm),
                 "hat(%s^{km})(r)",
                 "Kaplan-Meier estimate of %s",
                 "km")
    if("hazard" %in% names(GIJ) && "hazard" %in% names(FJ)) {
      # difference of hazards estimates the derivative of log J
      Jhaz <- GIJ$hazard - FJ$hazard
      Z <- bind.fv(Z, data.frame(hazard=Jhaz),
                   "hazard(r)",
                   "Kaplan-Meier estimate of derivative of log(%s)")
    }
  }
  # set default plotting values and order
  nama <- names(Z)
  fvnames(Z, ".") <- rev(nama[!(nama %in% c("r", "hazard"))])
  # add other info (keep the component F and G estimates as attributes)
  attr(Z, "G") <- GIJ
  attr(Z, "F") <- FJ
  unitname(Z) <- unitname(X)
  return(Z)
}
print.plotppm <- function(x, ...)
{
  # Print a synopsis of a "plotppm" object (precomputed plotting data
  # for a fitted point process model). All printed text is identical
  # to the previous implementation.
  verifyclass(x, "plotppm")
  mrkvals <- attr(x, "mrkvals")
  ntypes <- length(mrkvals)
  unmarked <- (ntypes == 1 )
  cat(paste("Object of class", sQuote("plotppm"), "\n"))
  if(unmarked) {
    cat("Computed for an unmarked point process\n")
  } else {
    cat("Computed for a marked point process, with mark values:\n")
    print(mrkvals)
  }
  cat("Contains the following components:\n")
  # helper: describe one component (a list of images), or do nothing
  # when the component is absent
  describe <- function(comp, header) {
    if(is.null(comp)) return(invisible(NULL))
    cat(header)
    if(unmarked) {
      cat("A list containing 1 image\n")
    } else {
      cat(paste("A list of", ntypes, "images\n"))
      cat("Typical details:\n")
    }
    print(comp[[1]], ...)
    invisible(NULL)
  }
  describe(x$trend, "\n$trend:\tFitted trend.\n")
  describe(x$cif, "\n$cif:\tFitted conditional intensity.\n")
  invisible(NULL)
}
.Call("trioxgraph", nv=nvert, iedge=iedge, jedge=jedge, friendly=fr) # PACKAGE="spatstat") } mat <- as.matrix(as.data.frame(zz)) return(mat) } # compute triangle diameters as well trianglediameters <- function(iedge, jedge, edgelength, ..., nvert=max(iedge, jedge), check=TRUE) { if(check) { stopifnot(length(iedge) == length(jedge)) stopifnot(length(iedge) == length(edgelength)) stopifnot(all(iedge > 0)) stopifnot(all(jedge > 0)) if(!missing(nvert)) { stopifnot(all(iedge <= nvert)) stopifnot(all(jedge <= nvert)) } } # zero length data if(length(iedge) == 0) return(data.frame(i=integer(0), j=integer(0), k=integer(0), diam=numeric(0))) # call C storage.mode(nvert) <- storage.mode(iedge) <- storage.mode(jedge) <- "integer" storage.mode(edgelength) <- "double" zz <- .Call("triDgraph", nv=nvert, iedge=iedge, jedge=jedge, edgelength=edgelength) # PACKAGE="spatstat") df <- as.data.frame(zz) colnames(df) <- c("i", "j", "k", "diam") return(df) } # extract 'vees', i.e. triples (i, j, k) where i ~ j and i ~ k edges2vees <- function(iedge, jedge, nvert=max(iedge, jedge), ..., check=TRUE) { if(check) { stopifnot(length(iedge) == length(jedge)) stopifnot(all(iedge > 0)) stopifnot(all(jedge > 0)) if(!missing(nvert)) { stopifnot(all(iedge <= nvert)) stopifnot(all(jedge <= nvert)) } } # zero length data, or not enough to make vees if(length(iedge) < 2) return(data.frame(i=numeric(0), j=numeric(0), k=numeric(0))) # call vees <- .Call("graphVees", nv = nvert, iedge = iedge, jedge = jedge) # PACKAGE="spatstat") names(vees) <- c("i", "j", "k") vees <- as.data.frame(vees) return(vees) } spatstat/R/concom.R0000644000176000001440000000731212237642727013766 0ustar ripleyusers# # # concom.R # # $Revision: 1.1 $ $Date: 2013/02/25 05:19:36 $ # # The connected component interaction # # Concom() create an instance of the connected component interaction # [an object of class 'interact'] # # ------------------------------------------------------------------- # Concom <- local({ connectedlabels <- 
function(X, R) { connected(X, R, internal=TRUE) } countcompo <- function(X, R) { length(unique(connectedlabels(X, R))) } # change in number of components when point i is deleted cocoDel <- function(X, R, subset=seq_len(npoints(X))) { n <- length(subset) ans <- integer(n) if(n > 0) { cX <- countcompo(X, R) for(i in 1:n) ans[i] = countcompo(X[-subset[i]], R) - cX } return(ans) } # change in number of components when new point is added cocoAdd <- function(U, X, R) { U <- as.ppp(U, W=as.owin(X)) nU <- npoints(U) cr <- crosspairs(U, X, R) lab <- connectedlabels(X, R) hitcomp <- tapply(X=lab[cr$j], INDEX=factor(cr$i, levels=1:nU), FUN=unique, simplify=FALSE) nhit <- unname(unlist(lapply(hitcomp, length))) change <- 1 - nhit return(change) } # connected component potential cocopot <- function(X,U,EqualPairs,pars,correction, ...) { bad <- !(correction %in% c("border", "none")) if((nbad <- sum(bad)) > 0) warning(paste("The", ngettext(nbad, "correction", "corrections"), commasep(sQuote(correction[!ok])), ngettext(nbad, "is", "are"), "not implemented")) n <- U$n answer <- numeric(n) r <- pars$r if(is.null(r)) stop("internal error: r parameter not found") dummies <- !(seq_len(n) %in% EqualPairs[,2]) if(sum(dummies) > 0) answer[dummies] <- -cocoAdd(U[dummies], X, r) ii <- EqualPairs[,1] jj <- EqualPairs[,2] answer[jj] <- cocoDel(X, r, subset=ii) return(answer + 1) } # template object without family, par, version BlankCoco <- list( name = "Connected component process", creator = "Concom", family = "inforder.family", # evaluated later pot = cocopot, par = list(r = NULL), # to be filled in parnames = "distance threshold", init = function(self) { r <- self$par$r if(!is.numeric(r) || length(r) != 1 || r <= 0) stop("distance threshold r must be a positive number") }, update = NULL, # default OK print = NULL, # default OK interpret = function(coeffs, self) { logeta <- as.numeric(coeffs[1]) eta <- exp(logeta) return(list(param=list(eta=eta), inames="interaction parameter eta", 
printable=round(eta,4))) }, valid = function(coeffs, self) { eta <- ((self$interpret)(coeffs, self))$param$eta return(is.finite(eta)) }, project = function(coeffs, self) { if((self$valid)(coeffs, self)) return(NULL) return(Poisson()) }, irange = function(self, coeffs=NA, epsilon=0, ...) { if(any(is.na(coeffs))) return(Inf) logeta <- coeffs[1] if(abs(logeta) <= epsilon) return(0) else return(Inf) }, version=NULL # to be added ) class(BlankCoco) <- "interact" Concom <- function(r) { instantiate.interact(BlankCoco, list(r=r)) } Concom }) spatstat/R/fasp.R0000755000176000001440000001325112237642727013443 0ustar ripleyusers# # fasp.R # # $Revision: 1.32 $ $Date: 2013/07/05 06:13:00 $ # # #----------------------------------------------------------------------------- # # creator fasp <- function(fns, which, formulae=NULL, dataname=NULL, title=NULL, rowNames=NULL, colNames=NULL, checkfv=TRUE) { stopifnot(is.list(fns)) stopifnot(is.matrix(which)) stopifnot(length(fns) == length(which)) n <- length(which) if(checkfv) for(i in seq_len(n)) if(!is.fv(fns[[i]])) stop(paste("fns[[", i, "]] is not an fv object", sep="")) # set row and column labels if(!is.null(rowNames)) rownames(which) <- rowNames if(!is.null(colNames)) colnames(which) <- colNames if(!is.null(formulae)) { # verify format and convert to character vector formulae <- FormatFaspFormulae(formulae, "formulae") # ensure length matches length of "fns" if(length(formulae) == 1 && n > 1) # single formula - replicate it formulae <- rep.int(formulae, n) else stopifnot(length(formulae) == length(which)) } rslt <- list(fns=fns, which=which, default.formula=formulae, dataname=dataname, title=title) class(rslt) <- "fasp" return(rslt) } # subset extraction operator "[.fasp" <- function(x, I, J, drop=TRUE, ...) 
{ verifyclass(x, "fasp") m <- nrow(x$which) n <- ncol(x$which) if(missing(I)) I <- 1:m if(missing(J)) J <- 1:n if(!is.vector(I) || !is.vector(J)) stop("Subset operator is only implemented for vector indices") # determine index subset for lists 'fns', 'titles' etc included <- rep.int(FALSE, length(x$fns)) w <- as.vector(x$which[I,J]) if(length(w) == 0) stop("result is empty") included[w] <- TRUE # if only one cell selected, and drop=TRUE: if((sum(included) == 1) && drop) return(x$fns[included][[1]]) # determine positions in shortened lists whichIJ <- x$which[I,J,drop=FALSE] newk <- cumsum(included) newwhich <- matrix(newk[whichIJ], ncol=ncol(whichIJ), nrow=nrow(whichIJ)) rownames(newwhich) <- rownames(x$which)[I] colnames(newwhich) <- colnames(x$which)[J] # default plotting formulae - could be NULL deform <- x$default.formula # create new fasp object Y <- fasp(fns = x$fns[included], formulae = if(!is.null(deform)) deform[included] else NULL, which = newwhich, dataname = x$dataname, title = x$title) return(Y) } dim.fasp <- function(x) { dim(x$which) } # print method print.fasp <- function(x, ...) { verifyclass(x, "fasp") cat(paste("Function array (class", sQuote("fasp"), ")\n")) dim <- dim(x$which) cat(paste("Dimensions: ", dim[1], "x", dim[2], "\n")) cat(paste("Title:", if(is.null(x$title)) "(None)" else x$title, "\n")) invisible(NULL) } # other methods as.fv.fasp <- function(x) do.call("cbind.fv", x$fns) dimnames.fasp <- function(x) { return(dimnames(x$which)) } "dimnames<-.fasp" <- function(x, value) { w <- x$which dimnames(w) <- value x$which <- w return(x) } pool.fasp <- function(...) { Alist <- list(...) Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1, 40), "[..]") nA <- length(Alist) if(nA == 0) return(NULL) # validate.... 
# other functions
FormatFaspFormulae <- function(f, argname) {
  # Convert 'f' to a character vector of formula strings.
  # 'f' may be a single formula object, a character vector, or a list
  # whose entries are formulae and/or strings.
  # 'argname' is used only to build error messages.
  asString <- function(x, argname) {
    if(inherits(x, "formula")) return(deparse(x))
    if(is.character(x)) return(x)
    stop(paste("The entries of", sQuote(argname),
               "must be formula objects or strings"))
  }
  if(is.character(f)) return(f)
  if(inherits(f, "formula")) return(deparse(f))
  if(is.list(f)) return(unlist(lapply(f, asString, argname=argname)))
  stop(paste(sQuote(argname),
             "should be a formula, a list of formulae,",
             "or a character vector"))
}
sigma else max(c(diag(varcov), sqrt(det(varcov)))) xtype <- if(is.ppp(x)) "ppp" else if(is.im(x)) "im" else if(all(unlist(lapply(x, is.im)))) "imlist" else stop("x should be a point pattern or a pixel image") nimages <- switch(xtype, ppp = 1, im = 1, imlist = length(x)) win <- if(nimages == 1) as.owin(x) else as.owin(x[[1]]) win <- rescue.rectangle(win) rec <- as.rectangle(win) across <- min(diff(rec$xrange), diff(rec$yrange)) # determine whether to expand window if(!expand || (6 * sig < across)) { result <- second.moment.engine(x, sigma=sigma, edge=edge, what=what, debug=debug, ..., varcov=varcov) return(result) } # need to expand window bigger <- grow.rectangle(rec, (7 * sig - across)/2) switch(xtype, ppp = { # pixellate first (to preserve pixel resolution) X <- pixellate(x, ..., padzero=TRUE) np <- npoints(x) }, im = { X <- x np <- NULL }, imlist = { Xlist <- x np <- NULL }) # Now expand if(nimages == 1) { X <- rebound.im(X, bigger) X <- na.handle.im(X, 0) } else { X <- lapply(X, rebound.im, rect=bigger) X <- lapply(X, na.handle.im, na.replace=0) } # Compute! 
out <- second.moment.engine(X, sigma=sigma, edge=edge, what=what, debug=debug, ..., obswin=win, varcov=varcov, npts=np) # Now clip it fbox <- shift(rec, origin="midpoint") if(nimages == 1) { result <- switch(what, kernel = out[fbox], smooth = out[win], Kmeasure = out[fbox], Bartlett = out[fbox], edge = out[win], smoothedge = list(smooth=out$smooth[win], edge =out$edge[win]), all = list(kernel=out$kernel[fbox], smooth=out$smooth[win], Kmeasure=out$Kmeasure[fbox], Bartlett=out$Bartlett[fbox], edge=out$edge[win])) } else { result <- switch(what, kernel = out[fbox], smooth = lapply(out, "[", i=win), Kmeasure = lapply(out, "[", i=fbox), Bartlett = lapply(out, "[", i=fbox), edge = out[win], smoothedge = list( smooth = lapply(out$smooth, "[", i=win), edge = out$edge[win]), all = list( kernel=out$kernel[fbox], smooth=lapply(out$smooth, "[", i=win), Kmeasure=lapply(out$Kmeasure, "[", i=fbox), Bartlett=lapply(out$Bartlett, "[", i=fbox), edge=out$edge[win])) } return(result) } second.moment.engine <- function(x, sigma=NULL, edge=TRUE, what=c("Kmeasure", "kernel", "smooth", "Bartlett", "edge", "smoothedge", "all"), ..., obswin = as.owin(x), varcov=NULL, npts=NULL, debug=FALSE) { what <- match.arg(what) if(is.ppp(x)) { # convert list of points to mass distribution X <- pixellate(x, ..., padzero=TRUE) if(is.null(npts)) npts <- npoints(x) } else X <- x if(is.im(X)) { Xlist <- list(X) nimages <- 1 } else if(all(unlist(lapply(X, is.im)))) { Xlist <- X X <- Xlist[[1]] nimages <- length(Xlist) blanklist <- vector(mode="list", length=nimages) names(blanklist) <- names(Xlist) } else stop("internal error: unrecognised format for x") unitsX <- unitname(X) # ensure obswin has same bounding frame as X if(!missing(obswin)) obswin <- rebound.owin(obswin, as.rectangle(X)) # go to work Y <- X$v Ylist <- lapply(Xlist, getElement, name="v") # xw <- X$xrange yw <- X$yrange # pad with zeroes nr <- nrow(Y) nc <- ncol(Y) Ypad <- matrix(0, ncol=2*nc, nrow=2*nr) Ypadlist <- rep(list(Ypad), nimages) 
for(i in 1:nimages) Ypadlist[[i]][1:nr, 1:nc] <- Ylist[[i]] Ypad <- Ypadlist[[1]] lengthYpad <- 4 * nc * nr # corresponding coordinates xw.pad <- xw[1] + 2 * c(0, diff(xw)) yw.pad <- yw[1] + 2 * c(0, diff(yw)) xcol.pad <- X$xcol[1] + X$xstep * (0:(2*nc-1)) yrow.pad <- X$yrow[1] + X$ystep * (0:(2*nr-1)) # set up Gauss kernel xcol.G <- X$xstep * c(0:(nc-1),-(nc:1)) yrow.G <- X$ystep * c(0:(nr-1),-(nr:1)) Gpixarea <- X$xstep * X$ystep if(!is.null(sigma)) { densX.G <- dnorm(xcol.G, sd=sigma) densY.G <- dnorm(yrow.G, sd=sigma) Kern <- outer(densY.G, densX.G, "*") * Gpixarea } else if(!is.null(varcov)) { # anisotropic kernel detSigma <- det(varcov) Sinv <- solve(varcov) halfSinv <- Sinv/2 constG <- Gpixarea/(2 * pi * sqrt(detSigma)) xsq <- matrix((xcol.G^2)[col(Ypad)], ncol=2*nc, nrow=2*nr) ysq <- matrix((yrow.G^2)[row(Ypad)], ncol=2*nc, nrow=2*nr) xy <- outer(yrow.G, xcol.G, "*") Kern <- constG * exp(-(xsq * halfSinv[1,1] + xy * (halfSinv[1,2]+halfSinv[2,1]) + ysq * halfSinv[2,2])) # xx <- matrix(xcol.G[col(Ypad)], ncol=2*nc, nrow=2*nr) # yy <- matrix(yrow.G[row(Ypad)], ncol=2*nc, nrow=2*nr) # Kern <- const * exp(-(xx * (xx * Sinv[1,1] + yy * Sinv[1,2]) # + yy * (xx * Sinv[2,1] + yy * Sinv[2,2]))/2) } else stop("Must specify either sigma or varcov") # these options call for several image outputs if(what %in% c("all", "smoothedge")) result <- list() if(what %in% c("kernel", "all")) { # return the kernel # first rearrange it into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(debug) { if(any(fave.order(xcol.G) != rtwist)) cat("something round the twist\n") } Kermit <- Kern[ rtwist, ctwist] ker <- im(Kermit, xcol.G[ctwist], yrow.G[ rtwist], unitname=unitsX) if(what == "kernel") return(ker) else result$kernel <- ker } # convolve using fft fK <- fft(Kern) if(what != "edge") { if(nimages == 1) { fY <- fft(Ypad) sm <- fft(fY * fK, inverse=TRUE)/lengthYpad if(debug) { cat(paste("smooth: maximum 
imaginary part=", signif(max(Im(sm)),3), "\n")) if(!is.null(npts)) cat(paste("smooth: mass error=", signif(sum(Mod(sm))-npts,3), "\n")) } } else { fYlist <- smlist <- blanklist for(i in 1:nimages) { fYlist[[i]] <- fY.i <- fft(Ypadlist[[i]]) smlist[[i]] <- sm.i <- fft(fY.i * fK, inverse=TRUE)/lengthYpad if(debug) { cat(paste("smooth component", i, ": maximum imaginary part=", signif(max(Im(sm.i)),3), "\n")) if(!is.null(npts)) cat(paste("smooth component", i, ": mass error=", signif(sum(Mod(sm.i))-npts,3), "\n")) } } } } if(what %in% c("smooth", "all", "smoothedge")) { # return the smoothed point pattern without edge correction if(nimages == 1) { smo <- im(Re(sm)[1:nr, 1:nc], xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) if(what == "smooth") { return(smo) } else { result$smooth <- smo } } else { smolist <- blanklist for(i in 1:nimages) smolist[[i]] <- im(Re(smlist[[i]])[1:nr, 1:nc], xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) smolist <- as.listof(smolist) if(what == "smooth") { return(smolist) } else { result$smooth <- smolist } } } if(what != "edge") { # compute Bartlett spectrum if(nimages == 1) { bart <- Mod(fY)^2 * fK } else { bartlist <- lapply(fYlist, function(z, fK) { Mod(z)^2 * fK}, fK=fK) } } if(what %in% c("Bartlett", "all")) { # return Bartlett spectrum # rearrange into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(nimages == 1) { Bart <- bart[ rtwist, ctwist] Bartlett <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1)) if(what == "Bartlett") return(Bartlett) else result$Bartlett <- Bartlett } else { Bartlist <- blanklist for(i in 1:nimages) { Bart <- (bartlist[[i]])[ rtwist, ctwist] Bartlist[[i]] <- im(Mod(Bart),(-nc):(nc-1), (-nr):(nr-1)) } Bartlist <- as.listof(Bartlist) if(what == "Bartlett") return(Bartlist) else result$Bartlett <- Bartlist } } #### ------- Second moment measure -------------- # if(what != "edge") { if(nimages == 1) { mom <- fft(bart, 
inverse=TRUE)/lengthYpad if(debug) { cat(paste("2nd moment measure: maximum imaginary part=", signif(max(Im(mom)),3), "\n")) if(!is.null(npts)) cat(paste("2nd moment measure: mass error=", signif(sum(Mod(mom))-npts^2, 3), "\n")) } mom <- Mod(mom) # subtract (delta_0 * kernel) * npts if(is.null(npts)) stop("Internal error: second moment measure requires npts") mom <- mom - npts* Kern } else { momlist <- blanklist for(i in 1:nimages) { mom.i <- fft(bartlist[[i]], inverse=TRUE)/lengthYpad if(debug) { cat(paste("2nd moment measure: maximum imaginary part=", signif(max(Im(mom.i)),3), "\n")) if(!is.null(npts)) cat(paste("2nd moment measure: mass error=", signif(sum(Mod(mom.i))-npts^2, 3), "\n")) } mom.i <- Mod(mom.i) # subtract (delta_0 * kernel) * npts if(is.null(npts)) stop("Internal error: second moment measure requires npts") mom.i <- mom.i - npts* Kern momlist[[i]] <- mom.i } } } # edge correction if(edge || what %in% c("edge", "all", "smoothedge")) { # compute kernel-smoothed set covariance M <- as.mask(obswin, dimyx=c(nr, nc))$m # previous line ensures M has same dimensions and scale as Y Mpad <- matrix(0, ncol=2*nc, nrow=2*nr) Mpad[1:nr, 1:nc] <- M lengthMpad <- 4 * nc * nr fM <- fft(Mpad) if(edge && what != "edge") { # apply edge correction co <- fft(Mod(fM)^2 * fK, inverse=TRUE)/lengthMpad co <- Mod(co) a <- sum(M) wt <- a/co me <- spatstat.options("maxedgewt") weight <- matrix(pmin.int(me, wt), ncol=2*nc, nrow=2*nr) # apply edge correction to second moment measure if(nimages == 1) { mom <- mom * weight # set to NA outside 'reasonable' region mom[wt > 10] <- NA } else { wgt10 <- (wt > 10) for(i in 1:nimages) { mom.i <- momlist[[i]] mom.i <- mom.i * weight # set to NA outside 'reasonable' region mom.i[wgt10] <- NA momlist[[i]] <- mom.i } } } } if(what != "edge") { # rearrange into spatially sensible order (monotone x and y) rtwist <- ((-nr):(nr-1)) %% (2 * nr) + 1 ctwist <- (-nc):(nc-1) %% (2*nc) + 1 if(nimages == 1) { mom <- mom[ rtwist, ctwist] } else { 
momlist <- lapply(momlist, "[", i=rtwist, j=ctwist) } if(debug) { if(any(fave.order(xcol.G) != rtwist)) cat("internal error: something round the twist\n") } } if(what %in% c("edge", "all", "smoothedge")) { # return convolution of window with kernel # (evaluated inside window only) con <- fft(fM * fK, inverse=TRUE)/lengthMpad edg <- Mod(con[1:nr, 1:nc]) edg <- im(edg, xcol.pad[1:nc], yrow.pad[1:nr], unitname=unitsX) if(what == "edge") return(edg) else result$edge <- edg } if(what == "smoothedge") return(result) # Second moment measure, density estimate # Divide by number of points * lambda and convert mass to density pixarea <- with(X, xstep * ystep) if(nimages == 1) { mom <- mom * area.owin(obswin) / (pixarea * npts^2) # this is the second moment measure mm <- im(mom, xcol.G[ctwist], yrow.G[rtwist], unitname=unitsX) if(what == "Kmeasure") return(mm) else result$Kmeasure <- mm } else { ccc <- area.owin(obswin) / (pixarea * npts^2) mmlist <- blanklist for(i in 1:nimages) { mom.i <- momlist[[i]] mom.i <- mom.i * ccc # this is the second moment measure mmlist[[i]] <- im(mom.i, xcol.G[ctwist], yrow.G[rtwist], unitname=unitsX) } mmlist <- as.listof(mmlist) if(what == "Kmeasure") return(mmlist) else result$Kmeasure <- mmlist } # what = "all", so return all computed objects return(result) } Kest.fft <- function(X, sigma, r=NULL, breaks=NULL) { verifyclass(X, "ppp") W <- X$window lambda <- X$n/area.owin(W) rmaxdefault <- rmax.rule("K", W, lambda) bk <- handle.r.b.args(r, breaks, W, rmaxdefault=rmaxdefault) breaks <- bk$val rvalues <- bk$r u <- Kmeasure(X, sigma) xx <- rasterx.im(u) yy <- rastery.im(u) rr <- sqrt(xx^2 + yy^2) tr <- whist(rr, breaks, u$v) K <- cumsum(tr) rmax <- min(rr[is.na(u$v)]) K[rvalues >= rmax] <- NA result <- data.frame(r=rvalues, theo=pi * rvalues^2, border=K) w <- X$window alim <- c(0, min(diff(w$xrange), diff(w$yrange))/4) out <- fv(result, "r", substitute(K[fft](r), NULL), "border", . 
~ r, alim, c("r", "{%s^{pois}}(r)", "{hat(%s)^{bord}}(r)"), c("distance argument r", "theoretical Poisson %s", "border-corrected estimate of %s"), fname="K[fft]", unitname=unitname(X) ) return(out) } spatstat/R/formulae.R0000755000176000001440000001167412237642727014333 0ustar ripleyusers# # # formulae.S # # Functions for manipulating model formulae # # $Revision: 1.16 $ $Date: 2012/08/22 01:28:24 $ # # identical.formulae() # Test whether two formulae are identical # # termsinformula() # Extract the terms from a formula # # sympoly() # Create a symbolic polynomial formula # # polynom() # Analogue of poly() but without dynamic orthonormalisation # # ------------------------------------------------------------------- # # new generic "formula<-" <- function(x, ..., value) { UseMethod("formula<-") } identical.formulae <- function(x, y) { # workaround for bug in all.equal.formula in R 2.5.0 if(is.null(y) && !is.null(x)) return(FALSE) return(identical(all.equal(x,y), TRUE)) } termsinformula <- function(x) { if(is.null(x)) return(character(0)) if(class(x) != "formula") stop("argument is not a formula") attr(terms(x), "term.labels") } variablesinformula <- function(x) { if(is.null(x)) return(character(0)) if(class(x) != "formula") stop("argument is not a formula") all.vars(as.expression(x)) } offsetsinformula <- function(x) { if(is.null(x)) return(character(0)) if(class(x) != "formula") stop("argument is not a formula") tums <- terms(x) offs <- attr(tums, "offset") if(length(offs) == 0) return(character(0)) vars <- attr(tums, "variables") termnames <- unlist(lapply(vars, deparse))[-1] termnames[offs] } lhs.of.formula <- function(x) { if(!inherits(x, "formula")) stop("x must be a formula") if(length(as.list(x)) == 3) { # formula has a response: return it return(x[2]) } return(NULL) } rhs.of.formula <- function(x, tilde=TRUE) { if(!inherits(x, "formula")) stop("x must be a formula") if(length(as.list(x)) == 3) { # formula has a response: strip it x <- x[-2] } if(!tilde) # 
remove the "~" x <- x[[2]] return(x) } sympoly <- function(x,y,n) { if(nargs()<2) stop("Degree must be supplied.") if(nargs()==2) n <- y eps <- abs(n%%1) if(eps > 0.000001 | n <= 0) stop("Degree must be a positive integer") x <- deparse(substitute(x)) temp <- NULL left <- "I(" rght <- ")" if(nargs()==2) { for(i in 1:n) { xhat <- if(i==1) "" else paste("^",i,sep="") temp <- c(temp,paste(left,x,xhat,rght,sep="")) } } else { y <- deparse(substitute(y)) for(i in 1:n) { for(j in 0:i) { k <- i-j xhat <- if(k<=1) "" else paste("^",k,sep="") yhat <- if(j<=1) "" else paste("^",j,sep="") xbit <- if(k>0) x else "" ybit <- if(j>0) y else "" star <- if(j*k>0) "*" else "" term <- paste(left,xbit,xhat,star,ybit,yhat,rght,sep="") temp <- c(temp,term) } } } as.formula(paste("~",paste(temp,collapse="+"))) } polynom <- function(x, ...) { rest <- list(...) # degree not given if(length(rest) == 0) stop("degree of polynomial must be given") #call with single variable and degree if(length(rest) == 1) { degree <- ..1 if((degree %% 1) != 0 || length(degree) != 1 || degree < 1) stop("degree of polynomial should be a positive integer") # compute values result <- outer(x, 1:degree, "^") # compute column names - the hard part ! 
namex <- deparse(substitute(x)) # check whether it needs to be parenthesised if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") # column names namepowers <- if(degree == 1) namex else c(namex, paste(namex, "^", 2:degree, sep="")) namepowers <- paste("[", namepowers, "]", sep="") # stick them on dimnames(result) <- list(NULL, namepowers) return(result) } # call with two variables and degree if(length(rest) == 2) { y <- ..1 degree <- ..2 # list of exponents of x and y, in nice order xexp <- yexp <- numeric() for(i in 1:degree) { xexp <- c(xexp, i:0) yexp <- c(yexp, 0:i) } nterms <- length(xexp) # compute result <- matrix(, nrow=length(x), ncol=nterms) for(i in 1:nterms) result[, i] <- x^xexp[i] * y^yexp[i] # names of these terms namex <- deparse(substitute(x)) # namey <- deparse(substitute(..1)) ### seems not to work in R zzz <- as.list(match.call()) namey <- deparse(zzz[[3]]) # check whether they need to be parenthesised # if so, add parentheses if(!is.name(substitute(x))) namex <- paste("(", namex, ")", sep="") if(!is.name(zzz[[3]])) namey <- paste("(", namey, ")", sep="") nameXexp <- c("", namex, paste(namex, "^", 2:degree, sep="")) nameYexp <- c("", namey, paste(namey, "^", 2:degree, sep="")) # make the term names termnames <- paste(nameXexp[xexp + 1], ifelse(xexp > 0 & yexp > 0, ".", ""), nameYexp[yexp + 1], sep="") termnames <- paste("[", termnames, "]", sep="") dimnames(result) <- list(NULL, termnames) # return(result) } stop("Can't deal with more than 2 variables yet") } spatstat/R/GJfox.R0000755000176000001440000000557012237642727013534 0ustar ripleyusers# # GJfox.R # # Foxall G-function and J-function # # $Revision: 1.6 $ $Date: 2013/07/26 00:43:29 $ # Gfox <- function(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), ...) 
{ stopifnot(is.ppp(X)) if(!(is.ppp(Y) || is.psp(Y) || is.owin(Y))) stop("Y should be an object of class ppp, psp or owin") if(!identical(unitname(X), unitname(Y))) warning("X and Y are not in the same units") # if(is.null(correction)) correction <- c("rs", "km", "cs") correction <- pickoption("correction", correction, c(none="none", raw="none", border="rs", rs="rs", KM="km", km="km", Kaplan="km", han="han", Hanisch="han", best="km"), multi=TRUE) corxtable <- c("km", "rs", "han", "none") corx <- as.list(corxtable %in% correction) names(corx) <- corxtable # ensure compatible windows WX <- as.owin(X) WY <- as.owin(Y) if(!is.subset.owin(WX, WY)) { warning("Trimming the window of X to be a subset of the window of Y") WX <- intersect.owin(WX, WY) X <- X[WX] } # compute distances and censoring distances D <- distfun(Y) dist <- D(X) bdry <- bdist.points(X[WY]) # histogram breakpoints dmax <- max(dist) breaks <- handle.r.b.args(r, breaks, WX, NULL, rmaxdefault=dmax) rval <- breaks$r # censoring indicators d <- (dist <= bdry) # observed distances o <- pmin.int(dist, bdry) # calculate estimates Z <- censtimeCDFest(o, bdry, d, breaks, KM=corx$km, RS=corx$rs, HAN=corx$han, RAW=corx$none, han.denom=if(corx$han) eroded.areas(WX, rval) else NULL, tt=dist) # relabel Z <- rebadge.fv(Z, substitute(Gfox(r), NULL), "Gfox") unitname(Z) <- unitname(Y) return(Z) } Jfox <- function(X, Y, r=NULL, breaks=NULL, correction=c("km", "rs", "han"), ...) { H <- Hest(Y, r=r, breaks=breaks, correction=correction, ...) G <- Gfox(X, Y, r=H$r, correction=correction, ...) 
# derive J-function J <- eval.fv((1-G)/(1-H), dotonly=FALSE) # correct calculation of hazard is different if("hazard" %in% names(J)) J$hazard <- G$hazard - H$hazard # base labels on 'J' rather than full expression attr(J, "labl") <- attr(H, "labl") # add column of 1's J <- bind.fv(J, data.frame(theo=rep.int(1, nrow(J))), "%s[theo](r)", "theoretical value of %s for independence") # rename J <- rebadge.fv(J, substitute(Jfox(r), NULL), "Jfox") funs <- c("km", "han", "rs", "raw", "theo") fvnames(J, ".") <- funs[funs %in% names(J)] unitname(J) <- unitname(Y) return(J) } spatstat/R/simplepanel.R0000644000176000001440000001637112250734313015012 0ustar ripleyusers# # simplepanel.R # # A simple, robust point & click interface # used in rmh visual debugger. # # $Revision: 1.11 $ $Date: 2013/12/08 00:08:23 $ # simplepanel <- function(title, B, boxes, clicks, redraws=NULL, exit=NULL, env) { stopifnot(is.rectangle(B)) stopifnot(is.list(boxes)) if(!all(unlist(lapply(boxes, is.rectangle)))) stop("some of the boxes are not rectangles") if(!all(unlist(lapply(boxes, is.subset.owin, B=B)))) stop("Some boxes do not lie inside the bounding box B") stopifnot(is.list(clicks) && length(clicks) == length(boxes)) if(!all(unlist(lapply(clicks, is.function)))) stop("clicks must be a list of functions") if(is.null(redraws)) { redraws <- rep.int(list(dflt.redraw), length(boxes)) } else { stopifnot(is.list(redraws) && length(redraws) == length(boxes)) if(any(isnul <- unlist(lapply(redraws, is.null)))) redraws[isnul] <- rep.int(list(dflt.redraw), sum(isnul)) if(!all(unlist(lapply(redraws, is.function)))) stop("redraws must be a list of functions") } if(is.null(exit)) { exit <- function(...) 
{ NULL} } else stopifnot(is.function(exit)) stopifnot(is.environment(env)) n <- length(boxes) got.boxnames <- (sum(nzchar(names(boxes))) == n) got.clicknames <- (sum(nzchar(names(clicks))) == n) nama <- if(got.boxnames && !got.clicknames) names(boxes) else if(got.clicknames && !got.boxnames) names(clicks) else paste("Button", seq_len(n)) out <- list(title=title, B=B, nama=nama, boxes=boxes, clicks=clicks, redraws=redraws, exit=exit, env=env) class(out) <- c("simplepanel", class(out)) return(out) } grow.simplepanel <- function(P, side=c("right","left","top","bottom"), len=NULL, new.clicks, new.redraws=NULL, ..., aspect) { verifyclass(P, "simplepanel") side <- match.arg(side) stopifnot(is.list(new.clicks)) if(!all(unlist(lapply(new.clicks, is.function)))) stop("new.clicks must be a list of functions") if(is.null(new.redraws)) { new.redraws <- rep.int(list(dflt.redraw), length(new.clicks)) } else { stopifnot(is.list(new.redraws) && length(new.redraws) == length(new.clicks)) if(!all(unlist(lapply(new.redraws, is.function)))) stop("new.redraws must be a list of functions") } if(missing(aspect) || is.null(aspect)) { # determine aspect ratio from length of longest text string n <- length(new.clicks) nama <- names(new.clicks) if(sum(nzchar(nama)) != n) nama <- names(new.redraws) if(sum(nzchar(nama)) != n) nama <- paste("Box", seq_len(n)) aspect <- 3/max(4, nchar(nama)) } B <- P$B n <- length(new.clicks) switch(side, right={ new.width <- if(!is.null(len)) len else sidelengths(B)[1]/2 extraspace <- owin(B$xrange[2] + c(0, new.width), B$yrange) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect) }, left={ new.width <- if(!is.null(len)) len else sidelengths(B)[1]/2 extraspace <- owin(B$xrange[1] - c(new.width, 0), B$yrange) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect) }, top={ new.height <- if(!is.null(len)) len else sidelengths(B)[2]/2 extraspace <- owin(B$xrange, B$yrange[2] + c(0, new.height)) new.boxes <- layout.boxes(extraspace, n, ..., 
aspect=aspect, horizontal=TRUE) }, bottom={ new.height <- if(!is.null(len)) len else sidelengths(B)[2]/2 extraspace <- owin(B$xrange, B$yrange[1] - c(new.height, 0)) new.boxes <- layout.boxes(extraspace, n, ..., aspect=aspect, horizontal=TRUE) }) with(P, simplepanel(title, bounding.box(B, extraspace), append(boxes, new.boxes), append(clicks, new.clicks), append(redraws, new.redraws), exit, env)) } redraw.simplepanel <- function(P, verbose=FALSE) { verifyclass(P, "simplepanel") if(verbose) cat("Redrawing entire panel\n") with(P, { ntitle <- sum(nzchar(title)) plot(B, type="n", main=title) for(j in seq_along(nama)) (redraws[[j]])(boxes[[j]], nama[j], env) }) invisible(NULL) } clear.simplepanel <- function(P) { verifyclass(P, "simplepanel") plot(P$B, main="") invisible(NULL) } run.simplepanel <- function(P, popup=TRUE, verbose=FALSE) { verifyclass(P, "simplepanel") if(popup) dev.new() ntitle <- sum(nzchar(P$title)) opa <- par(mar=c(0,0,ntitle+0.2,0),ask=FALSE) with(P, { # interaction loop more <- TRUE while(more) { redraw.simplepanel(P, verbose=verbose) xy <- locator(1) if(is.null(xy)) { if(verbose) cat("No (x,y) coordinates\n") break } found <- FALSE for(j in seq_along(boxes)) { if(inside.owin(xy$x, xy$y, boxes[[j]])) { found <- TRUE if(verbose) cat(paste("Caught click on", sQuote(nama[j]), "\n")) more <- (clicks[[j]])(env, xy) if(!is.logical(more) || length(more) != 1) { warning(paste("Click function for", sQuote(nama[j]), "did not return TRUE/FALSE")) more <- FALSE } if(verbose) cat(if(more) "Continuing\n" else "Terminating\n") break } } if(verbose && !found) cat(paste("Coordinates", paren(paste(xy, collapse=",")), "not matched to any box\n")) } }) if(verbose) cat("Calling exit function\n") rslt <- with(P, exit(env)) # revert to original graphics parameters par(opa) # close popup window? 
if(popup) dev.off() # return value of 'exit' function return(rslt) } layout.boxes <- function(B, n, horizontal=FALSE, aspect=0.5, usefrac=0.9){ # make n boxes in B stopifnot(is.rectangle(B)) stopifnot(n > 0) width <- sidelengths(B)[1] height <- sidelengths(B)[2] if(!horizontal) { heightshare <- height/n useheight <- min(width * aspect, heightshare * usefrac) usewidth <- min(useheight /aspect, width * usefrac) lostwidth <- width - usewidth lostheightshare <- heightshare - useheight template <- owin(c(0, usewidth), c(0, useheight)) boxes <- list() boxes[[1]] <- shift(template, c(B$xrange[1]+lostwidth/2, B$yrange[1] + lostheightshare/2)) if(n > 1) for(j in 2:n) boxes[[j]] <- shift(boxes[[j-1]], c(0, heightshare)) } else { boxes <- layout.boxes(flipxy(B), n, horizontal=FALSE, aspect=1/aspect, usefrac=usefrac) boxes <- lapply(boxes, flipxy) } return(boxes) } # default redraw function for control buttons dflt.redraw <- function(button, name, env) { plot(button, add=TRUE, border="pink") text(centroid.owin(button), labels=name) } print.simplepanel <- function(x, ...) { nama <- x$nama cat("simplepanel object\n") cat(paste("\tTitle:", sQuote(x$title), "\n")) cat("\tPanel names:") for(i in seq_along(nama)) { if(i %% 6 == 1) cat("\n\t") cat(paste0(sQuote(nama[i]), " ")) } cat("\n") return(invisible(NULL)) } spatstat/R/envelope.R0000755000176000001440000016675712237642727014353 0ustar ripleyusers# # envelope.R # # computes simulation envelopes # # $Revision: 2.51 $ $Date: 2013/10/21 01:39:54 $ # envelope <- function(Y, fun, ...) { UseMethod("envelope") } # ................................................................. 
# A 'simulation recipe' contains the following variables # # type = Type of simulation # "csr": uniform Poisson process # "rmh": simulated realisation of fitted Gibbs or Poisson model # "kppm": simulated realisation of fitted cluster model # "expr": result of evaluating a user-supplied expression # "list": user-supplied list of point patterns # # expr = expression that is repeatedly evaluated to generate simulations # # envir = environment in which to evaluate the expression `expr' # # 'csr' = TRUE iff the model is (known to be) uniform Poisson # # pois = TRUE if model is known to be Poisson # # ................................................................... simulrecipe <- function(type, expr, envir, csr, pois=csr) { if(csr && !pois) warning("Internal error: csr=TRUE but pois=FALSE") out <- list(type=type, expr=expr, envir=envir, csr=csr, pois=pois) class(out) <- "simulrecipe" out } envelope.ppp <- function(Y, fun=Kest, nsim=99, nrank=1, ..., simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) if(is.null(simulate)) { # ................................................... 
# Realisations of complete spatial randomness # will be generated by rpoispp # Data pattern X is argument Y # Data pattern determines intensity of Poisson process X <- Y sY <- summary(Y, checkdup=FALSE) Yintens <- sY$intensity Ywin <- Y$window # expression that will be evaluated simexpr <- if(!is.marked(Y)) { # unmarked point pattern expression(rpoispp(Yintens, win=Ywin)) } else { # marked point pattern Ymarx <- marks(Y, dfok=FALSE) expression({A <- rpoispp(Yintens, win=Ywin); A %mark% sample(Ymarx, A$n, replace=TRUE)}) } # evaluate in THIS environment simrecipe <- simulrecipe(type = "csr", expr = simexpr, envir = envir.here, csr = TRUE, pois = TRUE) } else { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate # Data pattern is argument Y X <- Y } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.ppm <- function(Y, fun=Kest, nsim=99, nrank=1, ..., simulate=NULL, verbose=TRUE, clipdata=TRUE, start=NULL, control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- data.ppm(Y) if(is.null(simulate)) { # ................................................... 
# Simulated realisations of the fitted model Y # will be generated pois <- is.poisson(Y) csr <- is.stationary(Y) && pois type <- if(csr) "csr" else "rmh" # Set up parameters for rmh rmodel <- rmhmodel(Y, verbose=FALSE) if(is.null(start)) start <- list(n.start=X$n) rstart <- rmhstart(start) rcontr <- rmhcontrol(control) # pre-digest arguments rmhinfolist <- rmh(rmodel, rstart, rcontr, preponly=TRUE, verbose=FALSE) # expression that will be evaluated simexpr <- expression(rmhEngine(rmhinfolist, verbose=FALSE)) envir <- envir.here # evaluate in THIS environment simrecipe <- simulrecipe(type = type, expr = simexpr, envir = envir.here, csr = csr, pois = pois) } else { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } envelope.kppm <- function(Y, fun=Kest, nsim=99, nrank=1, ..., simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) { cl <- short.deparse(sys.call()) if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) if(is.null(fun)) fun <- Kest envir.user <- if(!is.null(envir.simul)) envir.simul else parent.frame() envir.here <- sys.frame(sys.nframe()) # Extract data pattern X from fitted model Y X <- Y$X if(is.null(simulate)) { # Simulated realisations of the fitted model Y # will be generated using simulate.kppm kmodel <- Y # expression that will be evaluated simexpr <- expression(simulate(kmodel)[[1]]) # evaluate in THIS environment simrecipe <- 
simulrecipe(type = "kppm", expr = simexpr, envir = envir.here, csr = FALSE, pois = FALSE) } else { # ................................................... # Simulations are determined by 'simulate' argument # Processing is deferred to envelopeEngine simrecipe <- simulate } envelopeEngine(X=X, fun=fun, simul=simrecipe, nsim=nsim, nrank=nrank, ..., verbose=verbose, clipdata=clipdata, transform=transform, global=global, ginterval=ginterval, savefuns=savefuns, savepatterns=savepatterns, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, Yname=Yname, maxnerr=maxnerr, cl=cl, envir.user=envir.user, do.pwrong=do.pwrong) } ## ................................................................. ## Engine for simulating and computing envelopes ## ................................................................. # # X is the data point pattern, which could be ppp, pp3, ppx etc # X determines the class of pattern expected from the simulations # envelopeEngine <- function(X, fun, simul, nsim=99, nrank=1, ..., verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, saveresultof=NULL, weights=NULL, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, internal=NULL, cl=NULL, envir.user=envir.user, expected.arg="r", do.pwrong=FALSE) { # envir.here <- sys.frame(sys.nframe()) # ---------------------------------------------------------- # Determine Simulation # ---------------------------------------------------------- # Identify class of patterns to be simulated, from data pattern X Xclass <- if(is.ppp(X)) "ppp" else if(is.pp3(X)) "pp3" else if(is.ppx(X)) "ppx" else stop("Unrecognised class of point pattern") Xobjectname <- paste("point pattern of class", sQuote(Xclass)) # Option to use weighted average if(use.weights <- !is.null(weights)) { # weight can be either a numeric vector or a function if(is.numeric(weights)) { compute.weights <- FALSE weightfun <- NULL } else if(is.function(weights)) { compute.weights <- TRUE weightfun <- 
weights weights <- NULL } else stop("weights should be either a function or a numeric vector") } else compute.weights <- FALSE # Undocumented option to generate patterns only. patterns.only <- identical(internal$eject, "patterns") # Undocumented option to evaluate 'something' for each pattern if(savevalues <- !is.null(saveresultof)) { stopifnot(is.function(saveresultof)) SavedValues <- list() } # Identify type of simulation from argument 'simul' if(inherits(simul, "simulrecipe")) { # .................................................. # simulation recipe is given simtype <- simul$type simexpr <- simul$expr envir <- simul$envir csr <- simul$csr pois <- simul$pois } else { # ................................................... # simulation is specified by argument `simulate' to envelope() simulate <- simul # which should be an expression, or a list of point patterns, # or an envelope object. csr <- FALSE # override if(!is.null(icsr <- internal$csr)) csr <- icsr pois <- csr model <- NULL if(inherits(simulate, "envelope")) { # envelope object: see if it contains stored point patterns simpat <- attr(simulate, "simpatterns") if(!is.null(simpat)) simulate <- simpat else stop(paste("The argument", sQuote("simulate"), "is an envelope object but does not contain", "any saved point patterns.")) } if(is.expression(simulate)) { # The user-supplied expression 'simulate' will be evaluated repeatedly simtype <- "expr" simexpr <- simulate envir <- envir.user } else if(is.list(simulate) && ( (is.ppp(X) && all(unlist(lapply(simulate, is.ppp)))) || (is.pp3(X) && all(unlist(lapply(simulate, is.pp3)))) || (is.ppx(X) && all(unlist(lapply(simulate, is.ppx)))))) { # The user-supplied list of point patterns will be used simtype <- "list" SimDataList <- simulate # expression that will be evaluated simexpr <- expression(SimDataList[[i]]) envir <- envir.here # ensure that `i' is defined i <- 1 # any messages? 
if(!is.null(mess <- attr(simulate, "internal"))) { # determine whether these point patterns are realisations of CSR csr <- !is.null(mc <- mess$csr) && mc } } else stop(paste(sQuote("simulate"), "should be an expression, or a list of point patterns")) } # ------------------------------------------------------------------- # Determine clipping window # ------------------------------------------------------------------ if(clipdata) { # Generate one realisation Xsim <- eval(simexpr, envir=envir) if(!inherits(Xsim, Xclass)) switch(simtype, csr=stop(paste("Internal error:", Xobjectname, "not generated")), rmh=stop(paste("Internal error: rmh did not return an", Xobjectname)), kppm=stop(paste("Internal error: simulate.kppm did not return an", Xobjectname)), expr=stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)), list=stop(paste("Internal error: list entry was not an", Xobjectname)), stop(paste("Internal error:", Xobjectname, "not generated")) ) # Extract window clipwin <- Xsim$window if(!is.subset.owin(clipwin, X$window)) warning("Window containing simulated patterns is not a subset of data window") } # ------------------------------------------------------------------ # Summary function to be applied # ------------------------------------------------------------------ if(is.null(fun)) stop("Internal error: fun is NULL") # Name of function, for error messages fname <- if(is.name(substitute(fun))) short.deparse(substitute(fun)) else if(is.character(fun)) fun else "fun" fname <- sQuote(fname) # R function to apply if(is.character(fun)) { gotfun <- try(get(fun, mode="function")) if(inherits(gotfun, "try-error")) stop(paste("Could not find a function named", sQuote(fun))) fun <- gotfun } else if(!is.function(fun)) stop(paste("unrecognised format for function", fname)) fargs <- names(formals(fun)) if(!any(c(expected.arg, "...") %in% fargs)) stop(paste(fname, "should have", ngettext(length(expected.arg), "an argument", "arguments"), 
"named", commasep(sQuote(expected.arg)), "or a", sQuote("..."), "argument")) usecorrection <- any(c("correction", "...") %in% fargs) # --------------------------------------------------------------------- # validate other arguments if((nrank %% 1) != 0) stop("nrank must be an integer") if((nsim %% 1) != 0) stop("nsim must be an integer") stopifnot(nrank > 0 && nrank < nsim/2) rgiven <- any(expected.arg %in% names(list(...))) if(tran <- !is.null(transform)) { stopifnot(is.expression(transform)) transform.funX <- dotexpr.to.call(transform, "funX", "eval.fv") transform.funXsim <- dotexpr.to.call(transform, "funXsim", "eval.fv") # 'transform.funX' and 'transform.funXsim' are unevaluated calls to eval.fv # .... old code .... # 'transform' is an expression # aa <- substitute(substitute(tt, list(.=as.name("funX"))), # list(tt=transform)) # 'aa' is a language expression invoking 'substitute' # bb <- eval(parse(text=deparse(aa))) # 'bb' is an expression obtained by replacing "." by "funX" # transform.funX <- as.call(bb) # transform.funX[[1]] <- as.name("eval.fv") # 'transform.funX' is an unevaluated call to eval.fv # aa <- substitute(substitute(tt, list(.=as.name("funXsim"))), # list(tt=transform)) # bb <- eval(parse(text=deparse(aa))) # transform.funXsim <- as.call(bb) # transform.funXsim[[1]] <- as.name("eval.fv") } if(!is.null(ginterval)) stopifnot(is.numeric(ginterval) && length(ginterval) == 2) # --------------------------------------------------------------------- # Evaluate function for data pattern X # ------------------------------------------------------------------ Xarg <- if(!clipdata) X else X[clipwin] corrx <- if(usecorrection) list(correction="best") else NULL funX <- do.call(fun, resolve.defaults(list(Xarg), list(...), corrx)) if(!inherits(funX, "fv")) stop(paste("The function", fname, "must return an object of class", sQuote("fv"))) argname <- fvnames(funX, ".x") valname <- fvnames(funX, ".y") has.theo <- "theo" %in% fvnames(funX, "*") csr.theo <- csr && 
has.theo if(tran) { # extract only the recommended value if(csr.theo) funX <- funX[, c(argname, valname, "theo")] else funX <- funX[, c(argname, valname)] # apply the transformation to it funX <- eval(transform.funX) } rvals <- funX[[argname]] fX <- funX[[valname]] # default domain over which to maximise alim <- attr(funX, "alim") if(global && is.null(ginterval)) ginterval <- if(rgiven) range(rvals) else alim #-------------------------------------------------------------------- # Determine number of simulations # ------------------------------------------------------------------ # ## determine whether dual simulations are required ## (one set of simulations to calculate the theoretical mean, ## another independent set of simulations to obtain the critical point.) dual <- (global && !csr.theo && !VARIANCE) Nsim <- if(!dual) nsim else (nsim + nsim2) # if taking data from a list of point patterns, # check there are enough of them if(simtype == "list" && Nsim > length(SimDataList)) stop(paste("Number of simulations", paren(if(!dual) paste(nsim) else paste(nsim, "+", nsim2, "=", Nsim) ), "exceeds number of point pattern datasets supplied")) # Undocumented secret exit # ------------------------------------------------------------------ if(patterns.only) { # generate simulated realisations and return only these patterns if(verbose) { action <- if(simtype == "list") "Extracting" else "Generating" descrip <- switch(simtype, csr = "simulations of CSR", rmh = paste("simulated realisations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm = "simulated realisations of fitted cluster model", expr = "simulations by evaluating expression", list = "point patterns from list", "simulated realisations") explan <- if(dual) paren(paste(nsim2, "to estimate the mean and", nsim, "to calculate envelopes")) else "" cat(paste(action, Nsim, descrip, explan, "...\n")) } XsimList <- list() # start simulation loop for(i in 1:Nsim) { if(verbose) progressreport(i, Nsim) Xsim <- 
eval(simexpr, envir=envir) if(!inherits(Xsim, Xclass)) switch(simtype, csr={ stop(paste("Internal error:", Xobjectname, "not generated")) }, rmh={ stop(paste("Internal error: rmh did not return an", Xobjectname)) }, kppm={ stop(paste("Internal error: simulate.kppm did not return an", Xobjectname)) }, expr={ stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)) }, list={ stop(paste("Internal error: list entry was not an", Xobjectname)) }, stop(paste("Internal error:", Xobjectname, "not generated")) ) XsimList[[i]] <- Xsim } if(verbose) { cat(paste("Done.\n")) flush.console() } attr(XsimList, "internal") <- list(csr=csr) return(XsimList) } # capture main decision parameters EnvelopeInfo <- list(call=cl, Yname=Yname, valname=valname, csr=csr, csr.theo=csr.theo, pois=pois, simtype=simtype, nrank=nrank, nsim=nsim, Nsim=Nsim, global=global, dual=dual, nsim2=nsim2, VARIANCE=VARIANCE, nSD=nSD, use.weights=use.weights, do.pwrong=do.pwrong) # ---------------------------------------- ######### SIMULATE ####################### # ---------------------------------------- if(verbose) { action <- if(simtype == "list") "Extracting" else "Generating" descrip <- switch(simtype, csr = "simulations of CSR", rmh = paste("simulated realisations of fitted", if(pois) "Poisson" else "Gibbs", "model"), kppm = "simulated realisations of fitted cluster model", expr = "simulations by evaluating expression", list = "point patterns from list", "simulated patterns") explan <- if(dual) paren(paste(nsim2, "to estimate the mean and", nsim, "to calculate envelopes")) else "" cat(paste(action, Nsim, descrip, explan, "...\n")) } # determine whether simulated point patterns should be saved catchpatterns <- savepatterns && simtype != "list" Caughtpatterns <- list() # allocate space for computed function values nrvals <- length(rvals) simvals <- matrix(, nrow=nrvals, ncol=Nsim) # allocate space for weights to be computed if(compute.weights) weights <- numeric(Nsim) # 
inferred values of function argument 'r' or equivalent parameters if(identical(expected.arg, "r")) { # Kest, etc inferred.r.args <- list(r=rvals) } else if(identical(expected.arg, c("rmax", "nrval"))) { # K3est, etc inferred.r.args <- list(rmax=max(rvals), nrval=length(rvals)) } else stop(paste("Don't know how to infer values of", commasep(expected.arg))) # arguments for function funargs <- resolve.defaults(inferred.r.args, list(...), if(usecorrection) list(correction="best") else NULL) # start simulation loop nerr <- 0 for(i in 1:Nsim) { ok <- FALSE # safely generate a random pattern and apply function while(!ok) { Xsim <- eval(simexpr, envir=envir) # check valid point pattern if(!inherits(Xsim, Xclass)) switch(simtype, csr=stop(paste("Internal error:", Xobjectname, "not generated")), rmh=stop(paste("Internal error: rmh did not return an", Xobjectname)), kppm=stop(paste("Internal error:", "simulate.kppm did not return an", Xobjectname)), expr=stop(paste("Evaluating the expression", sQuote("simulate"), "did not yield an", Xobjectname)), list=stop(paste("Internal error: list entry was not an", Xobjectname)), stop(paste("Internal error:", Xobjectname, "not generated")) ) if(catchpatterns) Caughtpatterns[[i]] <- Xsim if(savevalues) SavedValues[[i]] <- saveresultof(Xsim) if(compute.weights) { wti <- weightfun(Xsim) if(!is.numeric(wti)) stop("weightfun did not return a numeric value") if(length(wti) != 1) stop("weightfun should return a single numeric value") weights[i] <- wti } # apply function safely funXsim <- try(do.call(fun, append(list(Xsim), funargs))) ok <- !inherits(funXsim, "try-error") if(!ok) { nerr <- nerr + 1 if(nerr > maxnerr) stop("Exceeded maximum number of errors") cat("[retrying]\n") } } # sanity checks if(i == 1) { if(!inherits(funXsim, "fv")) stop(paste("When applied to a simulated pattern, the function", fname, "did not return an object of class", sQuote("fv"))) argname.sim <- fvnames(funXsim, ".x") valname.sim <- fvnames(funXsim, ".y") 
if(argname.sim != argname) stop(paste("The objects returned by", fname, "when applied to a simulated pattern", "and to the data pattern", "are incompatible. They have different argument names", sQuote(argname.sim), "and", sQuote(argname), "respectively")) if(valname.sim != valname) stop(paste("When", fname, "is applied to a simulated pattern", "it provides an estimate named", sQuote(valname.sim), "whereas the estimate for the data pattern is named", sQuote(valname), ". Try using the argument", sQuote("correction"), "to make them compatible")) rfunX <- with(funX, ".x") rfunXsim <- with(funXsim, ".x") if(!identical(rfunX, rfunXsim)) stop(paste("When", fname, "is applied to a simulated pattern,", "the values of the argument", sQuote(argname.sim), "are different from those used for the data.")) } if(tran) { # extract only the recommended value if(csr.theo) funXsim <- funXsim[, c(argname, valname, "theo")] else funXsim <- funXsim[, c(argname, valname)] # apply the transformation to it funXsim <- eval(transform.funXsim) } # extract the values for simulation i simvals.i <- funXsim[[valname]] if(length(simvals.i) != nrvals) stop("Vectors of function values have incompatible lengths") simvals[ , i] <- funXsim[[valname]] if(verbose) progressreport(i, Nsim) } ## end simulation loop if(verbose) { cat("\nDone.\n") flush.console() } # ........................................................... # save functions and/or patterns if so commanded if(savefuns) { alldata <- cbind(rvals, simvals) simnames <- paste("sim", 1:nsim, sep="") colnames(alldata) <- c("r", simnames) alldata <- as.data.frame(alldata) SimFuns <- fv(alldata, argu="r", ylab=attr(funX, "ylab"), valu="sim1", fmla= deparse(. 
~ r), alim=attr(funX, "alim"), labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:nsim, sep="")), fname=attr(funX, "fname")) fvnames(SimFuns, ".") <- simnames } if(savepatterns) SimPats <- if(simtype == "list") SimDataList else Caughtpatterns ######### COMPUTE ENVELOPES ####################### etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" if(dual) { jsim <- 1:nsim jsim.mean <- nsim + 1:nsim2 } else { jsim <- jsim.mean <- NULL } result <- envelope.matrix(simvals, funX=funX, jsim=jsim, jsim.mean=jsim.mean, type=etype, csr=csr, use.theory=csr.theo, nrank=nrank, ginterval=ginterval, nSD=nSD, Yname=Yname, do.pwrong=do.pwrong, weights=weights) # tack on envelope information attr(result, "einfo") <- EnvelopeInfo # tack on functions and/or patterns if so commanded if(savefuns) attr(result, "simfuns") <- SimFuns if(savepatterns) { attr(result, "simpatterns") <- SimPats attr(result, "datapattern") <- X } # save function weights if(use.weights) attr(result, "weights") <- weights # undocumented - tack on values of some other quantity if(savevalues) { attr(result, "simvalues") <- SavedValues attr(result, "datavalue") <- saveresultof(X) } return(result) } plot.envelope <- function(x, ...) { shade.given <- ("shade" %in% names(list(...))) shade.implied <- !is.null(fvnames(x, ".s")) if(!(shade.given || shade.implied)) { # ensure x has default 'shade' attribute # (in case x was produced by an older version of spatstat) if(all(c("lo", "hi") %in% colnames(x))) fvnames(x, ".s") <- c("lo", "hi") else warning("Unable to determine shading for envelope") } NextMethod("plot") } print.envelope <- function(x, ...) 
{
  ## body of print.envelope: describe how the envelope was constructed,
  ## using the "einfo" attribute saved when the envelope was computed
  e <- attr(x, "einfo")
  g <- e$global
  csr <- e$csr
  pois <- e$pois
  if(is.null(pois)) pois <- csr
  simtype <- e$simtype
  nr <- e$nrank
  nsim <- e$nsim
  V <- e$VARIANCE
  fname <- deparse(attr(x, "ylab"))
  type <- if(V) paste("Pointwise", e$nSD, "sigma") else
          if(g) "Simultaneous" else "Pointwise"
  cat(paste(type, "critical envelopes for", fname, "\n"))
  cat(paste("and observed value for", sQuote(e$Yname), "\n"))
  if(!is.null(valname <- e$valname))
    cat(paste("Edge correction:", dQuote(valname), "\n"))
  # determine *actual* type of simulation
  descrip <- if(csr) "simulations of CSR" else
             if(!is.null(simtype)) {
               switch(simtype,
                      csr="simulations of CSR",
                      rmh=paste("simulations of fitted",
                                if(pois) "Poisson" else "Gibbs",
                                "model"),
                      kppm="simulations of fitted cluster model",
                      expr="evaluations of user-supplied expression",
                      list="point pattern datasets in user-supplied list",
                      funs="columns of user-supplied data")
             } else "simulations of fitted model"
  #
  cat(paste("Obtained from", nsim, descrip, "\n"))
  #
  if(!is.null(e$dual) && e$dual)
    cat(paste("Theoretical (i.e. null) mean value of", fname,
              "estimated from a separate set of",
              e$nsim2, "simulations\n"))
  if(!is.null(attr(x, "simfuns")))
    cat("(All simulated function values are stored)\n")
  if(!is.null(attr(x, "simpatterns")))
    cat("(All simulated point patterns are stored)\n")
  # significance level: one-sided for global envelopes, two-sided otherwise
  alpha <- if(g) { nr/(nsim+1) } else { 2 * nr/(nsim+1) }
  if(!V) {
    # significance interpretation!
    cat(paste("Significance level of",
              if(!g) "pointwise",
              "Monte Carlo test:",
              paste(if(g) nr else 2 * nr,
                    "/", nsim+1, sep=""),
              "=", alpha, "\n"))
  }
  if(!is.null(pwrong <- attr(x, "pwrong"))) {
    cat(paste("\t[Estimated significance level of pointwise excursions:",
              paste("pwrong=", signif(pwrong, 3), "]\n", sep="")))
  }
  cat("\n")
  NextMethod("print")
}

# Summary method for class 'envelope': a more detailed description than
# print.envelope, explaining how the upper/lower limits were computed.
summary.envelope <- function(object, ...)
{
  e <- attr(object, "einfo")
  g <- e$global
  nr <- e$nrank
  nsim <- e$nsim
  csr <- e$csr
  pois <- e$pois
  if(is.null(pois)) pois <- csr
  has.theo <- "theo" %in% fvnames(object, "*")
  csr.theo <- csr && has.theo
  simtype <- e$simtype
  fname <- deparse(attr(object, "ylab"))
  V <- e$VARIANCE
  type <- if(V) paste("Pointwise", e$nSD, "sigma") else
          if(g) "Simultaneous" else "Pointwise"
  cat(paste(type, "critical envelopes for", fname, "\n"))
  # determine *actual* type of simulation
  descrip <- if(csr) "simulations of CSR" else
             if(!is.null(simtype)) {
               switch(simtype,
                      csr="simulations of CSR",
                      rmh=paste("simulations of fitted",
                                if(pois) "Poisson" else "Gibbs",
                                "model"),
                      kppm="simulations of fitted cluster model",
                      expr="evaluations of user-supplied expression",
                      list="point pattern datasets in user-supplied list",
                      funs="columns of user-supplied data",
                      "simulated point patterns")
             } else "simulations of fitted model"
  #
  cat(paste("Obtained from", nsim, descrip, "\n"))
  #
  if(!is.null(attr(object, "simfuns")))
    cat(paste("(All", nsim, "simulated function values",
              "are stored in attr(,", dQuote("simfuns"), ") )\n"))
  if(!is.null(attr(object, "simpatterns")))
    cat(paste("(All", nsim, "simulated point patterns",
              "are stored in attr(,", dQuote("simpatterns"), ") )\n"))
  #
  if(V) {
    # nSD envelopes
    cat(paste("Envelopes computed as sample mean plus/minus",
              e$nSD, "sample standard deviations\n"))
  } else {
    # critical envelopes based on ranked simulated values
    lo.ord <- if(nr == 1) "minimum" else paste(ordinal(nr), "smallest")
    hi.ord <- if(nr == 1) "maximum" else paste(ordinal(nsim-nr+1), "largest")
    if(g)
      cat(paste("Envelopes computed as",
                if(csr.theo) "theoretical curve" else "mean of simulations",
                "plus/minus", hi.ord,
                "simulated value of maximum absolute deviation\n"))
    else {
      cat(paste("Upper envelope: pointwise", hi.ord,"of simulated curves\n"))
      cat(paste("Lower envelope: pointwise", lo.ord,"of simulated curves\n"))
    }
    alpha <- if(g) { nr/(nsim+1) } else { 2 * nr/(nsim+1) }
    cat(paste("Significance level of Monte Carlo test:",
              paste(if(g)
nr else 2 * nr, "/", nsim+1, sep=""), "=", alpha, "\n")) } cat(paste("Data:", e$Yname, "\n")) return(invisible(NULL)) } # envelope.matrix # core functionality to compute envelope values # theory = funX[["theo"]] # observed = fX envelope.matrix <- function(Y, ..., rvals=NULL, observed=NULL, theory=NULL, funX=NULL, nsim=NULL, nsim2=NULL, jsim=NULL, jsim.mean=NULL, type=c("pointwise", "global", "variance"), csr=FALSE, use.theory = csr, nrank=1, ginterval=NULL, nSD=2, savefuns=FALSE, check=TRUE, Yname=NULL, do.pwrong=FALSE, weights=NULL, precomputed=NULL) { if(is.null(Yname)) Yname <- short.deparse(substitute(Y)) type <- match.arg(type) if(!is.null(funX)) stopifnot(is.fv(funX)) pwrong <- NULL use.weights <- !is.null(weights) cheat <- !is.null(precomputed) if(is.null(rvals) && is.null(observed) && !is.null(funX)) { # assume funX is summary function for observed data rvals <- with(funX, .x) observed <- with(funX, .y) theory <- if(use.theory && "theo" %in% names(funX)) with(funX, theo) else NULL } else if(check) { # validate vectors of data if(is.null(rvals)) stop("rvals must be supplied") if(is.null(observed)) stop("observed must be supplied") if(!is.null(Y)) stopifnot(length(rvals) == nrow(Y)) stopifnot(length(observed) == length(rvals)) } if(use.theory) { use.theory <- !is.null(theory) if(use.theory && check) stopifnot(length(theory) == length(rvals)) } simvals <- Y fX <- observed atr <- if(!is.null(funX)) attributes(funX) else list(alim=range(rvals), ylab=quote(f(r)), yexp=quote(f(r)), fname="f") if(!cheat) { # ................ standard calculation ..................... 
# validate weights if(use.weights) check.nvector(weights, ncol(simvals), things="simulated functions", naok=TRUE) # determine numbers of columns used Ncol <- ncol(simvals) if(Ncol < 2) stop("Need at least 2 columns of function values") if(is.null(jsim) && !is.null(nsim)) { # usual case - 'nsim' determines 'jsim' if(nsim > Ncol) stop(paste(nsim, "simulations are not available; only", Ncol, "columns provided")) jsim <- 1:nsim if(!is.null(nsim2)) { # 'nsim2' determines 'jsim.mean' if(nsim + nsim2 > Ncol) stop(paste(nsim, "+", nsim2, "=", nsim+nsim2, "simulations are not available; only", Ncol, "columns provided")) jsim.mean <- nsim + 1:nsim2 } } restrict.columns <- !is.null(jsim) dual <- !is.null(jsim.mean) } else { # ................ precomputed values .................. # validate weights if(use.weights) check.nvector(weights, nsim, things="simulations", naok=TRUE) restrict.columns <- FALSE dual <- FALSE } shadenames <- NULL switch(type, pointwise = { # ....... POINTWISE ENVELOPES ............................... 
if(cheat) { stopifnot(checkfields(precomputed, c("lo", "hi"))) lo <- precomputed$lo hi <- precomputed$hi } else { simvals[is.infinite(simvals)] <- NA if(restrict.columns) { simvals <- simvals[,jsim] if(use.weights) weights <- weights[jsim] } nsim <- ncol(simvals) nsim.mean <- NULL if(nrank == 1) { lohi <- apply(simvals, 1, range) } else { lohi <- apply(simvals, 1, function(x, n) { sort(x)[n] }, n=c(nrank, nsim-nrank+1)) } lo <- lohi[1,] hi <- lohi[2,] } lo.name <- paste("lower pointwise envelope of %s from simulations") hi.name <- paste("upper pointwise envelope of %s from simulations") # if(use.theory) { results <- data.frame(r=rvals, obs=fX, theo=theory, lo=lo, hi=hi) } else { m <- if(cheat) precomputed$mmean else if(!use.weights) apply(simvals, 1, mean, na.rm=TRUE) else apply(simvals, 1, weighted.mean, w=weights, na.rm=TRUE) results <- data.frame(r=rvals, obs=fX, mmean=m, lo=lo, hi=hi) } shadenames <- c("lo", "hi") if(do.pwrong) { # estimate the p-value for the 'wrong test' if(cheat) { pwrong <- precomputed$pwrong do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE) } else { dataranks <- t(apply(simvals, 1, rank, ties.method="random")) is.signif <- (dataranks <= nrank) | (dataranks >= nsim-nrank+1) is.signif.somewhere <- apply(is.signif, 2, any) pwrong <- sum(is.signif.somewhere)/nsim } } }, global = { # ..... SIMULTANEOUS ENVELOPES .......................... if(cheat) { # ... use precomputed values .. stopifnot(checkfields(precomputed, c("lo", "hi"))) lo <- precomputed$lo hi <- precomputed$hi if(use.theory) { reference <- theory } else { stopifnot(checkfields(precomputed, "mmean")) reference <- precomputed$mmean } nsim.mean <- NULL domain <- rep.int(TRUE, length(rvals)) } else { # ... 
normal case: compute envelopes from simulations if(!is.null(ginterval)) { domain <- (rvals >= ginterval[1]) & (rvals <= ginterval[2]) funX <- funX[domain, ] simvals <- simvals[domain, ] } else domain <- rep.int(TRUE, length(rvals)) simvals[is.infinite(simvals)] <- NA if(use.theory) { reference <- theory[domain] if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } nsim.mean <- NULL } else if(dual) { # Estimate the mean from one set of columns # Form envelopes from another set of columns simvals.mean <- simvals[, jsim.mean] reference <- mmean <- if(!use.weights) apply(simvals.mean, 1, mean, na.rm=TRUE) else apply(simvals.mean, 1, weighted.mean, w=weights[jsim.mean], na.rm=TRUE) nsim.mean <- ncol(simvals.mean) # retain only columns used for envelope simvals <- simvals[, jsim] } else { # Compute the mean and envelopes using the same data if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } reference <- mmean <- if(!use.weights) apply(simvals.mean, 1, mean, na.rm=TRUE) else apply(simvals.mean, 1, weighted.mean, w=weights, na.rm=TRUE) nsim.mean <- NULL } nsim <- ncol(simvals) # compute max absolute deviations deviations <- sweep(simvals, 1, reference) suprema <- apply(abs(deviations), 2, max, na.rm=TRUE) # ranked deviations dmax <- sort(suprema)[nsim-nrank+1] # simultaneous bands lo <- reference - dmax hi <- reference + dmax } lo.name <- "lower critical boundary for %s" hi.name <- "upper critical boundary for %s" if(use.theory) results <- data.frame(r=rvals[domain], obs=fX[domain], theo=reference, lo=lo, hi=hi) else results <- data.frame(r=rvals[domain], obs=fX[domain], mmean=reference, lo=lo, hi=hi) shadenames <- c("lo", "hi") if(do.pwrong) warning(paste("Argument", sQuote("do.pwrong=TRUE"), "ignored;", "it is not relevant to global envelopes")) }, variance={ # ....... POINTWISE MEAN, VARIANCE etc ...................... if(cheat) { # .... use precomputed values .... 
stopifnot(checkfields(precomputed, c("Ef", "varf"))) Ef <- precomputed$Ef varf <- precomputed$varf } else { # .... normal case: compute from simulations simvals[is.infinite(simvals)] <- NA if(restrict.columns) { simvals <- simvals[, jsim] if(use.weights) weights <- weights[jsim] } nsim <- ncol(simvals) if(!use.weights) { Ef <- apply(simvals, 1, mean, na.rm=TRUE) varf <- apply(simvals, 1, var, na.rm=TRUE) } else { Ef <- apply(simvals, 1, weighted.mean, w=weights, na.rm=TRUE) varf <- apply(simvals, 1, weighted.var, w=weights, na.rm=TRUE) } } nsim.mean <- NULL # derived quantities sd <- sqrt(varf) stdres <- (fX-Ef)/sd stdres[!is.finite(stdres)] <- NA # critical limits lo <- Ef - nSD * sd hi <- Ef + nSD * sd lo.name <- paste("lower", nSD, "sigma critical limit for %s") hi.name <- paste("upper", nSD, "sigma critical limit for %s") # confidence interval loCI <- Ef - nSD * sd/sqrt(nsim) hiCI <- Ef + nSD * sd/sqrt(nsim) loCI.name <- paste("lower", nSD, "sigma confidence bound", "for mean of simulated %s") hiCI.name <- paste("upper", nSD, "sigma confidence bound", "for mean of simulated %s") # put together if(use.theory) { results <- data.frame(r=rvals, obs=fX, theo=theory, lo=lo, hi=hi) shadenames <- c("lo", "hi") morestuff <- data.frame(mmean=Ef, var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) mslabl <- c("bar(%s)(r)", "paste(var,%s)(r)", "paste(res,%s)(r)", "paste(stdres,%s)(r)", "%s[loCI](r)", "%s[hiCI](r)") wted <- if(use.weights) "weighted " else NULL msdesc <- c(paste0(wted, "sample mean of %s from simulations"), paste0(wted, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } else { results <- data.frame(r=rvals, obs=fX, mmean=Ef, lo=lo, hi=hi) shadenames <- c("lo", "hi") morestuff <- data.frame(var=varf, res=fX-Ef, stdres=stdres, loCI=loCI, hiCI=hiCI) mslabl <- c("paste(var,%s)(r)", "paste(res,%s)(r)", "paste(stdres,%s)(r)", "%s[loCI](r)", "%s[hiCI](r)") msdesc <- c(paste0(if(use.weights) "weighted " 
else NULL, "sample variance of %s from simulations"), "raw residual", "standardised residual", loCI.name, hiCI.name) } if(do.pwrong) { # estimate the p-value for the 'wrong test' if(cheat) { pwrong <- precomputed$pwrong do.pwrong <- !is.null(pwrong) && !badprobability(pwrong, FALSE) } else { is.signif <- (simvals < lo) | (simvals > hi) is.signif.somewhere <- apply(is.signif, 2, any) pwrong <- sum(is.signif.somewhere)/nsim } } } ) ############ WRAP UP ######################### if(use.theory) { # reference is computed curve `theo' reflabl <- "%s[theo](r)" refdesc <- paste0("theoretical value of %s", if(csr) " for CSR" else NULL) } else { # reference is sample mean of simulations reflabl <- "bar(%s)(r)" refdesc <- paste0(if(use.weights) "weighted " else NULL, "sample mean of %s from simulations") } result <- fv(results, argu="r", ylab=atr$ylab, valu="obs", fmla= deparse(. ~ r), alim=atr$alim, labl=c("r", "%s[obs](r)", reflabl, "%s[lo](r)", "%s[hi](r)"), desc=c("distance argument r", "observed value of %s for data pattern", refdesc, lo.name, hi.name), fname=atr$fname, yexp =atr$yexp) # columns to be plotted by default dotty <- c("obs", if(use.theory) "theo" else "mmean", "hi", "lo") if(type == "variance") { # add more stuff result <- bind.fv(result, morestuff, mslabl, msdesc) if(use.theory) dotty <- c(dotty, "mmean") } fvnames(result, ".") <- dotty fvnames(result, ".s") <- shadenames unitname(result) <- unitname(funX) class(result) <- c("envelope", class(result)) # tack on envelope information attr(result, "einfo") <- list(global = (type =="global"), csr = csr, use.theory = use.theory, csr.theo = csr && use.theory, simtype = "funs", nrank = nrank, nsim = nsim, VARIANCE = (type == "variance"), nSD = nSD, valname = NULL, dual = dual, nsim = nsim, nsim2 = nsim.mean, Yname = Yname, do.pwrong=do.pwrong, use.weights=use.weights) # tack on saved functions if(savefuns) { alldata <- cbind(rvals, simvals) simnames <- paste("sim", 1:nsim, sep="") colnames(alldata) <- c("r", 
simnames) alldata <- as.data.frame(alldata) SimFuns <- fv(alldata, argu="r", ylab=atr$ylab, valu="sim1", fmla= deparse(. ~ r), alim=atr$alim, labl=names(alldata), desc=c("distance argument r", paste("Simulation ", 1:nsim, sep=""))) fvnames(SimFuns, ".") <- simnames attr(result, "simfuns") <- SimFuns } if(do.pwrong) attr(result, "pwrong") <- pwrong if(use.weights) attr(result, "weights") <- weights return(result) } envelope.envelope <- function(Y, fun=NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) { Yname <- short.deparse(substitute(Y)) stopifnot(inherits(Y, "envelope")) Yorig <- Y csr <- list(...)$internal$csr if(is.null(csr)) csr <- attr(Y, "einfo")$csr X <- attr(Y, "datapattern") sf <- attr(Y, "simfuns") sp <- attr(Y, "simpatterns") wt <- attr(Y, "weights") if(is.null(fun) && is.null(sf)) { # No simulated functions - must compute them from simulated patterns if(is.null(sp)) stop(paste("Cannot compute envelope:", "Y does not contain simulated functions", "(was not generated with savefuns=TRUE)", "and does not contain simulated patterns", "(was not generated with savepatterns=TRUE)")) # set default fun=Kest fun <- Kest } if(!is.null(fun)) { # apply new function # point patterns are required if(is.null(sp)) stop(paste("Object Y does not contain simulated point patterns", "(attribute", dQuote("simpatterns"), ");", "cannot apply a new", sQuote("fun"))) if(is.null(X)) stop(paste("Cannot apply a new", sQuote("fun"), "; object Y generated by an older version of spatstat")) result <- do.call(envelope, resolve.defaults(list(Y=X, fun=fun, simulate=sp), list(...), list(transform=transform, Yname=Yname, nsim=length(sp), weights=wt), .StripNull=TRUE)) } else { # compute new envelope with existing simulated functions if(is.null(sf)) stop(paste("Y does not contain a", dQuote("simfuns"), "attribute", "(it was not generated with savefuns=TRUE)")) if(!is.null(transform)) { # Apply transformation to Y and sf stopifnot(is.expression(transform)) cc <- 
dotexpr.to.call(transform, "Y", "eval.fv") Y <- eval(cc) cc <- dotexpr.to.call(transform, "sf", "eval.fv") sf <- eval(cc) } # extract simulated function values df <- as.data.frame(sf) rname <- fvnames(sf, ".x") df <- df[, (names(df) != rname)] # interface with 'envelope.matrix' etype <- if(global) "global" else if(VARIANCE) "variance" else "pointwise" result <- do.call(envelope.matrix, resolve.defaults(list(Y=as.matrix(df)), list(...), list(type=etype, csr=csr, funX=Y, Yname=Yname, weights=wt), .StripNull=TRUE)) } if(!is.null(transform)) { # post-process labels labl <- attr(result, "labl") dnames <- colnames(result) dnames <- dnames[dnames %in% fvnames(result, ".")] # expand "." ud <- as.call(lapply(c("cbind", dnames), as.name)) expandtransform <- eval(substitute(substitute(tr, list(.=ud)), list(tr=transform[[1]]))) # compute new labels attr(result, "fname") <- attr(Yorig, "fname") mathlabl <- as.character(fvlegend(result, expandtransform)) # match labels to columns evars <- all.vars(expandtransform) used.dotnames <- evars[evars %in% dnames] mathmap <- match(colnames(result), used.dotnames) okmath <- !is.na(mathmap) # update appropriate labels labl[okmath] <- mathlabl[mathmap[okmath]] attr(result, "labl") <- labl } # Tack on envelope info copyacross <- c("Yname", "csr.theo", "simtype") attr(result, "einfo")[copyacross] <- attr(Yorig, "einfo")[copyacross] attr(result, "einfo")$csr <- csr # Save data return(result) } pool <- function(...) { UseMethod("pool") } pool.envelope <- function(..., savefuns=FALSE, savepatterns=FALSE) { Yname <- short.deparse(sys.call()) if(nchar(Yname) > 60) Yname <- paste(substr(Yname, 1, 40), "[..]") Elist <- unname(list(...)) nE <- length(Elist) if(nE == 0) return(NULL) # ........ validate envelopes ..................... 
# All arguments must be envelopes
  notenv <- !unlist(lapply(Elist, inherits, what="envelope"))
  if(any(notenv)) {
    n <- sum(notenv)
    why <- paste(ngettext(n, "Argument", "Arguments"),
                 commasep(which(notenv)),
                 ngettext(n, "does not", "do not"),
                 "belong to the class", dQuote("envelope"))
    stop(why)
  }
  # Only one envelope?
  if(nE == 1) return(Elist[[1]])
  # Envelopes must be compatible
  ok <- do.call(compatible, Elist)
  if(!ok)
    stop("Envelopes are not compatible")
  # ... reconcile parameters in different envelopes .......
  eilist <- lapply(Elist, attr, which="einfo")
  global <- resolveEinfo(eilist, "global", FALSE)
  VARIANCE <- resolveEinfo(eilist, "VARIANCE", FALSE)
  simtype <- resolveEinfo(eilist, "simtype", "funs",
      "Envelopes were generated using different types of simulation")
  csr <- resolveEinfo(eilist, "csr", FALSE, NULL)
  csr.theo <- resolveEinfo(eilist, "csr.theo", FALSE, NULL)
  use.weights <- resolveEinfo(eilist, "use.weights" , FALSE,
      "Weights were used in some, but not all, envelopes: they will be ignored")
  #
  weights <- if(use.weights) unlist(lapply(Elist, attr, which="weights")) else NULL
  type <- if(global) "global" else if(VARIANCE) "variance" else "pointwise"
  # ........ validate saved functions .....................
  if(savefuns || !VARIANCE) {
    # Individual simulated functions are required
    SFlist <- lapply(Elist, attr, which="simfuns")
    isnul <- unlist(lapply(SFlist, is.null))
    if(any(isnul)) {
      n <- sum(isnul)
      comply <- if(!VARIANCE) "compute the envelope:" else
                "save the simulated functions:"
      why <- paste("Cannot", comply,
                   ngettext(n, "argument", "arguments"),
                   commasep(which(isnul)),
                   ngettext(n, "does not", "do not"),
                   "contain a", dQuote("simfuns"), "attribute",
                   "(not generated with savefuns=TRUE)")
      stop(why)
    }
    # Simulated functions must be the same function
    fnames <- unique(unlist(lapply(SFlist, attr, which="fname")))
    if(length(fnames) > 1)
      stop(paste("Envelope objects contain values",
                 "of different functions:",
                 commasep(sQuote(fnames))))
    # vectors of r values must be identical
    rlist <- lapply(SFlist, function(z) { with(z, .x) })
    rvals <- rlist[[1]]
    samer <- unlist(lapply(rlist, identical, y=rvals))
    if(!all(samer))
      stop(paste("Simulated function values are not compatible",
                 "(different values of function argument)"))
  }
  # compute pooled envelope
  switch(type,
         global = ,
         pointwise = {
           # assemble function values into one matrix
           getsimvals <- function(z) {
             rname <- fvnames(z, ".x")
             as.matrix(as.data.frame(z)[, names(z) != rname])
           }
           matlist <- lapply(SFlist, getsimvals)
           bigmat <- do.call(cbind, matlist)
           # ..... ready to compute
           result <- envelope(bigmat,
                              funX=Elist[[1]],
                              type=type, csr=csr,
                              Yname=Yname,
                              weights=weights,
                              savefuns=savefuns)
         },
         variance = {
           # Pool sample means and variances
           nsims <- unlist(lapply(eilist, getElement, name="nsim"))
           mmeans <- lapply(Elist, getElement, name="mmean")
           vars <- lapply(Elist, getElement, name="var")
           mmeans <- matrix(unlist(mmeans), ncol=nE)
           vars <- matrix(unlist(vars), ncol=nE)
           # weights and denominators for pooled mean and variance
           if(!use.weights) {
             w.mean <- nsims
             d.mean <- sum(nsims)
             w.var <- nsims - 1
             d.var <- sum(nsims) - 1
           } else {
             weightlist <- lapply(Elist, attr, which="weights")
             w.mean <- unlist(lapply(weightlist, sum))
             d.mean <- sum(w.mean)
             ssw <- unlist(lapply(weightlist,
                                  function(x) {sum((x/sum(x))^2)}))
             w.var <- w.mean * (1 - ssw)
             d.var <- d.mean * (1 - sum(ssw))
           }
           poolmmean <- as.numeric(mmeans %*% matrix(w.mean/d.mean, ncol=1))
           # pooled variance = within-sample + between-sample components
           within <- vars %*% matrix(w.var, ncol=1)
           between <- ((mmeans - poolmmean[])^2) %*% matrix(w.mean, ncol=1)
           poolvar <- as.numeric((within + between)/d.var)
           # feed precomputed data to envelope.matrix
           pc <- list(Ef=poolmmean[], varf=poolvar[])
           nsim <- sum(nsims)
           result <- envelope.matrix(NULL, funX=Elist[[1]],
                                     type=type, csr=csr, Yname=Yname,
                                     weights=weights, savefuns=savefuns,
                                     nsim=nsim, precomputed=pc)
         })
  # ..............saved patterns .....................
if(savepatterns) { SPlist <- lapply(Elist, attr, which="simpatterns") isnul <- unlist(lapply(SPlist, is.null)) if(any(isnul)) { n <- sum(isnul) why <- paste("Cannot save the simulated patterns:", ngettext(n, "argument", "arguments"), commasep(which(isnul)), ngettext(n, "does not", "do not"), "contain a", dQuote("simpatterns"), "attribute", "(not generated with savepatterns=TRUE)") warning(why) } else { attr(result, "simpatterns") <- Reduce(SPlist, append) } } dotnames <- lapply(Elist, fvnames, a=".") dn <- dotnames[[1]] if(all(unlist(lapply(dotnames, identical, y=dn)))) fvnames(result, ".") <- dn shadenames <- lapply(Elist, fvnames, a=".s") sh <- shadenames[[1]] if(all(unlist(lapply(shadenames, identical, y=sh)))) fvnames(result, ".s") <- sh return(result) } # resolve matching entries in different envelope objects # x is a list of envelope info objects resolveEinfo <- function(x, what, fallback, warn) { y <- unique(unlist(lapply(x, getElement, name=what))) if(length(y) == 1) return(y) if(missing(warn)) warn <- paste("Envelopes were generated using different values", "of argument", paste(sQuote(what), ";", sep=""), "reverting to default value") if(!is.null(warn)) warning(warn, call.=FALSE) return(fallback) } spatstat/R/listof.R0000755000176000001440000000713212237642727014013 0ustar ripleyusers# # listof.R # # Methods for class `listof' # # plot.listof is defined in plot.splitppp.R # "[<-.listof" <- function(x, i, value) { # invoke list method class(x) <- "list" x[i] <- value # then make it a 'listof' object too class(x) <- c("listof", class(x)) x } summary.listof <- function(object, ...) { x <- lapply(object, summary, ...) class(x) <- "summary.listof" x } print.summary.listof <- function(x, ...) { class(x) <- "listof" print(x) invisible(NULL) } listof <- function(...) { stuff <- list(...) 
class(stuff) <- c("listof", class(stuff)) return(stuff) } as.listof <- function(x) { if(!is.list(x)) x <- list(x) if(!inherits(x, "listof")) class(x) <- c("listof", class(x)) return(x) } contour.listof <- function(x, ...) { xname <- short.deparse(substitute(x)) do.call("plot.listof", resolve.defaults(list(x=x, plotcommand="contour"), list(...), list(main=xname))) } image.listof <- local({ image.listof <- function(x, ..., equal.ribbon = FALSE) { xname <- short.deparse(substitute(x)) if(equal.ribbon && !all(unlist(lapply(x, is.im)))) { warning("equal.ribbon is only implemented for objects of class 'im'") equal.ribbon <- FALSE } if(equal.ribbon) imagecommon(x, ..., xname=xname) else do.call("plot.listof", resolve.defaults(list(x=x, plotcommand="image"), list(...), list(main=xname))) } imagecommon <- function(x, ..., xname, zlim=NULL, ribbon=TRUE, ribside=c("right", "left", "bottom", "top"), ribsep=NULL, ribwid=0.5, ribn=1024, ribscale=NULL, ribargs=list()) { if(missing(xname)) xname <- short.deparse(substitute(x)) ribside <- match.arg(ribside) stopifnot(is.list(ribargs)) if(!is.null(ribsep)) warning("Argument ribsep is not yet implemented for image arrays") # determine range of values if(is.null(zlim)) zlim <- range(unlist(lapply(x, range))) # determine common colour map imcolmap <- plot.im(x[[1]], preponly=TRUE, zlim=zlim, ..., ribn=ribn) # plot ribbon? 
if(!ribbon) { ribadorn <- list() } else { # determine plot arguments for colour ribbon vertical <- (ribside %in% c("right", "left")) scaleinfo <- if(!is.null(ribscale)) list(labelmap=ribscale) else list() sidecode <- match(ribside, c("bottom", "left", "top", "right")) ribstuff <- c(list(x=imcolmap, main="", vertical=vertical), ribargs, scaleinfo, list(side=sidecode)) ribmar <- switch(ribside, left = c(1, 2, 1, 0), right = c(1, 0, 1, 2), bottom = c(2, 1, 0, 1), top = c(0, 1, 2, 1)) # function executed to plot colour ribbon do.ribbon <- function() { opa <- par(mar=ribmar) do.call("plot", ribstuff) par(opa) } # encoded as 'adorn' argument ribadorn <- list(adorn=do.ribbon, adorn.size=ribwid) names(ribadorn)[1] <- paste("adorn", ribside, sep=".") } # do.call("plot.listof", resolve.defaults(list(x=x, plotcommand="image"), list(...), list(main=xname), list(col=imcolmap, zlim=zlim, ribbon=FALSE), ribadorn)) } image.listof }) spatstat/R/linalg.R0000755000176000001440000001021512237642727013755 0ustar ripleyusers# # linalg.R # # $Revision: 1.7 $ $Date: 2013/09/25 05:55:41 $ # sumouter <- function(x, w=NULL) { stopifnot(is.matrix(x)) p <- ncol(x) n <- nrow(x) nama <- colnames(x) # transpose (compute outer squares of columns) tx <- t(x) ok <- apply(is.finite(tx), 2, all) if(!is.null(w)) { if(length(w) != n) stop(paste("The length of w does not match the number of rows of x", "\t(", length(w), "!=", n, ")")) ok <- ok & is.finite(w) } if(!all(ok)) { tx <- tx[ , ok, drop=FALSE] if(!is.null(w)) w <- w[ok] } DUP <- spatstat.options("dupC") if(is.null(w)) { z <- .C("Csumouter", x=as.double(tx), n=as.integer(n), p=as.integer(p), y=as.double(numeric(p * p)), DUP=DUP) # PACKAGE="spatstat") } else { z <- .C("Cwsumouter", x=as.double(tx), n=as.integer(n), p=as.integer(p), w=as.double(w), y=as.double(numeric(p * p)), DUP=DUP) # PACKAGE="spatstat") } out <- matrix(z$y, p, p) if(!is.null(nama)) dimnames(out) <- list(nama, nama) return(out) } quadform <- function(x, v) { 
stopifnot(is.matrix(x)) p <- ncol(x) n <- nrow(x) nama <- rownames(x) # transpose (evaluate quadratic form for each column) tx <- t(x) ok <- apply(is.finite(tx), 2, all) allok <- all(ok) if(!allok) { tx <- tx[ , ok, drop=FALSE] n <- ncol(tx) } if(missing(v)) { v <- diag(rep.int(1, p)) } else { stopifnot(is.matrix(v)) if(nrow(v) != ncol(v)) stop("v should be a square matrix") stopifnot(ncol(x) == nrow(v)) } DUP <- spatstat.options("dupC") z <- .C("Cquadform", x=as.double(tx), n=as.integer(n), p=as.integer(p), v=as.double(v), y=as.double(numeric(n)), DUP=DUP) # PACKAGE="spatstat") result <- z$y names(result) <- nama[ok] if(allok) return(result) fullresult <- rep.int(NA_real_, length(ok)) fullresult[ok] <- result names(fullresult) <- nama return(fullresult) } bilinearform <- function(x, v, y) { stopifnot(is.matrix(x)) stopifnot(is.matrix(y)) stopifnot(identical(dim(x), dim(y))) p <- ncol(x) n <- nrow(x) nama <- rownames(x) # transpose (evaluate quadratic form for each column) tx <- t(x) ty <- t(y) ok <- apply(is.finite(tx), 2, all) & apply(is.finite(ty), 2, all) allok <- all(ok) if(!allok) { tx <- tx[ , ok, drop=FALSE] ty <- ty[ , ok, drop=FALSE] n <- ncol(tx) } if(missing(v)) { v <- diag(rep.int(1, p)) } else { stopifnot(is.matrix(v)) if(nrow(v) != ncol(v)) stop("v should be a square matrix") stopifnot(ncol(x) == nrow(v)) } DUP <- spatstat.options("dupC") z <- .C("Cbiform", x=as.double(tx), y=as.double(ty), n=as.integer(n), p=as.integer(p), v=as.double(v), z=as.double(numeric(n)), DUP=DUP) result <- z$z names(result) <- nama[ok] if(allok) return(result) fullresult <- rep.int(NA_real_, length(ok)) fullresult[ok] <- result names(fullresult) <- nama return(fullresult) } sumsymouter <- function(x, w=NULL) { # computes the sum of outer(x[,i,j], x[,j,i]) * w[i,j] over all pairs i != j stopifnot(is.array(x) && length(dim(x)) == 3) if(dim(x)[2] != dim(x)[3]) stop("The second and third dimensions of x should be equal") if(!is.null(w)) { stopifnot(is.matrix(w)) if(!all(dim(w) 
== dim(x)[-1])) stop("Dimensions of w should match the second and third dimensions of x") } p <- dim(x)[1] n <- dim(x)[2] DUP <- spatstat.options("dupC") if(is.null(w)) { zz <- .C("Csumsymouter", x = as.double(x), p = as.integer(p), n = as.integer(n), y = as.double(numeric(p * p)), DUP = DUP) # PACKAGE = "spatstat") } else { zz <- .C("Cwsumsymouter", x = as.double(x), w = as.double(w), p = as.integer(p), n = as.integer(n), y = as.double(numeric(p * p)), DUP = DUP) # PACKAGE = "spatstat") } matrix(zz$y, p, p) } spatstat/R/lpp.R0000755000176000001440000002144412243051473013275 0ustar ripleyusers# # lpp.R # # $Revision: 1.22 $ $Date: 2013/10/20 00:51:27 $ # # Class "lpp" of point patterns on linear networks lpp <- function(X, L) { stopifnot(inherits(L, "linnet")) localnames <- c("seg", "tp") if(checkfields(X, c("x", "y", localnames))) { # includes spatial and local coordinates X <- as.data.frame(X) # local coords lo <- X[ , localnames, drop=FALSE] # spatial coords and marks df <- X[, !(names(X) %in% localnames), drop=FALSE] # validate local coordinates if(nrow(X) > 0) { nedge <- nobjects(as.psp(L)) if(with(lo, any(seg < 1 || seg > nedge))) stop("Segment index coordinate 'seg' exceeds bounds") if(with(lo, any(tp < 0 || tp > 1))) stop("Local coordinate 'tp' outside [0,1]") } } else { # local coordinates must be computed if(!is.ppp(X)) X <- as.ppp(X, W=L$window) # project to segment pro <- project2segment(X, as.psp(L)) # projected points (spatial coordinates and marks) df <- as.data.frame(pro$Xproj) # local coordinates lo <- data.frame(seg=pro$mapXY, tp=pro$tp) } # combine spatial, local, marks nmark <- ncol(df) - 2 if(nmark == 0) { df <- cbind(df, lo) ctype <- c(rep("s", 2), rep("l", 2)) } else { df <- cbind(df[,1:2], lo, df[, -(1:2), drop=FALSE]) ctype <- c(rep("s", 2), rep("l", 2), rep("m", nmark)) } out <- ppx(data=df, domain=L, coord.type=ctype) class(out) <- c("lpp", class(out)) return(out) } print.lpp <- function(x, ...) 
{ stopifnot(inherits(x, "lpp")) cat("Point pattern on linear network\n") sd <- summary(x$data) np <- sd$ncases nama <- sd$col.names cat(paste(np, ngettext(np, "point", "points"), "\n")) if(any(iscoord <- (x$ctype == "spatial"))) cat(paste(sum(iscoord), "-dimensional space coordinates ", paren(paste(nama[iscoord], collapse=",")), "\n", sep="")) if(any(istime <- (x$ctype == "temporal"))) cat(paste(sum(istime), "-dimensional time coordinates ", paren(paste(nama[istime], collapse=",")), "\n", sep="")) if(any(islocal <- (x$ctype == "local"))) cat(paste(sum(islocal), ngettext(sum(islocal), "column", "columns"), "of local coordinates:", commasep(sQuote(nama[islocal])), "\n")) if(any(ismark <- (x$ctype == "mark"))) cat(paste(sum(ismark), ngettext(sum(ismark), "column", "columns"), "of marks:", commasep(sQuote(nama[ismark])), "\n")) print(x$domain, ...) invisible(NULL) } # plot.lpp removed: plot.ppx sufficient summary.lpp <- function(object, ...) { stopifnot(inherits(object, "lpp")) L <- object$domain npoints <- nrow(object$data) totlen <- sum(lengths.psp(L$lines)) marx <- marks(object) summarx <- if(is.null(marx)) NULL else summary(marx) out <- list(npoints=npoints, totlength=totlen, intensity=npoints/totlen, nvert=L$vertices$n, nedge=L$lines$n, unitinfo=summary(unitname(L)), marks=summarx) class(out) <- "summary.lpp" return(out) } print.summary.lpp <- function(x, ...) { cat("Point pattern on linear network\n") cat(paste(x$npoints, "points\n")) cat(paste("Linear network with", x$nvert, "vertices and", x$nedge, "edges\n")) u <- x$unitinfo cat(paste("Total edge length", x$totlength, u$plural, u$explain, "\n")) cat(paste("Average intensity", x$intensity, "points per", if(u$vanilla) "unit length" else u$singular, "\n")) if(!is.null(x$marks)) { cat("Marks:\n") print(x$marks) } invisible(NULL) } intensity.lpp <- function(X, ...) 
{
  # Average intensity: number of points per unit length of network.
  # For multitype patterns, a table of intensities, one per mark level.
  len <- sum(lengths.psp(as.psp(as.linnet(X))))
  if(is.multitype(X)) table(marks(X))/len else npoints(X)/len
}

is.lpp <- function(x) {
  # Test whether x is a point pattern on a linear network.
  inherits(x, "lpp")
}

as.lpp <- function(x, y=NULL, seg=NULL, tp=NULL, ...,
                   marks=NULL, L=NULL, check=FALSE) {
  # Convert data of various kinds to an "lpp" object.
  # Accepts an existing lpp, a ppp plus a network L, or raw
  # coordinates (x, y) with optional local coordinates (seg, tp).
  nomore <- is.null(y) && is.null(seg) && is.null(tp)
  if(inherits(x, "lpp") && nomore) {
    X <- x
  } else {
    if(!inherits(L, "linnet"))
      stop("L should be a linear network")
    if(is.ppp(x) && nomore) {
      X <- lpp(x, L)
    } else {
      xy <- xy.coords(x,y)[c("x", "y")]
      if(!is.null(seg) && !is.null(tp)) {
        # add segment map information
        xy <- append(xy, list(seg=seg, tp=tp))
      } else {
        # convert to ppp, typically suppressing check mechanism
        xy <- as.ppp(xy, W=as.owin(L), check=check)
      }
      X <- lpp(xy, L)
    }
  }
  if(!is.null(marks))
    marks(X) <- marks
  return(X)
}

as.ppp.lpp <- function(X, ..., fatal=TRUE) {
  # Convert an lpp to an ordinary planar point pattern,
  # keeping only the spatial coordinates and the marks.
  verifyclass(X, "lpp", fatal=fatal)
  L <- X$domain
  Y <- as.ppp(coords(X, temporal=FALSE, local=FALSE),
              W=L$window, check=FALSE)
  marks(Y) <- marks(X)
  return(Y)
}

as.owin.lpp <- function(W, ..., fatal=TRUE) {
  # Extract the enclosing spatial window of an lpp.
  as.owin(as.ppp(W, ..., fatal=fatal))
}

as.linnet.lpp <- function(X, ..., fatal=TRUE) {
  # Extract the underlying linear network of an lpp.
  verifyclass(X, "lpp", fatal=fatal)
  X$domain
}

"[.lpp" <- function (x, i, ...)
{
  # invoke [.ppx
  # NOTE(review): this definition of "[.lpp" is immediately superseded
  # by the fuller definition "[.lpp" <- function(x, i, j, ...) below;
  # it is effectively dead code.
  y <- NextMethod("[")
  class(y) <- c("lpp", class(y))
  return(y)
}

unitname.lpp <- function(x) {
  # Unit of length, taken from the underlying network.
  u <- unitname(x$domain)
  return(u)
}

"unitname<-.lpp" <- function(x, value) {
  # Assign the unit of length by updating the underlying network.
  w <- x$domain
  unitname(w) <- value
  x$domain <- w
  return(x)
}

"marks<-.lpp" <- function(x, ..., value) {
  # Assign marks via the ppx method, restoring the "lpp" class.
  Y <- NextMethod("marks<-")
  class(Y) <- c("lpp", class(Y))
  Y
}

unmark.lpp <- function(X) {
  # Remove marks via the ppx method, restoring the "lpp" class.
  Y <- NextMethod("unmark")
  class(Y) <- c("lpp", class(Y))
  Y
}

as.psp.lpp <- function(x, ..., fatal=TRUE){
  # Extract the network's line segments as a psp object.
  verifyclass(x, "lpp", fatal=fatal)
  return(x$domain$lines)
}

local2lpp <- function(L, seg, tp, X=NULL) {
  # Build an lpp from local coordinates (seg, tp) on network L.
  # If X is given, its $x, $y are used as the spatial coordinates;
  # otherwise they are computed from the segment endpoints.
  stopifnot(inherits(L, "linnet"))
  if(is.null(X)) {
    # map to (x,y)
    Ldf <- as.data.frame(L$lines)
    dx <- with(Ldf, x1-x0)
    dy <- with(Ldf, y1-y0)
    x <- with(Ldf, x0[seg] + tp * dx[seg])
    y <- with(Ldf, y0[seg] + tp * dy[seg])
  } else {
    x <- X$x
    y <- X$y
  }
  # compile into data frame
  data <- data.frame(x=x, y=y, seg=seg, tp=tp)
  ctype <- c("s", "s", "l", "l")
  out <- ppx(data=data, domain=L, coord.type=ctype)
  class(out) <- c("lpp", class(out))
  return(out)
}

####################################################
# subset extractor
####################################################

"[.lpp" <- function (x, i, j, ...)
{
  # Subset extractor for "lpp".
  # i: row-type index selecting points, or a window (owin), in which
  #    case it is treated as the spatial restriction 'j'.
  # j: a window (owin); the pattern and its network are clipped to it.
  if(!missing(i) && !is.null(i)) {
    if(is.owin(i)) {
      # spatial domain: call code for 'j'
      xi <- x[,i]
    } else {
      # usual row-type index
      da <- x$data
      daij <- da[i, , drop=FALSE]
      xi <- ppx(data=daij, domain=x$domain, coord.type=as.character(x$ctype))
      class(xi) <- c("lpp", class(xi))
    }
    x <- xi
  }
  if(missing(j) || is.null(j))
    return(x)
  stopifnot(is.owin(j))
  W <- j
  L <- x$domain
  da <- x$data
  # Find vertices that lie inside 'j'
  okvert <- inside.owin(L$vertices, w=W)
  # find segments whose endpoints both lie inside the window W
  okedge <- okvert[L$from] & okvert[L$to]
  # assign new serial numbers to vertices, and recode
  newserial <- cumsum(okvert)
  newfrom <- newserial[L$from[okedge]]
  newto <- newserial[L$to[okedge]]
  # make new linear network
  Lnew <- linnet(L$vertices[W], edges=cbind(newfrom, newto))
  # find data points that lie on accepted segments
  coo <- coords(x)
  okxy <- okedge[coo$seg]
  cook <- coo[okxy,]
  # make new lpp object
  dfnew <- data.frame(x=cook$x, y=cook$y, seg=cook$seg, tp=cook$tp)
  ctype <- c(rep("spatial", 2), rep("local", 2))
  xj <- ppx(data=dfnew, domain=Lnew, coord.type=ctype)
  class(xj) <- c("lpp", class(xj))
  marks(xj) <- marks(x[okxy])
  return(xj)
}

####################################################
# affine transformations
####################################################

scalardilate.lpp <- function(X, f, ...) {
  # Dilate the pattern and its network by a single scale factor f > 0.
  trap.extra.arguments(..., .Context="In scalardilate(X,f)")
  check.1.real(f, "In scalardilate(X,f)")
  stopifnot(is.finite(f) && f > 0)
  Y <- X
  Y$data$x <- f * X$data$x
  Y$data$y <- f * X$data$y
  Y$domain <- scalardilate(X$domain, f)
  return(Y)
}

affine.lpp <- function(X, mat=diag(c(1,1)), vec=c(0,0), ...) {
  # Apply the affine transformation x -> mat %*% x + vec
  # to the point coordinates and to the network.
  verifyclass(X, "lpp")
  Y <- X
  Y$data[, c("x","y")] <- affinexy(X$data[, c("x","y")], mat=mat, vec=vec)
  Y$domain <- affine(X$domain, mat=mat, vec=vec, ...)
  return(Y)
}

shift.lpp <- function(X, ...) {
  # Translate the pattern; the shift vector is determined by the
  # shift method for the network and then applied to the points.
  verifyclass(X, "lpp")
  Y <- X
  Y$domain <- shift(X$domain, ...)
vec <- attr(Y$domain, "lastshift") Y$data[, c("x","y")] <- shiftxy(X$data[, c("x","y")], vec=vec) # tack on shift vector attr(Y, "lastshift") <- vec return(Y) } rotate.lpp <- function(X, angle=pi/2, ...) { verifyclass(X, "lpp") Y <- X Y <- X Y$data[, c("x","y")] <- rotxy(X$data[, c("x","y")], angle=angle) Y$domain <- rotate(X$domain, angle=angle, ...) return(Y) } rescale.lpp <- function(X, s) { if(missing(s)) s <- 1/unitname(X)$multiplier Y <- scalardilate(X, f=1/s) unitname(Y) <- rescale(unitname(X), s) return(Y) } spatstat/R/pickoption.R0000755000176000001440000000223412237642727014670 0ustar ripleyusers# # pickoption.R # # $Revision: 1.5 $ $Date: 2013/04/25 06:37:43 $ # pickoption <- function(what="option", key, keymap, ..., exact=FALSE, list.on.err=TRUE, die=TRUE, multi=FALSE) { keyname <- short.deparse(substitute(key)) if(!is.character(key)) stop(paste(keyname, "must be a character string", if(multi) "or strings" else NULL)) if(length(key) == 0) stop(paste("Argument", sQuote(keyname), "has length zero")) key <- unique(key) if(!multi && length(key) > 1) stop(paste("Must specify only one", what, sQuote(keyname))) id <- if(exact) match(key, names(keymap), nomatch=NA) else pmatch(key, names(keymap), nomatch=NA) if(any(nbg <- is.na(id))) { # no match whinge <- paste("unrecognised", what, paste(dQuote(key[nbg]), collapse=", "), "in argument", sQuote(keyname)) if(list.on.err) { cat(paste(whinge, "\n", "Options are:"), paste(dQuote(names(keymap)), collapse=","), "\n") } if(die) stop(whinge, call.=FALSE) else return(NULL) } key <- keymap[id] names(key) <- NULL return(key) } spatstat/R/multipair.util.R0000755000176000001440000000166412237642727015501 0ustar ripleyusers# # # multipair.util.R # # $Revision: 1.12 $ $Date: 2013/04/25 06:37:43 $ # # Utilities for multitype pairwise interactions # # ------------------------------------------------------------------- # MultiPair.checkmatrix <- function(mat, n, matname, naok=TRUE, zerook=TRUE) { if(missing(matname)) matname <- 
short.deparse(substitute(mat)) if(!is.matrix(mat)) stop(paste(matname, "must be a matrix")) if(any(dim(mat) != rep.int(n,2))) stop(paste(matname, "must be a square matrix,", "of size", n, "x", n)) isna <- is.na(mat) if(!naok && any(isna)) stop(paste("NA entries not allowed in", matname)) if(any(mat[!isna] < 0)) stop(paste("Negative entries not allowed in", matname)) if(!zerook && any(mat[!isna] == 0)) stop(paste("Zero entries not allowed in", matname)) if(!isSymmetric(mat)) stop(paste(matname, "must be a symmetric matrix")) } spatstat/vignettes/0000755000176000001440000000000012252324024014151 5ustar ripleyusersspatstat/vignettes/shapefiles.Rnw0000755000176000001440000004135012237642736017012 0ustar ripleyusers\documentclass[twoside,11pt]{article} % \VignetteIndexEntry{Handling shapefiles in the spatstat package} \SweaveOpts{eps=TRUE} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \usepackage[colorlinks=true,urlcolor=blue]{hyperref} \usepackage{color} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \begin{document} %\bibliographystyle{plain} \thispagestyle{empty} <>= library(spatstat) options(useFancyQuotes=FALSE) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") @ \title{Handling shapefiles in the \texttt{spatstat} package} \author{Adrian Baddeley} \date{ \Sexpr{sdate} \\ \pkg{spatstat} version \texttt{\Sexpr{sversion}} } \maketitle This vignette explains how to read data into the \pkg{spatstat} package from files in the popular `shapefile' format. This vignette is part of the documentation included in \pkg{spatstat} version \texttt{\Sexpr{sversion}}. The information applies to \pkg{spatstat} versions \texttt{1.18-0} and above. 
\section{Shapefiles} A shapefile represents a list of spatial objects --- a list of points, a list of lines, or a list of polygonal regions --- and each object in the list may have additional variables attached to it. A dataset stored in shapefile format is actually stored in a collection of text files, for example \begin{verbatim} mydata.shp mydata.prj mydata.sbn mydata.dbf \end{verbatim} which all have the same base name \texttt{mydata} but different file extensions. To refer to this collection you will always use the filename with the extension \texttt{shp}, for example \texttt{mydata.shp}. \section{Helper packages} \label{S:helpers} We'll use two other packages% \footnote{In previous versions of \pkg{spatstat}, the package \pkg{gpclib} was also needed for some tasks. This is no longer required.} to handle shapefile data. The \pkg{maptools} package is designed specifically for handling file formats for spatial data. It contains facilities for reading and writing files in shapefile format. The \pkg{sp} package supports a standard set of spatial data types in \R. These standard data types can be handled by many other packages, so it is useful to convert your spatial data into one of the data types supported by \pkg{sp}. \section{How to read shapefiles into \pkg{spatstat}} To read shapefile data into \pkg{spatstat}, you follow two steps: \begin{enumerate} \item using the facilities of \pkg{maptools}, read the shapefiles and store the data in one of the standard formats supported by \pkg{sp}. \item convert the \pkg{sp} data type into one of the data types supported by \pkg{spatstat}. \end{enumerate} \subsection{Read shapefiles using \pkg{maptools}} Here's how to read shapefile data. \begin{enumerate} \item ensure that the package \pkg{maptools} is installed. You will need version \texttt{0.7-16} or later. 
\item start R and load the package: <>= library(maptools) @ \item read the shapefile into an object in the \pkg{sp} package using \texttt{readShapeSpatial}, for example <>= x <- readShapeSpatial("mydata.shp") @ \item To find out what kind of spatial objects are represented by the dataset, inspect its class: <>= class(x) @ The class may be either \texttt{SpatialPoints} indicating a point pattern, \texttt{SpatialLines} indicating a list of polygonal lines, or \texttt{SpatialPolygons} indicating a list of polygons. It may also be \texttt{SpatialPointsDataFrame}, \texttt{SpatialLinesDataFrame} or \texttt{SpatialPolygonsDataFrame} indicating that, in addition to the spatial objects, there is a data frame of additional variables. \end{enumerate} Here are some examples, using the example shapefiles supplied in the \pkg{maptools} package itself. % fake data because we don't want spatstat to depend on maptools <>= baltim <- columbus <- fylk <- list() class(baltim) <- "SpatialPointsDataFrame" class(columbus) <- "SpatialPolygonsDataFrame" class(fylk) <- "SpatialLinesDataFrame" @ <>= setwd(system.file("shapes", package="maptools")) baltim <- readShapeSpatial("baltim.shp") columbus <- readShapeSpatial("columbus.shp") fylk <- readShapeSpatial("fylk-val.shp") @ <<>>= class(baltim) class(columbus) class(fylk) @ \subsection{Convert data to \pkg{spatstat} format} To convert the dataset to an object in the \pkg{spatstat} package, the procedure depends on the type of data, as explained below. \subsubsection{Objects of class \texttt{SpatialPoints}} An object \texttt{x} of class \texttt{SpatialPoints} represents a spatial point pattern. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert it to a spatial point pattern in \pkg{spatstat}. The window for the point pattern will be taken from the bounding box of the points. You will probably wish to change this window, usually by taking another dataset to provide the window information. Use \verb![.ppp! 
to change the window: if \texttt{X} is a point pattern object of class \verb!"ppp"! and \texttt{W} is a window object of class \verb!"owin"!, type <>= X <- X[W] @ \subsubsection{Objects of class \texttt{SpatialPointsDataFrame }} An object \texttt{x} of class \texttt{SpatialPointsDataFrame} represents a pattern of points with additional variables (`marks') attached to each point. It includes an object of class \texttt{SpatialPoints} giving the point locations, and a data frame containing the additional variables attached to the points. Use \verb!as(x, "ppp")! or \texttt{as.ppp(x)} to convert an object \texttt{x} of class \texttt{SpatialPointsDataFrame} to a spatial point pattern in \pkg{spatstat}. In this conversion, the data frame of additional variables in \texttt{x} will become the \texttt{marks} of the point pattern \texttt{z}. <>= y <- as(x, "ppp") @ Before the conversion you can extract the data frame of auxiliary data by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. After the conversion you can extract these data by \verb!df <- marks(y)!. For example: <>= balt <- as(baltim, "ppp") bdata <- slot(baltim, "data") @ \subsubsection{Objects of class \texttt{SpatialLines}} \label{spatiallines.2.psp} A ``line segment'' is the straight line between two points in the plane. In the \pkg{spatstat} package, an object of class \texttt{psp} (``planar segment pattern'') represents a pattern of line segments, which may or may not be connected to each other (like matches which have fallen at random on the ground). In the \pkg{sp} package, an object of class \texttt{SpatialLines} represents a \textbf{list of lists} of \textbf{connected curves}, each curve consisting of a sequence of straight line segments that are joined together (like several pieces of a broken bicycle chain.) So these two data types do not correspond exactly. The list-of-lists hierarchy in a \texttt{SpatialLines} object is useful when representing internal divisions in a country. 
For example, if \texttt{USA} is an object of class \texttt{SpatialLines} representing the borders of the United States of America, then \verb!USA@lines! might be a list of length 52, with \verb!USA@lines[[i]]! representing the borders of the \texttt{i}-th State. The borders of each State consist of several different curved lines. Thus \verb!USA@lines[[i]]@Lines[[j]]! would represent the \texttt{j}th piece of the boundary of the \texttt{i}-th State. If \texttt{x} is an object of class \texttt{SpatialLines}, there are several things that you might want to do: \begin{enumerate} \item collect together all the line segments (all the segments that make up all the connected curves) and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a spatial line segment pattern. \end{quote} \item convert each connected curve to an object of class \texttt{psp}, keeping different connected curves separate. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) @ The result will be a \textbf{list of lists} of objects of class \texttt{psp}. Each one of these objects represents a connected curve, although the \pkg{spatstat} package does not know that. The list structure will reflect the list structure of the original \texttt{SpatialLines} object \texttt{x}. If that's not what you want, then use \verb!curvelist <- do.call("c", out)! or <>= curvegroup <- lapply(out, function(z) { do.call("superimposePSP", z)}) @ to collapse the list-of-lists-of-\texttt{psp}'s into a list-of-\texttt{psp}'s. In the first case, \texttt{curvelist[[i]]} is a \texttt{psp} object representing the \texttt{i}-th connected curve. In the second case, \texttt{curvegroup[[i]]} is a \texttt{psp} object containing all the line segments in the \texttt{i}-th group of connected curves (for example the \texttt{i}-th State in the \texttt{USA} example). 
\end{enumerate} The window for the spatial line segment pattern can be specified as an argument \texttt{window} to the function \texttt{as.psp}. \subsubsection{Objects of class \texttt{SpatialLinesDataFrame}} An object \texttt{x} of class \texttt{SpatialLinesDataFrame} is a \texttt{SpatialLines} object with additional data. The additional data is stored as a data frame \verb!x@data! with one row for each entry in \verb!x@lines!, that is, one row for each group of connected curves. In the \pkg{spatstat} package, an object of class \texttt{psp} (representing a collection of line segments) may have a data frame of marks. Note that each \emph{line segment} in a \texttt{psp} object may have different mark values. If \texttt{x} is an object of class \texttt{SpatialLinesDataFrame}, there are two things that you might want to do: \begin{enumerate} \item collect together all the line segments that make up all the connected lines, and store them as a single object of class \texttt{psp}. \begin{quote} To do this, use \verb!as(x, "psp")! or \texttt{as.psp(x)} to convert it to a marked spatial line segment pattern. \end{quote} \item keep each connected curve separate, and convert each connected curve to an object of class \texttt{psp}. To do this, type something like the following: <>= out <- lapply(x@lines, function(z) { lapply(z@Lines, as.psp) }) dat <- x@data for(i in seq(nrow(dat))) out[[i]] <- lapply(out[[i]], "marks<-", value=dat[i, , drop=FALSE]) @ The result is a list-of-lists-of-\texttt{psp}'s. See the previous subsection for explanation on how to change this using \texttt{c()} or \texttt{superimposePSP}. \end{enumerate} In either case, the mark variables attached to a particular \emph{group of connected lines} in the \texttt{SpatialLinesDataFrame} object, will be duplicated and attached to each \emph{line segment} in the resulting \texttt{psp} object. 
\subsubsection{Objects of class \texttt{SpatialPolygons}} First, so that we don't go completely crazy, let's introduce some terminology. A \emph{polygon} is a closed curve that is composed of straight line segments. You can draw a polygon without lifting your pen from the paper. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(chorley) plot(as.owin(chorley), lwd=3, main="polygon") @ \end{center} A \emph{polygonal region} is a region in space whose boundary is composed of straight line segments. A polygonal region may consist of several unconnected pieces, and each piece may have holes. The boundary of a polygonal region consists of one or more polygons. To draw the boundary of a polygonal region, you may need to lift and drop the pen several times. \setkeys{Gin}{width=0.4\textwidth} \begin{center} <>= data(demopat) plot(as.owin(demopat), col="blue", main="polygonal region") @ \end{center} An object of class \texttt{owin} in \pkg{spatstat} represents a polygonal region. It is a region of space that is delimited by boundaries made of lines. An object \texttt{x} of class \texttt{SpatialPolygons} represents a \textbf{list of polygonal regions}. For example, a single object of class \texttt{SpatialPolygons} could store information about every State in the United States of America (or the United States of Malaysia). Each State would be a separate polygonal region (and it might contain holes such as lakes). There are two things that you might want to do with an object of class \texttt{SpatialPolygons}: \begin{enumerate} \item combine all the polygonal regions together into a single polygonal region, and convert this to a single object of class \texttt{owin}. \begin{quote} For example, you could combine all the States of the USA together and obtain a single object that represents the territory of the USA. To do this, use \verb!as(x, "owin")! or \texttt{as.owin(x)}. The result is a single window (object of class \texttt{"owin"}) in the \pkg{spatstat} package. 
\end{quote} \item keep the different polygonal regions separate; convert each one of the polygonal regions to an object of class \texttt{owin}. \begin{quote} For example, you could keep the States of the USA separate, and convert each State to an object of class \texttt{owin}. \end{quote} To do this, type the following: <>= regions <- slot(x, "polygons") regions <- lapply(regions, function(x) { SpatialPolygons(list(x)) }) windows <- lapply(regions, as.owin) @ The result is a list of objects of class \texttt{owin}. Often it would make sense to convert this to a tessellation object, by typing <>= te <- tess(tiles=windows) @ \end{enumerate} The conversion process may generate an error message, saying that some of the polygons intersect each other, or are self-intersecting, or violate other geometrical conditions. This happens because an object of class \texttt{SpatialPolygons} is just a list of lists of polygons, possibly self-intersecting or mutually intersecting, but an object of class \texttt{"owin"} is intended to specify a well-defined region of space. If you chose option 1, the conversion process will check whether any of the polygons in \texttt{x} intersect each other. This often generates an error with a shapefile representing a division of space into states or counties or administrative regions, like the D\'epartements of France, because two adjacent regions have boundaries that intersect (even though the intersection has zero area). If you chose option 2, the conversion process will only check whether, for each polygonal region in \texttt{x}, the component polygons intersect each other. This will \emph{usually} avoid the checking problem. If an error occurs, the error message will usually specify which component polygons fail the test. The best strategy is usually just to plot the object \texttt{x} (using the plot facilities in \pkg{sp}) to identify the problem. 
It is possible to suppress the stringent checking of polygons in \pkg{spatstat} during the conversion: <>= spatstat.options(checkpolygons=FALSE) y <- as(x, "owin") spatstat.options(checkpolygons=TRUE) @ The resulting object \texttt{y} should be inspected carefully and used circumspectly; it has not passed the stringent tests required for many algorithms in \pkg{spatstat}. \subsubsection{Objects of class \texttt{SpatialPolygonsDataFrame}} What a mouthful! An object \texttt{x} of class \texttt{SpatialPolygonsDataFrame} represents a list of polygonal regions, with additional variables attached to each region. It includes an object of class \texttt{SpatialPolygons} giving the spatial regions, and a data frame containing the additional variables attached to the regions. The regions are extracted by <>= y <- as(x, "SpatialPolygons") @ and you then proceed as above to convert the curves to \pkg{spatstat} format. The data frame of auxiliary data is extracted by \verb!df <- x@data! or \verb!df <- slot(x, "data")!. For example: <>= cp <- as(columbus, "SpatialPolygons") cregions <- slot(cp, "polygons") cregions <- lapply(cregions, function(x) { SpatialPolygons(list(x)) }) cwindows <- lapply(cregions, as.owin) @ There is currently no facility in \pkg{spatstat} for attaching marks to an \texttt{owin} object directly. However, \pkg{spatstat} supports objects called \textbf{hyperframes}, which are like data frames except that the entries can be any type of object. Thus we can represent the \texttt{columbus} data in \pkg{spatstat} as follows: <>= ch <- hyperframe(window=cwindows) ch <- cbind.hyperframe(ch, columbus@data) @ Then \texttt{ch} is a hyperframe containing a column of \texttt{owin} objects followed by the columns of auxiliary data. 
\end{document} spatstat/vignettes/replicated.Rnw0000644000176000001440000012616112252274535016777 0ustar ripleyusers\documentclass[11pt]{article} % \VignetteIndexEntry{Analysing Replicated Point Patterns in Spatstat} \usepackage{graphicx} \usepackage{Sweave} \usepackage{bm} \usepackage{anysize} \marginsize{2cm}{2cm}{2cm}{2cm} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\indicate}[1]{\boldmaths{1}\{ {#1} \}} \newcommand{\dee}[1]{\, {\rm d}{#1}} \newcommand{\boldmaths}[1]{{\ensuremath\boldsymbol{#1}}} \newcommand{\xx}{\boldmaths{x}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Analysing replicated point patterns in \texttt{spatstat}} \author{Adrian Baddeley} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle \begin{abstract} This document describes \spst's capabilities for fitting models to replicated point patterns. More generally it applies to data from a designed experiment in which the response from each unit is a spatial point pattern. \end{abstract} \tableofcontents \newpage %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Introduction} `Replicated point patterns' are datasets consisting of several point patterns which can be regarded as independent repetitions of the same experiment. 
For example, three point patterns taken from micrographs of three pipette samples of the same jug of milk could be assumed to be replicated observations.
\end{itemize} \section{Formulating the problem} We view the experiment as involving a series of {\em `units'\/}. Each unit is subjected to a known set of experimental conditions (described by the values of the {\em covariates\/}), and each unit yields a {\em response\/} which is a spatial point pattern. The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a pixel image. Three important cases are: \begin{description} \item[independent replicates:] We observe $n$ different point patterns that can be regarded as independent replicates, i.e.\ independent realisations of the same point process. The `responses' are the point patterns; there are no covariates. \item[replication in groups:] there are $K$ different experimental groups (e.g. control, aspirin, nurofen). In group $k$ ($k=1,\ldots,K$) we observe $n_k$ point patterns which can be regarded as independent replicates within this group. We regard this as an experiment with $n = \sum_k n_k$ units. The responses are the point patterns; there is one covariate which is a factor (categorical variable) identifying which group each point pattern belongs to. \item[general case:] there are covariates other than factors that influence the response. The point patterns are assumed to be independent, but no two patterns have the same distribution. \end{description} Examples of these three cases are given in the datasets \texttt{waterstriders}, \texttt{pyramidal} and \texttt{demohyper} respectively, which are installed in \spst. \section{Installed datasets} The following datasets are currently installed in \spst. \begin{itemize} \item \texttt{waterstriders}: Penttinen's \cite{pent84} waterstriders data recording the locations of insect larvae on a pond in 3 independent experiments. 
\item \texttt{pyramidal}: data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain, 31 human subjects grouped into 3 groups (controls, schizoaffective and schizophrenic). \item \texttt{flu}: data from Chen et al \cite{chenetal08} giving the locations of two different virus proteins on the membranes of cells infected with influenza virus; 41 multitype point patterns divided into two virus types (wild and mutant) and two stain types. \item \texttt{simba}: simulated data from an experiment with two groups and 5 replicate point patterns per group. \item \texttt{demohyper}: simulated data from an experiment with two groups in which each experimental unit has a point pattern response and a pixel image covariate. \end{itemize} \section{Lists of point patterns} First we need a convenient way to store the \emph{responses} from all the units in an experiment. An individual point pattern is stored as an object of class \verb!"ppp"!. The easiest way to store all the responses is to form a list of \verb!"ppp"! objects. \subsection{Waterstriders data} The \texttt{waterstriders} data are an example of this type. The data consist of 3 independent point patterns representing the locations of insect larvae on a pond. See \texttt{help(waterstriders)}. <<>>= waterstriders @ The \texttt{waterstriders} dataset is a list of point patterns. It is a list, each of whose entries is a point pattern (object of class \verb!"ppp"!). Note that the observation windows of the three point patterns are {\tt not\/} identical. \subsection{The class \texttt{listof}} For convenience, the \texttt{waterstriders} dataset also belongs to the class \verb!"listof"!. This is a simple mechanism to allow us to handle the list neatly --- for example, we can provide special methods for printing, plotting and summarising the list. 
\SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(waterstriders, main="") @ Notice that the plot method displays each entry of the list in a separate panel. There's also the summary method: <<>>= summary(waterstriders) @ \subsection{Creating a \texttt{listof} object} For example, here is a simulated dataset containing three independent realisations of the Poisson process with intensity 100. <<>>= X <- listof(rpoispp(100), rpoispp(100), rpoispp(100)) @ Then it can be printed and plotted. <>= plot(X) X @ To convert an existing list to the class \code{listof}, use \code{as.listof}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Hyperframes} A \emph{hyperframe} is like a data frame, except that its entries can be objects of any kind. A hyperframe is effectively a two-dimensional array in which each column consists of values of one type (as in a data frame) or consists of objects of one class. The entries in a hyperframe can be point patterns, pixel images, windows, or any other objects. To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). \subsection{Creating hyperframes} The function \texttt{hyperframe} will create a hyperframe. <>= hyperframe(...) @ The arguments \verb!...! are any number of arguments of the form \texttt{tag=value}. Each \texttt{value} will become a column of the array. The \texttt{tag} determines the name of the column. Each \texttt{value} can be either \begin{itemize} \item an atomic vector or factor (i.e. 
numeric vector, integer vector, character vector, logical vector, complex vector or factor) \item a list of objects which are all of the same class \item one atomic value, which will be replicated to make an atomic vector or factor \item one object, which will be replicated to make a list of identical objects. \end{itemize} All columns (vectors, factors and lists) must be of the same length, if their length is greater than 1. For example, here is a hyperframe containing a column of numbers and a column of \emph{functions}: <<>>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) H @ Note that a column of character strings will be converted to a factor, unless you set \texttt{stringsAsFactors=FALSE} in the call to \code{hyperframe}. This is the same behaviour as for the function \code{data.frame}. <<>>= G <- hyperframe(X=1:3, Y=letters[1:3], Z=factor(letters[1:3]), W=list(rpoispp(100),rpoispp(100), rpoispp(100)), U=42, V=rpoispp(100), stringsAsFactors=FALSE) G @ This hyperframe has 3 rows. The columns named \texttt{U} and \texttt{V} are constant (all entries in a column are the same). The column named \texttt{Y} is a character vector. \subsection{Hyperframes of data} To analyse an experiment, we will store {\bf all} the data from the experiment in a single hyperframe. The rows of the hyperframe will correspond to different experimental units, while the columns represent different variables (response variables or covariates). Several examples of hyperframes are provided with the package, including \texttt{demohyper}, \texttt{flu}, \texttt{simba} and \texttt{pyramidal}, described above. The \texttt{simba} dataset contains simulated data from an experiment with a `control' group and a `treatment' group, each group containing 5 experimental units. The responses in the control group are independent Poisson point patterns with intensity 80. The responses in the treatment group are independent realisations of a Strauss process (see \texttt{help(simba)} for details). 
The \texttt{simba} dataset is a hyperframe with 10 rows and 2 columns: \texttt{Points} (the point patterns) and \texttt{group} (a factor with levels \texttt{control} and \texttt{treatment}). <<>>= simba @ The \texttt{pyramidal} dataset contains data from Diggle, Lange and Benes \cite{digglangbene91} on the locations of pyramidal neurons in human brain. One point pattern was observed in each of 31 human subjects. The subjects were classified into 3 groups (controls, schizoaffective and schizophrenic). The \texttt{pyramidal} dataset is a hyperframe with 31 rows and 2 columns: \code{Neurons} (the point patterns) and \code{group} (a factor with levels \texttt{control}, \texttt{schizoaffective} and \texttt{schizophrenic}). <<>>= pyramidal @ The \texttt{waterstriders} dataset is not a hyperframe; it's just a list of point patterns. It can easily be converted into a hyperframe: <<>>= ws <- hyperframe(Striders=waterstriders) @ \subsection{Columns of a hyperframe} Individual columns of a hyperframe can be extracted using \verb!$!: <<>>= H$X H$Y @ The result of \verb!$! is a vector or factor if the column contains atomic values; otherwise it is a list of objects (with class \texttt{"listof"} to make it easier to print and plot). Individual columns can also be assigned (overwritten or created) using \verb!$<-!: <<>>= H$U <- letters[1:3] H @ This can be used to build up a hyperframe column-by-column: <<>>= G <- hyperframe() G$X <- waterstriders G$Y <- 1:3 G @ \subsection{Subsets of a hyperframe} Other subsets of a hyperframe can be extracted with \verb![!: <<>>= H[,1] H[2,] H[2:3, ] H[1,1] @ The result of \verb![! is a hyperframe, unless you set \verb!drop=TRUE! and the subset consists of only one element or one column: <<>>= H[,1,drop=TRUE] H[1,1,drop=TRUE] H[1,2,drop=TRUE] @ Currently there is no method for \verb![<-! that would allow you to assign values to a subset of a hyperframe. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Plotting} \subsection{Plotting a \code{listof} object} The plot method for \code{listof} objects has formal arguments <>= plot.listof(x, ..., main, arrange = TRUE, nrows = NULL, ncols = NULL) @ where \code{main} is a title for the entire page. If \code{arrange=TRUE} then the entries of the list are displayed in separate panels on the same page (with \code{nrows} rows and \code{ncols} columns of panels), while if \code{arrange=FALSE} then the entries are just plotted as a series of plot frames. The extra arguments \verb!...! control the individual plot panels. These arguments will be passed to the plot method that displays each entry of the list. Suitable arguments depend on the type of entries. <>= plot(waterstriders, pch=16, nrows=1) @ \subsection{Plotting a hyperframe} \subsubsection{Plotting one column} If \code{h} is a hyperframe, then the default action of \code{plot(h)} is to extract the first column of \code{h} and plot each of the entries in a separate panel on one page (actually using the plot method for class \verb!"listof"!). \SweaveOpts{width=7,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(simba) @ This only works if the entries in the first column are objects for which a plot method is defined (for example, point patterns, images, windows). To select a different column, use \verb!$! or \verb![!: \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(X=1:3, Y=list(sin,cos,tan)) plot(H$Y) @ The plot can be controlled using the arguments for \code{plot.listof} (and, in this case, \code{plot.function}, since \verb!H$Y! consists of functions). \subsubsection{Complex plots} More generally, we can display any kind of higher-order plot involving one or more columns of a hyperframe: <>= plot(h, e) @ where \code{h} is a hyperframe and \code{e} is an \R\ language call or expression that must be evaluated in each row to generate each plot panel. 
\SweaveOpts{width=9,height=5} \setkeys{Gin}{width=0.9\textwidth} <>= plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) })) @ Note the use of \code{quote}, which prevents the code inside the braces from being evaluated immediately. To plot the $K$-functions of each of the patterns in the \code{waterstriders} dataset, \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= H <- hyperframe(Bugs=waterstriders) plot(H, quote(plot(Kest(Bugs))), marsize=1) @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Data analysis} \subsection{Computing with hyperframes} Often we want to perform some computation on each row of a hyperframe. In a data frame, this can be done using the command \code{with}: <<>>= df <- data.frame(A=1:10, B=10:1) with(df, A-B) @ In this example, the expression \code{A-B} is evaluated in each row of the data frame, and the result is a vector containing the computed values for each row. The function \code{with} is generic, and has a method for data frames, \code{with.data.frame}. The computation above was executed by \code{with.data.frame}. The same syntax is available for hyperframes using the method \code{with.hyperframe}: <>= with(h,e) @ Here \code{h} is a hyperframe, and \code{e} is an {\sf R} language construct involving the names of columns in \code{h}. For each row of \code{h}, the expression \code{e} will be evaluated in such a way that each entry in the row is identified by its column name. <<>>= H <- hyperframe(Bugs=waterstriders) with(H, npoints(Bugs)) with(H, distmap(Bugs)) @ The result of \code{with.hyperframe} is a list of objects (of class \verb!"listof"!), or a vector or factor if appropriate. Notice that (unlike the situation for data frames) the operations in the expression \code{e} do not have to be vectorised. For example, \code{distmap} expects a single point pattern, and is not vectorised to deal with a list of point patterns. 
Instead, the expression \code{distmap(Bugs)} is evaluated separately in each row of the hyperframe. \subsection{Summary statistics} One application of \code{with.hyperframe} is to calculate summary statistics for each row of a hyperframe. For example, the number of points in a point pattern \code{X} is returned by \code{npoints(X)}. To calculate this for each of the responses in the \code{simba} dataset, <<>>= with(simba, npoints(Points)) @ The summary statistic can be any kind of object. For example, to compute the empirical $K$-functions for each of the patterns in the \code{waterstriders} dataset, <<>>= H <- hyperframe(Bugs=waterstriders) K <- with(H, Kest(Bugs)) @ To plot these $K$-functions you can then just type \SweaveOpts{width=6,height=2} \setkeys{Gin}{width=0.9\textwidth} <>= plot(K) @ The summary statistic for each row could be a numeric vector: <<>>= H <- hyperframe(Bugs=waterstriders) with(H, nndist(Bugs)) @ The result is a list, each entry being a vector of nearest neighbour distances. To find the minimum interpoint distance in each pattern: <<>>= with(H, min(nndist(Bugs))) @ \subsection{Generating new columns} New columns of a hyperframe can be created by computation from the existing columns. For example, I can add a new column to the \code{simba} dataset that contains pixel images of the distance maps for each of the point pattern responses. <>= simba$Dist <- with(simba, distmap(Points)) @ \subsection{Simulation} This can be useful for simulation. 
For example, to generate Poisson point patterns with different intensities, where the intensities are given by a numeric vector \code{lambda}: \SweaveOpts{width=6,height=6} \setkeys{Gin}{width=0.7\textwidth} <>= lambda <- rexp(6, rate=1/50) H <- hyperframe(lambda=lambda) H$Points <- with(H, rpoispp(lambda)) plot(H, quote(plot(Points, main=paste("lambda=", signif(lambda, 4))))) @ It's even simpler to generate six independent Poisson point patterns with the \emph{same} intensity 50, say: <>= H$X <- with(H, rpoispp(50)) @ (the expression \code{rpoispp(50)} is evaluated once in each row, yielding a different point pattern in each row because of the randomness). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Exploratory data analysis} Before fitting models to the data, it is prudent to explore the data to detect unusual features and to suggest appropriate models. \subsection{Exploring spatial trend and covariate effects} Points may be distributed non-uniformly either because they are intrinsically non-uniform (``spatial trend'') or because their abundance depends on a spatial covariate (``covariate effects''). Non-uniformity of a point pattern can be investigated using the kernel smoothed intensity. This is the convolution of the point pattern with a smooth density called the kernel. Effectively each point in the pattern is replaced by a copy of the kernel, and the sum of all copies of the kernel is the kernel-smoothed intensity function. It is computed by \texttt{density.ppp} separately for each point pattern. <>= plot(simba, quote(plot(density(Points), main="")), nrows=2) @ Covariate effects due to a real-valued spatial covariate (a real-valued pixel image) can be investigated using the command \code{rhohat}. This uses a kernel smoothing technique to fit a model of the form \[ \lambda(u) = \rho(Z(u)) \] where $\lambda(u)$ is the point process intensity at a location $u$, and $Z(u)$ is the value of the spatial covariate at that location. 
Here $\rho$ is an unknown, smooth function which is to be estimated. The function $\rho$ expresses the effect of the spatial covariate on the point process intensity. If $\rho$ turns out to be constant, then the covariate has no effect on point process intensity (and the constant value of $\rho$ is the constant intensity of the point process). <>= rhos <- with(demohyper, rhohat(Points, Image)) plot(rhos) @ \SweaveOpts{width=6,height=4} \setkeys{Gin}{width=0.9\textwidth} \subsection{Exploring interpoint interaction} Still to be written. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Fitting models of spatial trend} The command \code{mppm} fits models to multiple point patterns. Its syntax is very similar to that of \code{lm} and \code{glm}: <>= mppm(formula, data, interaction, ...) @ where \code{formula} is a formula describing the systematic trend part of the model, \code{data} is a hyperframe containing all the data (responses and covariates), and \code{interaction} determines the stochastic interpoint interaction part of the model. For example: <>= mppm(Points ~ group, simba, Poisson()) @ Note that the formula has a left hand side, which identifies the response. This should be the name of a column of \code{data}. \subsection{Trend formula} The right side of \code{formula} is an expression for the linear predictor (effectively the {\bf logarithm} of the spatial trend). The variables appearing in the right hand side of \code{formula} should be either \begin{itemize} \item names of columns in \code{data} \item objects in the {\sf R} global environment (such as \code{pi} and \code{log}) \item the reserved names \code{x}, \code{y} (representing Cartesian coordinates), \code{marks} (representing mark values attached to points) or \code{id} (a factor representing the row number in the hyperframe). \end{itemize} \subsubsection{Design covariates} The variables in the trend could be `design covariates'. 
For example, to fit a model to the \code{simba} dataset in which all patterns are independent replicates of the same uniform Poisson process, with the same constant intensity: <<>>= mppm(Points ~ 1, simba) @ To fit a model in which the two groups of patterns (control and treatment groups) each consist of independent replicates of a uniform Poisson process, but with possibly different intensity in each group: <<>>= mppm(Points ~ group, simba) @ To fit a uniform Poisson process to each pattern, with different intensity for each pattern: <<>>= mppm(Points ~ id, simba) @ \subsubsection{Spatial covariates} The variables in the trend could be `spatial covariates'. For example, the \code{demohyper} dataset has a column \code{Image} containing pixel images. <<>>= mppm(Points ~ Image, data=demohyper) @ This model postulates that each pattern is a Poisson process with intensity of the form \[ \lambda(u) = \exp(\beta_0 + \beta_1 Z(u)) \] at location $u$, where $\beta_0, \beta_1$ are coefficients to be estimated, and $Z(u)$ is the value of the pixel image \code{Image} at location $u$. It may or may not be appropriate to assume that the intensity of the points is an exponential function of the image pixel value $Z$. If instead we wanted the intensity $\lambda(u)$ to be \emph{proportional} to $Z(u)$, the appropriate model is <>= mppm(Points ~ offset(log(Image)), data=demohyper) @ which corresponds to an intensity proportional to \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \log Z(u)) = e^{\beta_0} \; Z(u). \] The \code{offset} indicates that there is no coefficient in front of $\log Z(u)$. Alternatively we could allow a coefficient: <>= mppm(Points ~ log(Image), data=demohyper) @ which corresponds to a gamma transformation of \code{Image}, \[ \lambda(u) = \exp(\beta_0 + \beta_1 \log Z(u)) = e^{\beta_0} \; Z(u)^{\beta_1}. 
\] %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Interpoint interaction} The stochastic interpoint interaction in a point process model is specified by the arguments \code{interaction} and (optionally) \code{iformula} in <>= mppm(formula, data, interaction, ..., iformula=NULL) @ \subsection{Same interaction for all patterns} In the simplest case, the argument \texttt{interaction} is one of the familiar objects that describe the point process interaction structure. It is an object of class \texttt{"interact"} created by calling one of the functions \begin{center} \begin{tabular}{rl} \texttt{Poisson()} & the Poisson point process\\ \texttt{Hardcore()} & the hard core process \\ \texttt{Strauss()} & the Strauss process \\ \texttt{StraussHard()} & the Strauss/hard core point process\\ \texttt{Softcore()} & pairwise interaction, soft core potential\\ \texttt{PairPiece()} & pairwise interaction, piecewise constant \\ \texttt{DiggleGatesStibbard() } & Diggle-Gates-Stibbard pair potential \\ \texttt{DiggleGratton() } & Diggle-Gratton pair potential \\ \texttt{Fiksel() } & Fiksel pair potential \\ \texttt{LennardJones() } & Lennard-Jones pair potential \\ \texttt{Pairwise()} & pairwise interaction, user-supplied potential\\ \texttt{AreaInter()} & area-interaction potential\\ \texttt{Geyer()} & Geyer's saturation process\\ \texttt{BadGey()} & multiscale Geyer saturation process\\ \texttt{Saturated()} & Saturated pair model, user-supplied potential\\ \texttt{OrdThresh()} & Ord process, threshold potential\\ \texttt{Ord()} & Ord model, user-supplied potential \\ \texttt{MultiStrauss()} & multitype Strauss process \\ \texttt{MultiStraussHard()} & multitype Strauss/hard core process \\ \texttt{Concom()} & connected component interaction \\ \texttt{Hybrid()} & hybrid of several interactions \\ \end{tabular} \end{center} In this `simple' usage of \texttt{mppm}, the point process model assumes that all point patterns have exactly the same 
interpoint interaction, (with the same interaction parameters), and only differ in their spatial trend. \subsection{Hyperframe of interactions} More generally the argument \code{interaction} can be a hyperframe containing objects of class \texttt{"interact"}. For example, we might want to fit a Strauss process to each point pattern, but with a different Strauss interaction radius for each pattern. <>= radii <- with(simba, mean(nndist(Points))) @ Then \code{radii} is a vector of numbers which we could use as the values of the interaction radius for each case. First we need to make the interaction objects: <<>>= Rad <- hyperframe(R=radii) Str <- with(Rad, Strauss(R)) @ Then we put them into a hyperframe and fit the model: <<>>= Int <- hyperframe(str=Str) mppm(Points ~ 1, simba, interaction=Int) @ An important constraint is that all of the interaction objects in one column must be \emph{instances of the same process} (e.g. Strauss) albeit possibly having different parameter values. For example, you cannot put Poisson and Strauss processes in the same column. \subsection{Interaction formula} If \code{interaction} is a hyperframe, then the additional argument \code{iformula} may be used to fully specify the interaction. (An \code{iformula} is also required if \code{interaction} has more than one column.) The \code{iformula} should be a formula without a left hand side. Variables on the right hand side are typically the names of columns in \code{interaction}. \subsubsection{Selecting one column} If the right hand side of \code{iformula} is a single name, then this identifies the column in \code{interaction} to be used as the interpoint interaction structure. 
<<>>= h <- hyperframe(Y=waterstriders) g <- hyperframe(po=Poisson(), str4 = Strauss(4), str7= Strauss(7)) mppm(Y ~ 1, data=h, interaction=g, iformula=~str4) @ \subsubsection{Interaction depending on design} The \code{iformula} can also involve columns of \code{data}, but only those columns that are vectors or factors. This allows us to specify an interaction that depends on the experimental design. [This feature is {\bf experimental}.] For example <<>>= fit <- mppm(Points ~ 1, simba, Strauss(0.07), iformula = ~Interaction*group) @ Since \code{Strauss(0.07)} is not a hyperframe, it is first converted to a hyperframe with a single column named \code{Interaction}. The \code{iformula = ~Interaction*group} specifies (since \code{group} is a factor) that the interpoint interaction shall have a different coefficient in each experimental group. That is, we fit a model which has two different values for the Strauss interaction parameter $\gamma$, one for the control group and one for the treatment group. When you print the result of such a fit, the package tries to do `automatic interpretation' of the fitted model (translating the fitted interaction coefficients into meaningful numbers like $\gamma$). This will be successful in \emph{most} cases: <<>>= fit @ <>= co <- coef(fit) si <- function(x) { signif(x, 4) } @ Thus we see that the estimate of the Strauss parameter $\gamma$ for the control group is \Sexpr{si(exp(co[2]))}, and for the treatment group \Sexpr{si(exp(sum(co[c(2,4)])))} (the correct values in this simulated dataset were $1$ and $0.5$). The fitted model can also be interpreted directly from the fitted canonical coefficients: <<>>= coef(fit) @ The last output shows all the coefficients $\beta_j$ in the linear predictor for the (log) conditional intensity. The interpretation of the model coefficients, for any fitted model in \R, depends on the \emph{contrasts} which were applicable when the model was fitted. 
This is part of the core {\sf R} system: see \code{help(contrasts)} or \code{options(contrasts)}. If you did not specify otherwise, the default is to use \emph{treatment contrasts}. This means that, for an explanatory variable which is a \texttt{factor} with $N$ levels, the first level of the factor is used as a baseline, and the fitted model coefficients represent the factor levels $2, 3, \ldots, N$ relative to this baseline. In the output above, there is a coefficient for \code{(Intercept)} and one for \code{grouptreatment}. These are coefficients related to the \code{group} factor. According to the ``treatment contrasts'' rule, the \code{(Intercept)} coefficient is the estimated effect for the control group, and the \code{grouptreatment} coefficient is the estimated difference between the treatment and control groups. Thus the fitted first order trend is $\exp(\Sexpr{si(co[1])}) = \Sexpr{si(exp(co[1]))}$ for the control group and $\exp(\Sexpr{si(co[1])} + \Sexpr{si(co[3])}) = \Sexpr{si(exp(sum(co[c(1,3)])))}$ for the treatment group. The correct values in this simulated dataset were $80$ and $100$. The remaining coefficients in the output are \code{Interaction} and \code{Interaction:grouptreatment}. Recall that the Strauss process interaction term is $\gamma^{t(u,\xx)} = \exp(t(u,\xx) \log\gamma)$ at a spatial location $u$, for a point pattern $\xx$. Since we're using treatment contrasts, the coefficient \code{Interaction} is the estimate of $\log\gamma$ for the control group. The coefficient \code{Interaction:grouptreatment} is the estimate of the difference in $\log\gamma$ between the treatment and control groups. Thus the estimated Strauss interaction parameter $\gamma$ is $\exp(\Sexpr{si(co[2])}) = \Sexpr{si(exp(co[2]))}$ for the control group and $\exp(\Sexpr{si(co[2])} + (\Sexpr{si(co[4])})) = \Sexpr{si(exp(co[2]+co[4]))}$ for the treatment group. The correct values were $1$ and $0.5$. 
\subsubsection{Completely different interactions for different cases} In the previous example, when we fitted a Strauss model to all point patterns in the \code{simba} dataset, the fitted model for the patterns in the control group was close to Poisson ($\gamma \approx 1$). Suppose we now want to fit a model which {\it is} Poisson in the control group, and Strauss in the treatment group. The Poisson and Strauss interactions must be given as separate columns in a hyperframe of interactions: <>= interaction=hyperframe(po=Poisson(), str=Strauss(0.07)) @ What do we write for the \code{iformula}? The following \emph{will not} work: <>= iformula=~ifelse(group=="control", po, str) @ This does not work because the Poisson and Strauss models are `incompatible' inside such expressions. The canonical sufficient statistics for the Poisson and Strauss processes do not have the same dimension. Internally in \code{mppm} we translate the symbols \code{po} and \code{str} into matrices; the dimensions of these matrices are different, so the \code{ifelse} expression cannot be evaluated. Instead we need something like the following: <>= iformula=~I((group=="control")*po) + I((group=="treatment") * str) @ The letter \code{I} here is a standard R function that prevents its argument from being interpreted as a formula (thus the \code{*} is interpreted as multiplication instead of a model interaction). The expression \code{(group=="control")} is logical, and when multiplied by the matrix \code{po}, yields a matrix. So the following does work: <<>>= g <- hyperframe(po=Poisson(), str=Strauss(0.07)) fit2 <- mppm(Points ~ 1, simba, g, iformula=~I((group=="control")*po) + I((group=="treatment") * str)) fit2 @ %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Studying the fitted model} Fitted models produced by \code{mppm} can be examined and validated in many ways. 
\subsection{Fits for each pattern} \subsubsection{Subfits} The command \code{subfits} takes an \code{mppm} object and extracts, for each individual point pattern, the fitted point process model for that pattern \emph{that is implied by the overall fit}. It returns a list of objects of class \code{ppm}. <<>>= H <- hyperframe(W=waterstriders) fit <- mppm(W ~ 1, H) subfits(fit) @ In this example the result is a list of three \code{ppm} objects representing the implied fits for each of the three point patterns in the \code{waterstriders} dataset. Notice that {\bf the fitted coefficients are the same} in all three models. Note that there are some unresolved difficulties with the implementation of \code{subfits}. Two completely different implementations are supplied in the package; they are called \code{subfits.old} %(used in versions 0.1--1 and earlier) and \code{subfits.new}.% (introduced in 0.1--2). The old version would occasionally crash. Unfortunately the newer version \code{subfits.new} is quite memory-hungry and sometimes causes R to hang. We're still working on this problem. So for the time being, \code{subfits} is the same as \code{subfits.old}. You can change this simply by reassigning, e.g. <>= subfits <- subfits.new @ \subsubsection{Fitting separately to each pattern} For comparison, we could fit a point process model separately to each point pattern dataset using \code{ppm}. The easy way to do this is with \code{with.hyperframe}. To fit a \emph{separate} uniform Poisson point process to each of the three waterstriders patterns, <<>>= H <- hyperframe(W=waterstriders) with(H, ppm(W)) @ The result is again a list of three fitted point process models (objects of class \code{ppm}), but now the fitted coefficients are different. \subsection{Residuals} One standard way to check a fitted model is to examine the residuals. 
\subsubsection{Point process residuals} Some recent papers \cite{baddetal05,baddmollpake08} have defined residuals for a fitted point process model (fitted to a \emph{single} point pattern). These residuals are implemented in \code{spatstat} as \code{residuals.ppm} and apply to an object of class \code{ppm}, that is, a model fitted to a \emph{single} point pattern. The command \code{residuals.mppm} computes the point process residuals for an \code{mppm} object. <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) @ The result is a list, with one entry for each of the point pattern datasets. Each list entry contains the point process residuals for the corresponding point pattern dataset. Each entry in the list is a signed measure (object of class \code{"msr"}) as explained in the help for \code{residuals.ppm}. It can be plotted: <>= plot(res) @ You probably want the smoothed residual field: <>= smor <- with(hyperframe(res=res), Smooth(res, sigma=4)) plot(smor) @ \subsubsection{Sums of residuals} It would be useful to have a residual that is a single value for each point pattern (representing how much that point pattern departs from the model fitted to all the point patterns). That can be computed by \emph{integrating} the residual measures using the function \code{integral.msr}: <<>>= fit <- mppm(P ~ x, hyperframe(P=waterstriders)) res <- residuals(fit) totres <- sapply(res, integral.msr) @ In designed experiments we can plot these total residuals against the design covariates: <>= fit <- mppm(Points~Image, data=demohyper) resids <- residuals(fit, type="Pearson") totres <- sapply(resids, integral.msr) areas <- with(demohyper, area.owin(as.owin(Points))) df <- as.data.frame(demohyper[, "Group"]) df$resids <- totres/areas plot(resids~Group, df) @ \subsubsection{Four-panel diagnostic plots} Sometimes a more useful tool is the function \code{diagnose.ppm} which produces a four-panel diagnostic plot based on the point process residuals. 
However, it is only available for \code{ppm} objects. To obtain a four-panel diagnostic plot for each of the point patterns, do the following: \begin{enumerate} \item fit a model to multiple point patterns using \code{mppm}. \item extract the individual fits using \code{subfits}. \item plot the residuals of the individual fits. \end{enumerate} For example: <>= fit <- mppm(P ~ 1, hyperframe(P=waterstriders)) sub <- hyperframe(Model=subfits(fit)) plot(sub, quote(diagnose.ppm(Model))) @ (One could also do this for models fitted separately to the individual point patterns.) \subsubsection{Residuals of the parameter estimates} We can also compare the parameter estimates obtained by fitting the model simultaneously to all patterns (using \code{mppm}) with those obtained by fitting the model separately to each pattern (using \code{ppm}). <<>>= H <- hyperframe(P = waterstriders) fitall <- mppm(P ~ 1, H) together <- subfits(fitall) separate <- with(H, ppm(P)) Fits <- hyperframe(Together=together, Separate=separate) dr <- with(Fits, unlist(coef(Separate)) - unlist(coef(Together))) dr exp(dr) @ One could also try deletion residuals, etc. \subsection{Goodness-of-fit tests} \subsubsection{Quadrat count test} The $\chi^2$ goodness-of-fit test based on quadrat counts is implemented for objects of class \code{ppm} (in \code{quadrat.test.ppm}) and also for objects of class \code{mppm} (in \code{quadrat.test.mppm}). This is a goodness-of-fit test for a fitted {\bf Poisson} point process model only. The model could be uniform or non-uniform and the intensity might depend on covariates. <<>>= H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) @ See the help for \code{quadrat.test.ppm} and \code{quadrat.test.mppm} for further details. 
\subsubsection{Kolmogorov-Smirnov test} The Kolmogorov-Smirnov test of goodness-of-fit of a Poisson point process model compares the observed and predicted distributions of the values of a spatial covariate. We want to test the null hypothesis $H_0$ that the observed point pattern ${\mathbf x}$ is a realisation from the Poisson process with intensity function $\lambda(u)$ (for locations $u$ in the window $W$). Let $Z(u)$ be a given, real-valued covariate defined at each spatial location $u$. Under $H_0$, the \emph{observed} values of $Z$ at the data points, $Z(x_i)$ for each $x_i \in {\mathbf x}$, are independent random variables with common probability distribution function \[ F_0(z) = \frac{\int_W \lambda(u) \indicate{Z(u) \le z} \dee u} {\int_W \lambda(u) \dee u}. \] We can therefore apply the Kolmogorov-Smirnov test of goodness-of-fit. This compares the empirical cumulative distribution of the observed values $Z(x_i)$ to the predicted c.d.f. $F_0$. The test is implemented as \code{kstest.ppm}. The syntax is <>= kstest.mppm(model, covariate) @ where \code{model} is a fitted model (of class \texttt{"mppm"}) and \code{covariate} is either \begin{itemize} \item a \code{function(x,y)} making it possible to compute the value of the covariate at any location \code{(x,y)} \item a pixel image containing the covariate values \item a list of functions, one for each row of the hyperframe of original data \item a list of pixel images, one for each row of the hyperframe of original data \item a hyperframe with one column containing either functions or pixel images. \end{itemize} \newpage \addcontentsline{toc}{section}{Bibliography} %\bibliography{% %extra,% %extra2,% %biblio/badd,% %biblio/bioscience,% %biblio/censoring,% %biblio/mcmc,% %biblio/spatstat,% %biblio/stat,% %biblio/stochgeom% %} \begin{thebibliography}{1} \bibitem{baddmollpake08} A. Baddeley, J. M{\o}ller, and A.G. Pakes. \newblock Properties of residuals for spatial point processes. 
\newblock {\em Annals of the Institute of Statistical Mathematics}, 60:627--649, 2008. \bibitem{statpaper} A. Baddeley, I. Sintorn, L. Bischof, R. Turner, and S. Heggarty. \newblock Analysing designed experiments where the response is a spatial point pattern. \newblock In preparation. \bibitem{baddetal05} A. Baddeley, R. Turner, J. M{\o}ller, and M. Hazelton. \newblock Residual analysis for spatial point processes (with discussion). \newblock {\em Journal of the Royal Statistical Society, series B}, 67(5):617--666, 2005. \bibitem{chenetal08} B.J. Chen, G.P. Leser, D. Jackson, and R.A. Lamb. \newblock The influenza virus {M2} protein cytoplasmic tail interacts with the {M1} protein and influences virus assembly at the site of virus budding. \newblock {\em Journal of Virology}, 82:10059--10070, 2008. \bibitem{digglangbene91} P.J. Diggle, N. Lange, and F. M. Benes. \newblock Analysis of variance for replicated spatial point patterns in clinical neuroanatomy. \newblock {\em Journal of the {A}merican {S}tatistical {A}ssociation}, 86:618--625, 1991. \bibitem{pent84} A. Penttinen. \newblock {\em Modelling Interaction in Spatial Point Patterns: Parameter Estimation by the Maximum Likelihood Method}. \newblock Number 7 in {Jyv\"askyl\"a} Studies in Computer Science, Economics and Statistics. University of {Jyv\"askyl\"a}, 1984. 
\end{thebibliography} %\addcontentsline{toc}{section}{Index} %\printindex \end{document} spatstat/vignettes/getstart.Rnw0000644000176000001440000003076012237642736016524 0ustar ripleyusers\documentclass[11pt]{article} % \VignetteIndexEntry{Getting Started with Spatstat} <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ \usepackage{graphicx} \newcommand{\pkg}[1]{\texttt{#1}} \newcommand{\bold}[1]{{\textbf {#1}}} \newcommand{\R}{{\sf R}} \newcommand{\spst}{\pkg{spatstat}} \newcommand{\Spst}{\pkg{Spatstat}} \begin{document} \bibliographystyle{plain} \thispagestyle{empty} \SweaveOpts{eps=TRUE} \setkeys{Gin}{width=0.6\textwidth} <>= library(spatstat) spatstat.options(image.colfun=function(n) { grey(seq(0,1,length=n)) }) sdate <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Date") sversion <- read.dcf(file = system.file("DESCRIPTION", package = "spatstat"), fields = "Version") options(useFancyQuotes=FALSE) @ \title{Getting started with \texttt{spatstat}} \author{Adrian Baddeley and Rolf Turner} \date{For \spst\ version \texttt{\Sexpr{sversion}}} \maketitle Welcome to \spst, a package in the \R\ language for analysing spatial point patterns. This document will help you to get started with \spst. It gives you a quick overview of \spst, and some cookbook recipes for doing basic calculations. \section*{What kind of data does \spst\ handle?} \Spst\ is mainly designed for analysing \emph{spatial point patterns}. For example, suppose you are an ecologist studying plant seedlings. You have pegged out a $10 \times 10$ metre rectangle for your survey. Inside the rectangle you identify all the seedlings of the species you want, and record their $(x,y)$ locations. You can plot the $(x,y)$ locations: <>= data(redwood) plot(redwood, pch=16, main="") @ This is a \emph{spatial point pattern} dataset. 
Methods for analysing this kind of data are summarised in the highly recommended book by Diggle \cite{digg03} and other references in the bibliography. \nocite{handbook10,bivapebegome08} Alternatively the points could be locations in one dimension (such as road accidents recorded on a road network) or in three dimensions (such as cells observed in 3D microscopy). You might also have recorded additional information about each seedling, such as its height, or the number of fronds. Such information, attached to each point in the point pattern, is called a \emph{mark} variable. For example, here is a stand of pine trees, with each tree marked by its diameter at breast height (dbh). The circle radii represent the dbh values (not to scale). <>= data(longleaf) plot(longleaf, main="") @ You might also have recorded supplementary data, such as the terrain elevation, which might serve as explanatory variables. These data can be in any format. \Spst\ does not usually provide capabilities for analysing such data in their own right, but \spst\ does allow such explanatory data to be taken into account in the analysis of a spatial point pattern. \Spst\ is \underline{\bf not} designed to handle point data where the $(x,y)$ locations are fixed (e.g.\ temperature records from the state capital cities in Australia) or where the different $(x,y)$ points represent the same object at different times (e.g.\ hourly locations of a tiger shark with a GPS tag). These are different statistical problems, for which you need different methodology. 
\section*{What can \spst\ do?} \Spst\ supports a very wide range of popular techniques for statistical analysis for spatial point patterns, for example \begin{itemize} \item kernel estimation of density/intensity \item quadrat counting and clustering indices \item detection of clustering using Ripley's $K$-function \item spatial logistic regression \item model-fitting \item Monte Carlo tests \end{itemize} as well as some advanced statistical techniques. \Spst\ is one of the largest packages available for \R, containing over 1000 commands. It is the product of 15 years of software development by leading researchers in spatial statistics. \section*{How do I start using \spst?} \begin{enumerate} \item Install \R\ on your computer \begin{quote} Go to \texttt{r-project.org} and follow the installation instructions. \end{quote} \item Install the \spst\ package in your \R\ system \begin{quote} Start \R\ and type \verb!install.packages("spatstat")!. If that doesn't work, go to \texttt{r-project.org} to learn how to install Contributed Packages. \end{quote} \item Start \R\ \item Type \texttt{library(spatstat)} to load the package. \item Type \texttt{help(spatstat)} for information. \end{enumerate} \section*{How do I get my data into \spst?} <>= data(finpines) mypattern <- unmark(finpines) mydata <- round(as.data.frame(finpines), 2) @ Here is a cookbook example. Suppose you've recorded the $(x,y)$ locations of seedlings, in an Excel spreadsheet. You should also have recorded the dimensions of the survey area in which the seedlings were mapped. \begin{enumerate} \item In Excel, save the spreadsheet into a comma-separated values (CSV) file. \item Start \R\ \item Read your data into \R\ using \texttt{read.csv}. \begin{quote} If your CSV file is called \texttt{myfile.csv} then you could type something like <>= mydata <- read.csv("myfile.csv") @ to read the data from the file and save them in an object called \texttt{mydata} (or whatever you want to call it). 
You may need to set various options to get this to work for your file format: type \texttt{help(read.csv)} for information. \end{quote} \item Check that \texttt{mydata} contains the data you expect. \begin{quote} For example, to see the first few rows of data from the spreadsheet, type <<>>= head(mydata) @ To select a particular column of data, you can type \texttt{mydata[,3]} to extract the third column, or \verb!mydata$x! to extract the column labelled \texttt{x}. \end{quote} \item Type \texttt{library(spatstat)} to load the \spst\ package \item Now convert the data to a point pattern object using the \spst\ command \texttt{ppp}. \begin{quote} Suppose that the \texttt{x} and \texttt{y} coordinates were stored in columns 3 and 7 of the spreadsheet. Suppose that the sampling plot was a rectangle, with the $x$ coordinates ranging from 100 to 200, and the $y$ coordinates ranging from 10 to 90. Then you would type <>= mypattern <- ppp(mydata[,3], mydata[,7], c(100,200), c(10,90)) @ The general form is <>= ppp(x.coordinates, y.coordinates, x.range, y.range) @ Note that this only stores the seedling locations. If you have additional columns of data (such as seedling height, seedling sex, etc) these can be added as \emph{marks}, later. \end{quote} \item Check that the point pattern looks right by plotting it: <>= plot(mypattern) @ \item Now you are ready to do some statistical analysis. 
Try the following: \begin{itemize} \item Basic summary of data: type <>= summary(mypattern) @ \item Ripley's $K$-function: <>= options(SweaveHooks=list(fig=function() par(mar=rep(4,4)+0.1))) @ <>= plot(Kest(mypattern)) @ For more information, type \texttt{help(Kest)} \item Envelopes of $K$-function: <>= plot(envelope(mypattern,Kest)) @ <>= env <- envelope(mypattern,Kest, nsim=39) @ <>= plot(env, main="envelope(mypattern, Kest)") @ <>= options(SweaveHooks=list(fig=function() par(mar=c(1,1,1,1)))) @ For more information, type \texttt{help(envelope)} \item kernel smoother of point density: <>= plot(density(mypattern)) @ For more information, type \texttt{help(density.ppp)} \end{itemize} \item Next if you have additional columns of data recording (for example) the seedling height and seedling sex, you can add these data as \emph{marks}. Suppose that columns 5 and 9 of the spreadsheet contained such values. Then do something like <>= marks(mypattern) <- mydata[, c(5,9)] @ <>= mypattern <-finpines @ Now you can try things like the kernel smoother of mark values: <>= plot(Smooth(mypattern)) @ \setkeys{Gin}{width=0.8\textwidth} <>= plot(Smooth(mypattern, sigma=1.2), main="Smooth(mypattern)") @ \setkeys{Gin}{width=0.4\textwidth} \item You are airborne! Now look at the workshop notes \cite{badd10wshop} for more hints. \end{enumerate} \section*{How do I find out which command to use?} Information sources for \spst\ include: \begin{itemize} \item the Quick Reference guide: a list of the most useful commands. \begin{quote} To view the quick reference guide, start \R, then type \texttt{library(spatstat)} and then \texttt{help(spatstat)}. Alternatively you can download a pdf of the Quick Reference guide from the website \texttt{www.spatstat.org} \end{quote} \item online help: \begin{quote} The online help files are useful --- they give detailed information and advice about each command. They are available when you are running \spst. 
To get help about a particular command \texttt{blah}, type \texttt{help(blah)}. There is a graphical help interface, which you can start by typing \texttt{help.start()}. Alternatively you can download a pdf of the entire manual (1000 pages!) from the website \texttt{www.spatstat.org}. \end{quote} \item workshop notes: \begin{quote} A complete set of notes from an Introductory Workshop is available from \texttt{www.csiro.au/resources/pf16h.html} or by visiting \texttt{www.spatstat.org} \end{quote} \item vignettes: \begin{quote} \Spst\ comes installed with several `vignettes' (introductory documents with examples) which can be accessed using the graphical help interface. They include a document about \texttt{Handling shapefiles}. \end{quote} \item website: \begin{quote} Visit the \spst\ package website \texttt{www.spatstat.org} \end{quote} \item forums: \begin{quote} Join the forum \texttt{R-sig-geo} by visiting \texttt{r-project.org}. Then email your questions to the forum. Alternatively you can ask the authors of the \spst\ package (their email addresses are given in the package documentation). \end{quote} \end{itemize} % The following is inserted from refs.bbl originally generated from refs.tex \begin{thebibliography}{10} \bibitem{badd10wshop} A. Baddeley. \newblock Analysing spatial point patterns in {{R}}. \newblock Technical report, CSIRO, 2010. \newblock Version 4. Available at {\texttt{www.csiro.au/resources/pf16h.html}}. \bibitem{bivapebegome08} R. Bivand, E.J. Pebesma, and V. G{\'{o}}mez-Rubio. \newblock {\em Applied spatial data analysis with {R}}. \newblock Springer, 2008. \bibitem{cres93} N.A.C. Cressie. \newblock {\em Statistics for Spatial Data}. \newblock {John Wiley and Sons}, {New York}, second edition, 1993. \bibitem{digg03} P.J. Diggle. \newblock {\em Statistical Analysis of Spatial Point Patterns}. \newblock Hodder Arnold, London, second edition, 2003. \bibitem{fortdale05} M.J. Fortin and M.R.T. Dale. 
\newblock {\em Spatial analysis: a guide for ecologists}. \newblock Cambridge University Press, Cambridge, UK, 2005. \bibitem{fothroge09handbook} A.S. Fotheringham and P.A. Rogers, editors. \newblock {\em The {SAGE} {H}andbook on {S}patial {A}nalysis}. \newblock SAGE Publications, London, 2009. \bibitem{gaetguyo09} C. Gaetan and X. Guyon. \newblock {\em Spatial statistics and modeling}. \newblock Springer, 2009. \newblock Translated by Kevin Bleakley. \bibitem{handbook10} A.E. Gelfand, P.J. Diggle, M. Fuentes, and P. Guttorp, editors. \newblock {\em Handbook of Spatial Statistics}. \newblock CRC Press, 2010. \bibitem{illietal08} J. Illian, A. Penttinen, H. Stoyan, and D. Stoyan. \newblock {\em Statistical Analysis and Modelling of Spatial Point Patterns}. \newblock John Wiley and Sons, Chichester, 2008. \bibitem{mollwaag04} J. M{\o}ller and R.P. Waagepetersen. \newblock {\em Statistical Inference and Simulation for Spatial Point Processes}. \newblock Chapman and Hall/CRC, Boca Raton, 2004. \bibitem{pfeietal08} D.U. Pfeiffer, T. Robinson, M. Stevenson, K. Stevens, D. Rogers, and A. Clements. \newblock {\em Spatial analysis in epidemiology}. \newblock Oxford University Press, Oxford, UK, 2008. \bibitem{wallgotw04} L.A. Waller and C.A. Gotway. \newblock {\em Applied spatial statistics for public health data}. \newblock Wiley, 2004. 
\end{thebibliography} \end{document} spatstat/MD50000644000176000001440000017347512252364236012503 0ustar ripleyuserseb69a9d14eb8af7c473c4e488f497ba0 *DESCRIPTION 0bd4fbd105d4e253cc480f310043cb51 *NAMESPACE 458f31d00b10b03ed4a56cdaa77f7acc *NEWS fd3079d885f1921a2e13afc804ac998d *R/Fest.R 181f8b11faf4d789f91fec7ebf20f226 *R/First.R 6ed249717d3f5d354d859e45642dc020 *R/GJfox.R 685209f96c6d978b5e9af63b5e7d7a1d *R/Gcom.R 689abaef5f0d146dc62a168dd4331410 *R/Gest.R aa22c0433c7bda8a0eb1e6aa99531dc8 *R/Gmulti.R 1101d753c8f35af184bfa8ff36a64486 *R/Gres.R fb60966acb87e9825f6b051d43d3d4ab *R/Hest.R ad8ee203edd080ad68111c0e506cf250 *R/Iest.R 963eff184133bb0e8c555d38d9c448b5 *R/Jest.R 64741fa27917ed51bfb073e41d641f6a *R/Jinhom.R 441a78cc5984e880f28ea37afe7266fb *R/Jmulti.R 26cdb3f8ca7cdd8639710b502cdb84c7 *R/Kcom.R 64b4a0317df865e8fe42cfcb267a44c2 *R/Kest.R 57a21ba53bad438b215ad49c522a3f4a *R/Kinhom.R 308c725e4c3ed6879544c6240e959a2c *R/Kmeasure.R a8742620874e75312563d5844b76ac3d *R/Kmodel.R 59c1ffe49790e5a72f0cb6a4762346be *R/Kmulti.R fc0d8e2be258ffd085822982231d6ad9 *R/Kmulti.inhom.R abbf6db440f53371a4f757cb17274de6 *R/Kres.R ac521454f5b7cba9a4f583fc90d3bb54 *R/Kscaled.R 6642c14908f79382d7208399990755f1 *R/Tstat.R eb6302b9796cfc1d9c930c871f305f2d *R/adaptive.density.R c12ed8f498f3a8568806142d01ecb524 *R/addvar.R 0b74077679dde62586fc3170f1072893 *R/affine.R 74fcd864b2aeddb7481260fe7217adca *R/allstats.R c29552223cce38367bb1c5cbc3f9559f *R/alltypes.R 891d68efa9b9241bf03c04b70f2b2078 *R/anova.mppm.R 8914fcae7fc9f8db3949164d4815756d *R/anova.ppm.R 5f71b1016d94ef704c5f8aba18895179 *R/applynbd.R 2d7a4145901b52ba23b8b38494be5c00 *R/areadiff.R cce73db03b80aeefc896e66ad328d9ec *R/areainter.R fd5fb40c6925516cbda162472993d239 *R/as.im.R afe3c5e768b7c05a2479350ae93d7391 *R/badgey.R f2266be713ddef16bdd80655682c2ec9 *R/beginner.R 14ba4e0f758549e91a47892c5aa87e9d *R/bermantest.R 456123833aa57865b4dd31d8ec605e93 *R/blur.R 92168b7261759329671c88b22199e2e8 *R/breakpts.R 
c3e96fdf7e9e36dcff7172f34129d2d5 *R/bw.optim.R 418624216be7911738abec960dc4ac7e *R/bw.ppl.R ec2ea8feb2385414c21f2e8ce965140e *R/by.ppp.R bdb2daee9252dfead825bb9b0408591c *R/centroid.R 008ff5df43c358b7fa8e6cf90cf27731 *R/clarkevans.R 55b8ccb851f8a2cf879ff18a57963304 *R/classes.R 5364eafd37cab0599f39e612132babb9 *R/clickjoin.R 961d646097c3f3c84a8e1f1508826e39 *R/clickpoly.R c022cb1e5ebb4f0cfd64b5db9e938e3b *R/clickppp.R 793160945e3bc2de4a9de0ffeb7a53a2 *R/clip.psp.R 445961621f8b0c0c9a3ad94fa7fe977c *R/closepairs.R 6a127f84d3e57c47f73ec7d773ee62b2 *R/clusterset.R aeae580cf7946e5ce35b81d7133b4f40 *R/colourschemes.R fe2dfead0c7bf41ea8ecb3c05e2357b4 *R/colourtables.R ab3f9135173837cf4b7a41749bac6ae5 *R/colourtools.R 96d2d00c44b21c5cc0b65cf136b8de52 *R/compareFit.R 025d9cb206122a617dc98d3b8951cefc *R/compileK.R c23379f6e832abefdc6c539f63a7e082 *R/concom.R 86bf6930ceb21c231f59de7656d39c52 *R/connected.R 648abb3e8f01c8182b85ec20e6ac44a0 *R/covariates.R e98e525e1dd67b4db486c9879f8e8dfe *R/crossdistlpp.R e72794611c3b282b61aad31c2bdd628e *R/cut.ppp.R f955b7d8038fa1585e10430b6bf9fc01 *R/datasetup.R 53ee353f690b50afa0f646fb1f2c1bff *R/dclftest.R 413374931b90c52655db4c1c7613767f *R/defaultwin.R 488d09aa143020b7bf8446216bdaac96 *R/deldir.R 4da80f235b7b98353691ae6799df7e11 *R/deltametric.R 63cddefe82da0ac4b2d0d8d68b6b5b3e *R/density.ppp.R cde6f963290610ef388d3dcb1296cfbd *R/density.psp.R 15bfdda8e173f4acbf8c2de3dbcdd24d *R/derivfv.R cdb0d135860a7aca058bf3aa2303a663 *R/dg.R 848670279ea152759d280279f2764e85 *R/dgs.R db717e86f7b6ff12d85baeddddd6c840 *R/diagnoseppm.R 6c17c0dc93b9265546a6f98c8dcece28 *R/disc.R 13ab2885f54583d8c4cffb07ca2b9471 *R/discarea.R 44d2ad1dabb4321581fbc7f15e326b94 *R/dist2dpath.R 92d681fa1de099333113a4fcbd1210b4 *R/distan3D.R fcb2963e046985c9d5dbca90dfe10195 *R/distances.R b80a767eb6315533a62106c6f81625b2 *R/distances.psp.R 3ed01e6b57db1eaa294555a55eb0d896 *R/distanxD.R 6923cf5f2c5b6c6af302e390431d283b *R/distbdry.R d45ad8cb4861013d53500b20eef860e0 *R/distcdf.R 
1b791dd5129f3128f214f5bd1ecd48ee *R/distfun.R 0b52d1734b61b935ee9b71ea41c3d62d *R/distfunlpp.R 1943af58c735936099c04e77f60c60e0 *R/distmap.R e19fb5bd5eb1e9cc2c92cf4478644467 *R/dummify.R cf28474084f1850aff62e27d0cd7b51c *R/dummy.R 45699ee5dc0caae0230c8222995c5afe *R/edgeRipley.R 728edce44cf2c5896783b9e77c2e2746 *R/edgeTrans.R 6e361a4d679bfa4d756101acf8239a5c *R/edges2triangles.R f41cfef5a017692ee84c46e395aa4c36 *R/eem.R a6ca610bb0b345db3a998894f02de13c *R/effectfun.R 227383b0d6df6b1f9b8337805da833ef *R/envelope.R fe213e8bba131a31d2ac5ae6ff047a84 *R/envelope3.R ce39afb08c976f8f555099fe58140b33 *R/envelopelpp.R b4c6a2f8bbe3e4636d9c7947749469c9 *R/eval.fasp.R c2f2d9302f45d60d33ba08b4f67ca181 *R/eval.fv.R 1ac152fe32b59edcec30f6980fab933f *R/eval.im.R f84cf930f8b5f0333f104571d28ad588 *R/evalcovar.R c58cfb9cb1952033858d85087ba2fe2b *R/ewcdf.R 7af2cd6d38839f9e3080b509920e4f3a *R/exactMPLEstrauss.R e1d84d3162516d0e39cac35dca107bf6 *R/exactPdt.R 742219fcadeaf848ee1c209a3fc4ab45 *R/exactdt.R 1ee96128bcb76b156c092eaeb67074af *R/fasp.R 971dd66df1f8ed12d7aaff91bf7655b5 *R/fgk3.R 80d8946f7a2bf02590c462cd006ac137 *R/fii.R 81991ff87ab7f27f4a777453f39a2f63 *R/fiksel.R 23fbe10cc51eb6804e6649187709e29b *R/fitted.mppm.R 860601117473980671c52d2e9b0600af *R/fitted.ppm.R f67c9812cd9d00ff3d63cc8c37645600 *R/flipxy.R bb9b33bb3253ddecec79918117d6c224 *R/formulae.R e7ea22de831804dd4b5bc822b28f2d40 *R/fryplot.R 9eda6a6239ead0ca0c0cc7519e6ac9a7 *R/funxy.R 93c603ed74a5352bd3c800b0a7d7000c *R/fv.R 8cf26431639e04703e6e99d97cd9dd6c *R/geyer.R 35a42c8da0d941958de645e61e9a607e *R/hardcore.R f1e16ee9c975eda27002f44980d1ea57 *R/harmonic.R 38a100c6528e0e39dd216d8cff67a824 *R/ho.R 6b9d70e9c5ff0c5427a3a9888597a24f *R/hybrid.R e90bb69ff9ab00a31b1d21cdb3b64b3c *R/hybrid.family.R ccd40040bfe48283af62f25c1521bda1 *R/hyperframe.R 23ba35acf9eb4bdd10d637322da0d181 *R/hypersub.R 4b26707466d6d678aca0b520b9d1164b *R/idw.R ba09838a018fc5f6e4f7a7cf49c08437 *R/images.R 8896769628dd1853952747a7deb29660 *R/infline.R 
b5d86a0ef4b0631df4e32747acd13171 *R/inforder.family.R 071a00d26d33d367913909973573e971 *R/intensity.R c36caafc15c99ddcd5862d61191860ad *R/interact.R aaf0009fffdaccae3e81ae7d70ff5a21 *R/interactions.R aa543ab2d17deaa80450a2b1a7184dfa *R/interp.im.R c2a65fa827a8901ee7ba30b871006611 *R/iplot.R dd0dd82c9602144ccbc1e553718f08c3 *R/iplotlayered.R b920dffda74bfe6ca9cb09565769b9ae *R/ippm.R a4defbdf84ad11dea11a8b58a8d2e578 *R/is.cadlag.R b172d59b48140824b765c6027407abac *R/is.subset.owin.R 7ce3381d58eeb285611aa2ad86fb5b46 *R/istat.R 79abe3ad5c54621a3dc219c8051ad102 *R/kmrs.R bb8e036d4beb8fba16a9a2ebb788f56e *R/kppm.R 62bded8b831fd63a00683593906fbd82 *R/kstest.R 9d2b37a7b716ea48816d638f10d9f08f *R/kstest.mppm.R 7f544e6962de319c598587de1d26a51f *R/layered.R f1c196678723dc183202a4d27da2b8ed *R/lennard.R ef995ab3a81df103de6b3a2058728b70 *R/levelset.R ffd4d915eaad02850316c7d7c60716ac *R/leverage.R 2096ce8caac0e16cc7b465717560533e *R/linalg.R 2bb2228328792252f980d1b860294575 *R/linearK.R b4ab1b3406b13778983a47a065b1f6e9 *R/linearKmulti.R d62a524d4d77af8260fab8371162e2e4 *R/lineardisc.R 5f08522e240a6de60c570605956cb904 *R/linearmrkcon.R 364dd86e3079d27c770a5cd43b332fe7 *R/linearpcf.R e50d4dcd1e45b79629c26840d34a5a68 *R/linearpcfmulti.R b0e76837c7034d45c363d3219a781e9c *R/linequad.R 5be5a1afec8fd17153daa53e236529c9 *R/linfun.R eac3c002d096dc43c706151059d10f8e *R/linim.R af7682f74601a86af2bcd3068a52c47a *R/linnet.R bc8eea2739941ab5c12c244d1b884e7f *R/listof.R 82e72bfe47f486cb8846b6c93772910b *R/localK.R 0fd9a8476252e98097ba1ef000b2b965 *R/localpcf.R 3c84b1d2be12c5ebc93997c0151d5dbe *R/logistic.R d0eeed0fdaf496bd0b417baca1a400ca *R/lohboot.R 91eb0240373638140541d99d1e0157c0 *R/lpp.R 4290eb73c03982f4003cdb4f988d1303 *R/lppm.R acbdef4fee027c67d996e94af985b5ec *R/lurking.R 14104496afbcd28b956b67ed445cd5d4 *R/markcorr.R 290578705340eb819a7747ba4f1c1550 *R/marks.R d692b6cee26fcd47f65452128965afcf *R/marktable.R 59af6690a34659d06478ff9b4716ddaa *R/measures.R 
10b8c549d7cdfb4e127af3d25a59c348 *R/mincontrast.R ba8782eb90ee38cc0a41750b16962bda *R/model.depends.R 56e67dae289319296f4e20bb1b178fd4 *R/morishita.R f931ddb17efe7fdad4ed2dfe8dd53251 *R/morphology.R 38fca66d09ccca0dd4289a60865841ce *R/mpl.R 2546637860ea31842ad398a5681bdfae *R/mppm.R e9fe3a5622d157a57d8a264544b0d925 *R/multihard.R 30ca6752d5613a3cd1d93fee26d6e13b *R/multipair.util.R bffac3a0fe4e399f6cfa6b40fce0bf22 *R/multistrauss.R 45e01d2a3fa2c9bbbf08efc4ccf74213 *R/multistrhard.R e95b9c79ef12eb20859b0206f19788bf *R/nearestsegment.R 47eb12d44ca62df710820b5807abf6cf *R/news.R ff6a799a5400edc25193e3b692263bd7 *R/nnclean.R 2bb45b41ac4c5c40e962d13f53c2c0d7 *R/nncorr.R 0165b725cae6ec96e945605c401017c0 *R/nncross.R 048e5118286b47cb4775ca0181163f0b *R/nncross3D.R ec4be4673b53588e9553b0c88831f1d2 *R/nndensity.R 1fe66156d7e3103eab05460c112b0e48 *R/nndist.R 0a85278b7baba0151e92ecf673fa2178 *R/nndistlpp.R 6f2fcc1060d323345bb4fb915664b553 *R/nnfun.R 9147f0427f130d57275601b4be29ecc2 *R/nnfunlpp.R e807a00b764faa4fb5cda90a855800c9 *R/nnmap.R 69534ce43f429ade9df988ea640e1570 *R/nnmark.R 2f0b48eb6d63a186e5f3870f46a4ff8b *R/objsurf.R 91499189707e2841f3671bbecfb9054a *R/options.R 8dda4d10c50289406d3f008309b4016b *R/ord.R 8455eed87a55cdf33df4b9bdfe6d9ec3 *R/ord.family.R e3b7d2155d614727cc08bebe29e49f51 *R/ordthresh.R 575e2a83fd8702920b84f24f508f7239 *R/pairdistlpp.R 73cf77ee69c07232cc25eab2ba63b7b3 *R/pairpiece.R c7b1712cfdf7f3a4caa38b0e765d1c64 *R/pairs.im.R af93a97f749d9677507741e2a394de0f *R/pairsat.family.R 51b39fa93d4793aa918f3034b1c342d5 *R/pairwise.R 762d93f6fb48464ee14bbb300f286f6c *R/pairwise.family.R 2a4cfdbbc2796b3e92d18dc12e4b960e *R/parres.R 45aecf67d90b0add48270f16b49349c7 *R/pcf.R ef6088f4775e361696edae511b51c606 *R/pcfcross.R 5a4b56110fe6e96030bbacfdfb7ea18b *R/pcfinhom.R 460971aa4c6a729b293be224d04f46d5 *R/pcfmulti.inhom.R 4e2cfc0fbe156faabfc39466b943f522 *R/periodify.R caf3ce651abd72af3005240d406797f8 *R/pickoption.R fa07088c1ca1fef7bf9d01c1b2b07f83 *R/pixellate.R 
d1c6192177be9b868ef0ed5fea15dd24 *R/plot.fasp.R 67e9051a3e9aa29d0bfc3bd10983b529 *R/plot.fv.R 1d726cc6e82e197566e374a1b383c00b *R/plot.im.R 042d4df7ab031933cdeddaf24f25201a *R/plot.mppm.R cdb645f0604efb1e4654993f8391a006 *R/plot.owin.R 07ca8db4c4df142cdf47da4818c5eb0b *R/plot.plotppm.R df0dbd056b370db13b5532ac3b26ed85 *R/plot.ppm.R d6e48ddd2e720a5026b0a9b28598ec4b *R/plot.ppp.R 147304a0485d772f72e17ed3151cd025 *R/plot.splitppp.R 8636732529434b68f310116159b3f9e3 *R/pointsonlines.R 713f28f15d86975a69f3cfedcf1e258f *R/poisson.R 9d540b5bc972e9ae15c488478ee9ec87 *R/pp3.R 01bbde5cc66b62fe7223ef086c3e735f *R/ppm.R 515235b0ae6b029eaaa1d9fd81db4f73 *R/ppmclass.R 700e2330ee3789ebdd9244085604662e *R/ppp.R 59ce5e6eb0dc65afd4d4d4db7249e87b *R/pppmatch.R db1ef812ca14e67d04e9851e89e09b0c *R/ppx.R 523c82f66734649ed2cebe796e572538 *R/predict.ppm.R 81db27cd2bb05909a62b842ed26fb688 *R/predictmppm.R 194e65dff4167995a653485e7a11df3e *R/primefactors.R 5f0b306e3df0803a991b22d2168b5b8f *R/profilepl.R a6ff026a0d22afed743e2556c8d032a3 *R/progress.R 821fd1672a81a5ac6f1bd7e57d65a65f *R/psp.R e0525708f4dc653f4b504c2e308f934b *R/psp2pix.R 979388effe9ccc1833806044baede039 *R/pspcross.R 9ce2c9ef6cb11e508298b2678a832bc6 *R/psst.R c4af07f9a38c034bd57c1c1e83030b6e *R/psstA.R 16020ecb873c58a9955395dbd317400a *R/psstG.R 0df624e49949d343941ae9d0c95389fc *R/qqplotppm.R 65fe21ae837f84a3d75a74a443805342 *R/quadclass.R 884fcbb6aa31e84b728f6d8aea77c4db *R/quadratcount.R 3928fd84e11208b225cabf3eff17befd *R/quadratmtest.R 927ac2f8e1832913ccb7dcd4831854d7 *R/quadratresample.R cb89fca536ca4a2bf7d2f775817a6a86 *R/quadrattest.R d1d3e8f3b9b957dd2d220e04ae20af66 *R/quadscheme.R 287aef0f93c7790ef3b2c67ff0dd7bf6 *R/rLGCP.R 65f3a242d22784793cac3624e590cafa *R/rPerfect.R 58a692e51c707cd22472496aeaedb308 *R/random.R bb34c3035b070c9444e259d145b5bb7f *R/randomNS.R 04fa3f0fc3fa4c70d0091a29b44286f5 *R/randomlpp.R 2a34b97cd6af75ff72f7a227ce1dcb9a *R/randommk.R 7c2488aadae31bf896e2d0aa7e9df98d *R/randomonlines.R 
34d99e239d0f4b3f8d5da6172b7ddcd9 *R/randomseg.R 57fdbc29ac251ac841308a8e9b125f3d *R/randomtess.R 4879216e6e042fede8c78892265fdad7 *R/rat.R a8a6f895acc18aa94be66b546be6c17f *R/reach.R a4126ac0afdcc3e3f4dc945cf0c5b087 *R/reduceformula.R 478d22b18506a7f2ad082024aeb71860 *R/relrisk.R dd809793fa06685d2462c32a402c941e *R/replace.ppp.R 2c1f5d7a5244fbb903753155d28fbfe1 *R/rescale.R 7a685c4e7cf92aa99ec1c6bebe0e7cd5 *R/rescue.rectangle.R 17be8671385e4c235e2f79a940f16726 *R/resid4plot.R a87097e700c0877b6d0510da380d3d37 *R/residppm.R 422377b4f7db742f427cf71f45d0d444 *R/residuals.mppm.R ab1c78793ed8d6ad95c91a817fabf80c *R/resolve.defaults.R 1ea9857788b290854b6aaa0d1390c11b *R/rho2hat.R a5d1a1e3e958f53725627289bacf1d2d *R/rhohat.R 71498badb63e091a19a74fd89b73c8be *R/ripras.R f47c0eccaa94d6655967d988635109f5 *R/rknn.R 8c820e6bf174bf1a33886bd9515c53a8 *R/rlabel.R 35f1e78c4ec615ebdf02bf522d15dbe7 *R/rmh.R e0a2bdbac4f8d79ba5945fea979f4b76 *R/rmh.default.R aa2cd90b764b723360170fbed1d8b112 *R/rmh.ppm.R 7968359dbc6c8929bbae8698ca339565 *R/rmhResolveTypes.R 0e0d3632fc1571a0262528a6e45b25d8 *R/rmhcontrol.R 699025636ad66cad0041504988cef64a *R/rmhexpand.R 0ee70248741ecd9d0c4dedbb3b6ce98b *R/rmhmodel.R bcc1f19d0e9257e3e4bbb622e24cb638 *R/rmhmodel.ppm.R 05b016d4b9dddb9a0fd8fdc349538fd9 *R/rmhsnoop.R d074b4401b53aa03991247b7d7114d63 *R/rmhstart.R 55eeb8604adbcfb36a0147d67e1ec04d *R/rotate.R 00c58075e8e3a791b26efd1f11b7f3e6 *R/round.R db2a9a534c2899586a737cd2b2cea75d *R/rshift.R d232999f512896464f8a1cc92f6d1661 *R/rshift.psp.R 822f2f01f8251f082d5c2c99d7d28ce1 *R/satpiece.R 1d44769c755f4fea6f5084af1faefb0d *R/saturated.R 673cf1f0116a2174fd1e4f7ac4a01b01 *R/scanstat.R f642c3cc98aa54f8fcb72c2d84bf1046 *R/setcov.R 048ef0497b1b7b5b715a35c0d88bd4f9 *R/sharpen.R e5f89dbfc813e0812dfefc2b2004180e *R/simplepanel.R b1925b06deb81a92d3802e3ce28bdb4d *R/slrm.R 30ff19b0dde40a2268a578af0ceac32d *R/smooth.ppp.R 0f8f3259b35bf309387c31f4c1165937 *R/smoothfv.R a65d349912f85ba9b40b099f094c144e *R/softcore.R 
9641508183c348f1069cd0156447604a *R/split.ppp.R a0142977c235c3fd893f5bb75daa4fc1 *R/split.ppx.R 84d1113a6140064cb110973db89adcf6 *R/strauss.R 6f6ef06c1933cc6be8e84d2046caa6d7 *R/strausshard.R 22d40f234d4d349ca92486448021f50f *R/subfits.R 5ce5c7351ab113de0450a01fa495759d *R/suffstat.R 152b1c00fcbbd62ec11bd3dd90055b61 *R/summary.im.R c97c353662e74d037f181e8573424a9a *R/summary.mppm.R fde7a2abd0841a8da9b488a194022dc8 *R/summary.ppm.R a0b5673c316276f14a661882cead07c3 *R/summary.quad.R fc49c2b7556f6a33815ea9bc603d166b *R/superimpose.R eefd4e362f0003a778efca39f916064f *R/tess.R ac4c53fd6fb6d473d0477a842831fa4c *R/transect.R 6852ab624a8f0e62a8e6a362efb8e055 *R/triplet.family.R e341016251c65890c8f530413ad85a65 *R/triplets.R 1cab82b2504b77f885cb74f59f8774a8 *R/unique.ppp.R 831851c576f965057450b41613573c4f *R/units.R 3b4364c7997a798e289315c0ad400f6d *R/unnormdensity.R 8f1b9dbef44e223714d9116a3024fcb9 *R/update.ppm.R c2f0f2b06807b1831d78bad9c2d31dde *R/util.R 512c681f82e4b80029658594691ff3d0 *R/varblock.R ed8f22a5f219dd432260ff41a3ff09e3 *R/vcov.kppm.R 3b851766d886a441ec7e650f67b79e5c *R/vcov.mppm.R 2b17a89352f68e0d4107decb44eda567 *R/vcov.ppm.R 4c3e99615801bab02e67a12a45003111 *R/versions.R a332b2afc6bc6294c0ea8f379c41a58b *R/weights.R 20290afdc2d2615df07ba5b8e509c391 *R/window.R 67409b991427cb81310d9277dc6f1709 *R/wingeom.R eff6f40760d6132ce305eec5280a47f3 *R/xypolygon.R a777edb3716876df7825be2db2bd8730 *R/xysegment.R 16adfaeeba03c93dc2361cdfb9ad1d2b *build/vignette.rds 62dd8c88e6bbdca718b6ee4eede61c16 *data/amacrine.rda d742fa4ed8d19e6c13dbd7dc5d5d0c23 *data/anemones.rda 2325c4c93d85f2b8e0e6616fabf03fb3 *data/ants.rda a62a82bee6c2766589c8ef1e1fca6c7c *data/bei.rda 34172275128c56996d6a2e796d79804f *data/betacells.rda 7ebe3d67d6294cdf413afe680405bfce *data/bramblecanes.rda b052c4cc4533c9c5d1a4dfcd7092ae2d *data/bronzefilter.rda 806a0d79517f81b1bf0d46ed0cf6efd3 *data/cells.rda 0511bd9287bc3eecf5ddbef5e3df10a4 *data/chicago.rda 07cc25998a9e8bfbc902e70ead7917a8 
*data/chorley.rda c73a8306bebdc611e5a77be511fcc191 *data/clmfires.rda 77c15ec629937ce18290c849f6311685 *data/copper.rda 5bb680eb5ab29e93c14b6ee7c4f21827 *data/datalist cf61104fb2eb95b66fdeb802852d9627 *data/demohyper.rda 674867657e9a3df07b6eced02aa022a1 *data/demopat.rda 214f7bae6df16af7b58a89b505be9676 *data/finpines.rda aec532275665ca5782af8ee81e60e1f3 *data/flu.rda 8707101ab0231fdf17b3944e583e7818 *data/ganglia.rda 0ddc9903de8a79397731fd799b76bdfa *data/gordon.rda 14bc80075a16c3988fd638d968ed47ef *data/gorillas.rda a0c07e09b68c194c86e1e6a41e4ee41d *data/hamster.rda f36a2616ed523461082ebff60e589529 *data/heather.rda bfa0610032ee8d3762c55c5c3596466d *data/humberside.rda 9509088c024aff138eb7ac2970ce29c1 *data/hyytiala.rda 56284f1bae9cce3370e5880e89792668 *data/japanesepines.rda 9d02bc9ec0a8c8b56ba0522f01ab8e44 *data/lansing.rda 5af48223452f95eec9d7d1af0c3c3486 *data/letterR.rda 13fd009cba17d3d3f91331a5ee5df059 *data/longleaf.rda 3276b7b36c97161fe10b3cc788d80240 *data/mucosa.rda 63352e4636b8932767a342fc33bebb48 *data/murchison.rda b71c209a01a38cab54b40bfa31aa1331 *data/nbfires.rda 114b993385909c8bada737058589b954 *data/nztrees.rda 414f1aab0b6b0d9535e15b2bdce7e553 *data/osteo.rda 21989756fb1688834c416238809359ad *data/paracou.rda 0724cacabbef2e8943d643ab387b6975 *data/ponderosa.rda 4bed47fce533d6a6d086f21f85dd80a1 *data/pyramidal.rda cf06f27ce7c8f7699cd1d7ca4a7340b2 *data/redwood.rda e0d1b7370f0b00c90713f911d280afdb *data/redwoodfull.rda 4999d28b4b768ce1e57d6ec016a25637 *data/residualspaper.rda cb4384dd9523e1eeda489e9b511447a1 *data/shapley.rda 364c75756e02986973416260d7cb0981 *data/simba.rda 7df3bb556f08ea04845ffbfe8e5c9d37 *data/simdat.rda 1acb7b977c5dfa7b1a56c64e95e9834d *data/simplenet.rda a52620fe2b7001928ba7840d1e37a88a *data/spruces.rda 5964cce6a19ac8bb35b2052ecc79e491 *data/swedishpines.rda d17512a727807144f75e52192117574f *data/urkiola.rda c2924cc74278a200bc645ef490a725a4 *data/waka.rda dcdd5004b149bb6f1a616d6c4812f8e7 *data/waterstriders.rda 
2069a06a0577c0d3d36e1ab2e640488f *demo/00Index 0ff592e89867b73d67f5b9dcd9967d20 *demo/data.R 584ddd744ffe944358d171ea35af9e7f *demo/diagnose.R 4d0be3375bd89bbaf43b94022f8f5add *demo/spatstat.R 99557b70b437e73736502c5f400ec6bf *inst/CITATION 629f260f04fbe43fac5a06a800cefedf *inst/doc/BEGINNER.txt 61f1a81418830c9fee55e2a9305a4dbd *inst/doc/getstart.R 22bc211751c0bebafff85ef6bb9a0c5a *inst/doc/getstart.Rnw 15739aac0bcb1963b5e3ecc4191307e2 *inst/doc/getstart.pdf 62df8ca90a236a4300b427942f78ac39 *inst/doc/replicated.R e12cbd8f919c23ce3940157cb54fb6b6 *inst/doc/replicated.Rnw 9cb397c9af9ce42773eba96088a126a3 *inst/doc/replicated.pdf 17b8b5cc083e713875179cc9857bd8f5 *inst/doc/shapefiles.R 1fe6ddbc7fd98890b0e0c68662dc387d *inst/doc/shapefiles.Rnw c0f848c81abd1e6d368738a2bc39a9c1 *inst/doc/shapefiles.pdf 12e68895fef0d3aa0bde45a0ddbadfa4 *inst/ratfor/Makefile 22e8a5189942ba190c13475b35459c7f *inst/ratfor/dppll.r 6d471ec061ea91398ba16323de56b9db *inst/ratfor/inxypOld.r 0643fdeb95977d65e4b05fc10fb9a0b3 *man/AreaInter.Rd 6d3a0e64e150db5f0d858c289d6b5b09 *man/BadGey.Rd bcb13b28bbc389bbf694960e65965d12 *man/Concom.Rd 9aef0866e77901b499afd70b64421b89 *man/DiggleGatesStibbard.Rd ea8f9ee0400bad60a3e7d4db2e66b845 *man/DiggleGratton.Rd 416c33821d43161e5e0f2782334f2ce6 *man/Emark.Rd b1602d4f4c5ff5e35f45cc129e4bc189 *man/Extract.fasp.Rd aa596d3c068ea1050a8d30e09a2ab703 *man/Extract.fv.Rd ddc3ed2edc8736c4803101fcb1655232 *man/Extract.im.Rd 8c7f9e7029a1ca1be63b290b1c0b323b *man/Extract.layered.Rd 20c762d537fba374d5b221d848be20b2 *man/Extract.linnet.Rd 6f9de07c144316473e737a1b9ae394a7 *man/Extract.listof.Rd b5c690abff219e3a69ee923100943d6b *man/Extract.lpp.Rd 8c7460e43adc9fb1ece97852995df470 *man/Extract.msr.Rd 07d851f56c8e75de4fecf1b90a49fdd5 *man/Extract.owin.Rd 62f012af5ab317f37116991cb84f055a *man/Extract.ppp.Rd 8ecef890a921a879b4990fa352eeaec2 *man/Extract.ppx.Rd ad6b129c123e2fbb7e272f51e55be42a *man/Extract.psp.Rd ed2e2e4bd9446a56957e5c84666f90f5 *man/Extract.quad.Rd 
e0a29654580e0e02ebee016f7239eaee *man/Extract.splitppp.Rd b675deaedd83de0bb2b07bacfeefd09a *man/Extract.tess.Rd a483315d38c00648c605dc02b6cd2be8 *man/F3est.Rd 304e181c3e1c171fe8a600c82f229bbe *man/Fest.Rd 05ccd758832ce5a0092690b4a21420b0 *man/Fiksel.Rd eb36896b04f4ab60a9381bcee97aa509 *man/Finhom.Rd 73d461fdd8a78e2aa2f93ce49f27eebc *man/G3est.Rd 47aaefb6580bcbfbcf2e6db725ef24cc *man/Gcom.Rd 04f0c75799f7c7caf173162a7484da3c *man/Gcross.Rd b28559271f0cd3c3ec11c4b8997006bd *man/Gdot.Rd 4cd29073e5fa0d29029829b581efb70e *man/Gest.Rd 410aa5115da32aa0399d4ae4472be3da *man/Geyer.Rd 32d1f33f04dc0b4b35df2f3880abc679 *man/Gfox.Rd 68c4a61d3bc73802dc282f91443f3174 *man/Ginhom.Rd 685299fd0e4c2716a7e3d90ec6c4952f *man/Gmulti.Rd 90ece6921c59193a05268604788d05ed *man/Gres.Rd 66c78e3613a1e7f93b89d227eb765811 *man/Hardcore.Rd 455eb463f7ef917e55157a5617753496 *man/Hest.Rd 488d682a22fdab232c0badc4561807da *man/Hybrid.Rd 4193debd6b929114d53f3630c9e23076 *man/Iest.Rd 83fbcb27ccfa5daa1ffadc07d643975c *man/Jcross.Rd f792a94259b8b7c60dcdb1b24d33dec0 *man/Jdot.Rd 70be1e42f777af8884f6ab476619a2e3 *man/Jest.Rd 0a056844db10683e47b972e5db5a1678 *man/Jinhom.Rd 6c240b7c5428d2d8dd7d8c97eafbf278 *man/Jmulti.Rd 4fd51ccccd3507c4ad0857da908bc883 *man/K3est.Rd 958b5d267f3fd776bf62e0730e47eda2 *man/Kcom.Rd cd123165a07bc0a81ca07d54c56a1a79 *man/Kcross.Rd e740af205de3670c4ce6dfd4663e321b *man/Kcross.inhom.Rd 9e954a3423d2a65fb9b8c666738744de *man/Kdot.Rd 28358d97660cc8be51846cf6b3e678ed *man/Kdot.inhom.Rd 1bd29e4e49926786b75da1e9b8c6f53f *man/Kest.Rd 87ebf04b3ee2ce22dd418d807091ba3c *man/Kest.fft.Rd 2a9c16d1b5c59c3ad12c82f67660883a *man/Kinhom.Rd 01f3c65f26b4c1ff2ba068598658f698 *man/Kmeasure.Rd 98b1ca88d687f543ce13a0d662f0aa57 *man/Kmodel.Rd 09b8ec08dee60e40afbaa7d3ebd50313 *man/Kmulti.Rd 8c1b7af1b4c4aad0d8bb8a6f8617abac *man/Kmulti.inhom.Rd c32dc9e8de3d6ebb1acd4bfb57487de3 *man/Kres.Rd 3cebd909c19de470e2cf2be98a5e7602 *man/Kscaled.Rd a77e2776263105d3bd1c93af6cc10f51 *man/LambertW.Rd 
1412a8c279ca63d59d234250ba662b44 *man/Lcross.Rd 6b7b087eaa971982b7588a63405e33e5 *man/Lcross.inhom.Rd 5df8a955bd5897f600670ca76bd2cc4b *man/Ldot.Rd 09489af097eae3ad59285c70c322427d *man/Ldot.inhom.Rd 08f1c17225da248ddc73ca0e7a2463d6 *man/LennardJones.Rd 134f7b98b804037ca00335d748fc2bf4 *man/Lest.Rd 0bd8404b056866249d1eb1a33b546afe *man/Linhom.Rd a0f73d53f581639c818f2969d44e9906 *man/MultiHard.Rd 4c42659d58777b23a8dfa3d0efd568fa *man/MultiStrauss.Rd b5b745a575aae8e1d658040de2437288 *man/MultiStraussHard.Rd 80f31a09c2f03bd728a6d4bff64ba239 *man/Ord.Rd d53f106716b7a74b8a340f3839dcac35 *man/OrdThresh.Rd 2f5b7ec9f02c9bf5abeb30a35be10d31 *man/PairPiece.Rd b13168ed72f1adc54412e65ac66ec751 *man/Pairwise.Rd 94a04b50218a2d17ba1ce869c1bdf1bb *man/Poisson.Rd 4f0563b953759c010f31b7e41f9c5675 *man/Replace.im.Rd f87587d327d8b848e681c634bf924685 *man/SatPiece.Rd 035a70b3d72ec369306f2fc927d8d81f *man/Saturated.Rd 2a438849f3254a6e46fb7ea7d32f1796 *man/Smooth.Rd a0af86bc5a5cf5c26b71e6b05dba5218 *man/Softcore.Rd bb968f75b4c6cdafda9ab3a6eee31b67 *man/Strauss.Rd a5c41e23f5d4b99139d8301decd743af *man/StraussHard.Rd 89120bbb7cc76f7ed29ca06bb253d3ca *man/Triplets.Rd 703ab820365571592223be75abb2165e *man/Tstat.Rd f69330e860cf400dddb4d1f38226c421 *man/adaptive.density.Rd 8d408661cec745a93aa537f7c56e5e66 *man/addvar.Rd 825fa83d839f3493879e0cf528a96c3c *man/affine.Rd 067f3afb8f902053ecf01a68121880b2 *man/affine.im.Rd 8cb5c016631880d8f41505c2dd9998d5 *man/affine.linnet.Rd c5cfcf0ebd428018b5b2ea28544da827 *man/affine.lpp.Rd 02ff89b1f98c46a4a18f1ab1965400df *man/affine.owin.Rd 04fd94b1224f421400b9875e0763b21d *man/affine.ppp.Rd 77eb1c77ee8d6c80e90f7debc9b3d2f9 *man/affine.psp.Rd 453ec0fc39aa64a7908f698af041d65a *man/allstats.Rd 92398a5bfd1cc25de411934603a7a934 *man/alltypes.Rd 77afccb9bbb3c6b7c1bdfef0acfc4cf7 *man/amacrine.Rd 5448597dfb5906be2c6d41b715b96a4b *man/anemones.Rd 1933c800d675696b5939bb5f26b44137 *man/angles.psp.Rd 90795b8e36e8e375fb88f9c1855c664c *man/anova.lppm.Rd 
2463666f847fe9ce88d6387273249bf9 *man/anova.mppm.Rd 1283b6fc3796cf976405ddabbc17966d *man/anova.ppm.Rd 1e05fd0f958cd56dba24fd7c19b802a1 *man/anova.slrm.Rd 11878bcc000e86200962a3157ce75e68 *man/ants.Rd a623323ce07c50e48479ce56a901ba78 *man/append.psp.Rd 582724fecbc607fc3ba21a68f529796d *man/applynbd.Rd a51ed6023817cff46ff383fe860a261e *man/area.owin.Rd be441de2d769e4da9b5812b8b19d7412 *man/areaGain.Rd d00cd47f8070e18f21699cd4a11dcba5 *man/areaLoss.Rd bc0da2d18ca4b29b4798c41a2fab8c56 *man/as.box3.Rd f8a9b017e3c74053a4622efbbe00c29a *man/as.data.frame.hyperframe.Rd 711382dcd54197825dcb4146213be4b8 *man/as.data.frame.im.Rd f8f2accc9818cdba45b253cf64259ac1 *man/as.data.frame.ppp.Rd cd1e65cd324cc1bb6cc1f5bc68575830 *man/as.data.frame.psp.Rd b148911ad141c759bda01f158d34b89f *man/as.function.fv.Rd 97038c5b37803f33b301d0893419c157 *man/as.fv.Rd 66dfca450df9d84fa6f3173f74c53abd *man/as.hyperframe.Rd 509d35f77dc91cc5e09c0f5f5e6f7937 *man/as.hyperframe.ppx.Rd 6d40fda15051311211b32b2654291adc *man/as.im.Rd ebd66a1a2ef08be3573087d265157a73 *man/as.interact.Rd a69b14eebd6fdbfd020de0a0e5e291dd *man/as.linim.Rd d92dead64b6eb9f037319592958a3d27 *man/as.lpp.Rd 02cb494e7737f85dd3c8483007df00eb *man/as.mask.Rd c93adf881bcd3baf961d9038746953aa *man/as.mask.psp.Rd 46d6be37ece35773ed92a0ef4dbde833 *man/as.matrix.im.Rd 004daef5d5d5e65fe9d29f5b299dcaf6 *man/as.matrix.owin.Rd 43fd87cf88bb67d6119e4b2d1b7cf934 *man/as.owin.Rd b4aabb97b7689bc69ff5f4c3851e57f0 *man/as.polygonal.Rd 10e128fddd9bcc80a23c571ffae35a58 *man/as.ppm.Rd 4ffea6c8eade5ae4f7fffd13e3a4e6cc *man/as.ppp.Rd 37cfda383746c818a082153d154aeb88 *man/as.psp.Rd d440de0e0e80dcaa2b103fff580c3bb8 *man/as.rectangle.Rd f66d326b9cbe9a2ab1afb132cc18f181 *man/as.tess.Rd dd09326334dec47fd6109060608f3311 *man/bdist.pixels.Rd 1ac03e53870a2c3d26bb8e8a9b814fdf *man/bdist.points.Rd 941584f1784b4a6e8b2f1b9a1e66af8a *man/bdist.tiles.Rd 34ffbf506a1387c8e26cac682d296d51 *man/beachcolours.Rd 03a9628b39cb1dd9208bc19db147e16a *man/beginner.Rd 
0b25e72dd3f0279a2d0f90cfd144acc0 *man/bei.Rd 2bf90ea031bb037fafd8e101f00d578d *man/bermantest.Rd 07590dc52a72eb6dfd20d382089e341c *man/betacells.Rd 0d6c6424f4cb24b15b0d0390e8065980 *man/bind.fv.Rd 62e7c01a6215d989f6b39cb30db76798 *man/blur.Rd 2dd70ff89fd37ad5479bdd6fc144495e *man/border.Rd 59ee5512935e85d9120373d6c41414c1 *man/bounding.box.Rd 858cdd7b586340d1aa51199e6e709d73 *man/bounding.box.xy.Rd 2f60344050e34407e1e211f8cfe40806 *man/box3.Rd 68050dcd80604a50dc35270280e401ed *man/boxx.Rd 3d63735c1fa0d8169ffbac24d1186b80 *man/bramblecanes.Rd 9a11c1fa791d401d15f847e41d0956f4 *man/bronzefilter.Rd 2b765304cc51209d91b551399e151e26 *man/bw.diggle.Rd 749c02e221fbd342818c547846b6a617 *man/bw.frac.Rd 858618223aff2bce5604b7c89d34f316 *man/bw.ppl.Rd 5f2a748cc51cf85f3f40f6a5a96c26f1 *man/bw.relrisk.Rd 08d85de540748bbc271c82f4312d536a *man/bw.scott.Rd 9dfde3d886b7bc191a2157033bc44c68 *man/bw.smoothppp.Rd 4ab335aed0d732935b2f49b8f87fa197 *man/bw.stoyan.Rd a50c0c5c0d5761f57d452d8ee45c17fa *man/by.im.Rd 387f4c469a29406285404bb7152e782d *man/by.ppp.Rd 371f26f380af9cff8f55aec931b8b88e *man/cauchy.estK.Rd da793862062cb3b03ba607a7e8d7daf1 *man/cauchy.estpcf.Rd 09ccfe156ffc2d1e8849c2b69a5bc4b4 *man/cbind.hyperframe.Rd 6f8d58a5c1320e43f9b35574d48a7603 *man/cells.Rd 6b6a746b07e5ab88e608bda1eb12cd2e *man/centroid.owin.Rd 32047619f0bc527b2821efca2fda2eee *man/chicago.Rd fae7aa2e870fbc43d1c7400318c9293c *man/chop.tess.Rd 4d17c698a80b9b04f9180ff71fe607c1 *man/chorley.Rd a73edf3b44792ee4710abf70d2cc23f2 *man/circumradius.Rd 20b99a8274602e83f531fef751ed6023 *man/clarkevans.Rd 485d1cc3ffbc48e3507b4a0093170b8a *man/clarkevans.test.Rd c3c8e1f9dc79ac65cd35af3785ba038a *man/clickjoin.Rd 98970cf2175755f62a3da98f39e076e4 *man/clickpoly.Rd abe62af3b43def3616eb7f56a06cd1b6 *man/clickppp.Rd 6d06faddd7d388900969f628f56a68a0 *man/clip.infline.Rd d2c1db4d5ed8cad183c231635cc7345b *man/clmfires.Rd 7711fa1baf04929e5f5144d163d57eb2 *man/closepairs.Rd 9e1c3c087b8658434f9b185b52a5ed57 *man/closing.Rd 
e30a382eca3088ee287aa5877971a3a2 *man/clusterset.Rd 2e774a6caa18068f9051c32bcdbc8df4 *man/coef.mppm.Rd f9d64b1026df50a545bf233e52452ccc *man/coef.ppm.Rd 4029118c7fa7359ddef40f6b45e1b008 *man/coef.slrm.Rd d65e99f5aa1bccc3182d9e709cef7b1d *man/collapse.fv.Rd 4473d8635bf0b5ef26a1b0879cd0af5b *man/colourmap.Rd fcfa3a3727d5b064519edb742277745f *man/colourtools.Rd 6efb23cf57efbe241707a3989f8ea60c *man/commonGrid.Rd 361d2023d715cde08abdb3daecabc3ab *man/compareFit.Rd 04c42c209407752cc72166fbef95a24e *man/compatible.Rd 2212b0db049c63096725ae13b456e45e *man/compatible.fasp.Rd b0a87e0ca3415488c9c679b1deeb7590 *man/compatible.fv.Rd f662b50009e8254d71f50815cd26b356 *man/compatible.im.Rd 1e13b23fef30e0cd405918a83d0012a3 *man/complement.owin.Rd 8c20f91229450457c7662e70bab4e87c *man/concatxy.Rd e14a093e896368662a4a07acdc5113f2 *man/connected.Rd 80a1b4ab9bdda8df3bb5df27a4342eaf *man/connected.ppp.Rd 797a8e45fb0dfe982252a25bcf04a7e0 *man/contour.im.Rd 278e6a278484be141b2fcd7e4279c155 *man/contour.listof.Rd 025e2491724833929c0b6309a8bdfb76 *man/convexhull.Rd 072251dda4ebee6d2aa4621386667d05 *man/convexhull.xy.Rd a80265ba2c630e6749775fa4c9089232 *man/convolve.im.Rd 3e9f926359e52830b7b26ea4f4123dde *man/coords.Rd 937a7e861bf0e85c298d8971082aab4d *man/copper.Rd d4e78e66dacb8645717f2b2148dc59de *man/corners.Rd d250b753fe8707ec89bf7335a09178b5 *man/crossdist.Rd c0343c8f277293a5c5545f9b1f7a556d *man/crossdist.default.Rd 55437ae5cdf6434ade065350ae1b5ca8 *man/crossdist.lpp.Rd 3d9721c29b21b00de88ea305b6f3b0e6 *man/crossdist.pp3.Rd 82596a0164c72005317aa198b6a86944 *man/crossdist.ppp.Rd 2888a01c2b8fb9d0559d8fbc4a24ff52 *man/crossdist.ppx.Rd a1bd55b6dcd9de1215f4e93f8f31cb61 *man/crossdist.psp.Rd d7dd9bef6b0aefc6a697b75c0af9f2a7 *man/crossing.psp.Rd 29dd9202bba76524170fe344d7bb3075 *man/cut.im.Rd b396e801724fe83ebb5a2c6e39bdf4f4 *man/cut.ppp.Rd bba8aad7241dfde2ec7c04605d7847e4 *man/data.ppm.Rd faf0af1341385e42785eb57d0d12a236 *man/dclf.progress.Rd 6c9d9a7bf2936b0053213dc4a2a58828 
*man/dclf.test.Rd 64c8c52a024ab5cce40ac2df206f00cb *man/default.dummy.Rd 823184be7973226d028e534c4876d728 *man/default.expand.Rd 402be6aac5aacc9d030f74b300ad1e8e *man/default.rmhcontrol.Rd b6fdf60f4ffa1a85246284df6de2be66 *man/delaunay.Rd 1767cf60bc9585cbf45713d544c90845 *man/delaunay.distance.Rd 1968650aed9bce6b582a2e1b2e67ce28 *man/deltametric.Rd c5f3cda1b74bc343c917ecb6f9e069d3 *man/demohyper.Rd 3292b98a0ccd3ecc4e161ff952bdae9d *man/demopat.Rd 21a59dc975b247cf70cbc376c60bb8cd *man/density.ppp.Rd f6a823c365e1f690c424644d00b3aa1e *man/density.psp.Rd 447978ef4a142e502930f9574e56eb81 *man/density.splitppp.Rd d9d86f20ac1f8b62615256bac50dc454 *man/deriv.fv.Rd 92f6edbfcf54cfba6e0f788deb3d16fd *man/dfbetas.ppm.Rd f94a56920f673272c6b119421d818f42 *man/diagnose.ppm.Rd 03dd48632c29f692e3575a29a1bb34b9 *man/diameter.Rd f5cd5f0c46e89e2936ce676d9ae92b48 *man/diameter.box3.Rd bf9c3ac1b9e48d0b209e8aff0dbb4d83 *man/diameter.boxx.Rd 841be020ddca5c133752aef1ef1df870 *man/diameter.owin.Rd 6275345a3eaad3049853ad05582a52dd *man/dilated.areas.Rd 967929940d393fbf48541952f8f7ca93 *man/dilation.Rd 13eb541dffb9197e27f654cb9aef3a0a *man/dirichlet.Rd 3477c7499572b3f7dec4086fc0aa4f12 *man/dirichlet.weights.Rd 575623e7308e0fbb0c63074cd3f2637b *man/disc.Rd 7cbb83b50d7bde50e49f8c25694652ae *man/discpartarea.Rd ce0ebdc3c2c6d420bc598b97e683f693 *man/discretise.Rd 6db2b7007fbbe5b16a93892f745b6e46 *man/distcdf.Rd a603cdd565e8bb580157f0928a24f0a2 *man/distfun.Rd f0bc880af40c8e186826eae2be151f7a *man/distfun.lpp.Rd fe41704f8e9a6fb6ba6011f8fb1d25f1 *man/distmap.Rd a1d9646f6416691a35c186e8190c0ca1 *man/distmap.owin.Rd 50d1a9d3216509d660f168e9291bfccd *man/distmap.ppp.Rd 7a4864851e6023869e0c9e37c8d5fedb *man/distmap.psp.Rd 9adc88bece15dcc0743a3f6a1b7a0210 *man/dummify.Rd 69127241085b80340e8472ea9dd0a445 *man/dummy.ppm.Rd 5e844e1e249a42f049318f28cb06b938 *man/duplicated.ppp.Rd 26c95f961e80eaf0939852027515fab4 *man/edge.Ripley.Rd f41a768477c33cd21310a07f2209bbee *man/edge.Trans.Rd 
58f94820fba99ea3bcae0703db04231e *man/edges2triangles.Rd 3991409bd17643d947186184d101b83b *man/edges2vees.Rd a4445d6b9677268eb3bd831880a7df4d *man/eem.Rd a5b5725024643672830395446fdc270f *man/effectfun.Rd b2cb089c2dca98279b772c0c7d16fbf5 *man/endpoints.psp.Rd e54fdc1e11dd654d506a9f46375293a4 *man/envelope.Rd ac80dfe55705ce1505751ab056aa0892 *man/envelope.envelope.Rd 6f6ce61cfd13157f380536eb9bdece88 *man/envelope.lpp.Rd 798d8bf3d680cb8bd5f2650ffb629581 *man/envelope.pp3.Rd d628cf34064ddb26b32ac1daf1da2b65 *man/eroded.areas.Rd 5a7004eb0d9aa5202f3177244be02821 *man/erosion.Rd 201e70051c7cfdf009d0671b6063f92f *man/eval.fasp.Rd 636e8b06f52bd95770f8d456232ac199 *man/eval.fv.Rd a6c52c1ed75f905712ffcdc01af1c168 *man/eval.im.Rd 077d872145fae4cc758c00749466e94a *man/eval.linim.Rd 1a558cd79e93323e334b70240e202166 *man/ewcdf.Rd cb627056bf7b205d84804140f11c0851 *man/exactMPLEstrauss.Rd faab31f681243ac6fdc3f91d0e082c72 *man/expand.owin.Rd 4616337c135de2dbbc5961f8015fb539 *man/fasp.object.Rd bfd43210b18174749dfbee74c9bb1f6a *man/finpines.Rd de3cbd5c07127307bbf69deb8dd2628f *man/fitin.Rd 6718305cb6c062be4aac68cbd9f66f9c *man/fitted.mppm.Rd c219a083c5b7f1eaa1a84919a480cd25 *man/fitted.ppm.Rd 705b8e8fb0e2af71b95990c20edd6b43 *man/fitted.slrm.Rd b578bccf4c7cd90f2076fcd1656adab4 *man/flipxy.Rd 4bfa8df44629b2c7d1276b33fd96ae3e *man/flu.Rd 7338ec0eea5a2a91b7370e2a1f52b611 *man/formula.fv.Rd 94a48a6a8315ef30ed0f4f9ae2087737 *man/formula.ppm.Rd 44d4b578d2a03a45890f7ff3fb9348c5 *man/fryplot.Rd c613a2a45dbe6f8fa872f3733949b7c1 *man/funxy.Rd 72a8c8bbdfd8ab1f4f8ad553634fe5f2 *man/fv.Rd e7e38a15829209694f91e4f6166363a9 *man/fv.object.Rd ce3275e550210485737e97acd581e816 *man/fvnames.Rd 6b6a59ffc0a3fd0c38db2bdc4d056180 *man/ganglia.Rd df0d40e16137b6dc3dcf5ecc264ae876 *man/gordon.Rd da9469564bd13c79ffea6d6e10ad45b7 *man/gorillas.Rd e3b11d2824eb8241be3bdb32d93bac8a *man/gridcentres.Rd 16268421b3bd13c554bc783f53de104e *man/gridweights.Rd 9c5c09ffb67e0bc66a921a269d9176c1 *man/hamster.Rd 
5bb8123cec1f799c019ed95ac34fe233 *man/harmonic.Rd 89fb7c5eb5879f2f6abb9abc2168ea64 *man/harmonise.im.Rd 73ce10910f8f7f2ba8a7366a4ef000ff *man/heather.Rd 2119234f779f55d327d5c55cc16ecfc1 *man/hist.im.Rd 71f25ace9577a86d66c4b4126ad8d0ee *man/humberside.Rd 5bc057c8b97c409fee5a4c50fa8bdff0 *man/hybrid.family.Rd cf9fbfc477ae073ff07ff68bd1cdf856 *man/hyperframe.Rd ce512a5e916d4162a531a7e6dc6e6680 *man/hyytiala.Rd 0119abe4b0804902d0d8718e3d83a62e *man/identify.ppp.Rd bbdfbe4afe08ac57b31668576697a14b *man/identify.psp.Rd 8f0fd44c83d0d2fd89cc946e96718b2d *man/idw.Rd 3120b59d5223e672b78eff1802dac416 *man/im.Rd 66748c0595ad982148ffacf1224a2497 *man/im.object.Rd 64edd33f70470ff1f600ce6d2ab4862b *man/imcov.Rd ece64f6441a43b8198f9d00c6b6d9709 *man/incircle.Rd 999498150a45202f3179bed60b0c7159 *man/infline.Rd e6b6e0902d84b5a50b3a49866a06a951 *man/influence.ppm.Rd 811613ce3a9fc69bb8323070fb1dee62 *man/inforder.family.Rd fa66b0e6cb3a730bcc2d19bd67f0ac41 *man/inside.owin.Rd 9894452dfe9383f7ba757e229d9b138a *man/integral.im.Rd 61d0766455992d5c2ec8f59601968c33 *man/integral.msr.Rd aaf16cd797dbffe665baae3ed452bde0 *man/intensity.Rd 87822184ad0562a7303a2351f4fa282a *man/intensity.lpp.Rd b74e8e687698133fae42fc105cf636ff *man/intensity.ppm.Rd 7dc5b7a83516c9a1972d9b44189cf75e *man/intensity.ppp.Rd 3e071068d58d5a590813f828ebe7b4b4 *man/interp.colourmap.Rd 0fc7f69002d6c47631cbc50de03de1e5 *man/interp.im.Rd 6b5ba8c5e7126824269785b683e51778 *man/intersect.owin.Rd 3a63c4dab0e9dcc550421bf51ab86c9d *man/intersect.tess.Rd 5773c5dfb7870824f8223f9c4e684113 *man/iplot.Rd 40ce3e65ed93728bff6ad99f64977388 *man/ippm.Rd e3b72d5d6927f784b7a5605f11ee2e53 *man/is.convex.Rd 36b29bcafd32b84befa7d22248344b4b *man/is.empty.Rd d08109232ee89b323f8565b9e773286c *man/is.hybrid.Rd 345031d54fd537a32944751fb2d38974 *man/is.im.Rd cb3b010e2613b1338a26d9bd09bcdd40 *man/is.lpp.Rd a318e52c4884694f83c438d93d938357 *man/is.marked.Rd 29ad79e07fd2c2eac4af1512a463ab58 *man/is.marked.ppm.Rd 28f729051f5e5b81d4fd44659960fb31 
*man/is.marked.ppp.Rd 1f1976146dba8c4f3611c89fee82b9e9 *man/is.multitype.Rd b26eb08d732bf4c423742a59ce86c747 *man/is.multitype.ppm.Rd ff9350b8e3d790542fbdb59d6f057f82 *man/is.multitype.ppp.Rd b16d7ab95cfbc432c16642a1825f8c18 *man/is.owin.Rd 8544479b1cedb6ce090a40ac0b131821 *man/is.ppm.Rd 368feff6754c32a74444033d5e06d97d *man/is.ppp.Rd ce7989f6644034ec4fdb08abaa952071 *man/is.rectangle.Rd 4f4039c96be8480560ade0411aa91fb0 *man/is.stationary.Rd 5bb9c8a3577246811e9deec1e7e54ff8 *man/is.subset.owin.Rd 8a358f710ad0a0fb3d9e3bbf942c433e *man/istat.Rd 907bc70733d63ff4328d38b65721b316 *man/japanesepines.Rd 822439a70c5b93d7163ef8ef19e49939 *man/kaplan.meier.Rd a69e4228c8f92831f18143e874bbe396 *man/km.rs.Rd 1f8477489924d3b74d06152df6635efa *man/kppm.Rd 45aef2580298ef4248829b6e4f3e092c *man/kstest.Rd 7c5a1d7cf639891412579fc1b1d5ec6f *man/kstest.mppm.Rd d0f12ab0ca79efa35667f0f13270a012 *man/lansing.Rd 3dc41c499d214b5b368c2895d04b0cd2 *man/latest.news.Rd d15a9e62ca8acbb8b24a4dac4d6b4a98 *man/layered.Rd d57c79f14498c15795bf7611d6371ecb *man/layerplotargs.Rd 14764016d60acfbb28ea793aa7cc7a0c *man/layout.boxes.Rd 0ffdec2d36b54827b924f931d6f67afd *man/lengths.psp.Rd 20286b4976bbb03538f416e01b669bfc *man/letterR.Rd b1d7140051b741c07543e25f1d7374d8 *man/levelset.Rd 918e6b2184a372d40764c84e880da24d *man/leverage.ppm.Rd 3d58d81e7af333ed0ceb74cc3ea26f55 *man/lgcp.estK.Rd f55883a2981ae6b5f6260659687c46b5 *man/lgcp.estpcf.Rd 186f5a2d3cb226b165fc701c3dd7cfa7 *man/linearK.Rd 02928b3816777890a07731f6cdfd8966 *man/linearKcross.Rd 6ef8eba93f6f6167961deb9418a93fdd *man/linearKcross.inhom.Rd 45b7a9e68fe5340853595648907f0c12 *man/linearKdot.Rd d3bbb1565fa3a043ba9eff3f55f6fba1 *man/linearKdot.inhom.Rd eaf19f872c9e023d44a397abe1949488 *man/linearKinhom.Rd 4f06b430b43d585c2659df989d490036 *man/lineardisc.Rd 71bf38065f8053fac25e777930b9c4fd *man/linearmarkconnect.Rd 153e6911a48ccc2d69c970f8f0a9e882 *man/linearmarkequal.Rd e392ba8d5fdb995d7b9cc27150eb4bd7 *man/linearpcf.Rd 
bba45aa6e60ff25b8b36455f855700a6 *man/linearpcfcross.Rd 0e338ca9699339878ecc1716e32d70aa *man/linearpcfcross.inhom.Rd a8ca2cfa065abf76c017a7d620a9ae27 *man/linearpcfdot.Rd 0e8ccbd6f5fc3f6b71c85dae336670be *man/linearpcfdot.inhom.Rd 4f3eed1f0ba7cc767cdb017ee5485e0b *man/linearpcfinhom.Rd 5bc2d37f910564ce2447b333d7237126 *man/linfun.Rd bd925c5140fc21f2bb2b38a928415113 *man/linim.Rd 49633f0629c6faaed5e482add6bd9866 *man/linnet.Rd e4678403dfba2685b9e8efffb05beb3a *man/localK.Rd 52863f7109f1a6e08bbd50cda134ee14 *man/localKinhom.Rd 67234f1134e2b680353d8b3b230ff194 *man/localpcf.Rd 89cdb757ea76c6deddd063bfb53ac7f7 *man/logLik.mppm.Rd 81d12b3a6ff0d5b3d2660c2b113fe5f7 *man/logLik.ppm.Rd 634aea3db8cce65d5d4c52823a36f422 *man/logLik.slrm.Rd 754f7a58a927d651dff032bc2f4a8371 *man/lohboot.Rd 8de1379bdea5aa88239d1bcffa87fd07 *man/longleaf.Rd 34a82c3cbdcfed1f3479f5d6b3986235 *man/lpp.Rd 8edcec1e44b559b0c03b940ded20eb7e *man/lppm.Rd 4e3bae12531272b4e9299f56fecef570 *man/lurking.Rd 299db7eca49b0246958d7ad5e66568cf *man/lut.Rd 00976bd83f75416810cab00229ee56e9 *man/markconnect.Rd 80dedacafef1ba79c917e10ae9daccf4 *man/markcorr.Rd 6739e01b2e31225b42e94a57c5603c56 *man/markcorrint.Rd 37a063f4b868e554b646e951cfa4762a *man/marks.Rd 476d913e8add5fb13561ddcddeeb6da7 *man/marks.psp.Rd 6847096ec9e53bb306993d9fa85259bd *man/markstat.Rd 901ea648451e886c1d4cc7a1a0ce5692 *man/marktable.Rd 689ad02df8cba12da28bf017a6074072 *man/markvario.Rd c6b7406bc7d4c64c92ed0ff18466d388 *man/matchingdist.Rd 9612071fd0722900a27ecfeed3ed99b7 *man/matclust.estK.Rd f72d9d489136588ddf9638915cc39162 *man/matclust.estpcf.Rd 6c53c0cddf99df605001905bab236780 *man/mean.im.Rd 34f88f232c3f4c0c37433adaecdb659b *man/methods.box3.Rd 3b4ecc591021addb6f33a3ba303081bb *man/methods.boxx.Rd c264713ce5734cb1ee1446322d17c2b8 *man/methods.fii.Rd 993fd1770e3e1d5816aed5d09b36351d *man/methods.funxy.Rd b870bce6beffd17337326fe46e9da80e *man/methods.kppm.Rd 7f50a925bbaf2f5a1302069fdedd46e3 *man/methods.layered.Rd 
b5480e7f8fb688ed345a1cd650bdae87 *man/methods.linfun.Rd a3ddfe8fa0675e6fb7e1fc829827554d *man/methods.linnet.Rd e2fc3b05c7b0b69d41a2a24a2c79f9ac *man/methods.lpp.Rd 22bcfe8ea41bac38826d1174882f8e96 *man/methods.lppm.Rd b82b57bfff4efd9a907d0921763752ef *man/methods.objsurf.Rd 8411f22964b8c8cdcdd278937a45dda6 *man/methods.pp3.Rd 10cc77cf7cb143e5f92e79434384ab7c *man/methods.ppx.Rd 3c075fc5a94a317e9ece99a947717456 *man/methods.rho2hat.Rd 181e6bd2edd45d7a9c2b2683c5ba99d1 *man/methods.rhohat.Rd ef056dca7bc9a6fef238bea6a23dbf80 *man/methods.slrm.Rd c22adb11602774a64184420af2011a50 *man/methods.units.Rd 97a0cdd9369a1b5865f93a916b44c8ff *man/midpoints.psp.Rd 89422e9b1787edfe34a1b89f8cfeb351 *man/mincontrast.Rd d57ce4f0c5a274b2aada6acf798e4207 *man/miplot.Rd 70c0d8dfd24fb44a77030f9bfd1ffc37 *man/model.depends.Rd a37b56bb2822c501a95d591feadc1068 *man/model.frame.ppm.Rd 531e1c714c690f20aca442109a7b0796 *man/model.images.Rd 0f09f6c93fba1752e6ff37bdff54b5cf *man/model.matrix.ppm.Rd d1f86eabec343877e9b646ade0cd5574 *man/model.matrix.slrm.Rd d969b501aa534ab281062031e834df70 *man/mppm.Rd 28114c52c1c3d4991202c4dc299a8ae8 *man/msr.Rd b9395f215800b9a5a53a8d4d80fe0eb1 *man/mucosa.Rd aa69802afb4436d9d76380a4aee675fc *man/multiplicity.ppp.Rd 2694cc016c4cd0d310120ed9460aeace *man/murchison.Rd 3bdc8a2c467e14358663d5f70da7e8cf *man/nbfires.Rd 9a42ed6adf3350b40a588221a6dbe383 *man/nearest.raster.point.Rd 8a3b909f5fed158b3c0b956fdbc3b33b *man/nearestsegment.Rd 9a95d1ed68c4b240bca89d4595ccedc5 *man/nnclean.Rd da22339465274d1bae9403d3c6798314 *man/nncorr.Rd d088ccef4db80c859ce53fb7962e9cf8 *man/nncross.Rd 21d87f28c6d8685a44fa614275ae76fa *man/nncross.lpp.Rd 6d5ce485261a49eafe565bc4862321d8 *man/nncross.pp3.Rd 26471d2d21bded05ea0d2f82d341a3e2 *man/nndensity.Rd 8403840f781834f9c2c53edeab41ba01 *man/nndist.Rd c250da87167db5adfc16ad031db0e069 *man/nndist.lpp.Rd 591514bed9f5fe05563e5e9b2af2cf68 *man/nndist.pp3.Rd 830449f39c2ea4322dd4fcacb98c4456 *man/nndist.ppx.Rd 390e87cbb30e8fdaca71215d250a40df 
*man/nndist.psp.Rd 73d971ad75df0e724476c127fc796c79 *man/nnfun.Rd 63227aac8ec23fa7b86a9ad5ab831d67 *man/nnfun.lpp.Rd 21a0da4cb1a17f7bafa119d9a3212d93 *man/nnmap.Rd 9f1c5a13e8e03a4f7adfd33284e7df67 *man/nnmark.Rd 7bb971ccd675180526689edd83e1c116 *man/nnwhich.Rd a4fa19cac14f7329d24b8d3c3d2e0b96 *man/nnwhich.lpp.Rd b759410c51a2ed4e6b64072741411bd6 *man/nnwhich.pp3.Rd a320954ea0d7bca2d4178ed9c4ca0642 *man/nnwhich.ppx.Rd e7152e5e800c01ec03e3dbbb7eaaeab4 *man/npfun.Rd 51bbc0a0a8d66784330010ea081b1885 *man/npoints.Rd 9b592ce4f61bb14be7051a3dcf7f5b7c *man/nsegments.Rd 483a100c0e515f744d21c08c79048975 *man/nztrees.Rd 6cfd7b94c43201c9acb995cbd95e0fa1 *man/objsurf.Rd 1322375317796f6a8ca887a8aa9638a6 *man/opening.Rd 329eb1d579e128aef59f25b1424eb844 *man/ord.family.Rd e4852c5e60e53fe5b2dc16b37c381024 *man/osteo.Rd 1cc090881e8d1f983a3c204b48698759 *man/overlap.owin.Rd 28e3f39de3e3275e4e22a6d484e0eb8a *man/owin.Rd e5daddd58c8a038a65dfba20d31d0f56 *man/owin.object.Rd 21191def96959547e04c8e0950c80d58 *man/pairdist.Rd 8c6e30da5c497fb48adc32a7f4b2aabe *man/pairdist.default.Rd 055457586d879294a1d6e704c65fad5c *man/pairdist.lpp.Rd 7a00cc87e7c8562a0586b29c852251b2 *man/pairdist.pp3.Rd b98b7474ceea759c1278e8b24847da78 *man/pairdist.ppp.Rd 6e4d3d26fbd4b2c2e71b36dce3057b85 *man/pairdist.ppx.Rd d275e10463f9966ff88e6840f0d228ec *man/pairdist.psp.Rd 6c2d600ad1cd424ea58b3c0b325e708d *man/pairs.im.Rd 1b66ba119d627cab19176a9ffe43eb6f *man/pairsat.family.Rd 77c4ae8010273b39a194aa0dd19adc57 *man/pairwise.family.Rd db23d28dba08eed88f4993d595228ef2 *man/paracou.Rd 44035711cd92f96ae2a21d4c1e277b97 *man/parres.Rd 10fa3620b6a67a341d5a6859d3958706 *man/pcf.Rd b409751602bc644d5bfe38a7447a71ce *man/pcf.fasp.Rd 69a06b452dced4a84088c3bf9fb67907 *man/pcf.fv.Rd 5baf3ba88ef7e4f4cf1364ce82dd1b31 *man/pcf.ppp.Rd 4f25be91c070ae96eece78d1ca9cf95c *man/pcf3est.Rd 648e52db8e44d708dd18ef6d6399b00d *man/pcfcross.Rd 6aab279fbf0aeaadfc2f91d3d5e915ef *man/pcfcross.inhom.Rd b6a05345966fa7538564cd943e7e540b *man/pcfdot.Rd 
5701145647157161ee2c276b423539ca *man/pcfdot.inhom.Rd adb44b55ae207a6d8823f489c1de5c4e *man/pcfinhom.Rd f522a3ed222cb5e25b0656c5aaad6639 *man/perimeter.Rd bce6e59189140a25cab5a3f087f69e82 *man/periodify.Rd 6cafb2a191ce9e6e9f6fc950549a31e8 *man/persp.im.Rd 5539d83be62fe6abba698959358c5a23 *man/pixellate.Rd 921197ed0e2b173fac746393942af1ff *man/pixellate.owin.Rd 8f2f3c68751b5d52f7808c3f9a565a70 *man/pixellate.ppp.Rd ad4c77ab157629a1bf9db1952964b5c8 *man/pixellate.psp.Rd 6ab16206c49c768fd49785cb2d7287f0 *man/pixelquad.Rd c8d2e2763ea38613f2b32f32a0dfa50d *man/plot.bermantest.Rd 1300edc585467520cabe9f0f17523e83 *man/plot.colourmap.Rd f6289ee70b103d9b04298fa2733647a0 *man/plot.envelope.Rd a295993a522634a892ec1f80553a36bc *man/plot.fasp.Rd 1c63784d4dcb7665aee5432a1d87e30b *man/plot.fv.Rd e3d7ee468fb66352998b98a9e7941e00 *man/plot.hyperframe.Rd f67a56f97e83d579b97c78d8f8d7bf1d *man/plot.im.Rd 80e7e5f1afa7999bb6ff7fea57f9ccb3 *man/plot.influence.ppm.Rd bfc0fb8c0af92ab942a57813f8b7146c *man/plot.kppm.Rd 75d5b22e9eb26b4794ab65d76717ce70 *man/plot.kstest.Rd a0204101b34b9c278d92dd8b4518449c *man/plot.layered.Rd 404e12da1e59ff718fb97357577396f5 *man/plot.leverage.ppm.Rd a105c14ee37859107a4db5a74a3b99d2 *man/plot.linim.Rd c4e22dcd7c313370dfe2eab366e0a1e9 *man/plot.linnet.Rd c09e8bf216ce85be090b752637ec20f3 *man/plot.listof.Rd 8c56f4a58b55ab4df8110abcc3ebfe56 *man/plot.lppm.Rd cbcbb9129bb396c086edb9c509250ac4 *man/plot.mppm.Rd 76ca349e296396133091dba802fa00e6 *man/plot.msr.Rd e6f4bfc6b38450c619137fd8d825fdc6 *man/plot.owin.Rd 6044136a5f4703fcbbfdcd4a99c505b9 *man/plot.plotppm.Rd 829e7b0aeebc1308f267d2d32f421a45 *man/plot.pp3.Rd 065dc373320c591ab50725d9151354ee *man/plot.ppm.Rd 1de928ff6e5a24cdc2dd5b55aa3376ba *man/plot.ppp.Rd cfb7bf9dc2eb7cdfa8df14e3617b80d1 *man/plot.psp.Rd 4b4cca7b13a4eba65836191460defcf2 *man/plot.quad.Rd fa598dbf968683984c4f4446f49c8ea5 *man/plot.slrm.Rd 0111aac79d739cbe317d8278bfee97e9 *man/plot.splitppp.Rd 95a4290cd9a7f8fb8dc9326f823995c5 *man/plot.tess.Rd 
86d5ab99bb247da632fe41fba4fd9166 *man/pointsOnLines.Rd d9aae8e7675eef5b86acc13255aa41de *man/ponderosa.Rd cf545b6d0a800f72f09f5cdc5dc610cd *man/pool.Rd 6de59cd1f1b9b03463ec1a490d46bd22 *man/pool.envelope.Rd e8027bf191f57fd15deaed8ec8923672 *man/pool.fasp.Rd d9ad207ae6d33298edc86ac5cfe9f58e *man/pool.quadrattest.Rd cf2dc7fa50ec66fc0e5ffe0ad4dbba99 *man/pool.rat.Rd bc1a08ffd503824ac8c8f1fa93bae560 *man/pp3.Rd 8732a7b2b941263a5075322f2e2e02ff *man/ppm.Rd 9431d5433962e98cc200695fa2dca158 *man/ppm.object.Rd ed85cb8d134dda7e5c57c2c68760898b *man/ppp.Rd f18a41a3ea252350204f157f017bd3ff *man/ppp.object.Rd 5aaab3accf250a6c2b87b773acd01f95 *man/pppdist.Rd df932fbc1b9c8ca4af27ca0e1a537727 *man/pppmatching.Rd 4bdb6dcbce07348faefcb5b76986e72b *man/pppmatching.object.Rd 96fe735e0cf1d0f554a7d9d8ce287eec *man/ppx.Rd db7143611447e808a394d7540ac7f919 *man/predict.kppm.Rd d0bef5a65b45c3669a60a4b17db14d2e *man/predict.lppm.Rd 9c6297b82835a557c31ef064f65a79ae *man/predict.mppm.Rd fb1c1f20f14a73373b4391b266ca516f *man/predict.ppm.Rd ba05a129149e1a4de44505dd5cd4a870 *man/predict.slrm.Rd ff29e1aed6f97821f2b40ae3b5bfc48f *man/print.im.Rd 94a21f0353c45690293c274ed00f65f4 *man/print.owin.Rd 4e7388dfed2793d22cdf2988975c3565 *man/print.ppm.Rd 6e12d9edb8c3e6040851e329143a69c2 *man/print.ppp.Rd 1dd4b5db57b61105ad90bab466d103e9 *man/print.psp.Rd c41ebf38b6fbd7fe8b292e4067c2d758 *man/print.quad.Rd 6b9b5ed9d86ad08e57acac70883e2b3c *man/profilepl.Rd 13056924cceb78dd1648c24fa0c9c9ab *man/progressreport.Rd a1a922679a72e047f8d6829accb4e188 *man/project.ppm.Rd a40eb0db17cbda92f59a8b8079cb6cb5 *man/project2segment.Rd 05e0980d7fdc137879f2cd759e98df78 *man/psp.Rd 9e3040201f7f7993e443c0fa788f5fe4 *man/psp.object.Rd 9384d8f44d98f07ca668b36670687aae *man/psst.Rd 79cdfaa1cc82477772cbdf7b61e18b26 *man/psstA.Rd bcdb56e733b3e855f131904f3a4d0b10 *man/psstG.Rd 310f87263aaf42e207dc9677d0403178 *man/pyramidal.Rd 959621300d998959cda0c53a95782648 *man/qqplot.ppm.Rd a2e15eecb16aca446a7e367abd202699 *man/quad.object.Rd 
12c166a4fd97cb8e939d3a97e8d86507 *man/quad.ppm.Rd 321717325d2a45a4f31e122300b1ab61 *man/quadrat.test.Rd 43cb34fbd8792a8914fc6c6854862306 *man/quadrat.test.mppm.Rd ae4bc16508b9e4d86de46102b232265f *man/quadrat.test.splitppp.Rd 4fd51dc1e99baaf878e49c77b59d59f4 *man/quadratcount.Rd ffbf29e72eb9fdf6e4ac5d39fa6eb783 *man/quadratresample.Rd ad9dda29e8adcbda5c8575d0447d8ce0 *man/quadrats.Rd 97bede66dd62b658ecbf26df01344319 *man/quadscheme.Rd b860d9bd4042791eff415efa1aa9ccda *man/quadscheme.logi.Rd 2935e87e7207f90bf7a82b4f6cf83b6c *man/quantile.im.Rd f790dfe30590faf90ff004d707dd075d *man/rCauchy.Rd 1d1d9a0f0f69cb88c5b493bcf5a1cc0e *man/rDGS.Rd 9b1c11401388afdf3a7d526d843bb635 *man/rDiggleGratton.Rd faa1d0c41289de524ab0fe30838a0ee7 *man/rGaussPoisson.Rd 96c602d921be8a025b5364e7f882e8a2 *man/rHardcore.Rd 64078e7d14fc8910bf1c42d6bd764c67 *man/rLGCP.Rd 2c8b08b508d1dc5c2fd7dccb190f9340 *man/rMatClust.Rd 27ab45f2697a2be5838d1ef2558b9a3c *man/rMaternI.Rd 531a794fa7f1d76ec2318cd0dd98865e *man/rMaternII.Rd 4ffb083fe224b37b5c2c728449844a32 *man/rMosaicField.Rd 7c7a442d4cc09e85216d625799981104 *man/rMosaicSet.Rd f7b4ccc0a6c5d60436d357a23aa48fbb *man/rNeymanScott.Rd c45a2cc320c1733c9081d8b7bfeee9c8 *man/rPoissonCluster.Rd bc36caead6f116036956a19d4fd94e31 *man/rSSI.Rd 81a802b4977cb800d53292818eafc50d *man/rStrauss.Rd 8db3bc9c941b30bd5807282484727089 *man/rStraussHard.Rd 563a4acad73ca1fb003993a0ff81bc05 *man/rThomas.Rd 04c369a3928626b0fbe3a3296fc8883b *man/rVarGamma.Rd f25ccc927cb64e6b33301ecf2e7499cc *man/raster.x.Rd 5443352a162ef5c263e5e149f481dc63 *man/rat.Rd 58ae4eca944353396b20a4bea04ca5c1 *man/rcell.Rd 727948686e4f99eae8c0f2d1b4db2450 *man/rcellnumber.Rd d98335a9ac93daf57af3d4959414d005 *man/reach.Rd 0e132e7f4916ed8ddd9bd3368e82f24b *man/reduced.sample.Rd b0678f6fa5caf46ce4adada6d09a051a *man/redwood.Rd 59dc82f5bfabbd81da48f268b70ccd4f *man/redwoodfull.Rd bea24e3967ce00b2fa0c3bc9439e7663 *man/reflect.Rd b3720f49b6d0e0a2d66fd5c04645bd2b *man/relrisk.Rd 
b95ef8dca0b96dc45fbab57c985723dd *man/rescale.Rd 5a79640a2a770f4d1923a1cbbaf4de38 *man/rescale.im.Rd bda88a8c3836b9bae3900047003fe391 *man/rescale.owin.Rd a4d138b003b6e5f8b80886fa822f8ae4 *man/rescale.ppp.Rd abe8740132005f66b3a0ddb3886b1a30 *man/rescale.psp.Rd 81b8122269cfcb11a568d5b0c47dff45 *man/rescue.rectangle.Rd c5b2f93fae7123eec85d6d597a3e31e8 *man/residuals.mppm.Rd c25428088bd92ee710bd25e2f41e16dd *man/residuals.ppm.Rd 52db1e0bcddf44fadaf1de86184f5217 *man/residualspaper.Rd 4d428eecce285489f1d80ab1f8315e16 *man/rgbim.Rd 3a6d110bd595f5d04dd3903b5d30873c *man/rho2hat.Rd 632284517c4729fe4a1574beddafaace *man/rhohat.Rd 7c6edea025b02766a396a5b1bbeaabf6 *man/ripras.Rd 4ff37ee6c5c7898fc8db8a5729f3c5a9 *man/rjitter.Rd 3e5b4ea3e77d722b369c58fa6d4a9c5a *man/rknn.Rd cc7a281fc493987f6bcc4132236d873d *man/rlabel.Rd b89b3f7f8bbccc3e2348038fde1ed238 *man/rlinegrid.Rd 0c3b71ebc4acfe6d747d4e8ed5b209bf *man/rmh.Rd 7be06651ec608f0aa13e406ef116afa6 *man/rmh.default.Rd 6e22c520802c97dec3aaba0932e9ae89 *man/rmh.ppm.Rd 1541e2cb357ca980f84121acb2e7da05 *man/rmhcontrol.Rd 08adcec6da35c83bc770fccbe69e3619 *man/rmhexpand.Rd a3a36f26ce9a7bcb9f8ce4fa691dbb76 *man/rmhmodel.Rd 047bd69db4c786ef72a2926400fbdf95 *man/rmhmodel.default.Rd 0e1c6f8683f78ceed24faccff86bbd5d *man/rmhmodel.list.Rd 379b9a3a97604091fc1177f5b92271ef *man/rmhmodel.ppm.Rd fb47c79333102475669ed29989766695 *man/rmhstart.Rd fba274e73644df1edfed49f7b7f44091 *man/rmpoint.Rd 85ee3aeabc756e5772049395b7caf8fa *man/rmpoispp.Rd 3f943e17aa123a5823e32a1cdb0ecd9a *man/rotate.Rd 12449b6d7bcb25770bbc6d4665da3bc7 *man/rotate.im.Rd f40af9976569959893421805e4b7df7a *man/rotate.owin.Rd 9e082088b921dd8d469f7315ec0096f6 *man/rotate.ppp.Rd 04e195aee48689d30b46d2f68b9e17fe *man/rotate.psp.Rd 26f3e6da51029b3805eda91994a004a9 *man/round.ppp.Rd a66190713704b70ff92c4366c5feb99e *man/rounding.Rd 40dc80f5e997394dff980cf69f74e1ec *man/rpoint.Rd b82033175930c543c842a9724e0f100e *man/rpoisline.Rd e49171aa8d53b3ce3446510b1f06825d *man/rpoislinetess.Rd 
006ebf933f5d5348c45d48548edaa784 *man/rpoislpp.Rd f2062deb79b0f6b5fe5d11cc03445849 *man/rpoispp.Rd b4b6580617177153f0af269a2b925a7b *man/rpoispp3.Rd 1b2d57b2802fe77fc5ab19237099c680 *man/rpoisppOnLines.Rd 79e41a34bc8fac7d29751db3111f3865 *man/rpoisppx.Rd b7352aefa3103a143be9954ea4d1557d *man/rshift.Rd c33be0620e40f1f6f815410de5213e3c *man/rshift.ppp.Rd cce57ca2ec86518073421e1183240291 *man/rshift.psp.Rd ee45aac80209e79b2bf0b048847d90df *man/rshift.splitppp.Rd c11706f7fb08829e5d493d07f7f50602 *man/rstrat.Rd 414def92e1e1d3cddc96c6a00a3a0567 *man/rsyst.Rd 3740dc7b53f51a4fbc64500e3533ef05 *man/rthin.Rd 8896452139979fc579afa83050450e86 *man/run.simplepanel.Rd cc179c9d6750e46fa3feb3456899957c *man/runifdisc.Rd 36f588515ff0ec4f50cc03a01c52e146 *man/runiflpp.Rd 74b3b67eb17f4b86c58b061338474c52 *man/runifpoint.Rd 50b3cbd9e579a2ac8428fc0b70e22ab7 *man/runifpoint3.Rd 2bcf8a69fa57a21aa5009df5d9db250c *man/runifpointOnLines.Rd 8cae0abdfb690b42f273b17c88aae0e6 *man/runifpointx.Rd 24a8ae02ed284509bdac2f27cb89590e *man/scalardilate.Rd 0ed1e619c025f52af2100dd2c2d47ce0 *man/scaletointerval.Rd 13799c8433a545f6d0f546257b62126e *man/scan.test.Rd e16abbb28b292b693ab2cd2470f5b67e *man/scanpp.Rd e078076578c936a107edd35edf2957ed *man/selfcrossing.psp.Rd 9c19b0b015a8f6ce30b8f06722556b34 *man/sessionLibs.Rd 9e69bab3dd6b77e13d7836050800c71d *man/setcov.Rd 23a4d59639eea402a65a37b3335ddb2f *man/shapley.Rd 5eb0a6b4910d49148aa7c6f78f5ca88b *man/sharpen.Rd 5b3569cda53063fec2205745cbbb07d3 *man/shift.Rd 3dda5b95bcca0940ee9ce17e35e62c7e *man/shift.im.Rd 198ec4034a9a72461d99ad1384581417 *man/shift.owin.Rd 974d20ff284692e916150b867c8403b4 *man/shift.ppp.Rd 8db2f4382fb9f9f86bd455226d89b05d *man/shift.psp.Rd 257b4710d506afe59000793d29553d00 *man/sidelengths.owin.Rd 72802bca93378faed3aef5424d5a74f5 *man/simba.Rd ce53548105ab0df79288a05636d9efb6 *man/simdat.Rd 46a58f8ff34ca84bc3fc2c0159a6e667 *man/simplenet.Rd 711a1e3e9ab84c867fb4092157945db3 *man/simplepanel.Rd 798f56814316bd32294f90d597780a50 
*man/simplify.owin.Rd 882a304374406cf50c5a5e8ed2c095fe *man/simulate.kppm.Rd 967ce7677fcb0d21dfe5177c6f69aee2 *man/simulate.ppm.Rd 0473df79b00df9f2f90b3aec5a48bc1b *man/simulate.slrm.Rd e707132f1a067ddfb46dcf0ae0bd0b92 *man/slrm.Rd 9a1d2a9f9fd7b6d8401e06201207f157 *man/smooth.fv.Rd b223e12ecad5e6d0c0d3b309b93ef6ab *man/smooth.msr.Rd 4c21d4b774d60d073e0baa7f40550617 *man/smooth.ppp.Rd 97fb78db3e1a4b291bc3d7a17e1fef60 *man/solutionset.Rd 944a96e4cf6c8e54c0a35d5d9f99fd79 *man/spatstat-deprecated.Rd 9595d010dfa0dce64a1e530e994e4026 *man/spatstat-internal.Rd 339c402df830fc4032d87b5bc558e706 *man/spatstat-package.Rd 5a6268331c8a9ae6198939e097044e87 *man/spatstat.options.Rd ab05aa450771daabc4cbabf595434f19 *man/split.im.Rd a1de2c46e552ebb393c486e913afd012 *man/split.ppp.Rd 1a4c960e8bd097966d0ef4b80d1d030c *man/split.ppx.Rd ca32b65bc9c55418194b7a6478a28848 *man/spokes.Rd 432dfc89baa2a8b59a5470286a90399d *man/spruces.Rd 980263ec6495b2d5256a045d322f7ec4 *man/square.Rd 70f8c89394c756284943a3dcd9520e76 *man/stieltjes.Rd 2b06ced6bcc195f18e42e74b359c68e3 *man/stratrand.Rd a0895d8c396454962b69670f5d9552a3 *man/subfits.Rd df3e3f87bf012c51ede41800579ad45b *man/suffstat.Rd 092f6e688bc8e3a6dcd26ad26bb8f0e3 *man/summary.im.Rd 3aa1c0b4183d263bacdae02ba860e3fc *man/summary.listof.Rd 317553f79b6a5f020a1c0be171a7983b *man/summary.owin.Rd f258cf9b7f42eebf91364a29069910d7 *man/summary.ppm.Rd 991de4088d9c79967351ddc0da55bab9 *man/summary.ppp.Rd b78a1f5093a25b09f96feceddd0fd805 *man/summary.psp.Rd 9e4971aa1fd99a0b86cc1b812158423c *man/summary.quad.Rd bbdb582f23a86bba349e047935675ca6 *man/summary.splitppp.Rd 811d1fd931d054d13884a084323ed981 *man/sumouter.Rd 4b1f69f48575b349c873a3c681a610f3 *man/superimpose.Rd d546191d31103226e9f5cc5c973559b9 *man/swedishpines.Rd c28378b3188ec20db99bb605540c997d *man/tess.Rd 957ff14c0ebf5430b6fd34357ca584cb *man/thomas.estK.Rd 876c7ebc30fe192e0f5611e4ac93044d *man/thomas.estpcf.Rd 12488d0115f99ba83d6f43a79c86767c *man/tile.areas.Rd 
9e7213a77c61147b8918f96cfd1c1226 *man/tilenames.Rd b2a9da1a68b3d51dd69526c1e12705b8 *man/tiles.Rd b89e85b379ccaed43943785a232f4db7 *man/timed.Rd c396fe1d39a9a462cad291b3b9684c74 *man/transect.im.Rd c0f77a4f083774946dff032551fc29d5 *man/trim.rectangle.Rd 577ba960595b38f930c725b7118424b3 *man/triplet.family.Rd e39438edb79cef74bf4d283f5eeb5733 *man/tweak.colourmap.Rd 846ff6d84fa8bcd260140b216d84e77e *man/union.quad.Rd d634d62f4ea857cf218179ec585343cf *man/unique.ppp.Rd c7a68b055e8500f492a8d5416aff7e3e *man/unitname.Rd ef438818f283f020db66bbae9d4f54d2 *man/unmark.Rd a9c69f881f2f0e7416714082bdd1d81a *man/unnormdensity.Rd 727cbafafeec644a8b4a99159c895893 *man/update.kppm.Rd dd026fbcddedf3e547402bcf7829462a *man/update.ppm.Rd 6e5a67de363b4b444bdba24c9748dfd6 *man/update.rmhcontrol.Rd 8ed447c9fcba58750ad3dda1e1fdbac7 *man/urkiola.Rd 4debd1aac73ba1dda385b7bea9a36591 *man/valid.ppm.Rd ccde4d57966bd746a811ca4f23637e47 *man/varblock.Rd e037f609dbf0a5452f5c530520e6b3fe *man/vargamma.estK.Rd 5902d9e1361d8ee1a1b5a65769d00ebf *man/vargamma.estpcf.Rd 61b93596e936cb266e44063166fd6499 *man/vcov.kppm.Rd 0dd5c679ff14f666c63253dc71fbef5c *man/vcov.mppm.Rd 29dec1feafb1d30bb328c6d83d141ca5 *man/vcov.ppm.Rd c114cce973a920d2cecda08d9ac2ca43 *man/vcov.slrm.Rd dfc050b6ee39286be311bbe1d8647b29 *man/vertices.Rd 2d5d54cabe686900176994de2ce293cb *man/volume.Rd 733bdb9dbd16fa09e03f6475c8eae4fb *man/waka.Rd e24cb30fd8a41eeac41ed717e25d0479 *man/waterstriders.Rd d863ce00f38f5a051d96e0eb3fbf7856 *man/which.max.im.Rd 618243606ea87eaf652ec20ca43b9fa4 *man/whist.Rd 84ca6d6ce9ff969b70b00215601bc878 *man/will.expand.Rd e81edb3937921d29fa837dc8ba619d14 *man/with.fv.Rd 11b63a8ff4f2343c5cbce3aa1b69334e *man/with.hyperframe.Rd 45440990efd00a25ec01e55808cd0de5 *man/zapsmall.im.Rd 46790052e7c7315c903e0edc916381e8 *src/Ediggatsti.c c806a2dbea90783c99471a068a389651 *src/Ediggra.c e51eecfab668694de1224a6a5b237dec *src/Efiksel.c abeee9246c0ef81fbbf9f742ee02c605 *src/Egeyer.c 384679c88629aa638caa786775614fdd 
*src/Estrauss.c 183f2fb6304e391e54ab0ed6c6928237 *src/Kborder.c f5ddb0253e3280cc428e534f1c3b76a6 *src/Kborder.h 83589fde9faccd8b888bce02b02fddfc *src/Knone.c c394c76e3bf450475cc8cc5789b8ebf5 *src/Knone.h a44a4f008cf85ff075b4d46c69d44625 *src/Perfect.cc c0ac68682e8c7e26ce52b8c76fd3d4ab *src/PerfectDGS.h 3a040c136a2b157e7854524d2c5c9c11 *src/PerfectDiggleGratton.h 5bc6a1478093ba8401ef8ff47d939298 *src/PerfectHardcore.h 96f4e131926c2e4c89570cab8052419d *src/PerfectStrauss.h f7f12d2a08a64ee1d8b249d0fbe57ad5 *src/PerfectStraussHard.h 051ca2afe6d6e5904ebba115bdcbf8e4 *src/areadiff.c 6ba6b3e1b37ebe8ac0680c5e071188f6 *src/areaint.c 9255d4a6902de2807401c997cd223174 *src/areapair.c 89cad006e13a81a4b793d89b2b3bb7cf *src/badgey.c 27c73a0901b9d346ba84974f7481ff3c *src/call3d.c 3c6ee73e77c45154372fe8dbb893902f *src/chunkloop.h 42d502ea49d0f0fe37eac7a5106a55cb *src/closefuns.h 083a1dc1a2c42e370368db1a1b6e951f *src/closepair.c 72e219e7bf86c2a0eba609edb6be5aeb *src/connect.c 2223acb81d0ea34296b1ffe4c422a892 *src/constants.h 7436c08a79ce611f48ebac47564a24b1 *src/corrections.c 6658ec665c27a18ffc63c0edfadc44b6 *src/denspt.c fd5c0ecd545b4d9e50d0d4e1e202beb3 *src/dgs.c d7d12f4878e93a3932efc744f93aaa35 *src/digber.c 57fcffca69c7d7deb5937acbb6955dad *src/diggra.c 9d438b3137f879ab21f85378bba410a6 *src/dinfty.c 7e1e7445f9d0faeac100e98764c73391 *src/discarea.c 69884b5b42f142e6d0e863b1eafc3ea8 *src/dist2.c 0cb97b8730ae8c91553644bb39b277ce *src/dist2.h 30e6782eea1d200aeb5af29554e86e99 *src/dist2dpath.c a1809fc2566fabacd87f85780fed4431 *src/dist2dpath.h 3bbbb08f1bd01cd2632e7f68329e34a8 *src/distan3.c c786229dc6751e5c25ca6515077baebb *src/distances.c b0e4920146ae880786dc61d4b6e7fcba *src/distmapbin.c 2214769b031225151f12603e0f37fd0c *src/distseg.c fac3554a33c2270a02f3d2b8346fc8b0 *src/dppll.f 7028f09de82307e1e46de69c104b7423 *src/dwpure.c e73182d2331b6f41c46a793bf8606e88 *src/exactPdist.c 60642750dc3fb722bc05b35a2237f5ca *src/exactdist.c 245f55a80fddf3b31f553f96217e97cc *src/f3.c 
ab7588df53688ba2bb383afaaa58d0d7 *src/fexitc.c 9ad3159a4625df6b45245dedba8b816d *src/fiksel.c 0ba81075007b6ab741f3eda829da8e99 *src/functable.h f2bb0d35bc86962384f01c6959409981 *src/g3.c 3280a084e3cdcb931801c42fcd111d2e *src/geom3.h ea0722189e9118dd1a744523b4e98a5b *src/getcif.c c4d587523e2d2e58615eb0d2084a2167 *src/geyer.c 3228576b7ca41179fe5a99fd0a4d4001 *src/hardcore.c 50f80d5b4ec82eb0b7e86bbf5a98c1d0 *src/idw.c f8e6302140d1ee2c3e2185a32b2bddac *src/inxyp.c f278df4559baf738b80af57722c275a9 *src/inxypOld.f 9c79e8972c24446466e9dcb30ad82217 *src/k3.c 04fa575485349dece8695de21e011188 *src/knn3Ddist.h 493c3e5d2f83c09bf713b173a9e6658a *src/knn3DdistX.h f129ad504bf8946cf6a755c7f04fe253 *src/knnXdist.h 07039b406a87967282c97eb2bfd60707 *src/knndist.h 7ecc9842efabd93bc452bbcf485dbbb8 *src/knndistance.c 55295547fd74e0cdd004ba3547e181e2 *src/knngrid.c b0a76da19cf0ffdd64936cf9ecaf9aef *src/knngrid.h 5ca88ac5e99e094f0b91183500a4f433 *src/lennard.c 183231d93d260b0595b2a7e83e126557 *src/linalg.c 0a65286b5bc50c1afd304fbc390db156 *src/lincrossdist.c f1fbd11669e26f5f62fcb3a12cef621b *src/lineardisc.c bb011958a4675e5acc2fcd32718c3dde *src/linnncross.c 04957c57de670c667619f89fe68fa5f1 *src/linnncross.h 82b4b8dad34974380b777eb5c38f4c07 *src/linnndist.c 79080eb00c6ea7482a81cf056d83c0a5 *src/linpairdist.c ea49927ad721530df8bc059694b46df9 *src/localpcf.c cc9a75e32ca0e80ff4e45b9721888faa *src/localpcf.h ec2fa91dc203cd470c053975d59fe1b4 *src/loccum.c af8d07773e8fff1a7b1eee6cbe26d45d *src/loccums.h 3a40dd98a06f88e3319159c23007e06f *src/loccumx.h ec8a6f16dafb28572ff44a370cb1ab02 *src/lookup.c 458aaf8343056834e27096e20cfb4a98 *src/massdisthack.c b38db8e7aa80323f27ff46f2372f6d00 *src/methas.c b2f1af8d83ebbba0ff6e0e1efc9f3ca6 *src/methas.h fc1f62f5d65c50e1b3baecee7fd94f41 *src/mhloop.h 4bfdc5f406a612060458999e9319bbbc *src/mhsnoop.c 3aec5d482a96acefc8bcf9ccef064f57 *src/mhsnoop.h dbcb22b795dda5203ac34fc61e930365 *src/mhsnoopdef.h 19ca30742351e422fac32fe27613a504 *src/mhv1.h 
9729a578722aa471c9330a01af5a5c09 *src/mhv2.h 3d9d655e94d771cbf55ffdfbb1124492 *src/mhv3.h 98ee53a13b67b8b91ddd0ee76ff34eb9 *src/mhv4.h dc4453e1a8317eab70e9710371c384d2 *src/multihard.c bbf9e1d275d180289b9155f04db9de6b *src/nn3Ddist.c ee5ed316bb3f302e25ab36eab25078fe *src/nn3Ddist.h 609029dcaa0bbcf85efbe6f330d1ddce *src/nn3DdistX.h e56ce2952ae715addc650117b828caa3 *src/nnMDdist.c 77417067aa7539794fef337365495dff *src/nndist.h af1ef3af29ac5dc30a190234e9b28e0b *src/nndistX.h 5deb863d2ce3f7706c0c6c35244017ff *src/nndistance.c 93dff60f269800a42b3dc330294e8c97 *src/nngrid.c 3b5ef6cbf1a70531ab6caae5edf8f991 *src/nngrid.h 1af628eee52b314db4b2a85ed45d261a *src/pairloop.h 9d1981b78382e7e368e2cf9cee262342 *src/pcf3.c 4c45ba722ab495d1da60fe026a35353f *src/poly2im.c a387ad5b47dd254334ec4bdf510e7b35 *src/raster.h cdda9b160cf3edae4c6cadbce7bad53f *src/scan.c e8280b1cc95377db913c0c2439dab2fd *src/seg2pix.c 3a5e04ac4ad9fc0efd10ef39dc55f041 *src/sftcr.c 616cfb8ef04f625dd3395fb5e6a38f14 *src/sphefrac.c 7877dac5d832e95257442e8b7fa8f02b *src/sphevol.c d6477ed6dfb8fc69b84b1a4737496f99 *src/straush.c e072e3a74914a74af746481c3a3b8b3b *src/straushm.c 28d7ac41aaef4367e9d57b020ed5fb3c *src/strauss.c 0cf60fa5405e4b7f31cde35a0d390351 *src/straussm.c 2143b5d2f472c4190dea1114a8fef54a *src/sumsymouter.h a5f7ec4d8e57384ca46f098c3b93c9a8 *src/trigraf.c 03e65a27588194512db2649bec6e5277 *src/triplets.c 90a762be786789184a74cd66501b8023 *src/utils.c 94d4b6605e4a2c5271dd567ea7648fd0 *src/veegraf.c 41552329d886ee7870caddcf0f580243 *src/xyseg.c 5c127a9d5ddeaee8cc8f34b32218a3a5 *src/yesno.h 702b63067c7a5072bcbc57adf0c738ce *tests/alltests.R 8f8fb2368ba7089f44c0fe6a22d1e356 *tests/badwindow.R 22bc211751c0bebafff85ef6bb9a0c5a *vignettes/getstart.Rnw e12cbd8f919c23ce3940157cb54fb6b6 *vignettes/replicated.Rnw 1fe6ddbc7fd98890b0e0c68662dc387d *vignettes/shapefiles.Rnw spatstat/build/0000755000176000001440000000000012252324025013241 5ustar 
ripleyusersspatstat/build/vignette.rds0000644000176000001440000000045212252324025015601 0ustar ripleyusersRK0:]Q,]0K|^`MCrP?5֎d2"֍Ls%lf hVܭFՈ-CԠ =;ֹF"!+vbXNP :' θա[!@Z.i ȢqY\ȍ P㩢<3ހرNz\SP.)pY<+*3?Idk_H@LVxtqXzw\BY^q׵qn,q_ӚN;6`BGL Jm-(.%spatstat/DESCRIPTION0000644000176000001440000001070212252364235013657 0ustar ripleyusersPackage: spatstat Version: 1.35-0 Nickname: Multiple Personality Date: 2013-12-12 Title: Spatial Point Pattern analysis, model-fitting, simulation, tests Author: Adrian Baddeley and Rolf Turner with substantial contributions of code by Kasper Klitgaard Berthelsen; Abdollah Jalilian; Marie-Colette van Lieshout; Ege Rubak; Dominic Schuhmacher; and Rasmus Waagepetersen. Additional contributions by Q.W. Ang; S. Azaele; C. Beale; M. Bell; R. Bernhardt; T. Bendtsen; A. Bevan; B. Biggerstaff; L. Bischof; R. Bivand; J.M. Blanco Moreno; F. Bonneu; J. Burgos; S. Byers; Y.M. Chang; J.B. Chen; I. Chernayavsky; Y.C. Chin; B. Christensen; J.-F. Coeurjolly; R. Corria Ainslie; M. de la Cruz; P. Dalgaard; P.J. Diggle; P. Donnelly; I. Dryden; S. Eglen; O. Flores; N. Funwi-Gabga; O. Garcia; A. Gault; M. Genton; J. Gilbey; J. Goldstick; P. Grabarnik; C. Graf; J. Franklin; U. Hahn; A. Hardegen; M. Hering; M.B. Hansen; M. Hazelton; J. Heikkinen; K. Hornik; R. Ihaka; A. Jammalamadaka; R. John-Chandran; D. Johnson; M. Kuhn; J. Laake; F. Lavancier; T. Lawrence; R.A. Lamb; J. Lee; G.P. Leser; H.T. Li; G. Limitsios; B. Madin; J. Marcus; K. Marchikanti; R. Mark; J. Mateu; P. McCullagh; U. Mehlig; S. Meyer; X.C. Mi; J. Moller; E. Mudrak; L.S. Nielsen; F. Nunes; J. Oehlschlaegel; T. Onkelinx; S. O'Riordan; E. Parilov; J. Picka; N. Picard; S. Protsiv; A. Raftery; M. Reiter; T.O. Richardson; B.D. Ripley; E. Rosenbaum; B. Rowlingson; J. Rudge; F. Safavimanesh; A. Sarkka; K. Schladitz; B.T. Scott; G.C. Shen; V. Shcherbakov; I.-M. Sintorn; Y. Song; M. Spiess; M. Stevenson; K. Stucki; M. Sumner; P. Surovy; B. Taylor; T. Thorarinsdottir; B. Turlach; A. van Burgel; T. Verbeke; A. 
Villers; F. Vinatier; H. Wang; H. Wendrock; J. Wild; S. Wong; M.E. Zamboni and A. Zeileis. Maintainer: Adrian Baddeley Depends: R (>= 3.0.2), stats, graphics, grDevices, utils, mgcv, deldir (>= 0.0-21), abind, tensor, polyclip (>= 1.2-0) Suggests: sm, maptools, gsl, locfit, spatial, rpanel, tkrplot, scatterplot3d, RandomFields (>= 3.0.0) Description: A package for analysing spatial data, mainly Spatial Point Patterns, including multitype/marked points and spatial covariates, in any two-dimensional spatial region. Also supports three-dimensional point patterns, and space-time point patterns in any number of dimensions. Contains over 1500 functions for plotting spatial data, exploratory data analysis, model-fitting, simulation, spatial sampling, model diagnostics, and formal inference. Data types include point patterns, line segment patterns, spatial windows, pixel images and tessellations. Exploratory methods include quadrat counts, K-functions and their simulation envelopes, nearest neighbour distance and empty space statistics, Fry plots, pair correlation function, kernel smoothed intensity, relative risk estimation with cross-validated bandwidth selection, mark correlation functions, segregation indices, mark dependence diagnostics, and kernel estimates of covariate effects. Formal hypothesis tests of random pattern (chi-squared, Kolmogorov-Smirnov, Diggle-Cressie-Loosmore-Ford, Dao-Genton) and tests for covariate effects (Cox-Berman-Waller-Lawson, Kolmogorov-Smirnov) are also supported. Parametric models can be fitted to point pattern data using the functions ppm, kppm, slrm similar to glm. Types of models include Poisson, Gibbs, Cox and cluster point processes. Models may involve dependence on covariates, interpoint interaction, cluster formation and dependence on marks. Models are fitted by maximum likelihood, logistic regression, minimum contrast, and composite likelihood methods. Fitted point process models can be simulated, automatically. 
Formal hypothesis tests of a fitted model are supported (likelihood ratio test, analysis of deviance, Monte Carlo tests) along with basic tools for model selection (stepwise, AIC). Tools for validating the fitted model include simulation envelopes, residuals, residual plots and Q-Q plots, leverage and influence diagnostics, partial residuals, and added variable plots. License: GPL (>= 2) URL: http://www.spatstat.org LazyData: true NeedsCompilation: yes ByteCompile: true Packaged: 2013-12-12 12:12:12 UTC; adrian Repository: CRAN Date/Publication: 2013-12-12 17:47:25 spatstat/man/0000755000176000001440000000000012250607776012734 5ustar ripleyusersspatstat/man/rpoislpp.Rd0000755000176000001440000000234212237642734015075 0ustar ripleyusers\name{rpoislpp} \alias{rpoislpp} \title{ Poisson Point Process on a Linear Network } \description{ Generates a realisation of the Poisson point process with specified intensity on the given linear network. } \usage{ rpoislpp(lambda, L, ...) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single number, a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or a vector of numbers, a list of functions, or a list of images. } \item{L}{ A linear network (object of class \code{"linnet"}, see \code{\link{linnet}}). } \item{\dots}{ Arguments passed to \code{\link{rpoisppOnLines}}. } } \details{ This function uses \code{\link{rpoisppOnLines}} to generate the random points. } \value{ A point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. 
} \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{runiflpp}}, \code{\link{lpp}}, \code{\link{linnet}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) plot(X) # multitype X <- rpoislpp(c(a=5, b=5), simplenet) } \keyword{spatial} \keyword{datagen} spatstat/man/harmonic.Rd0000755000176000001440000000425312237642732015026 0ustar ripleyusers\name{harmonic} \alias{harmonic} \title{Basis for Harmonic Functions} \description{ Evaluates a basis for the harmonic polynomials in \eqn{x} and \eqn{y} of degree less than or equal to \eqn{n}. } \usage{ harmonic(x, y, n) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates } \item{y}{ Vector of \eqn{y} coordinates } \item{n}{ Maximum degree of polynomial } } \value{ A data frame with \code{2 * n} columns giving the values of the basis functions at the coordinates. Each column is labelled by an algebraic expression for the corresponding basis function. } \details{ This function computes a basis for the harmonic polynomials in two variables \eqn{x} and \eqn{y} up to a given degree \eqn{n} and evaluates them at given \eqn{x,y} locations. It can be used in model formulas (for example in the model-fitting functions \code{\link{lm},\link{glm},\link{gam}} and \code{\link{ppm}}) to specify a linear predictor which is a harmonic function. A function \eqn{f(x,y)} is harmonic if \deqn{\frac{\partial^2}{\partial x^2} f + \frac{\partial^2}{\partial y^2}f = 0.}{ (d/dx)^2 f + (d/dy)^2 f = 0.} The harmonic polynomials of degree less than or equal to \eqn{n} have a basis consisting of \eqn{2 n} functions. This function was implemented on a suggestion of P. McCullagh for fitting nonstationary spatial trend to point process models. 
} \seealso{ \code{\link{ppm}} } \examples{ data(longleaf) X <- unmark(longleaf) # inhomogeneous point pattern \testonly{ # smaller dataset longleaf <- longleaf[seq(1,longleaf$n, by=50)] } # fit Poisson point process with log-cubic intensity fit.3 <- ppm(X, ~ polynom(x,y,3), Poisson()) # fit Poisson process with log-cubic-harmonic intensity fit.h <- ppm(X, ~ harmonic(x,y,3), Poisson()) # Likelihood ratio test lrts <- 2 * (fit.3$maxlogpl - fit.h$maxlogpl) x <- X$x y <- X$y df <- ncol(polynom(x,y,3)) - ncol(harmonic(x,y,3)) pval <- 1 - pchisq(lrts, df=df) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/plot.ppm.Rd0000755000176000001440000001555412237642733015006 0ustar ripleyusers\name{plot.ppm} \alias{plot.ppm} \title{plot a Fitted Point Process Model} \description{ Given a fitted point process model obtained by \code{\link{ppm}}, create spatial trend and conditional intensity surfaces of the model, in a form suitable for plotting, and (optionally) plot these surfaces. } \usage{ \method{plot}{ppm}(x, ngrid = c(40,40), superimpose = TRUE, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how=c("persp","image", "contour"), plot.it = TRUE, locations = NULL, covariates=NULL, \dots) } \arguments{ \item{x}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. } \item{ngrid}{ The dimensions for a grid on which to evaluate, for plotting, the spatial trend and conditional intensity. A vector of 1 or 2 integers. If it is of length 1, \code{ngrid} is replaced by \code{c(ngrid,ngrid)}. } \item{superimpose}{ logical flag; if \code{TRUE} (and if \code{plot=TRUE}) the original data point pattern will be superimposed on the plots. } \item{trend}{ logical flag; if \code{TRUE}, the spatial trend surface will be produced. 
} \item{cif}{ logical flag; if \code{TRUE}, the conditional intensity surface will be produced. } \item{se}{ logical flag; if \code{TRUE}, the estimated standard error of the spatial trend surface will be produced. } \item{pause}{ logical flag indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file. (This flag is ignored if \code{plot=FALSE}). } \item{how}{ character string or character vector indicating the style or styles of plots to be performed. Ignored if \code{plot=FALSE}. } \item{plot.it}{ logical scalar; should a plot be produced immediately? } \item{locations}{ If present, this determines the locations of the pixels at which predictions are computed. It must be a binary pixel image (an object of class \code{"owin"} with type \code{"mask"}). (Incompatible with \code{ngrid}). } \item{covariates}{ Values of external covariates required by the fitted model. Passed to \code{\link{predict.ppm}}. } \item{\dots}{ extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. } } \value{ An object of class \code{plotppm}. Such objects may be plotted by \code{\link{plot.plotppm}()}. This is a list with components named \code{trend} and \code{cif}, either of which may be missing. They will be missing if the corresponding component does not make sense for the model, or if the corresponding argument was set equal to \code{FALSE}. Both \code{trend} and \code{cif} are lists of images. If the model is an unmarked point process, then they are lists of length 1, so that \code{trend[[1]]} is an image of the spatial trend and \code{cif[[1]]} is an image of the conditional intensity. If the model is a marked point process, then \code{trend[[i]]} is an image of the spatial trend for the mark \code{m[i]}, and \code{cif[[1]]} is an image of the conditional intensity for the mark \code{m[i]}, where \code{m} is the vector of levels of the marks. 
} \details{ This is the \code{plot} method for the class \code{"ppm"} (see \code{\link{ppm.object}} for details of this class). It invokes \code{\link{predict.ppm}} to compute the spatial trend and conditional intensity of the fitted point process model. See \code{\link{predict.ppm}} for more explanation about spatial trend and conditional intensity. The default action is to create a rectangular grid of points in (the bounding box of) the observation window of the data point pattern, and evaluate the spatial trend and conditional intensity of the fitted spatial point process model \code{x} at these locations. If the argument \code{locations=} is supplied, then the spatial trend and conditional intensity are calculated at the grid of points specified by this argument. The argument \code{locations}, if present, should be a binary image mask (an object of class \code{"owin"} and type \code{"mask"}). This determines a rectangular grid of locations, or a subset of such a grid, at which predictions will be computed. Binary image masks are conveniently created using \code{\link{as.mask}}. The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required. The argument \code{covariates} has the same format and interpretation as in \code{\link{predict.ppm}}. It may be either a data frame (the number of whose rows must match the number of pixels in \code{locations} multiplied by the number of possible marks in the point pattern), or a list of images. If argument \code{locations} is not supplied, and \code{covariates} \bold{is} supplied, then it \bold{must} be a list of images. If the fitted model was a marked (multitype) point process, then predictions are made for each possible mark value in turn. 
If the fitted model had no spatial trend, then the default is to omit calculating this (flat) surface, unless \code{trend=TRUE} is set explicitly. If the fitted model was Poisson, so that there were no spatial interactions, then the conditional intensity and spatial trend are identical, and the default is to omit the conditional intensity, unless \code{cif=TRUE} is set explicitly. If \code{plot.it=TRUE} then \code{\link{plot.plotppm}()} is called upon to plot the class \code{plotppm} object which is produced. (That object is also returned, silently.) Plots are produced successively using \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}} (or only a selection of these three, if \code{how} is given). Extra graphical parameters controlling the display may be passed directly via the arguments \code{...} or indirectly reset using \code{\link{spatstat.options}}. } \seealso{ \code{\link{plot.plotppm}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{predict.ppm}}, \code{\link{print.ppm}}, \code{\link{persp}}, \code{\link{image}}, \code{\link{contour}}, \code{\link{plot}}, \code{\link{spatstat.options}} } \section{Warnings}{ See warnings in \code{\link{predict.ppm}}. } \examples{ \dontrun{ data(cells) m <- ppm(cells, ~1, Strauss(0.05)) pm <- plot(m) # The object ``pm'' will be plotted as well as saved # for future plotting. } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/thomas.estpcf.Rd0000755000176000001440000001444212237642734016007 0ustar ripleyusers\name{thomas.estpcf} \alias{thomas.estpcf} \title{Fit the Thomas Point Process by Minimum Contrast} \description{ Fits the Thomas point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. 
} \usage{ thomas.estpcf(X, startpar=c(kappa=1,sigma2=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function \code{\link{pcf}}. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical pair correlation function of the Thomas process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. 
The Thomas point process is described in Moller and Waagepetersen (2003, pp. 61--62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma}. The theoretical pair correlation function of the Thomas process is \deqn{ g(r) = 1 + \frac 1 {4\pi \kappa \sigma^2} \exp(-\frac{r^2}{4\sigma^2})). }{ g(r) = 1 + exp(-r^2/(4 * sigma^2)))/(4 * pi * kappa * sigma^2). } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}. 
The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{thomas.estK}} \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rThomas}} to simulate the fitted model. 
} \examples{ data(redwood) u <- thomas.estpcf(redwood, c(kappa=10, sigma2=0.1)) u plot(u, legendpos="topright") u2 <- thomas.estpcf(redwood, c(kappa=10, sigma2=0.1), pcfargs=list(stoyan=0.12)) } \keyword{spatial} \keyword{models} spatstat/man/Kcross.Rd0000755000176000001440000001716412237642731014476 0ustar ripleyusers\name{Kcross} \alias{Kcross} \title{ Multitype K Function (Cross-type) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ Kcross(X, i, j, r=NULL, breaks=NULL, correction, \dots, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. 
} \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kcross} and its companions \code{\link{Kdot}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. 
It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The ``cross-type'' (type \eqn{i} to type \eqn{j}) \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda_j K_{ij}(r)}{lambda[j] Kij(r)} equals the expected number of additional random points of type \eqn{j} within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda_j}{lambda[j]} is the intensity of the type \eqn{j} points, i.e. the expected number of points of type \eqn{j} per unit area. The function \eqn{K_{ij}}{Kij} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{ij}(r)}{Kij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}(r)}{Kij(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{K_{ij}(r)}{Kij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the border correction. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. 
The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. The pair correlation function can also be applied to the result of \code{Kcross}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. 
} \seealso{ \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # amacrine cells data K01 <- Kcross(amacrine, "off", "on") plot(K01) \testonly{ K01 <- Kcross(amacrine, "off", "on", ratio=TRUE) } \dontrun{ K10 <- Kcross(amacrine, "on", "off") # synthetic example: point pattern with marks 0 and 1 pp <- runifpoispp(50) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) K <- Kcross(pp, "0", "1") K <- Kcross(pp, 0, 1) # equivalent } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/anova.ppm.Rd0000755000176000001440000001124612237642732015125 0ustar ripleyusers\name{anova.ppm} \alias{anova.ppm} \title{ANOVA for Fitted Point Process Models} \description{ Performs analysis of deviance for two or more fitted point process models. } \usage{ \method{anova}{ppm}(object, \dots, test=NULL, override=FALSE) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ One or more fitted point process models. } \item{test}{ Character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}. } \item{override}{ Logical flag indicating whether to proceed even when there is no statistical theory to support the calculation. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for fitted point process models (objects of class \code{"ppm"}, usually generated by the model-fitting function \code{\link{ppm}}). If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. 
Their interpretation is very similar to that in \code{\link{anova.glm}}. If some of the fitted models are \emph{not} Poisson point processes, then there is no statistical theory available to support a similar analysis. The function issues a warning, and (by default) returns a \code{NULL} value. However if \code{override=TRUE}, then a kind of analysis of deviance table will be printed. The `deviance' differences in this table are equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). At the time of writing, there is no statistical theory to support inferential interpretation of log pseudolikelihood ratios. The \code{override} option is provided for research purposes only! } \section{Errors and warnings}{ \describe{ \item{models not nested:}{ There may be an error message that the models are not \dQuote{nested}. For an Analysis of Deviance the models must be nested, i.e. one model must be a special case of the other. For example the point process model with formula \code{~x} is a special case of the model with formula \code{~x+y}, so these models are nested. However the two point process models with formulae \code{~x} and \code{~y} are not nested. If you get this error message and you believe that the models should be nested, the problem may be the inability of \R to recognise that the two formulae are nested. Try modifying the formulae to make their relationship more obvious. } \item{different sizes of dataset:}{ There may be an error message from \code{anova.glmlist} that \dQuote{models were not all fitted to the same size of dataset}. This implies that the models were fitted using different quadrature schemes (see \code{\link{quadscheme}}) and/or with different edge corrections or different values of the border edge correction distance \code{rbord}. 
To ensure that models are comparable, check the following: \itemize{ \item the models must all have been fitted to the same point pattern dataset, in the same window. \item all models must have been fitted by the same fitting method as specified by the argument \code{method} in \code{\link{ppm}}. \item If some of the models depend on covariates, then they should all have been fitted using the same list of covariates, and using \code{allcovar=TRUE} to ensure that the same quadrature scheme is used. \item all models must have been fitted using the same edge correction as specified by the arguments \code{correction} and \code{rbord}. If you did not specify the value of \code{rbord}, then it may have taken a different value for different models. The default value of \code{rbord} is equal to zero for a Poisson model, and otherwise equals the reach (interaction distance) of the interaction term (see \code{\link{reach}}). To ensure that the models are comparable, set \code{rbord} to equal the maximum reach of the interactions that you are fitting. } } } } \seealso{ \code{\link{ppm}} } \examples{ data(swedishpines) mod0 <- ppm(swedishpines, ~1, Poisson()) modx <- ppm(swedishpines, ~x, Poisson()) anova(mod0, modx, test="Chi") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/distmap.owin.Rd0000755000176000001440000000600012237642732015632 0ustar ripleyusers\name{distmap.owin} %DontDeclareMethods \alias{distmap.owin} \title{Distance Map of Window} \description{ Computes the distance from each pixel to the nearest point in the given window. } \usage{ \method{distmap}{owin}(X, \dots, discretise=FALSE, invert=FALSE) } \arguments{ \item{X}{ A window (object of class \code{"owin"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to control pixel resolution. 
} \item{discretise}{ Logical flag controlling the choice of algorithm when \code{X} is a polygonal window. See Details. } \item{invert}{ If \code{TRUE}, compute the distance transform of the complement of the window. } } \value{ A pixel image (object of class \code{"im"}) whose greyscale values are the values of the distance map. The return value has an attribute \code{"bdry"} which is a pixel image. } \details{ The ``distance map'' of a window \eqn{W} is the function \eqn{f} whose value \code{f(u)} is defined for any two-dimensional location \eqn{u} as the shortest distance from \eqn{u} to \eqn{W}. This function computes the distance map of the window \code{X} and returns the distance map as a pixel image. The greyscale value at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest pixel in \code{X}. Additionally, the return value has an attribute \code{"bdry"} which is also a pixel image. The grey values in \code{"bdry"} give the distance from each pixel to the bounding rectangle of the image. If \code{X} is a binary pixel mask, the distance values computed are not the usual Euclidean distances. Instead the distance between two pixels is measured by the length of the shortest path connecting the two pixels. A path is a series of steps between neighbouring pixels (each pixel has 8 neighbours). This is the standard `distance transform' algorithm of image processing (Rosenfeld and Kak, 1968; Borgefors, 1986). If \code{X} is a polygonal window, then exact Euclidean distances will be computed if \code{discretise=FALSE}. If \code{discretise=TRUE} then the window will first be converted to a binary pixel mask and the discrete path distances will be computed. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. This function is a method for the generic \code{\link{distmap}}. 
} \seealso{ \code{\link{distmap}}, \code{\link{distmap.ppp}}, \code{\link{distmap.psp}} } \examples{ data(letterR) U <- distmap(letterR) \dontrun{ plot(U) plot(attr(U, "bdry")) } } \references{ Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344--371. Rosenfeld, A. and Pfalz, J.L. Distance functions on digital pictures. \emph{Pattern Recognition} \bold{1} (1968) 33-61. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/MultiStrauss.Rd0000755000176000001440000000760612237642731015711 0ustar ripleyusers\name{MultiStrauss} \alias{MultiStrauss} \title{The Multitype Strauss Point Process Model} \description{ Creates an instance of the multitype Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStrauss(types=NULL, radii) } \arguments{ \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{radii}{Matrix of interaction radii} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype Strauss process with interaction radii \eqn{radii[i,j]}. } \details{ The (stationary) multitype Strauss process with \eqn{m} types, with interaction radii \eqn{r_{ij}}{r[i,j]} and parameters \eqn{\beta_j}{beta[j]} and \eqn{\gamma_{ij}}{gamma[i,j]} is the pairwise interaction point process in which each point of type \eqn{j} contributes a factor \eqn{\beta_j}{beta[j]} to the probability density of the point pattern, and a pair of points of types \eqn{i} and \eqn{j} closer than \eqn{r_{ij}}{r[i,j]} units apart contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the density. 
The nonstationary multitype Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location and type, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the multitype Strauss process pairwise interaction is yielded by the function \code{MultiStrauss()}. See the examples below. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{radii}. The matrix \code{radii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii are specified in \code{MultiStrauss}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStrauss()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}, \code{\link{MultiHard}} } \examples{ r <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStrauss(radii=r) # prints a sensible description of itself r <- 0.03 * matrix(c(1,2,2,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[ owin(c(0, 0.8), c(0, 1)) ] } ppm(X, ~1, MultiStrauss(, r)) # fit the stationary multitype Strauss process to `amacrine' # Note the comma; needed since "types" is not specified. 
\dontrun{ ppm(X, ~polynom(x,y,3), MultiStrauss(c("off","on"), r)) # fit a nonstationary multitype Strauss process with log-cubic trend } } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/vertices.Rd0000755000176000001440000000252212237642734015051 0ustar ripleyusers\name{vertices} \alias{vertices} \title{Vertices of a Window} \description{ Finds the vertices of a window } \usage{ vertices(w) } \arguments{ \item{w}{A window.} } \value{ A list with components \code{x} and \code{y} giving the coordinates of the vertices. } \details{ This function computes the vertices (`corners') of a spatial window. The argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details). If \code{w} is a rectangle, the coordinates of the four corner points are returned. If \code{w} is a polygonal window (consisting of one or more polygons), the coordinates of the vertices of all polygons are returned. If \code{w} is a binary mask, then a `boundary pixel' is defined to be a pixel inside the window which has at least one neighbour outside the window. The coordinates of the centres of all boundary pixels are returned. } \seealso{ \code{\link{owin.object}}. 
} \examples{ data(letterR) vert <- vertices(letterR) plot(letterR, main="Polygonal vertices") points(vert) plot(letterR, main="Boundary pixels") points(vertices(as.mask(letterR))) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/nearest.raster.point.Rd0000755000176000001440000000424412237642733017317 0ustar ripleyusers\name{nearest.raster.point} \alias{nearest.raster.point} \title{Find Pixel Nearest to a Given Point} \description{ Given cartesian coordinates, find the nearest pixel. } \usage{ nearest.raster.point(x,y,w, indices=TRUE) } \arguments{ \item{x}{Numeric vector of \eqn{x} coordinates of any points} \item{y}{Numeric vector of \eqn{y} coordinates of any points} \item{w}{A window (an object of class \code{"owin"}) of type \code{"mask"} representing a binary pixel image. } \item{indices}{Logical flag indicating whether to return the row and column indices, or the actual \eqn{x,y} coordinates. } } \value{ If \code{indices=TRUE}, a list containing two vectors \code{rr} and \code{cc} giving row and column positions (in the image matrix). If \code{indices=FALSE}, a list containing vectors \code{x} and \code{y} giving actual coordinates of the pixels. } \details{ The argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) of type \code{"mask"}. This represents a binary pixel image. The arguments \code{x} and \code{y} should be numeric vectors of equal length. They are interpreted as the coordinates of points in space. For each point \code{(x[i], y[i])}, the function finds the nearest pixel in the grid of pixels for \code{w}. If \code{indices=TRUE}, this function returns a list containing two vectors \code{rr} and \code{cc} giving row and column positions (in the image matrix). 
For the location \code{(x[i],y[i])} the nearest pixel is at row \code{rr[i]} and column \code{cc[i]} of the image. If \code{indices=FALSE}, the function returns a list containing two vectors \code{x} and \code{y} giving the actual coordinates of the pixels. } \seealso{ \code{\link{owin.object}}, \code{\link{as.mask}} } \examples{ w <- owin(c(0,1), c(0,1), mask=matrix(TRUE, 100,100)) # 100 x 100 grid nearest.raster.point(0.5, 0.3, w) nearest.raster.point(0.5, 0.3, w, indices=FALSE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/Extract.lpp.Rd0000644000176000001440000000374512237642732015434 0ustar ripleyusers\name{Extract.lpp} \alias{[.lpp} \title{Extract Subset of Point Pattern on Linear Network} \description{ Extract a subset of a point pattern on a linear network. } \usage{ \method{[}{lpp}(x, i, j, ...) } \arguments{ \item{x}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{i}{ Subset index. A valid subset index in the usual \R sense, indicating which points should be retained } \item{j}{ Spatial window (object of class \code{"owin"}) delineating the region that should be retained. } \item{\dots}{ Ignored. } } \value{ A point pattern on a linear network (of class \code{"lpp"}). } \details{ This function extracts a designated subset of a point pattern on a linear network. The function \code{[.lpp} is a method for \code{\link{[}} for the class \code{"lpp"}. It extracts a designated subset of a point pattern. The argument \code{i} should be a subset index in the usual \R sense: either a numeric vector of positive indices (identifying the points to be retained), a numeric vector of negative indices (identifying the points to be deleted) or a logical vector of length equal to the number of points in the point pattern \code{x}. 
In the latter case, the points \code{(x$x[i], x$y[i])} for which \code{subset[i]=TRUE} will be retained, and the others will be deleted. The argument \code{j}, if present, should be a spatial window. The pattern inside the region will be retained. \emph{Line segments that cross the boundary of the window are deleted} in the current implementation. Use the function \code{\link{unmark}} to remove marks from a marked point pattern. } \seealso{ \code{\link{lpp}} } \examples{ # Chicago crimes data - remove cases of assault chicago[marks(chicago) != "assault"] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/which.max.im.Rd0000755000176000001440000000234612237642735015524 0ustar ripleyusers\name{which.max.im} \alias{which.max.im} \title{Identify Pixelwise Maximum of Several Pixel Images} \description{ Given a list of pixel images, this function identifies the image that has the largest value at each pixel, and returns an image. } \usage{ which.max.im(x) } \arguments{ \item{x}{A list of images (objects of class \code{"im"}).} } \details{ \code{x} should be a list of pixel images. All images must have compatible dimensions. For each pixel, the algorithm identifies which of the images in the list \code{x} has the largest value at that pixel. The index of this image becomes the pixel value in the output image. If \code{names(x)} is not null, then the indices are replaced by these names. } \value{ An image (object of class \code{"im"}) with factor values. 
} \seealso{ \code{\link{eval.im}}, \code{\link{im.object}} } \examples{ # test images X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) Y <- as.im(function(x,y) { x - y }, unit.square()) which.max.im(list(X=X,Y=Y)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/addvar.Rd0000755000176000001440000001436212237642732014471 0ustar ripleyusers\name{addvar} \alias{addvar} \title{ Added Variable Plot for Point Process Model } \description{ Computes the coordinates for an Added Variable Plot for a fitted point process model. } \usage{ addvar(model, covariate, ..., subregion=NULL, bw="nrd0", adjust=1, from=NULL, to=NULL, n=512, bw.input = c("points", "quad"), bw.restrict = FALSE, covname, crosscheck=FALSE) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate to be added to the model. Either a pixel image, a \code{function(x,y)}, or a character string giving the name of a covariate that was supplied when the model was fitted. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. } \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. } \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. 
} \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{crosscheck}{ For developers only. Logical value indicating whether to perform cross-checks on the validity of the calculation. } } \details{ This command generates the plot coordinates for an Added Variable Plot for a spatial point process model. Added Variable Plots (Cox, 1958, sec 4.5; Wang, 1985) are commonly used in linear models and generalized linear models, to decide whether a model with response \eqn{y} and predictors \eqn{x} would be improved by including another predictor \eqn{z}. In a (generalised) linear model with response \eqn{y} and predictors \eqn{x}, the Added Variable Plot for a new covariate \eqn{z} is a plot of the smoothed Pearson residuals from the original model against the scaled residuals from a weighted linear regression of \eqn{z} on \eqn{x}. If this plot has nonzero slope, then the new covariate \eqn{z} is needed. For general advice see Cook and Weisberg(1999); Harrell (2001). Essentially the same technique can be used for a spatial point process model (Baddeley et al, 2012). The argument \code{model} should be a fitted spatial point process model (object of class \code{"ppm"}). The argument \code{covariate} identifies the covariate that is to be considered for addition to the model. It should be either a pixel image (object of class \code{"im"}) or a \code{function(x,y)} giving the values of the covariate at any spatial location. Alternatively \code{covariate} may be a character string, giving the name of a covariate that was supplied (in the \code{covariates} argument to \code{\link{ppm}}) when the model was fitted, but was not used in the model. The result of \code{addvar(model, covariate)} is an object belonging to the classes \code{"addvar"} and \code{"fv"}. 
Plot this object to generate the added variable plot. Note that the plot method shows the pointwise significance bands for a test of the \emph{null} model, i.e. the null hypothesis that the new covariate has no effect. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}. } \value{ An object of class \code{"addvar"} containing the coordinates for the added variable plot. There is a \code{plot} method. } \section{Internal data}{ The return value has an attribute \code{"spatial"} which contains the internal data: the computed values of the residuals, and of all relevant covariates, at each quadrature point of the model. It is an object of class \code{"ppp"} with a data frame of marks. } \references{ Baddeley, A. and Chang, Y.-M. and Song, Y. and Turner, R. (2012) \emph{Residual diagnostics for covariate effects in spatial point process models}. Submitted for publication. Cook, R.D. and Weisberg, S. (1999) \emph{Applied regression, including computing and graphics}. New York: Wiley. Cox, D.R. (1958) \emph{Planning of Experiments}. New York: Wiley. Harrell, F. (2001) \emph{Regression Modeling Strategies}. New York: Springer. 
Wang, P. (1985) Adding a variable in generalized linear models. \emph{Technometrics} \bold{27}, 273--276. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Rolf Turner \email{r.turner@auckland.ac.nz}, Ya-Mei Chang and Yong Song. } \seealso{ \code{\link{parres}}, \code{\link{rhohat}}, \code{\link{rho2hat}}. } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) model <- ppm(X, ~y) adv <- addvar(model, "x") plot(adv) adv <- addvar(model, "x", subregion=square(0.5)) } \keyword{spatial} \keyword{models} spatstat/man/Lest.Rd0000755000176000001440000000565012237642731014136 0ustar ripleyusers\name{Lest} \alias{Lest} \title{L-function} \description{ Calculates an estimate of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Lest(X, ...) } \arguments{ \item{X}{ The observed point pattern, from which an estimate of \eqn{L(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{ Other arguments passed to \code{\link{Kest}} to control the estimation procedure. } } \details{ This command computes an estimate of the \eqn{L}-function for the spatial point pattern \code{X}. The \eqn{L}-function is a transformation of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the \eqn{K}-function. See \code{\link{Kest}} for information about Ripley's \eqn{K}-function. The transformation to \eqn{L} was proposed by Besag (1977). The command \code{Lest} first calls \code{\link{Kest}} to compute the estimate of the \eqn{K}-function, and then applies the square root transformation. For a completely random (uniform Poisson) point pattern, the theoretical value of the \eqn{L}-function is \eqn{L(r) = r}. 
The square root also has the effect of stabilising the variance of the estimator, so that \eqn{K} is more appropriate for use in simulation envelopes and hypothesis tests. See \code{\link{Kest}} for the list of arguments. } \section{Variance approximations}{ If the argument \code{var.approx=TRUE} is given, the return value includes columns \code{rip} and \code{ls} containing approximations to the variance of \eqn{\hat L(r)}{Lest(r)} under CSR. These are obtained by the delta method from the variance approximations described in \code{\link{Kest}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(cells) L <- Lest(cells) plot(L, main="L function for cells") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/dfbetas.ppm.Rd0000755000176000001440000000643512237642732015435 0ustar ripleyusers\name{dfbetas.ppm} \alias{dfbetas.ppm} \title{ Parameter influence measure } \description{ Computes the deletion influence measure for each parameter in a fitted point process model. 
} \usage{ \method{dfbetas}{ppm}(model, ..., drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=list()) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process \code{model}, this function computes the influence measure for each parameter, as described in Baddeley, Chang and Song (2013). This is a method for the generic function \code{\link[stats]{dfbetas}}. The influence measure for each parameter \eqn{\theta}{theta} is a signed measure in two-dimensional space. It consists of a discrete mass on each data point (i.e. each point in the point pattern to which the \code{model} was originally fitted) and a continuous density at all locations. The mass at a data point represents the change in the fitted value of the parameter \eqn{\theta}{theta} that would occur if this data point were to be deleted. The density at other non-data locations represents the effect (on the fitted value of \eqn{\theta}{theta}) of deleting these locations (and their associated covariate values) from the input to the fitting procedure. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. 
log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. } \value{ An object of class \code{"msr"} representing a signed or vector-valued measure. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{leverage.ppm}}, \code{\link{influence.ppm}} } \examples{ \testonly{op <- spatstat.options(npixel=32)} X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) \testonly{fit <- ppm(X, ~x+y, nd=16)} plot(dfbetas(fit)) plot(Smooth(dfbetas(fit))) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{models} spatstat/man/plot.hyperframe.Rd0000755000176000001440000000653712252274531016347 0ustar ripleyusers\name{plot.hyperframe} \alias{plot.hyperframe} \title{Plot Entries in a Hyperframe} \description{ Plots the entries in a hyperframe, in a series of panels, one panel for each row of the hyperframe. } \usage{ \method{plot}{hyperframe}(x, e, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, parargs=list(mar=c(1,1,3,1) * marsize), marsize=0.1) } \arguments{ \item{x}{ Data to be plotted. A hyperframe (object of class \code{"hyperframe"}, see \code{\link{hyperframe}}). } \item{e}{ How to plot each row. Optional. An \R language call or expression (typically enclosed in \code{\link{quote}()} that will be evaluated in each row of the hyperframe to generate the plots. } \item{\dots}{ Extra arguments controlling the plot (when \code{e} is missing). 
} \item{main}{Overall title for the array of plots.} \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{parargs}{ Optional list of arguments passed to \code{\link{par}} before plotting each panel. Can be used to control margin sizes, etc. } \item{marsize}{ Optional scale parameter controlling the sizes of margins between the panels. } } \details{ This is the \code{plot} method for the class \code{"hyperframe"}. The argument \code{x} must be a hyperframe (like a data frame, except that the entries can be objects of any class; see \code{\link{hyperframe}}). This function generates a series of plots, one plot for each row of the hyperframe. If \code{arrange=TRUE} (the default), then these plots are arranged in a neat array of panels within a single plot frame. If \code{arrange=FALSE}, the plots are simply executed one after another. Exactly what is plotted, and how it is plotted, depends on the argument \code{e}. The default (if \code{e} is missing) is to plot only the first column of \code{x}. Each entry in the first column is plotted using the generic \code{\link{plot}} command, together with any extra arguments given in \code{\dots}. If \code{e} is present, it should be an \R language expression involving the column names of \code{x}. (It is typically created using \code{\link{quote}} or \code{\link{expression}}.) The expression will be evaluated once for each row of \code{x}. It will be evaluated in an environment where each column name of \code{x} is interpreted as meaning the object in that column in the current row. See the Examples. } \value{ \code{NULL}. 
} \seealso{ \code{\link{hyperframe}}, \code{\link{with.hyperframe}} } \examples{ H <- hyperframe(id=1:10) H$X <- with(H, rpoispp(100)) H$D <- with(H, distmap(X)) # points only plot(H[,"X"]) plot(H, quote(plot(X, main=id))) # points superimposed on images plot(H, quote({plot(D, main=id); plot(X, add=TRUE)})) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/valid.ppm.Rd0000644000176000001440000000472312237642734015121 0ustar ripleyusers\name{valid.ppm} \alias{valid.ppm} \title{ Check Whether Point Process Model is Valid } \description{ Determines whether a fitted point process model satisfies the integrability conditions for existence of the point process. } \usage{ valid.ppm(object) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"}). } } \details{ The model-fitting function \code{\link{ppm}} fits Gibbs point process models to point pattern data. By default, \code{\link{ppm}} does not check whether the fitted model actually exists as a point process. This checking is done by \code{valid.ppm}. Unlike a regression model, which is well-defined for any values of the fitted regression coefficients, a Gibbs point process model is only well-defined if the fitted interaction parameters satisfy some constraints. A famous example is the Strauss process (see \code{\link{Strauss}}) which exists only when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not integrable and the process does not exist (and cannot be simulated). By default, \code{\link{ppm}} does not enforce the constraint that a fitted Strauss process (for example) must satisfy \eqn{\gamma \le 1}{gamma <= 1}. 
This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} could be useful information for data analysis, as it indicates that the Strauss model is not appropriate, and suggests a clustered model should be fitted. The function \code{valid.ppm} checks whether the fitted model \code{object} specifies a well-defined point process. It returns \code{TRUE} if the model is well-defined. Another possible reason for invalid models is that the data may not be adequate for estimation of the model parameters. In this case, some of the fitted coefficients could be \code{NA} or infinite values. If this happens then \code{valid.ppm} returns \code{FALSE}. Use the function \code{\link{project.ppm}} to force the fitted model to be valid. } \value{ Logical. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{ppm}}, \code{\link{project.ppm}} } \examples{ fit1 <- ppm(cells, ~1, Strauss(0.1)) valid.ppm(fit1) fit2 <- ppm(redwood, ~1, Strauss(0.1)) valid.ppm(fit2) } \keyword{spatial} \keyword{models} spatstat/man/Kdot.Rd0000755000176000001440000001720312237642731014125 0ustar ripleyusers\name{Kdot} \alias{Kdot} \title{ Multitype K Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{K} function which counts the expected number of other points of the process within a given distance of a point of type \eqn{i}. } \usage{ Kdot(X, i, r=NULL, breaks=NULL, correction, ..., ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). 
Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ This function \code{Kdot} and its companions \code{\link{Kcross}} and \code{\link{Kmulti}} are generalisations of the function \code{\link{Kest}} to multitype point patterns. 
A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The ``type \eqn{i} to any type'' multitype \eqn{K} function of a stationary multitype point process \eqn{X} is defined so that \eqn{\lambda K_{i\bullet}(r)}{lambda Ki.(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical point of type \eqn{i} in the process \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The function \eqn{K_{i\bullet}}{Ki.} is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K_{i\bullet}(r)}{Ki.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points were independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{K_{i\bullet}(r)}{Ki.(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} may suggest dependence between types. This algorithm estimates the distribution function \eqn{K_{i\bullet}(r)}{Ki.(r)} from the point pattern \code{X}. 
It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}, using the border correction. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. The pair correlation function can also be applied to the result of \code{Kdot}; see \code{\link{pcf}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The reduced sample estimator of \eqn{K_{i\bullet}}{Ki.} is pointwise approximately unbiased, but it may not be a nondecreasing function of \eqn{r}. 
} \seealso{ \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{Kmulti}}, \code{\link{pcf}} } \examples{ # Lansing woods data: 6 types of trees data(lansing) \dontrun{ Kh. <- Kdot(lansing, "hickory") } \testonly{ sub <- lansing[seq(1,lansing$n, by=80), ] Kh. <- Kdot(sub, "hickory") } # diagnostic plot for independence between hickories and other trees plot(Kh.) \dontrun{ # synthetic example with two marks "a" and "b" pp <- runifpoispp(50) pp <- pp \%mark\% factor(sample(c("a","b"), npoints(pp), replace=TRUE)) K <- Kdot(pp, "a") } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.lppm.Rd0000644000176000001440000000261712237642733015153 0ustar ripleyusers\name{plot.lppm} \alias{plot.lppm} \title{ Plot a Fitted Point Process Model on a Linear Network } \description{ Plots the fitted intensity of a point process model on a linear network. } \usage{ \method{plot}{lppm}(x, ..., type="trend") } \arguments{ \item{x}{ An object of class \code{"lppm"} representing a fitted point process model on a linear network. } \item{\dots}{ Arguments passed to \code{\link{plot.linim}} to control the plot. } \item{type}{ Character string (either \code{"trend"} or \code{"cif"}) determining whether to plot the fitted first order trend or the conditional intensity. } } \details{ This function is the plot method for the class \code{"lppm"}. It computes the fitted intensity of the point process model, and displays it using \code{\link{plot.linim}}. The default is to display intensity values as colours. Alternatively if the argument \code{style="width"} is given, intensity values are displayed as the widths of thick lines drawn over the network. } \value{ Null. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{lppm}}, \code{\link{plot.linim}}, \code{\link{methods.lppm}}, \code{\link{predict.lppm}}. } \examples{ example(lpp) fit <- lppm(X, ~x) plot(fit) plot(fit, style="width") } \keyword{spatial} \keyword{models} spatstat/man/bw.diggle.Rd0000755000176000001440000000554412237642732015074 0ustar ripleyusers\name{bw.diggle} \alias{bw.diggle} \title{ Cross Validated Bandwidth Selection for Kernel Density } \description{ Uses cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.diggle(X, ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{sigma} is chosen to minimise the mean-square error criterion defined by Diggle (1985). The algorithm computes the mean-square error by the method of Berman and Diggle (1989). See Diggle (2003, pages 115-118) for a summary of this method. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) mean-square error as a function of \code{sigma}. } \section{Definition of bandwidth}{ The smoothing parameter \code{sigma} returned by \code{bw.diggle} (and displayed on the horizontal axis of the plot) corresponds to \code{h/2}, where \code{h} is the smoothing parameter described in Diggle (2003, pages 116-118) and Berman and Diggle (1989). In those references, the smoothing kernel is the uniform density on the disc of radius \code{h}. In \code{\link{density.ppp}}, the smoothing kernel is the isotropic Gaussian density with standard deviation \code{sigma}. 
When replacing one kernel by another, the usual practice is to adjust the bandwidths so that the kernels have equal variance (cf. Diggle 2003, page 118). This implies that \code{sigma = h/2}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.ppl}}, \code{\link{bw.scott}} } \examples{ data(lansing) attach(split(lansing)) b <- bw.diggle(hickory) plot(b, ylim=c(-2, 0), main="Cross validation for hickories") \donttest{ plot(density(hickory, b)) } } \references{ Berman, M. and Diggle, P. (1989) Estimating weighted integrals of the second-order intensity of a spatial point process. \emph{Journal of the Royal Statistical Society, series B} \bold{51}, 81--92. Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34} (1985) 138--147. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/mucosa.Rd0000644000176000001440000000341512237642733014512 0ustar ripleyusers\name{mucosa} \alias{mucosa} \alias{mucosa.subwin} \docType{data} \title{ Cells in Gastric Mucosa } \description{ A bivariate inhomogeneous point pattern, giving the locations of the centres of two types of cells in a cross-section of the gastric mucosa of a rat. } \usage{data(mucosa)} \format{ An object of class \code{"ppp"}, see \code{\link{ppp.object}}. This is a multitype point pattern with two types of points, \code{ECL} and \code{other}. } \details{ This point pattern dataset gives the locations of cell centres in a cross-section of the gastric mucosa (mucous membrane of the stomach) of a rat. 
The rectangular observation window has been scaled to unit width. The lower edge of the window is closest to the outside of the stomach. The cells are classified into two types: \emph{ECL cells} (enterochromaffin-like cells) and \emph{other} cells. There are 86 ECL cells and 807 other cells in the dataset. ECL cells are a type of neuroendocrine cell which synthesize and secrete histamine. One hypothesis of interest is whether the spatially-varying intensities of ECL cells and other cells are proportional. The data were originally collected by Dr Thomas Berntsen. The data were discussed and analysed in Moller and Waagepetersen (2004, pp. 2, 169). The associated object \code{mucosa.subwin} is the smaller window to which the data were restricted for analysis by Moller and Waagepetersen. } \source{ Dr Thomas Berntsen and Prof Rasmus Waagepetersen. } \references{ Moller, J. and Waagepetersen, R. (2004). \emph{Statistical Inference and Simulation for Spatial Point Processes}. Chapman and Hall/CRC. } \examples{ plot(mucosa, chars=c(1,3), cols=c("red", "green")) plot(mucosa.subwin, add=TRUE, lty=3) } \keyword{datasets} spatstat/man/union.quad.Rd0000755000176000001440000000220012237642734015277 0ustar ripleyusers\name{union.quad} \alias{union.quad} \title{Union of Data and Dummy Points} \description{ Combines the data and dummy points of a quadrature scheme into a single point pattern. } \usage{ union.quad(Q) } \arguments{ \item{Q}{A quadrature scheme (an object of class \code{"quad"}).} } \value{ A point pattern (of class \code{"ppp"}). } \details{ The argument \code{Q} should be a quadrature scheme (an object of class \code{"quad"}, see \code{\link{quad.object}} for details). This function combines the data and dummy points of \code{Q} into a single point pattern. If either the data or the dummy points are marked, the result is a marked point pattern. The function \code{\link{as.ppp}} will perform the same task. 
} \seealso{ \code{\link{quad.object}}, \code{\link{as.ppp}} } \examples{ data(simdat) Q <- quadscheme(simdat, default.dummy(simdat)) U <- union.quad(Q) \dontrun{plot(U)} # equivalent: U <- as.ppp(Q) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/compatible.Rd0000644000176000001440000000202712237642732015337 0ustar ripleyusers\name{compatible} \alias{compatible} \title{Test Whether Objects Are Compatible} \description{ Tests whether two or more objects of the same class are compatible. } \usage{ compatible(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more objects of the same class} } \details{ This generic function is used to check whether the objects \code{A} and \code{B} (and any additional objects \code{\dots}) are compatible. What is meant by \sQuote{compatible} depends on the class of object. There are methods for the classes \code{"fv"}, \code{"fasp"}, \code{"im"} and \code{"units"}. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{compatible.fv}}, \code{\link{compatible.fasp}}, \code{\link{compatible.im}}, \code{\link{compatible.units}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/crossdist.lpp.Rd0000644000176000001440000000402212237642732016024 0ustar ripleyusers\name{crossdist.lpp} %DontDeclareMethods \alias{crossdist.lpp} \title{Pairwise distances between two point patterns on a linear network} \description{ Computes the distances between pairs of points taken from two different point patterns on the same linear network. } \usage{ \method{crossdist}{lpp}(X, Y, \dots, method="C") } \arguments{ \item{X,Y}{ Point patterns on a linear network (objects of class \code{"lpp"}). 
They must lie on the \emph{same} network. } \item{\dots}{ Ignored. } \item{method}{String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. } \details{ Given two point patterns on a linear network, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, measuring distance by the shortest path in the network. This is a method for the generic function \code{\link{crossdist}} for point patterns on a linear network (objects of class \code{"lpp"}). This function expects two point pattern objects \code{X} and \code{Y} on the \emph{same} linear network, and returns the matrix whose \code{[i,j]} entry is the shortest-path distance from \code{X[i]} to \code{Y[j]}. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is much faster. } \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.ppp}}, \code{\link{pairdist}}, \code{\link{nndist}} } \examples{ v <- split(chicago) X <- v$cartheft Y <- v$burglary d <- crossdist(X, Y) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/simba.Rd0000644000176000001440000000214712252274531014311 0ustar ripleyusers\name{simba} \alias{simba} \docType{data} \title{ Simulated data from a two-group experiment with replication within each group. } \description{ The \code{simba} dataset contains simulated data from an experiment with a `control' group and a `treatment' group, each group containing 5 experimental units. The responses in the experiment are point patterns. 
The responses in the control group are independent realisations of a Poisson point process with intensity 80. The responses in the treatment group are independent realisations of a Strauss process with activity parameter \eqn{\beta=100}{beta=100}, interaction parameter \eqn{\gamma=0.5}{gamma=0.5} and interaction radius \eqn{R=0.07} in the unit square. } \format{ \code{simba} is a hyperframe with 10 rows, and columns named: \itemize{ \item \code{Points} containing the point patterns \item \code{group} factor identifying the experimental group, with levels \code{control} and \code{treatment}). } } \usage{data(simba)} \source{ Simulated data, generated by Adrian Baddeley. } \keyword{datasets} \keyword{spatial} spatstat/man/linearK.Rd0000755000176000001440000000371112237642732014611 0ustar ripleyusers\name{linearK} \alias{linearK} \title{ Linear K Function } \description{ Computes an estimate of the linear \eqn{K} function for a point pattern on a linear network. } \usage{ linearK(X, r=NULL, ..., correction="Ang") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Ignored. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } } \details{ This command computes the linear \eqn{K} function from point pattern data on a linear network. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. The result is the network \eqn{K} function as defined by Okabe and Yamada (2001). If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010; Ang et al, 2012). } \value{ Function value table (object of class \code{"fv"}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \references{ Ang, Q.W. 
(2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \seealso{ \code{\link{compileK}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) linearK(X) linearK(X, correction="none") } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.colourmap.Rd0000755000176000001440000000634412250070412016170 0ustar ripleyusers\name{plot.colourmap} \alias{plot.colourmap} \title{Plot a Colour Map} \description{ Displays a colour map as a colour ribbon } \usage{ \method{plot}{colourmap}(x, ..., main, xlim = NULL, ylim = NULL, vertical = FALSE, axis = TRUE, labelmap=NULL, gap=0.25, add=FALSE) } \arguments{ \item{x}{Colour map to be plotted. An object of class \code{"colourmap"}.} \item{\dots}{ Graphical arguments passed to \code{\link{image.default}} or \code{\link{axis}}. } \item{main}{Main title for plot. A character string.} \item{xlim}{ Optional range of \code{x} values for the location of the colour ribbon. } \item{ylim}{ Optional range of \code{y} values for the location of the colour ribbon. } \item{vertical}{Logical flag determining whether the colour ribbon is plotted as a horizontal strip (\code{FALSE}) or a vertical strip (\code{TRUE}).} \item{axis}{Logical flag determining whether an axis should be plotted showing the numerical values that are mapped to the colours. } \item{labelmap}{ Function. If this is present, then the labels on the plot, which indicate the input values corresponding to particular colours, will be transformed by \code{labelmap} before being displayed on the plot. 
Typically used to simplify or shorten the labels on the plot. } \item{gap}{ Distance between separate blocks of colour, as a fraction of the width of one block, if the colourmap is discrete. } \item{add}{ Logical value indicating whether to add the colourmap to the existing plot (\code{add=TRUE}), or to start a new plot (\code{add=FALSE}, the default). } } \details{ This is the plot method for the class \code{"colourmap"}. An object of this class (created by the function \code{\link{colourmap}}) represents a colour map or colour lookup table associating colours with each data value. The command \code{plot.colourmap} displays the colour map as a colour ribbon or as a colour legend (a sequence of blocks of colour). This plot can be useful on its own to inspect the colour map. If the domain of the colourmap is an interval of real numbers, the colourmap is displayed as a continuous ribbon of colour. If the domain of the colourmap is a finite set of inputs, the colours are displayed as separate blocks of colour. The separation between blocks is equal to \code{gap} times the width of one block. To annotate an existing plot with an explanatory colour ribbon or colour legend, specify \code{add=TRUE} and use the arguments \code{xlim} and/or \code{ylim} to control the physical position of the ribbon on the plot. Labels explaining the colour map are drawn by \code{\link[graphics]{axis}} and can be modified by specifying arguments that will be passed to this function. } \value{ None. 
} \seealso{\code{\link{colourmap}}} \examples{ co <- colourmap(rainbow(100), breaks=seq(-1,1,length=101)) plot(co) plot(co, col.ticks="pink") ca <- colourmap(rainbow(8), inputs=letters[1:8]) plot(ca, vertical=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{color} \keyword{hplot} spatstat/man/predict.kppm.Rd0000755000176000001440000000314412237642733015625 0ustar ripleyusers\name{predict.kppm} \alias{predict.kppm} \alias{fitted.kppm} \title{Prediction from a Fitted Cluster Point Process Model} \description{ Given a fitted cluster point process model, these functions compute the fitted intensity. } \usage{ \method{fitted}{kppm}(object, ...) \method{predict}{kppm}(object, ...) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{\dots}{ Arguments passed to \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } } \details{ These functions are methods for the generic functions \code{\link{fitted}} and \code{\link{predict}}. The argument \code{object} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. The \emph{intensity} of the fitted model is computed, using \code{\link{fitted.ppm}} or \code{\link{predict.ppm}} respectively. } \value{ The value of \code{fitted.kppm} is a numeric vector giving the fitted values at the quadrature points. The value of \code{predict.kppm} is usually a pixel image (object of class \code{"im"}), but see \code{\link{predict.ppm}} for details. 
} \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{vcov.kppm}}, \code{\link{fitted.ppm}}, \code{\link{predict.ppm}} } \examples{ data(redwood) fit <- kppm(redwood, ~x, "Thomas") predict(fit) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/nnmark.Rd0000644000176000001440000000653112237642733014513 0ustar ripleyusers\name{nnmark} \alias{nnmark} \title{ Mark of Nearest Neighbour } \description{ Given a marked point pattern dataset \code{X} this function computes, for each desired location \code{y}, the mark attached to the nearest neighbour of \code{y} in \code{X}. The desired locations \code{y} can be either a pixel grid or the point pattern \code{X} itself. } \usage{ nnmark(X, ..., k = 1, at=c("pixels", "points")) } \arguments{ \item{X}{ A marked point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{k}{ Single integer. The \code{k}th nearest data point will be used. } \item{at}{ String specifying whether to compute the values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } } \details{ Given a marked point pattern dataset \code{X} this function computes, for each desired location \code{y}, the mark attached to the point of \code{X} that is nearest to \code{y}. The desired locations \code{y} can be either a pixel grid or the point pattern \code{X} itself. The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The marks are allowed to be a vector or a data frame. \itemize{ \item If \code{at="points"}, then for each point in \code{X}, the algorithm finds the nearest \emph{other} point in \code{X}, and extracts the mark attached to it. 
The result is a vector or data frame containing the marks of the neighbours of each point. \item If \code{at="pixels"} (the default), then for each pixel in a rectangular grid, the algorithm finds the nearest point in \code{X}, and extracts the mark attached to it. The result is an image or a list of images containing the marks of the neighbours of each pixel. The pixel resolution is controlled by the arguments \code{\dots} passed to \code{\link{as.mask}}. } If the argument \code{k} is given, then the \code{k}-th nearest neighbour will be used. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). The value at each pixel is the mark attached to the nearest point of \code{X}. \item If \code{at="points"}, the result is a vector or factor of length equal to the number of points in \code{X}. Entries are the mark values of the nearest neighbours of each point of \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{listof}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}. Entries are the mark values of the nearest neighbours of each point of \code{X}. 
} } \author{Adrian Baddeley and Rolf Turner} \seealso{ \code{\link{Smooth.ppp}}, \code{\link{marktable}}, \code{\link{nnwhich}} } \examples{ plot(nnmark(ants)) plot(nnmark(finpines)) } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/Kres.Rd0000755000176000001440000000577312237642731014141 0ustar ripleyusers\name{Kres} \Rdversion{1.1} \alias{Kres} \title{ Residual K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{K} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Kres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Kcom}}. } \item{\dots}{ Arguments passed to \code{\link{Kcom}}. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{K} function of the dataset, which should be approximately zero if the model is a good fit to the data. In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Kres} first calls \code{\link{Kcom}} to compute both the nonparametric estimate of the \eqn{K} function and its model compensator. Then \code{Kres} computes the difference between them, which is the residual \eqn{K}-function. Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Kcom}}. Then \code{Kres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and Moller, J. 
(2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} Ege Rubak and Jesper Moller. } \seealso{ Related functions: \code{\link{Kcom}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gres}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{ fit0 <- ppm(cells, ~1, nd=16)} K0 <- Kres(fit0) K0 plot(K0) # isotropic-correction estimate plot(K0, ires ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells, ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)} K1 <- Kres(fit1) if(interactive()) { plot(K1, ires ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Kres(cells, interaction=Strauss(0.12))) } # How to make envelopes \dontrun{ E <- envelope(fit1, Kres, interaction=as.interact(fit1), nsim=19) plot(E) } # For computational efficiency Kc <- Kcom(fit1) K1 <- Kres(Kc) } \keyword{spatial} \keyword{models} spatstat/man/inforder.family.Rd0000755000176000001440000000244412237642732016316 0ustar ripleyusers\name{inforder.family} \alias{inforder.family} \title{Infinite Order Interaction Family} \description{ An object describing the family of all Gibbs point processes with infinite interaction order. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have infinite order of interaction, such as the area-interaction process \cite{\link{AreaInter}}. Anyway, \code{inforder.family} is an object of class \code{"isf"} containing a function \code{inforder.family$eval} for evaluating the sufficient statistics of a Gibbs point process model taking an exponential family form. 
} \seealso{ \code{\link{AreaInter}} to create the area interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/interp.im.Rd0000755000176000001440000000276312237642732015137 0ustar ripleyusers\name{interp.im} \alias{interp.im} \title{Interpolate a Pixel Image} \description{ Interpolates the values of a pixel image at any desired location in the frame. } \usage{ interp.im(Z, x, y) } \arguments{ \item{Z}{ Pixel image (object of class \code{"im"}) with numeric or integer values. } \item{x,y}{ Vectors of Cartesian coordinates. } } \details{ A value at each location \code{(x[i],y[i])} will be interpolated using the pixel values of \code{Z} at the four surrounding pixel centres, by simple bilinear interpolation. At the boundary (where \code{(x[i],y[i])} is not surrounded by four pixel centres) the value at the nearest pixel is taken. } \value{ Vector of interpolated values, with \code{NA} for points that lie outside the domain of the image. 
} \examples{ opa <- par(mfrow=c(1,2)) # coarse image V <- as.im(function(x,y) { x^2 + y }, owin(), dimyx=10) plot(V, main="coarse image", col=terrain.colors(256)) # lookup value at location (0.5,0.5) V[list(x=0.5,y=0.5)] # interpolated value at location (0.5,0.5) interp.im(V, 0.5, 0.5) # true value is 0.75 # how to obtain an interpolated image at a desired resolution U <- as.im(interp.im, W=owin(), Z=V, dimyx=256) plot(U, main="interpolated image", col=terrain.colors(256)) par(opa) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/matclust.estpcf.Rd0000755000176000001440000001441312237642733016345 0ustar ripleyusers\name{matclust.estpcf} \alias{matclust.estpcf} \title{Fit the Matern Cluster Point Process by Minimum Contrast Using Pair Correlation} \description{ Fits the Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ matclust.estpcf(X, startpar=c(kappa=1,R=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Matern Cluster process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. 
} } \details{ This algorithm fits the Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Matern Cluster point process to \code{X}, by finding the parameters of the Matern Cluster model which give the closest match between the theoretical pair correlation function of the Matern Cluster process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Matern Cluster point process is described in Moller and Waagepetersen (2003, p. 62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and uniformly distributed inside a circle of radius \eqn{R} centred on the parent point. 
The theoretical pair correlation function of the Matern Cluster process is \deqn{ g(r) = 1 + \frac 1 {4\pi R \kappa r} h(\frac{r}{2R}) }{ g(r) = 1 + h(r/(2*R))/(4 * pi * R * kappa * r) } where \deqn{ h(z) = \frac {16} \pi [ z \mbox{arccos}(z) - z^2 \sqrt{1 - z^2} ] }{ h(z) = (16/pi) * (z * arccos(z) - z^2 * sqrt(1 - z^2)) } for \eqn{z <= 1}, and \eqn{h(z) = 0} for \eqn{z > 1}. The theoretical intensity of the Matern Cluster process is \eqn{\lambda = \kappa \mu}{lambda = kappa * mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{R}{R}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Matern Cluster process can be simulated using \code{\link{rMatClust}}. Homogeneous or inhomogeneous Matern Cluster models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. 
It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{kppm}}, \code{\link{matclust.estK}}, \code{\link{thomas.estpcf}}, \code{\link{thomas.estK}}, \code{\link{lgcp.estK}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ data(redwood) u <- matclust.estpcf(redwood, c(kappa=10, R=0.1)) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat/man/pcf.fv.Rd0000755000176000001440000001156112237642733014411 0ustar ripleyusers\name{pcf.fv} \alias{pcf.fv} \title{Pair Correlation Function obtained from K Function} \description{ Estimates the pair correlation function of a point pattern, given an estimate of the K function. } \usage{ \method{pcf}{fv}(X, \dots, method="c") } \arguments{ \item{X}{ An estimate of the \eqn{K} function or one of its variants. An object of class \code{"fv"}. } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function. 
Essentially a data frame containing (at least) the variables \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{pcf}{vector of values of \eqn{g(r)} } } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an estimate of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivative. It is a method for the generic function \code{\link{pcf}} for the class \code{"fv"}. The argument \code{X} should be an estimated \eqn{K} function, given as a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). This object should be the value returned by \code{\link{Kest}}, \code{\link{Kcross}}, \code{\link{Kmulti}} or \code{\link{Kinhom}}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \code{modreg} library. 
Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Z(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. However it effectively constrains \eqn{g(0) = 1}. If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. Method \code{"a"} seems comparatively unreliable. Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D., Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # univariate point pattern data(simdat) \testonly{ simdat <- simdat[seq(1,simdat$n, by=4)] } K <- Kest(simdat) p <- pcf.fv(K, spar=0.5, method="b") plot(p, main="pair correlation function for simdat") # indicates inhibition at distances r < 0.3 } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/beginner.Rd0000644000176000001440000000161512237642732015013 0ustar ripleyusers\name{beginner} \alias{beginner} \title{ Print Introduction For Beginners } \description{ Prints an introduction for beginners to the \code{spatstat} package, or another specified package. } \usage{ beginner(package = "spatstat") } \arguments{ \item{package}{ Name of package. } } \details{ This function prints an introduction for beginners to the \pkg{spatstat} package. The function can be executed simply by typing \code{beginner} without parentheses. If the argument \code{package} is given, then the function prints the beginner's help file \code{BEGINNER.txt} from the specified package (if it has one). } \value{ Null. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{latest.news}} } \examples{ beginner } \keyword{documentation} spatstat/man/spatstat-package.Rd0000755000176000001440000021001412237653250016451 0ustar ripleyusers\name{spatstat-package} \alias{spatstat-package} \alias{spatstat} \docType{package} \title{The Spatstat Package} \description{ This is a summary of the features of \pkg{spatstat}, a package in \code{R} for the statistical analysis of spatial point patterns. } \details{ \pkg{spatstat} is a package for the statistical analysis of spatial data. Currently, it deals mainly with the analysis of spatial patterns of points in two-dimensional space. The points may carry auxiliary data (`marks'), and the spatial region in which the points were recorded may have arbitrary shape. The package supports \itemize{ \item creation, manipulation and plotting of point patterns \item exploratory data analysis \item simulation of point process models \item parametric model-fitting \item hypothesis tests and model diagnostics } Apart from two-dimensional point patterns and point processes, \pkg{spatstat} also supports point patterns in three dimensions, point patterns in multidimensional space-time, point patterns on a linear network, patterns of line segments in two dimensions, and spatial tessellations and random sets in two dimensions. 
The package can fit several types of point process models to a point pattern dataset: \itemize{ \item Poisson point process models (by Berman-Turner approximate maximum likelihood or by spatial logistic regression) \item Gibbs/Markov point process models (by Baddeley-Turner approximate maximum pseudolikelihood or Huang-Ogata approximate maximum likelihood) \item Cox/cluster process models (by Waagepetersen's two-step fitting procedure and minimum contrast) } The models may include spatial trend, dependence on covariates, and complicated interpoint interactions. Models are specified by a \code{formula} in the \code{R} language, and are fitted using a function analogous to \code{\link{lm}} and \code{\link{glm}}. Fitted models can be printed, plotted, predicted, simulated and so on. } \section{Getting Started}{ For a quick introduction to \pkg{spatstat}, see the package vignette \emph{Getting started with spatstat} installed with \pkg{spatstat}. (To see this document online, start \R, type \code{help.start()} to open the help browser, and navigate to \code{Packages > spatstat > Vignettes}). For a complete 2-day course on using \pkg{spatstat}, see the workshop notes by Baddeley (2010), available on the internet. Type \code{demo(spatstat)} for a demonstration of the package's capabilities. Type \code{demo(data)} to see all the datasets available in the package. For information about handling data in \bold{shapefiles}, see the Vignette \emph{Handling shapefiles in the spatstat package} installed with \pkg{spatstat}. To learn about spatial point process methods, see the short book by Diggle (2003) and the handbook Gelfand et al (2010). } \section{Updates}{ New versions of \pkg{spatstat} are produced about once a month. Users are advised to update their installation of \pkg{spatstat} regularly. Type \code{latest.news} to read the news documentation about changes to the current installed version of \pkg{spatstat}. 
Type \code{news(package="spatstat")} to read news documentation about all previous versions of the package. } \section{FUNCTIONS AND DATASETS}{ Following is a summary of the main functions and datasets in the \pkg{spatstat} package. Alternatively an alphabetical list of all functions and datasets is available by typing \code{library(help=spatstat)}. For further information on any of these, type \code{help(name)} where \code{name} is the name of the function or dataset. } \section{CONTENTS:}{ \tabular{ll}{ I. \tab Creating and manipulating data \cr II. \tab Exploratory Data Analysis \cr III. \tab Model fitting (cluster models) \cr IV. \tab Model fitting (Poisson and Gibbs models) \cr V. \tab Model fitting (spatial logistic regression)\cr VI. \tab Simulation \cr VII. \tab Tests and diagnostics\cr VIII. \tab Documentation } } \section{I. CREATING AND MANIPULATING DATA}{ \bold{Types of spatial data:} The main types of spatial data supported by \pkg{spatstat} are: \tabular{ll}{ \code{\link{ppp}} \tab point pattern \cr \code{\link{owin}} \tab window (spatial region) \cr \code{\link{im}} \tab pixel image \cr \code{\link{psp}} \tab line segment pattern \cr \code{\link{tess}} \tab tessellation \cr \code{\link{pp3}} \tab three-dimensional point pattern \cr \code{\link{ppx}} \tab point pattern in any number of dimensions \cr \code{\link{lpp}} \tab point pattern on a linear network } \bold{To create a point pattern:} \tabular{ll}{ \code{\link{ppp}} \tab create a point pattern from \eqn{(x,y)} and window information \cr \tab \code{ppp(x, y, xlim, ylim)} for rectangular window\cr \tab \code{ppp(x, y, poly)} for polygonal window \cr \tab \code{ppp(x, y, mask)} for binary image window \cr \code{\link{as.ppp}} \tab convert other types of data to a \code{ppp} object \cr \code{\link{clickppp}} \tab interactively add points to a plot \cr \code{\link{marks<-}}, \code{\%mark\%} \tab attach/reassign marks to a point pattern } \bold{To simulate a random point pattern:} \tabular{ll}{ 
\code{\link{runifpoint}} \tab generate \eqn{n} independent uniform random points \cr \code{\link{rpoint}} \tab generate \eqn{n} independent random points \cr \code{\link{rmpoint}} \tab generate \eqn{n} independent multitype random points \cr \code{\link{rpoispp}} \tab simulate the (in)homogeneous Poisson point process \cr \code{\link{rmpoispp}} \tab simulate the (in)homogeneous multitype Poisson point process \cr \code{\link{runifdisc}} \tab generate \eqn{n} independent uniform random points in disc\cr \code{\link{rstrat}} \tab stratified random sample of points \cr \code{\link{rsyst}} \tab systematic random sample of points \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rMaternI}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model I inhibition process\cr \code{\link{rMaternII}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model II inhibition process\cr \code{\link{rSSI}} \tab simulate Simple Sequential Inhibition process\cr \code{\link{rStrauss}} \tab simulate Strauss process (perfect simulation)\cr \code{\link{rHardcore}} \tab simulate Hard Core process (perfect simulation)\cr \code{\link{rDiggleGratton}} \tab simulate Diggle-Gratton process (perfect simulation)\cr \code{\link{rDGS}} \tab simulate Diggle-Gates-Stibbard process (perfect simulation)\cr \code{\link{rNeymanScott}} \tab simulate a general Neyman-Scott process\cr \code{\link{rPoissonCluster}} \tab simulate a general Poisson cluster process\cr \code{\link{rMatClust}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster process\cr \code{\link{rThomas}} \tab simulate the Thomas process \cr \code{\link{rGaussPoisson}} \tab simulate the Gauss-Poisson cluster process\cr \code{\link{rCauchy}} \tab simulate Neyman-Scott Cauchy cluster process \cr \code{\link{rVarGamma}} \tab simulate Neyman-Scott Variance Gamma cluster process \cr \code{\link{rthin}} \tab 
random thinning \cr \code{\link{rcell}} \tab simulate the Baddeley-Silverman cell process \cr \code{\link{rmh}} \tab simulate Gibbs point process using Metropolis-Hastings \cr \code{\link{simulate.ppm}} \tab simulate Gibbs point process using Metropolis-Hastings \cr \code{\link{runifpointOnLines}} \tab generate \eqn{n} random points along specified line segments \cr \code{\link{rpoisppOnLines}} \tab generate Poisson random points along specified line segments } \bold{To randomly change an existing point pattern:} \tabular{ll}{ \code{\link{rshift}} \tab random shifting of points \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rthin}} \tab random thinning \cr \code{\link{rlabel}} \tab random (re)labelling of a multitype point pattern \cr \code{\link{quadratresample}} \tab block resampling } \bold{Standard point pattern datasets:} Datasets in \pkg{spatstat} are lazy-loaded, so you can simply type the name of the dataset to use it; there is no need to type \code{\link{data}(amacrine)} etc. Type \code{demo(data)} to see a display of all the datasets installed with the package. \tabular{ll}{ \code{\link{amacrine}} \tab Austin Hughes' rabbit amacrine cells \cr \code{\link{anemones}} \tab Upton-Fingleton sea anemones data\cr \code{\link{ants}} \tab Harkness-Isham ant nests data\cr \code{\link{bei}} \tab Tropical rainforest trees \cr \code{\link{betacells}} \tab Waessle et al. 
cat retinal ganglia data \cr \code{\link{bramblecanes}} \tab Bramble Canes data \cr \code{\link{bronzefilter}} \tab Bronze Filter Section data \cr \code{\link{cells}} \tab Crick-Ripley biological cells data \cr \code{\link{chicago}} \tab Chicago street crimes \cr \code{\link{chorley}} \tab Chorley-Ribble cancer data \cr \code{\link{clmfires}} \tab Castilla-La Mancha forest fires \cr \code{\link{copper}} \tab Berman-Huntington copper deposits data \cr \code{\link{demopat}} \tab Synthetic point pattern \cr \code{\link{finpines}} \tab Finnish Pines data \cr \code{\link{flu}} \tab Influenza virus proteins \cr \code{\link{gordon}} \tab People in Gordon Square, London \cr \code{\link{gorillas}} \tab Gorilla nest sites \cr \code{\link{hamster}} \tab Aherne's hamster tumour data \cr \code{\link{humberside}} \tab North Humberside childhood leukaemia data \cr \code{\link{hyytiala}} \tab {Mixed forest in \ifelse{latex}{\out{Hyyti{\"a}l{\"a}}}{Hyytiala}, Finland}\cr \code{\link{japanesepines}} \tab Japanese Pines data \cr \code{\link{lansing}} \tab Lansing Woods data \cr \code{\link{longleaf}} \tab Longleaf Pines data \cr \code{\link{mucosa}} \tab Cells in gastric mucosa \cr \code{\link{murchison}} \tab Murchison gold deposits \cr \code{\link{nbfires}} \tab New Brunswick fires data \cr \code{\link{nztrees}} \tab Mark-Esler-Ripley trees data \cr \code{\link{osteo}} \tab Osteocyte lacunae (3D, replicated) \cr \code{\link{paracou}} \tab Kimboto trees in Paracou, French Guiana \cr \code{\link{ponderosa}} \tab Getis-Franklin ponderosa pine trees data \cr \code{\link{redwood}} \tab Strauss-Ripley redwood saplings data \cr \code{\link{redwoodfull}} \tab Strauss redwood saplings data (full set) \cr \code{\link{residualspaper}} \tab Data from Baddeley et al (2005) \cr \code{\link{shapley}} \tab Galaxies in an astronomical survey \cr \code{\link{simdat}} \tab Simulated point pattern (inhomogeneous, with interaction) \cr \code{\link{spruces}} \tab Spruce trees in Saxonia \cr 
\code{\link{swedishpines}} \tab Strand-Ripley Swedish pines data \cr \code{\link{urkiola}} \tab Urkiola Woods data \cr \code{\link{waka}} \tab Trees in Waka national park } \bold{To manipulate a point pattern:} \tabular{ll}{ \code{\link{plot.ppp}} \tab plot a point pattern (e.g. \code{plot(X)}) \cr \code{\link{iplot}} \tab plot a point pattern interactively \cr \code{\link{[.ppp}} \tab extract or replace a subset of a point pattern \cr \tab \code{pp[subset]} or \code{pp[subwindow]} \cr \code{\link{superimpose}} \tab combine several point patterns \cr \code{\link{by.ppp}} \tab apply a function to sub-patterns of a point pattern \cr \code{\link{cut.ppp}} \tab classify the points in a point pattern \cr \code{\link{unmark}} \tab remove marks \cr \code{\link{npoints}} \tab count the number of points \cr \code{\link{coords}} \tab extract coordinates, change coordinates \cr \code{\link{marks}} \tab extract marks, change marks or attach marks \cr \code{\link{split.ppp}} \tab divide pattern into sub-patterns \cr \code{\link{rotate}} \tab rotate pattern \cr \code{\link{shift} } \tab translate pattern \cr \code{\link{flipxy} } \tab swap \eqn{x} and \eqn{y} coordinates \cr \code{\link{reflect} } \tab reflect in the origin \cr \code{\link{periodify} } \tab make several translated copies \cr \code{\link{affine}} \tab apply affine transformation\cr \code{\link{scalardilate}} \tab apply scalar dilation\cr \code{\link{density.ppp}} \tab kernel smoothing of point pattern\cr \code{\link{Smooth.ppp}} \tab nearest-neighbour smoothing of point pattern\cr \code{\link{nnmark}} \tab smooth the marks attached to points\cr \code{\link{sharpen.ppp}} \tab data sharpening\cr \code{\link{identify.ppp}} \tab interactively identify points \cr \code{\link{unique.ppp}} \tab remove duplicate points \cr \code{\link{duplicated.ppp}} \tab determine which points are duplicates \cr \code{\link{connected.ppp}} \tab find clumps of points \cr \code{\link{dirichlet}} \tab compute Dirichlet-Voronoi 
tessellation \cr \code{\link{delaunay}} \tab compute Delaunay triangulation \cr \code{\link{delaunay.distance}} \tab graph distance in Delaunay triangulation \cr \code{\link{convexhull}} \tab compute convex hull \cr \code{\link{discretise}} \tab discretise coordinates \cr \code{\link{pixellate.ppp}} \tab approximate point pattern by pixel image \cr \code{\link{as.im.ppp}} \tab approximate point pattern by pixel image } See \code{\link{spatstat.options}} to control plotting behaviour. \bold{To create a window:} An object of class \code{"owin"} describes a spatial region (a window of observation). \tabular{ll}{ \code{\link{owin}} \tab Create a window object \cr \tab \code{owin(xlim, ylim)} for rectangular window \cr \tab \code{owin(poly)} for polygonal window \cr \tab \code{owin(mask)} for binary image window \cr \code{\link{as.owin}} \tab Convert other data to a window object \cr \code{\link{square}} \tab make a square window \cr \code{\link{disc}} \tab make a circular window \cr \code{\link{ripras}} \tab Ripley-Rasson estimator of window, given only the points \cr \code{\link{convexhull}} \tab compute convex hull of something \cr \code{\link{letterR}} \tab polygonal window in the shape of the \R logo } \bold{To manipulate a window:} \tabular{ll}{ \code{\link{plot.owin}} \tab plot a window. 
\cr \tab \code{plot(W)}\cr \code{\link{bounding.box}} \tab Find a tight bounding box for the window \cr \code{\link{erosion}} \tab erode window by a distance r\cr \code{\link{dilation}} \tab dilate window by a distance r\cr \code{\link{closing}} \tab close window by a distance r\cr \code{\link{opening}} \tab open window by a distance r\cr \code{\link{border}} \tab difference between window and its erosion/dilation \cr \code{\link{complement.owin}} \tab invert (swap inside and outside)\cr \code{\link{simplify.owin}} \tab approximate a window by a simple polygon \cr \code{\link{rotate}} \tab rotate window \cr \code{\link{flipxy}} \tab swap \eqn{x} and \eqn{y} coordinates \cr \code{\link{shift} } \tab translate window \cr \code{\link{periodify} } \tab make several translated copies \cr \code{\link{affine}} \tab apply affine transformation } \bold{Digital approximations:} \tabular{ll}{ \code{\link{as.mask}} \tab Make a discrete pixel approximation of a given window \cr \code{\link{as.im.owin}} \tab convert window to pixel image \cr \code{\link{pixellate.owin}} \tab convert window to pixel image \cr \code{\link{commonGrid}} \tab find common pixel grid for windows \cr \code{\link{nearest.raster.point}} \tab map continuous coordinates to raster locations\cr \code{\link{raster.x}} \tab raster x coordinates \cr \code{\link{raster.y}} \tab raster y coordinates \cr \code{\link{as.polygonal}} \tab convert pixel mask to polygonal window } See \code{\link{spatstat.options}} to control the approximation \bold{Geometrical computations with windows:} \tabular{ll}{ \code{\link{intersect.owin}} \tab intersection of two windows\cr \code{\link{union.owin}} \tab union of two windows\cr \code{\link{setminus.owin}} \tab set subtraction of two windows\cr \code{\link{inside.owin}} \tab determine whether a point is inside a window\cr \code{\link{area.owin}} \tab compute area \cr \code{\link{perimeter}} \tab compute perimeter length \cr \code{\link{diameter.owin}} \tab compute diameter\cr 
\code{\link{incircle}} \tab find largest circle inside a window \cr \code{\link{connected.owin}} \tab find connected components of window \cr \code{\link{eroded.areas}} \tab compute areas of eroded windows\cr \code{\link{dilated.areas}} \tab compute areas of dilated windows\cr \code{\link{bdist.points}} \tab compute distances from data points to window boundary \cr \code{\link{bdist.pixels}} \tab compute distances from all pixels to window boundary \cr \code{\link{bdist.tiles}} \tab boundary distance for each tile in tessellation \cr \code{\link{distmap.owin}} \tab distance transform image \cr \code{\link{distfun.owin}} \tab distance transform \cr \code{\link{centroid.owin}} \tab compute centroid (centre of mass) of window\cr \code{\link{is.subset.owin}} \tab determine whether one window contains another \cr \code{\link{is.convex}} \tab determine whether a window is convex \cr \code{\link{convexhull}} \tab compute convex hull \cr \code{\link{as.mask}} \tab pixel approximation of window \cr \code{\link{as.polygonal}} \tab polygonal approximation of window \cr \code{\link{is.rectangle}} \tab test whether window is a rectangle \cr \code{\link{is.polygonal}} \tab test whether window is polygonal \cr \code{\link{is.mask}} \tab test whether window is a mask \cr \code{\link{setcov}} \tab spatial covariance function of window } \bold{Pixel images:} An object of class \code{"im"} represents a pixel image. Such objects are returned by some of the functions in \pkg{spatstat} including \code{\link{Kmeasure}}, \code{\link{setcov}} and \code{\link{density.ppp}}. 
\tabular{ll}{ \code{\link{im}} \tab create a pixel image\cr \code{\link{as.im}} \tab convert other data to a pixel image\cr \code{\link{pixellate}} \tab convert other data to a pixel image\cr \code{\link{as.matrix.im}} \tab convert pixel image to matrix\cr \code{\link{as.data.frame.im}} \tab convert pixel image to data frame\cr \code{\link{plot.im}} \tab plot a pixel image on screen as a digital image\cr \code{\link{contour.im}} \tab draw contours of a pixel image \cr \code{\link{persp.im}} \tab draw perspective plot of a pixel image \cr \code{\link{rgbim}} \tab create colour-valued pixel image \cr \code{\link{hsvim}} \tab create colour-valued pixel image \cr \code{\link{[.im}} \tab extract a subset of a pixel image\cr \code{\link{[<-.im}} \tab replace a subset of a pixel image\cr \code{\link{rotate.im}} \tab rotate pixel image \cr \code{\link{shift.im}} \tab apply vector shift to pixel image \cr \code{\link{affine.im}} \tab apply affine transformation to image \cr \code{X} \tab print very basic information about image \code{X}\cr \code{\link{summary}(X)} \tab summary of image \code{X} \cr \code{\link{hist.im}} \tab histogram of image \cr \code{\link{mean.im}} \tab mean pixel value of image \cr \code{\link{integral.im}} \tab integral of pixel values \cr \code{\link{quantile.im}} \tab quantiles of image \cr \code{\link{cut.im}} \tab convert numeric image to factor image \cr \code{\link{is.im}} \tab test whether an object is a pixel image\cr \code{\link{interp.im}} \tab interpolate a pixel image\cr \code{\link{blur}} \tab apply Gaussian blur to image\cr \code{\link{connected.im}} \tab find connected components \cr \code{\link{compatible.im}} \tab test whether two images have compatible dimensions \cr \code{\link{harmonise.im}} \tab make images compatible \cr \code{\link{commonGrid}} \tab find a common pixel grid for images \cr \code{\link{eval.im}} \tab evaluate any expression involving images\cr \code{\link{scaletointerval}} \tab rescale pixel values \cr 
\code{\link{zapsmall.im}} \tab set very small pixel values to zero \cr \code{\link{levelset}} \tab level set of an image\cr \code{\link{solutionset}} \tab region where an expression is true \cr \code{\link{imcov}} \tab spatial covariance function of image \cr \code{\link{convolve.im}} \tab spatial convolution of images \cr \code{\link{transect.im}} \tab line transect of image } \bold{Line segment patterns} An object of class \code{"psp"} represents a pattern of straight line segments. \tabular{ll}{ \code{\link{psp}} \tab create a line segment pattern \cr \code{\link{as.psp}} \tab convert other data into a line segment pattern \cr \code{\link{is.psp}} \tab determine whether a dataset has class \code{"psp"} \cr \code{\link{plot.psp}} \tab plot a line segment pattern \cr \code{\link{print.psp}} \tab print basic information \cr \code{\link{summary.psp}} \tab print summary information \cr \code{\link{[.psp}} \tab extract a subset of a line segment pattern \cr \code{\link{as.data.frame.psp}} \tab convert line segment pattern to data frame \cr \code{\link{marks.psp}} \tab extract marks of line segments \cr \code{\link{marks<-.psp}} \tab assign new marks to line segments \cr \code{\link{unmark.psp}} \tab delete marks from line segments \cr \code{\link{midpoints.psp}} \tab compute the midpoints of line segments \cr \code{\link{endpoints.psp}} \tab extract the endpoints of line segments \cr \code{\link{lengths.psp}} \tab compute the lengths of line segments \cr \code{\link{angles.psp}} \tab compute the orientation angles of line segments \cr \code{\link{superimpose}} \tab combine several line segment patterns \cr \code{\link{flipxy}} \tab swap \eqn{x} and \eqn{y} coordinates \cr \code{\link{rotate.psp}} \tab rotate a line segment pattern \cr \code{\link{shift.psp}} \tab shift a line segment pattern \cr \code{\link{periodify}} \tab make several shifted copies \cr \code{\link{affine.psp}} \tab apply an affine transformation \cr \code{\link{pixellate.psp}} \tab approximate line 
segment pattern by pixel image \cr \code{\link{as.mask.psp}} \tab approximate line segment pattern by binary mask \cr \code{\link{distmap.psp}} \tab compute the distance map of a line segment pattern \cr \code{\link{distfun.psp}} \tab compute the distance map of a line segment pattern \cr \code{\link{density.psp}} \tab kernel smoothing of line segments\cr \code{\link{selfcrossing.psp}} \tab find crossing points between line segments \cr \code{\link{crossing.psp}} \tab find crossing points between two line segment patterns \cr \code{\link{nncross}} \tab find distance to nearest line segment from a given point\cr \code{\link{nearestsegment}} \tab find line segment closest to a given point \cr \code{\link{project2segment}} \tab find location along a line segment closest to a given point \cr \code{\link{pointsOnLines}} \tab generate points evenly spaced along line segment \cr \code{\link{rpoisline}} \tab generate a realisation of the Poisson line process inside a window\cr \code{\link{rlinegrid}} \tab generate a random array of parallel lines through a window } \bold{Tessellations} An object of class \code{"tess"} represents a tessellation. 
\tabular{ll}{ \code{\link{tess}} \tab create a tessellation \cr \code{\link{quadrats}} \tab create a tessellation of rectangles\cr \code{\link{as.tess}} \tab convert other data to a tessellation \cr \code{\link{plot.tess}} \tab plot a tessellation \cr \code{\link{tiles}} \tab extract all the tiles of a tessellation \cr \code{\link{[.tess}} \tab extract some tiles of a tessellation \cr \code{\link{[<-.tess}} \tab change some tiles of a tessellation \cr \code{\link{intersect.tess}} \tab intersect two tessellations \cr \tab or restrict a tessellation to a window \cr \code{\link{chop.tess}} \tab subdivide a tessellation by a line \cr \code{\link{dirichlet}} \tab compute Dirichlet-Voronoi tessellation of points\cr \code{\link{delaunay}} \tab compute Delaunay triangulation of points\cr \code{\link{rpoislinetess}} \tab generate tessellation using Poisson line process \cr \code{\link{tile.areas}} \tab area of each tile in tessellation \cr \code{\link{bdist.tiles}} \tab boundary distance for each tile in tessellation } \bold{Three-dimensional point patterns} An object of class \code{"pp3"} represents a three-dimensional point pattern in a rectangular box. The box is represented by an object of class \code{"box3"}. 
\tabular{ll}{ \code{\link{pp3}} \tab create a 3-D point pattern \cr \code{\link{plot.pp3}} \tab plot a 3-D point pattern \cr \code{\link{coords}} \tab extract coordinates \cr \code{\link{as.hyperframe}} \tab extract coordinates \cr \code{\link{unitname.pp3}} \tab name of unit of length \cr \code{\link{npoints}} \tab count the number of points \cr \code{\link{runifpoint3}} \tab generate uniform random points in 3-D \cr \code{\link{rpoispp3}} \tab generate Poisson random points in 3-D \cr \code{\link{envelope.pp3}} \tab generate simulation envelopes for 3-D pattern \cr \code{\link{box3}} \tab create a 3-D rectangular box \cr \code{\link{as.box3}} \tab convert data to 3-D rectangular box \cr \code{\link{unitname.box3}} \tab name of unit of length \cr \code{\link{diameter.box3}} \tab diameter of box \cr \code{\link{volume.box3}} \tab volume of box \cr \code{\link{shortside.box3}} \tab shortest side of box \cr \code{\link{eroded.volumes}} \tab volumes of erosions of box } \bold{Multi-dimensional space-time point patterns} An object of class \code{"ppx"} represents a point pattern in multi-dimensional space and/or time. \tabular{ll}{ \code{\link{ppx}} \tab create a multidimensional space-time point pattern \cr \code{\link{coords}} \tab extract coordinates \cr \code{\link{as.hyperframe}} \tab extract coordinates \cr \code{\link{unitname.ppx}} \tab name of unit of length \cr \code{\link{npoints}} \tab count the number of points \cr \code{\link{runifpointx}} \tab generate uniform random points \cr \code{\link{rpoisppx}} \tab generate Poisson random points \cr \code{\link{boxx}} \tab define multidimensional box \cr \code{\link{diameter.boxx}} \tab diameter of box \cr \code{\link{volume.boxx}} \tab volume of box \cr \code{\link{shortside.boxx}} \tab shortest side of box \cr \code{\link{eroded.volumes.boxx}} \tab volumes of erosions of box } \bold{Point patterns on a linear network} An object of class \code{"linnet"} represents a linear network (for example, a road network). 
\tabular{ll}{ \code{\link{linnet}} \tab create a linear network \cr \code{\link{clickjoin}} \tab interactively join vertices in network \cr \code{\link{simplenet}} \tab simple example of network \cr \code{\link{lineardisc}} \tab disc in a linear network \cr \code{\link{methods.linnet}} \tab methods for \code{linnet} objects } An object of class \code{"lpp"} represents a point pattern on a linear network (for example, road accidents on a road network). \tabular{ll}{ \code{\link{lpp}} \tab create a point pattern on a linear network \cr \code{\link{methods.lpp}} \tab methods for \code{lpp} objects \cr \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr \code{\link{runiflpp}} \tab simulate random points on a linear network \cr \code{\link{chicago}} \tab Chicago street crime data \cr } \bold{Hyperframes} A hyperframe is like a data frame, except that the entries may be objects of any kind. \tabular{ll}{ \code{\link{hyperframe}} \tab create a hyperframe \cr \code{\link{as.hyperframe}} \tab convert data to hyperframe \cr \code{\link{plot.hyperframe}} \tab plot hyperframe \cr \code{\link{with.hyperframe}} \tab evaluate expression using each row of hyperframe \cr \code{\link{cbind.hyperframe}} \tab combine hyperframes by columns\cr \code{\link{rbind.hyperframe}} \tab combine hyperframes by rows\cr \code{\link{as.data.frame.hyperframe}} \tab convert hyperframe to data frame } \bold{Layered objects} A layered object represents data that should be plotted in successive layers, for example, a background and a foreground. \tabular{ll}{ \code{\link{layered}} \tab create layered object \cr \code{\link{plot.layered}} \tab plot layered object\cr \code{\link{[.layered}} \tab extract subset of layered object } \bold{Colour maps} A colour map is a mechanism for associating colours with data. It can be regarded as a function, mapping data to colours. 
Using a \code{colourmap} object in a plot command ensures that the mapping from numbers to colours is the same in different plots. \tabular{ll}{ \code{\link{colourmap}} \tab create a colour map \cr \code{\link{plot.colourmap}} \tab plot the colour map only\cr \code{\link{tweak.colourmap}} \tab alter individual colour values \cr \code{\link{interp.colourmap}} \tab make a smooth transition between colours \cr \code{\link{beachcolourmap}} \tab one special colour map } } \section{II. EXPLORATORY DATA ANALYSIS}{ \bold{Inspection of data:} \tabular{ll}{ \code{\link{summary}(X)} \tab print useful summary of point pattern \code{X}\cr \code{X} \tab print basic description of point pattern \code{X} \cr \code{any(duplicated(X))} \tab check for duplicated points in pattern \code{X} \cr \code{\link{istat}(X)} \tab Interactive exploratory analysis } \bold{Classical exploratory tools:} \tabular{ll}{ \code{\link{clarkevans}} \tab Clark and Evans aggregation index \cr \code{\link{fryplot}} \tab Fry plot \cr \code{\link{miplot}} \tab Morishita Index plot } \bold{Smoothing:} \tabular{ll}{ \code{\link{density.ppp}} \tab kernel smoothed density\cr \code{\link{relrisk}} \tab kernel estimate of relative risk\cr \code{\link{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link{bw.diggle}} \tab cross-validated bandwidth selection for \code{\link{density.ppp}}\cr \code{\link{bw.ppl}} \tab likelihood cross-validated bandwidth selection for \code{\link{density.ppp}}\cr \code{\link{bw.scott}} \tab Scott's rule of thumb for density estimation\cr \code{\link{bw.relrisk}} \tab cross-validated bandwidth selection for \code{\link{relrisk}} \cr \code{\link{bw.smoothppp}} \tab cross-validated bandwidth selection for \code{\link{Smooth.ppp}} \cr \code{\link{bw.frac}} \tab bandwidth selection using window geometry\cr \code{\link{bw.stoyan}} \tab Stoyan's rule of thumb for bandwidth for \code{\link{pcf}} } \bold{Modern exploratory tools:} \tabular{ll}{ \code{\link{clusterset}} \tab 
Allard-Fraley feature detection \cr \code{\link{nnclean}} \tab Byers-Raftery feature detection \cr \code{\link{sharpen.ppp}} \tab Choi-Hall data sharpening \cr \code{\link{rhohat}} \tab Kernel estimate of covariate effect\cr \code{\link{rho2hat}} \tab Kernel estimate of covariate effect } \bold{Summary statistics for a point pattern:} \tabular{ll}{ \code{\link{quadratcount}} \tab Quadrat counts \cr \code{\link{Fest}} \tab empty space function \eqn{F} \cr \code{\link{Gest}} \tab nearest neighbour distribution function \eqn{G} \cr \code{\link{Jest}} \tab \eqn{J}-function \eqn{J = (1-G)/(1-F)} \cr \code{\link{Kest}} \tab Ripley's \eqn{K}-function\cr \code{\link{Lest}} \tab Besag \eqn{L}-function\cr \code{\link{Tstat}} \tab Third order \eqn{T}-function \cr \code{\link{allstats}} \tab all four functions \eqn{F}, \eqn{G}, \eqn{J}, \eqn{K} \cr \code{\link{pcf}} \tab pair correlation function \cr \code{\link{Kinhom}} \tab \eqn{K} for inhomogeneous point patterns \cr \code{\link{Linhom}} \tab \eqn{L} for inhomogeneous point patterns \cr \code{\link{pcfinhom}} \tab pair correlation for inhomogeneous patterns\cr \code{\link{Finhom}} \tab \eqn{F} for inhomogeneous point patterns \cr \code{\link{Ginhom}} \tab \eqn{G} for inhomogeneous point patterns \cr \code{\link{Jinhom}} \tab \eqn{J} for inhomogeneous point patterns \cr \code{\link{localL}} \tab Getis-Franklin neighbourhood density function\cr \code{\link{localK}} \tab neighbourhood K-function\cr \code{\link{localpcf}} \tab local pair correlation function\cr \code{\link{localKinhom}} \tab local \eqn{K} for inhomogeneous point patterns \cr \code{\link{localLinhom}} \tab local \eqn{L} for inhomogeneous point patterns \cr \code{\link{localpcfinhom}} \tab local pair correlation for inhomogeneous patterns\cr \code{\link{Kest.fft}} \tab fast \eqn{K}-function using FFT for large datasets \cr \code{\link{Kmeasure}} \tab reduced second moment measure \cr \code{\link{envelope}} \tab simulation envelopes for a summary function \cr 
\code{\link{varblock}} \tab variances and confidence intervals\cr \tab for a summary function \cr \code{\link{lohboot}} \tab bootstrap for a summary function } Related facilities: \tabular{ll}{ \code{\link{plot.fv}} \tab plot a summary function\cr \code{\link{eval.fv}} \tab evaluate any expression involving summary functions\cr \code{\link{eval.fasp}} \tab evaluate any expression involving an array of functions\cr \code{\link{with.fv}} \tab evaluate an expression for a summary function\cr \code{\link{Smooth.fv}} \tab apply smoothing to a summary function\cr \code{\link{deriv.fv}} \tab calculate derivative of a summary function\cr \code{\link{nndist}} \tab nearest neighbour distances \cr \code{\link{nnwhich}} \tab find nearest neighbours \cr \code{\link{pairdist}} \tab distances between all pairs of points\cr \code{\link{crossdist}} \tab distances between points in two patterns\cr \code{\link{nncross}} \tab nearest neighbours between two point patterns \cr \code{\link{exactdt}} \tab distance from any location to nearest data point\cr \code{\link{distmap}} \tab distance map image\cr \code{\link{distfun}} \tab distance map function\cr \code{\link{density.ppp}} \tab kernel smoothed density\cr \code{\link{Smooth.ppp}} \tab spatial interpolation of marks \cr \code{\link{relrisk}} \tab kernel estimate of relative risk\cr \code{\link{sharpen.ppp}} \tab data sharpening \cr \code{\link{rknn}} \tab theoretical distribution of nearest neighbour distance } \bold{Summary statistics for a multitype point pattern:} A multitype point pattern is represented by an object \code{X} of class \code{"ppp"} such that \code{marks(X)} is a factor. 
\tabular{ll}{ \code{\link{relrisk}} \tab kernel estimation of relative risk \cr \code{\link{scan.test}} \tab spatial scan test of elevated risk \cr \code{\link{Gcross},\link{Gdot},\link{Gmulti}} \tab multitype nearest neighbour distributions \eqn{G_{ij}, G_{i\bullet}}{G[i,j], G[i.]} \cr \code{\link{Kcross},\link{Kdot}, \link{Kmulti}} \tab multitype \eqn{K}-functions \eqn{K_{ij}, K_{i\bullet}}{K[i,j], K[i.]} \cr \code{\link{Lcross},\link{Ldot}} \tab multitype \eqn{L}-functions \eqn{L_{ij}, L_{i\bullet}}{L[i,j], L[i.]} \cr \code{\link{Jcross},\link{Jdot},\link{Jmulti}} \tab multitype \eqn{J}-functions \eqn{J_{ij}, J_{i\bullet}}{J[i,j],J[i.]} \cr \code{\link{pcfcross}} \tab multitype pair correlation function \eqn{g_{ij}}{g[i,j]} \cr \code{\link{pcfdot}} \tab multitype pair correlation function \eqn{g_{i\bullet}}{g[i.]} \cr \code{\link{markconnect}} \tab marked connection function \eqn{p_{ij}}{p[i,j]} \cr \code{\link{alltypes}} \tab estimates of the above for all \eqn{i,j} pairs \cr \code{\link{Iest}} \tab multitype \eqn{I}-function\cr \code{\link{Kcross.inhom},\link{Kdot.inhom}} \tab inhomogeneous counterparts of \code{Kcross}, \code{Kdot} \cr \code{\link{Lcross.inhom},\link{Ldot.inhom}} \tab inhomogeneous counterparts of \code{Lcross}, \code{Ldot} \cr \code{\link{pcfcross.inhom},\link{pcfdot.inhom}} \tab inhomogeneous counterparts of \code{pcfcross}, \code{pcfdot} } \bold{Summary statistics for a marked point pattern:} A marked point pattern is represented by an object \code{X} of class \code{"ppp"} with a component \code{X$marks}. The entries in the vector \code{X$marks} may be numeric, complex, string or any other atomic type. 
For numeric marks, there are the following functions: \tabular{ll}{ \code{\link{markmean}} \tab smoothed local average of marks \cr \code{\link{markvar}} \tab smoothed local variance of marks \cr \code{\link{markcorr}} \tab mark correlation function \cr \code{\link{markvario}} \tab mark variogram \cr \code{\link{markcorrint}} \tab mark correlation integral \cr \code{\link{Emark}} \tab mark independence diagnostic \eqn{E(r)} \cr \code{\link{Vmark}} \tab mark independence diagnostic \eqn{V(r)} \cr \code{\link{nnmean}} \tab nearest neighbour mean index \cr \code{\link{nnvario}} \tab nearest neighbour mark variance index } For marks of any type, there are the following: \tabular{ll}{ \code{\link{Gmulti}} \tab multitype nearest neighbour distribution \cr \code{\link{Kmulti}} \tab multitype \eqn{K}-function \cr \code{\link{Jmulti}} \tab multitype \eqn{J}-function } Alternatively use \code{\link{cut.ppp}} to convert a marked point pattern to a multitype point pattern. \bold{Programming tools:} \tabular{ll}{ \code{\link{applynbd}} \tab apply function to every neighbourhood in a point pattern \cr \code{\link{markstat}} \tab apply function to the marks of neighbours in a point pattern \cr \code{\link{marktable}} \tab tabulate the marks of neighbours in a point pattern \cr \code{\link{pppdist}} \tab find the optimal match between two point patterns } \bold{Summary statistics for a point pattern on a linear network:} These are for point patterns on a linear network (class \code{lpp}). 
For unmarked patterns: \tabular{ll}{ \code{\link{linearK}} \tab \eqn{K} function on linear network \cr \code{\link{linearKinhom}} \tab inhomogeneous \eqn{K} function on linear network \cr \code{\link{linearpcf}} \tab pair correlation function on linear network \cr \code{\link{linearpcfinhom}} \tab inhomogeneous pair correlation on linear network } For multitype patterns: \tabular{ll}{ \code{\link{linearKcross}} \tab \eqn{K} function between two types of points \cr \code{\link{linearKdot}} \tab \eqn{K} function from one type to any type \cr \code{\link{linearKcross.inhom}} \tab Inhomogeneous version of \code{\link{linearKcross}} \cr \code{\link{linearKdot.inhom}} \tab Inhomogeneous version of \code{\link{linearKdot}} \cr \code{\link{linearmarkconnect}} \tab Mark connection function on linear network \cr \code{\link{linearmarkequal}} \tab Mark equality function on linear network \cr \code{\link{linearpcfcross}} \tab Pair correlation between two types of points \cr \code{\link{linearpcfdot}} \tab Pair correlation from one type to any type \cr \code{\link{linearpcfcross.inhom}} \tab Inhomogeneous version of \code{\link{linearpcfcross}} \cr \code{\link{linearpcfdot.inhom}} \tab Inhomogeneous version of \code{\link{linearpcfdot}} } Related facilities: \tabular{ll}{ \code{\link{pairdist.lpp}} \tab distances between pairs \cr \code{\link{crossdist.lpp}} \tab distances between pairs \cr \code{\link{nndist.lpp}} \tab nearest neighbour distances \cr \code{\link{nncross.lpp}} \tab nearest neighbour distances \cr \code{\link{nnwhich.lpp}} \tab find nearest neighbours \cr \code{\link{nnfun.lpp}} \tab find nearest data point \cr \code{\link{distfun.lpp}} \tab distance transform \cr \code{\link{envelope.lpp}} \tab simulation envelopes \cr \code{\link{rpoislpp}} \tab simulate Poisson points on linear network \cr \code{\link{runiflpp}} \tab simulate random points on a linear network } It is also possible to fit point process models to \code{lpp} objects. See Section IV. 
\bold{Summary statistics for a three-dimensional point pattern:} These are for 3-dimensional point pattern objects (class \code{pp3}). \tabular{ll}{ \code{\link{F3est}} \tab empty space function \eqn{F} \cr \code{\link{G3est}} \tab nearest neighbour function \eqn{G} \cr \code{\link{K3est}} \tab \eqn{K}-function \cr \code{\link{pcf3est}} \tab pair correlation function } Related facilities: \tabular{ll}{ \code{\link{envelope.pp3}} \tab simulation envelopes \cr \code{\link{pairdist.pp3}} \tab distances between all pairs of points \cr \code{\link{crossdist.pp3}} \tab distances between points in two patterns \cr \code{\link{nndist.pp3}} \tab nearest neighbour distances \cr \code{\link{nnwhich.pp3}} \tab find nearest neighbours \cr \code{\link{nncross.pp3}} \tab find nearest neighbours in another pattern } \bold{Computations for multi-dimensional point pattern:} These are for multi-dimensional space-time point pattern objects (class \code{ppx}). \tabular{ll}{ \code{\link{pairdist.ppx}} \tab distances between all pairs of points \cr \code{\link{crossdist.ppx}} \tab distances between points in two patterns \cr \code{\link{nndist.ppx}} \tab nearest neighbour distances \cr \code{\link{nnwhich.ppx}} \tab find nearest neighbours } \bold{Summary statistics for random sets:} These work for point patterns (class \code{ppp}), line segment patterns (class \code{psp}) or windows (class \code{owin}). \tabular{ll}{ \code{\link{Hest}} \tab spherical contact distribution \eqn{H} \cr \code{\link{Gfox}} \tab Foxall \eqn{G}-function \cr \code{\link{Jfox}} \tab Foxall \eqn{J}-function } } \section{III. MODEL FITTING (CLUSTER MODELS)}{ Cluster process models (with homogeneous or inhomogeneous intensity) and Cox processes can be fitted by the function \code{\link{kppm}}. Its result is an object of class \code{"kppm"}. The fitted model can be printed, plotted, predicted, simulated and updated. 
\tabular{ll}{ \code{\link{kppm}} \tab Fit model\cr \code{\link{plot.kppm}} \tab Plot the fitted model\cr \code{\link{fitted.kppm}} \tab Compute fitted intensity \cr \code{\link{predict.kppm}} \tab Compute fitted intensity \cr \code{\link{update.kppm}} \tab Update the model \cr \code{\link{simulate.kppm}} \tab Generate simulated realisations \cr \code{\link{vcov.kppm}} \tab Variance-covariance matrix of coefficients \cr \code{\link{Kmodel.kppm}} \tab \eqn{K} function of fitted model \cr \code{\link{pcfmodel.kppm}} \tab Pair correlation of fitted model } The theoretical models can also be simulated, for any choice of parameter values, using \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}}, and \code{\link{rLGCP}}. Lower-level fitting functions include: \tabular{ll}{ \code{\link{lgcp.estK}} \tab fit a log-Gaussian Cox process model\cr \code{\link{lgcp.estpcf}} \tab fit a log-Gaussian Cox process model\cr \code{\link{thomas.estK}} \tab fit the Thomas process model \cr \code{\link{thomas.estpcf}} \tab fit the Thomas process model \cr \code{\link{matclust.estK}} \tab fit the Matern Cluster process model \cr \code{\link{matclust.estpcf}} \tab fit the Matern Cluster process model \cr \code{\link{cauchy.estK}} \tab fit a Neyman-Scott Cauchy cluster process \cr \code{\link{cauchy.estpcf}} \tab fit a Neyman-Scott Cauchy cluster process\cr \code{\link{vargamma.estK}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link{vargamma.estpcf}} \tab fit a Neyman-Scott Variance Gamma process\cr \code{\link{mincontrast}} \tab low-level algorithm for fitting models \cr \tab by the method of minimum contrast } } \section{IV. MODEL FITTING (POISSON AND GIBBS MODELS)}{ \bold{Types of models} Poisson point processes are the simplest models for point patterns. A Poisson model assumes that the points are stochastically independent. It may allow the points to have a non-uniform spatial density. 
The special case of a Poisson process with a uniform spatial density is often called Complete Spatial Randomness. Poisson point processes are included in the more general class of Gibbs point process models. In a Gibbs model, there is \emph{interaction} or dependence between points. Many different types of interaction can be specified. For a detailed explanation of how to fit Poisson or Gibbs point process models to point pattern data using \pkg{spatstat}, see Baddeley and Turner (2005b) or Baddeley (2008). \bold{To fit a Poisson or Gibbs point process model:} Model fitting in \pkg{spatstat} is performed mainly by the function \code{\link{ppm}}. Its result is an object of class \code{"ppm"}. Here are some examples, where \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{ppm(X)} \tab Complete Spatial Randomness \cr \code{ppm(X, ~1)} \tab Complete Spatial Randomness \cr \code{ppm(X, ~x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{ppm(X, ~1, Strauss(0.1))} \tab Stationary Strauss process \cr \code{ppm(X, ~x, Strauss(0.1))} \tab Strauss process with \cr \tab conditional intensity loglinear in \eqn{x} } It is also possible to fit models that depend on other covariates. 
\bold{Manipulating the fitted model:} \tabular{ll}{ \code{\link{plot.ppm}} \tab Plot the fitted model\cr \code{\link{predict.ppm}} \tab Compute the spatial trend and conditional intensity\cr \tab of the fitted point process model \cr \code{\link{coef.ppm}} \tab Extract the fitted model coefficients\cr \code{\link{formula.ppm}} \tab Extract the trend formula\cr \code{\link{fitted.ppm}} \tab Compute fitted conditional intensity at quadrature points \cr \code{\link{residuals.ppm}} \tab Compute point process residuals at quadrature points \cr \code{\link{update.ppm}} \tab Update the fit \cr \code{\link{vcov.ppm}} \tab Variance-covariance matrix of estimates\cr \code{\link{rmh.ppm}} \tab Simulate from fitted model \cr \code{\link{simulate.ppm}} \tab Simulate from fitted model \cr \code{\link{print.ppm}} \tab Print basic information about a fitted model\cr \code{\link{summary.ppm}} \tab Summarise a fitted model\cr \code{\link{effectfun}} \tab Compute the fitted effect of one covariate\cr \code{\link{logLik.ppm}} \tab log-likelihood or log-pseudolikelihood\cr \code{\link{anova.ppm}} \tab Analysis of deviance \cr \code{\link{model.frame.ppm}} \tab Extract data frame used to fit model \cr \code{\link{model.images}} \tab Extract spatial data used to fit model \cr \code{\link{model.depends}} \tab Identify variables in the model \cr \code{\link{as.interact}} \tab Interpoint interaction component of model \cr \code{\link{fitin}} \tab Extract fitted interpoint interaction \cr \code{\link{is.hybrid}} \tab Determine whether the model is a hybrid \cr \code{\link{valid.ppm}} \tab Check the model is a valid point process \cr \code{\link{project.ppm}} \tab Ensure the model is a valid point process } For model selection, you can also use the generic functions \code{\link{step}}, \code{\link{drop1}} and \code{\link{AIC}} on fitted point process models. See \code{\link{spatstat.options}} to control plotting of fitted model. 
\bold{To specify a point process model:} The first order ``trend'' of the model is determined by an \code{R} language formula. The formula specifies the form of the \emph{logarithm} of the trend. \tabular{ll}{ \code{~1} \tab No trend (stationary) \cr \code{~x} \tab Loglinear trend \eqn{\lambda(x,y) = \exp(\alpha + \beta x)}{lambda(x,y) = exp(alpha + beta * x)} \cr \tab where \eqn{x,y} are Cartesian coordinates \cr \code{~polynom(x,y,3)} \tab Log-cubic polynomial trend \cr \code{~harmonic(x,y,2)} \tab Log-harmonic polynomial trend } The higher order (``interaction'') components are described by an object of class \code{"interact"}. Such objects are created by: \tabular{ll}{ \code{\link{Poisson}()} \tab the Poisson point process\cr \code{\link{AreaInter}()} \tab Area-interaction process\cr \code{\link{BadGey}()} \tab multiscale Geyer process\cr \code{\link{Concom}()} \tab connected component interaction\cr \code{\link{DiggleGratton}() } \tab Diggle-Gratton potential \cr \code{\link{DiggleGatesStibbard}() } \tab Diggle-Gates-Stibbard potential \cr \code{\link{Fiksel}()} \tab Fiksel pairwise interaction process\cr \code{\link{Geyer}()} \tab Geyer's saturation process\cr \code{\link{Hardcore}()} \tab Hard core process\cr \code{\link{Hybrid}()} \tab Hybrid of several interactions\cr \code{\link{LennardJones}() } \tab Lennard-Jones potential \cr \code{\link{MultiHard}()} \tab multitype hard core process \cr \code{\link{MultiStrauss}()} \tab multitype Strauss process \cr \code{\link{MultiStraussHard}()} \tab multitype Strauss/hard core process \cr \code{\link{OrdThresh}()} \tab Ord process, threshold potential\cr \code{\link{Ord}()} \tab Ord model, user-supplied potential \cr \code{\link{PairPiece}()} \tab pairwise interaction, piecewise constant \cr \code{\link{Pairwise}()} \tab pairwise interaction, user-supplied potential\cr \code{\link{SatPiece}()} \tab Saturated pair model, piecewise constant potential\cr \code{\link{Saturated}()} \tab Saturated pair model, 
user-supplied potential\cr \code{\link{Softcore}()} \tab pairwise interaction, soft core potential\cr \code{\link{Strauss}()} \tab Strauss process \cr \code{\link{StraussHard}()} \tab Strauss/hard core point process \cr \code{\link{Triplets}()} \tab Geyer triplets process } Note that it is also possible to combine several such interactions using \code{\link{Hybrid}}. \bold{Finer control over model fitting:} A quadrature scheme is represented by an object of class \code{"quad"}. To create a quadrature scheme, typically use \code{\link{quadscheme}}. \tabular{ll}{ \code{\link{quadscheme}} \tab default quadrature scheme \cr \tab using rectangular cells or Dirichlet cells\cr \code{\link{pixelquad}} \tab quadrature scheme based on image pixels \cr \code{\link{quad}} \tab create an object of class \code{"quad"} } To inspect a quadrature scheme: \tabular{ll}{ \code{plot(Q)} \tab plot quadrature scheme \code{Q}\cr \code{print(Q)} \tab print basic information about quadrature scheme \code{Q}\cr \code{\link{summary}(Q)} \tab summary of quadrature scheme \code{Q} } A quadrature scheme consists of data points, dummy points, and weights. 
To generate dummy points: \tabular{ll}{ \code{\link{default.dummy}} \tab default pattern of dummy points \cr \code{\link{gridcentres}} \tab dummy points in a rectangular grid \cr \code{\link{rstrat}} \tab stratified random dummy pattern \cr \code{\link{spokes}} \tab radial pattern of dummy points \cr \code{\link{corners}} \tab dummy points at corners of the window } To compute weights: \tabular{ll}{ \code{\link{gridweights}} \tab quadrature weights by the grid-counting rule \cr \code{\link{dirichlet.weights}} \tab quadrature weights are Dirichlet tile areas } \bold{Simulation and goodness-of-fit for fitted models:} \tabular{ll}{ \code{\link{rmh.ppm}} \tab simulate realisations of a fitted model \cr \code{\link{simulate.ppm}} \tab simulate realisations of a fitted model \cr \code{\link{envelope}} \tab compute simulation envelopes for a fitted model } \bold{Point process models on a linear network:} An object of class \code{"lpp"} represents a pattern of points on a linear network. Point process models can also be fitted to these objects. Currently only Poisson models can be fitted. \tabular{ll}{ \code{\link{lppm}} \tab point process model on linear network \cr \code{\link{anova.lppm}} \tab analysis of deviance for \cr \tab point process model on linear network \cr \code{\link{envelope.lppm}} \tab simulation envelopes for \cr \tab point process model on linear network \cr \code{\link{predict.lppm}} \tab model prediction on linear network \cr \code{\link{linim}} \tab pixel image on linear network \cr \code{\link{plot.linim}} \tab plot a pixel image on linear network \cr \code{\link{eval.linim}} \tab evaluate expression involving images \cr \code{\link{linfun}} \tab function defined on linear network \cr \code{\link{methods.linfun}} \tab conversion facilities } } \section{V. 
MODEL FITTING (SPATIAL LOGISTIC REGRESSION)}{ \bold{Logistic regression} Pixel-based spatial logistic regression is an alternative technique for analysing spatial point patterns that is widely used in Geographical Information Systems. It is approximately equivalent to fitting a Poisson point process model. In pixel-based logistic regression, the spatial domain is divided into small pixels, the presence or absence of a data point in each pixel is recorded, and logistic regression is used to model the presence/absence indicators as a function of any covariates. Facilities for performing spatial logistic regression are provided in \pkg{spatstat} for comparison purposes. \bold{Fitting a spatial logistic regression} Spatial logistic regression is performed by the function \code{\link{slrm}}. Its result is an object of class \code{"slrm"}. There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{simulate}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. 
For example, if \code{X} is a point pattern (class \code{"ppp"}): \tabular{ll}{ \emph{command} \tab \emph{model} \cr \code{slrm(X ~ 1)} \tab Complete Spatial Randomness \cr \code{slrm(X ~ x)} \tab Poisson process with \cr \tab intensity loglinear in \eqn{x} coordinate \cr \code{slrm(X ~ Z)} \tab Poisson process with \cr \tab intensity loglinear in covariate \code{Z} } \bold{Manipulating a fitted spatial logistic regression} \tabular{ll}{ \code{\link{anova.slrm}} \tab Analysis of deviance \cr \code{\link{coef.slrm}} \tab Extract fitted coefficients \cr \code{\link{vcov.slrm}} \tab Variance-covariance matrix of fitted coefficients \cr \code{\link{fitted.slrm}} \tab Compute fitted probabilities or intensity \cr \code{\link{logLik.slrm}} \tab Evaluate loglikelihood of fitted model \cr \code{\link{plot.slrm}} \tab Plot fitted probabilities or intensity \cr \code{\link{predict.slrm}} \tab Compute predicted probabilities or intensity with new data \cr \code{\link{simulate.slrm}} \tab Simulate model } There are many other undocumented methods for this class, including methods for \code{print}, \code{update}, \code{formula} and \code{terms}. Stepwise model selection is possible using \code{step} or \code{stepAIC}. } \section{VI. SIMULATION}{ There are many ways to generate a random point pattern, line segment pattern, pixel image or tessellation in \pkg{spatstat}. 
\bold{Random point patterns:} \tabular{ll}{ \code{\link{runifpoint}} \tab generate \eqn{n} independent uniform random points \cr \code{\link{rpoint}} \tab generate \eqn{n} independent random points \cr \code{\link{rmpoint}} \tab generate \eqn{n} independent multitype random points \cr \code{\link{rpoispp}} \tab simulate the (in)homogeneous Poisson point process \cr \code{\link{rmpoispp}} \tab simulate the (in)homogeneous multitype Poisson point process \cr \code{\link{runifdisc}} \tab generate \eqn{n} independent uniform random points in disc\cr \code{\link{rstrat}} \tab stratified random sample of points \cr \code{\link{rsyst}} \tab systematic random sample (grid) of points \cr \code{\link{rMaternI}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model I inhibition process\cr \code{\link{rMaternII}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model II inhibition process\cr \code{\link{rSSI}} \tab simulate Simple Sequential Inhibition process\cr \code{\link{rStrauss}} \tab simulate Strauss process (perfect simulation)\cr \code{\link{rNeymanScott}} \tab simulate a general Neyman-Scott process\cr \code{\link{rMatClust}} \tab simulate the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster process\cr \code{\link{rThomas}} \tab simulate the Thomas process \cr \code{\link{rLGCP}} \tab simulate the log-Gaussian Cox process \cr \code{\link{rGaussPoisson}} \tab simulate the Gauss-Poisson cluster process\cr \code{\link{rCauchy}} \tab simulate Neyman-Scott process with Cauchy clusters \cr \code{\link{rVarGamma}} \tab simulate Neyman-Scott process with Variance Gamma clusters \cr \code{\link{rcell}} \tab simulate the Baddeley-Silverman cell process \cr \code{\link{runifpointOnLines}} \tab generate \eqn{n} random points along specified line segments \cr \code{\link{rpoisppOnLines}} \tab generate Poisson random points along specified line segments } \bold{Resampling a point pattern:} \tabular{ll}{ \code{\link{quadratresample}} \tab block resampling \cr 
\code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rshift}} \tab random shifting of (subsets of) points\cr \code{\link{rthin}} \tab random thinning } See also \code{\link{varblock}} for estimating the variance of a summary statistic by block resampling, and \code{\link{lohboot}} for another bootstrap technique. \bold{Fitted point process models:} If you have fitted a point process model to a point pattern dataset, the fitted model can be simulated. Cluster process models are fitted by the function \code{\link{kppm}} yielding an object of class \code{"kppm"}. To generate one or more simulated realisations of this fitted model, use \code{\link{simulate.kppm}}. Gibbs point process models are fitted by the function \code{\link{ppm}} yielding an object of class \code{"ppm"}. To generate a simulated realisation of this fitted model, use \code{\link{rmh}}. To generate one or more simulated realisations of the fitted model, use \code{\link{simulate.ppm}}. \bold{Other random patterns:} \tabular{ll}{ \code{\link{rlinegrid}} \tab generate a random array of parallel lines through a window \cr \code{\link{rpoisline}} \tab simulate the Poisson line process within a window \cr \code{\link{rpoislinetess}} \tab generate random tessellation using Poisson line process \cr \code{\link{rMosaicSet}} \tab generate random set by selecting some tiles of a tessellation \cr \code{\link{rMosaicField}} \tab generate random pixel image by assigning random values in each tile of a tessellation } \bold{Simulation-based inference} \tabular{ll}{ \code{\link{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link{qqplot.ppm}} \tab diagnostic plot for interpoint interaction \cr \code{\link{scan.test}} \tab spatial scan statistic/test } } \section{VII. 
TESTS AND DIAGNOSTICS}{ \bold{Classical hypothesis tests:} \tabular{ll}{ \code{\link{quadrat.test}} \tab \eqn{\chi^2}{chi^2} goodness-of-fit test on quadrat counts \cr \code{\link{clarkevans.test}} \tab Clark and Evans test \cr \code{\link{kstest}} \tab Kolmogorov-Smirnov goodness-of-fit test\cr \code{\link{bermantest}} \tab Berman's goodness-of-fit tests\cr \code{\link{envelope}} \tab critical envelope for Monte Carlo test of goodness-of-fit \cr \code{\link{scan.test}} \tab spatial scan statistic/test \cr \code{\link{dclf.test}} \tab Diggle-Cressie-Loosmore-Ford test \cr \code{\link{mad.test}} \tab Mean Absolute Deviation test \cr \code{\link{dclf.progress}} \tab Progress plot for DCLF test \cr \code{\link{mad.progress}} \tab Progress plot for MAD test \cr \code{\link{anova.ppm}} \tab Analysis of Deviance for point process models } \bold{Sensitivity diagnostics:} Classical measures of model sensitivity such as leverage and influence have been adapted to point process models. \tabular{ll}{ \code{\link{leverage.ppm}} \tab Leverage for point process model\cr \code{\link{influence.ppm}} \tab Influence for point process model\cr \code{\link{dfbetas.ppm}} \tab Parameter influence\cr } \bold{Diagnostics for covariate effect:} Classical diagnostics for covariate effects have been adapted to point process models. \tabular{ll}{ \code{\link{parres}} \tab Partial residual plot\cr \code{\link{addvar}} \tab Added variable plot \cr \code{\link{rhohat}} \tab Kernel estimate of covariate effect\cr \code{\link{rho2hat}} \tab Kernel estimate of covariate effect (bivariate) } \bold{Residual diagnostics:} Residuals for a fitted point process model, and diagnostic plots based on the residuals, were introduced in Baddeley et al (2005) and Baddeley, Rubak and Moller (2011). Type \code{demo(diagnose)} for a demonstration of the diagnostics features. 
\tabular{ll}{ \code{\link{diagnose.ppm}} \tab diagnostic plots for spatial trend\cr \code{\link{qqplot.ppm}} \tab diagnostic Q-Q plot for interpoint interaction\cr \code{\link{residualspaper}} \tab examples from Baddeley et al (2005) \cr \code{\link{Kcom}} \tab model compensator of \eqn{K} function \cr \code{\link{Gcom}} \tab model compensator of \eqn{G} function \cr \code{\link{Kres}} \tab score residual of \eqn{K} function \cr \code{\link{Gres}} \tab score residual of \eqn{G} function \cr \code{\link{psst}} \tab pseudoscore residual of summary function \cr \code{\link{psstA}} \tab pseudoscore residual of empty space function \cr \code{\link{psstG}} \tab pseudoscore residual of \eqn{G} function \cr \code{\link{compareFit}} \tab compare compensators of several fitted models } \bold{Resampling and randomisation procedures} You can build your own tests based on randomisation and resampling using the following capabilities: \tabular{ll}{ \code{\link{quadratresample}} \tab block resampling \cr \code{\link{rjitter}} \tab apply random displacements to points in a pattern\cr \code{\link{rshift}} \tab random shifting of (subsets of) points\cr \code{\link{rthin}} \tab random thinning } } \section{VIII. DOCUMENTATION}{ The online manual entries are quite detailed and should be consulted first for information about a particular function. The paper by Baddeley and Turner (2005a) is a brief overview of the package. Baddeley and Turner (2005b) is a more detailed explanation of how to fit point process models to data. Baddeley (2010) is a complete set of notes from a 2-day workshop on the use of \pkg{spatstat}. Type \code{citation("spatstat")} to get these references. } \references{ Baddeley, A. (2010) \emph{Analysing spatial point patterns in R}. Workshop notes. Version 4.1. CSIRO online technical publication. URL: \code{www.uwa.edu.au/resources/pf16h.html} Baddeley, A. and Turner, R. (2005a) Spatstat: an R package for analyzing spatial point patterns. 
\emph{Journal of Statistical Software} \bold{12}:6, 1--42. URL: \code{www.jstatsoft.org}, ISSN: 1548-7660. Baddeley, A. and Turner, R. (2005b) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Rubak, E. and Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \url{http://www.jstatsoft.org/v55/i11/} Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Gelfand, A.E., Diggle, P.J., Fuentes, M. and Guttorp, P., editors (2010) \emph{Handbook of Spatial Statistics}. CRC Press. Huang, F. and Ogata, Y. (1999) Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8}, 510--530. Waagepetersen, R. An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63} (2007) 252--258. } \section{Licence}{ This library and its documentation are usable under the terms of the "GNU General Public License", a copy of which is distributed with the package. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \section{Acknowledgements}{ Kasper Klitgaard Berthelsen, Abdollah Jalilian, Marie-Colette van Lieshout, Ege Rubak, Dominic Schuhmacher and Rasmus Waagepetersen made substantial contributions of code. Additional contributions by Ang Qi Wei, Sandro Azaele, Colin Beale, Melanie Bell, Thomas Bendtsen, Ricardo Bernhardt, Andrew Bevan, Brad Biggerstaff, Leanne Bischof, Roger Bivand, Jose M. Blanco Moreno, Florent Bonneu, Julian Burgos, Simon Byers, Ya-Mei Chang, Jianbao Chen, Igor Chernayavsky, Y.C. Chin, Bjarke Christensen, Jean-Francois Coeurjolly, Robin Corria Ainslie, Marcelino de la Cruz, Peter Dalgaard, Peter Diggle, Patrick Donnelly, Ian Dryden, Stephen Eglen, Olivier Flores, Neba Funwi-Gabga, Oscar Garcia, Agnes Gault, Marc Genton, Julian Gilbey, Jason Goldstick, Pavel Grabarnik, C. Graf, Janet Franklin, Ute Hahn, Andrew Hardegen, Mandy Hering, Martin Bogsted Hansen, Martin Hazelton, Juha Heikkinen, Kurt Hornik, Ross Ihaka, Aruna Jammalamadaka, Robert John-Chandran, Devin Johnson, Mike Kuhn, Jeff Laake, Frederic Lavancier, Tom Lawrence, Robert Lamb, Jonathan Lee, George Leser, Li Haitao, George Limitsios, Ben Madin, Kiran Marchikanti, Jeff Marcus, Robert Mark, Jorge Mateu Mahiques, Monia Mahling, Peter McCullagh, Ulf Mehlig, Sebastian Wastl Meyer, Mi Xiangcheng, Jesper Moller, Erika Mudrak, Linda Stougaard Nielsen, Felipe Nunes, Jens Oehlschlaegel, Thierry Onkelinx, Sean O'Riordan, Evgeni Parilov, Jeff Picka, Nicolas Picard, Sergiy Protsiv, Adrian Raftery, Matt Reiter, Tom Richardson, Brian Ripley, Ted Rosenbaum, Barry Rowlingson, John Rudge, Farzaneh Safavimanesh, Aila Sarkka, Katja Schladitz, Bryan Scott, Vadim Shcherbakov, Shen Guochun, Ida-Maria Sintorn, Yong Song, Malte Spiess, Mark Stevenson, Kaspar Stucki, Michael Sumner, P. 
Surovy, Ben Taylor, Thordis Linda Thorarinsdottir, Berwin Turlach, Andrew van Burgel, Tobias Verbeke, Alexendre Villers, Fabrice Vinatier, Hao Wang, H. Wendrock, Jan Wild, Selene Wong, Mike Zamboni and Achim Zeileis. } \keyword{spatial} \keyword{package} spatstat/man/distmap.psp.Rd0000755000176000001440000000465612237642732015477 0ustar ripleyusers\name{distmap.psp} %DontDeclareMethods \alias{distmap.psp} \title{ Distance Map of Line Segment Pattern } \description{ Computes the distance from each pixel to the nearest line segment in the given line segment pattern. } \usage{ \method{distmap}{psp}(X, \dots) } \arguments{ \item{X}{A line segment pattern (object of class \code{"psp"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control pixel resolution. } } \value{ A pixel image (object of class \code{"im"}) whose greyscale values are the values of the distance map. The return value has attributes \code{"index"} and \code{"bdry"} which are also pixel images. } \details{ The ``distance map'' of a line segment pattern \eqn{X} is the function \eqn{f} whose value \code{f(u)} is defined for any two-dimensional location \eqn{u} as the shortest distance from \eqn{u} to \eqn{X}. This function computes the distance map of the line segment pattern \code{X} and returns the distance map as a pixel image. The greyscale value at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest line segment of the pattern \code{X}. Distances are computed using analytic geometry. Additionally, the return value has two attributes, \code{"index"} and \code{"bdry"}, which are also pixel images. The grey values in \code{"bdry"} give the distance from each pixel to the bounding rectangle of the image. The grey values in \code{"index"} are integers identifying which line segment of \code{X} is closest. This is a method for the generic function \code{\link{distmap}}. Note that this function gives the exact distance from the centre of each pixel to the nearest line segment. 
To compute the exact distance from the points in a point pattern to the nearest line segment, use \code{\link{distfun}} or one of the low-level functions \code{\link{nncross}} or \code{\link{project2segment}}. } \seealso{ \code{\link{distmap}}, \code{\link{distmap.owin}}, \code{\link{distmap.ppp}}, \code{\link{distfun}}, \code{\link{nncross}}, \code{\link{nearestsegment}}, \code{\link{project2segment}}. } \examples{ a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) Z <- distmap(a) plot(Z) plot(a, add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/eval.im.Rd0000755000176000001440000000532112237642732014556 0ustar ripleyusers\name{eval.im} \alias{eval.im} \title{Evaluate Expression Involving Pixel Images} \description{ Evaluates any expression involving one or more pixel images, and returns a pixel image. } \usage{ eval.im(expr, envir, harmonize=TRUE) } \arguments{ \item{expr}{An expression.} \item{envir}{Optional. The environment in which to evaluate the expression.} \item{harmonize}{ Logical. Whether to resolve inconsistencies between the pixel grids. } } \details{ This function is a wrapper to make it easier to perform pixel-by-pixel calculations in an image. Pixel images in \pkg{spatstat} are represented by objects of class \code{"im"} (see \code{\link{im.object}}). These are essentially matrices of pixel values, with extra attributes recording the pixel dimensions, etc. Suppose \code{X} is a pixel image. Then \code{eval.im(X+3)} will add 3 to the value of every pixel in \code{X}, and return the resulting pixel image. Suppose \code{X} and \code{Y} are two pixel images with compatible dimensions: they have the same number of pixels, the same physical size of pixels, and the same bounding box. 
Then \code{eval.im(X + Y)} will add the corresponding pixel values in \code{X} and \code{Y}, and return the resulting pixel image. In general, \code{expr} can be any expression in the R language involving (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.im} determines which of the \emph{variable names} in the expression \code{expr} refer to pixel images. Each such name is replaced by a matrix containing the pixel values. The expression is then evaluated. The result should be a matrix; it is taken as the matrix of pixel values. The expression \code{expr} must be vectorised. There must be at least one pixel image in the expression. All images must have compatible dimensions. If \code{harmonize=TRUE}, images that have incompatible dimensions will be resampled so that they are compatible. If \code{harmonize=FALSE}, images that are incompatible will cause an error. } \value{ An image object of class \code{"im"}. } \seealso{ \code{\link{as.im}}, \code{\link{compatible.im}}, \code{\link{harmonise.im}}, \code{\link{im.object}} } \examples{ # test images X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) Y <- as.im(function(x,y) { 3 * x + y }, unit.square()) eval.im(X + 3) eval.im(X - Y) eval.im(abs(X - Y)) Z <- eval.im(sin(X * pi) + Y) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/Fiksel.Rd0000755000176000001440000000715612237642731014447 0ustar ripleyusers\name{Fiksel} \alias{Fiksel} \title{The Fiksel Interaction} \description{ Creates an instance of Fiksel's double exponential pairwise interaction point process model, which can then be fitted to point pattern data. 
} \usage{ Fiksel(r, hc, kappa) } \arguments{ \item{r}{The interaction radius of the Fiksel model} \item{hc}{The hard core distance} \item{kappa}{The rate parameter} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Fiksel process with interaction radius \eqn{r}, hard core distance \code{hc} and rate parameter \code{kappa}. } \details{ Fiksel (1984) introduced a pairwise interaction point process with the following interaction function \eqn{c}. For two points \eqn{u} and \eqn{v} separated by a distance \eqn{d=||u-v||}, the interaction \eqn{c(u,v)} is equal to \eqn{0} if \eqn{d < h}, equal to \eqn{1} if \eqn{d > r}, and equal to \deqn{ \exp(a \exp(-\kappa d))}{exp(a * exp(-kappa * d))} if \eqn{h \le d \le r}{h <= d <= r}, where \eqn{h,r,\kappa,a}{h,r,kappa,a} are parameters. A graph of this interaction function is shown in the Examples. The interpretation of the parameters is as follows. \itemize{ \item \eqn{h} is the hard core distance: distinct points are not permitted to come closer than a distance \eqn{h} apart. \item \eqn{r} is the interaction range: points further than this distance do not interact. \item \eqn{\kappa}{kappa} is the rate or slope parameter, controlling the decay of the interaction as distance increases. \item \eqn{a} is the interaction strength parameter, controlling the strength and type of interaction. If \eqn{a} is zero, the process is Poisson. If \code{a} is positive, the process is clustered. If \code{a} is negative, the process is inhibited (regular). } The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Fiksel pairwise interaction is yielded by the function \code{Fiksel()}. See the examples below. 
The parameters \eqn{h}, \eqn{r} and \eqn{\kappa}{kappa} must be fixed and given in the call to \code{Fiksel}, while the canonical parameter \eqn{a} is estimated by \code{\link{ppm}()}. To estimate \eqn{h}, \eqn{r} and\eqn{\kappa}{kappa} it is possible to use \code{\link{profilepl}}. The maximum likelihood estimator of\eqn{h} is the minimum interpoint distance. See also Stoyan, Kendall and Mecke (1987) page 161. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{StraussHard}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Fiksel, T. (1984) Estimation of parameterized pair potentials of marked and non-marked Gibbsian point processes. \emph{Electronische Informationsverabeitung und Kybernetika} \bold{20}, 270--278. Stoyan, D, Kendall, W.S. and Mecke, J. (1987) \emph{Stochastic geometry and its applications}. Wiley. } \examples{ Fiksel(r=1,hc=0.02, kappa=2) # prints a sensible description of itself data(spruces) X <- unmark(spruces) fit <- ppm(X, ~1, Fiksel(r=3.5, hc=1, kappa=1)) plot(fitin(fit)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/default.dummy.Rd0000755000176000001440000000617512237642732016011 0ustar ripleyusers\name{default.dummy} \alias{default.dummy} \title{Generate a Default Pattern of Dummy Points} \description{ Generates a default pattern of dummy points for use in a quadrature scheme. } \usage{ default.dummy(X, nd, random=FALSE, ntile=NULL, npix=NULL, \dots, eps=NULL, verbose=FALSE) } \arguments{ \item{X}{ The observed data point pattern. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{nd}{ Optional. 
Integer, or integer vector of length 2, specifying an \code{nd * nd} or \code{nd[1] * nd[2]} rectangular array of dummy points. } \item{random}{ Logical value. If \code{TRUE}, the dummy points are randomised. } \item{ntile}{ Optional. Integer or pair of integers specifying the number of rows and columns of tiles used in the counting rule. } \item{npix}{ Optional. Integer or pair of integers specifying the number of rows and columns of pixels used in computing approximate areas. } \item{\dots}{ Ignored. } \item{eps}{ Optional. Grid spacing. A positive number, or a vector of two positive numbers, giving the horizontal and vertical spacing, respectively, of the grid of dummy points. Incompatible with \code{nd}. } \item{verbose}{ If \code{TRUE}, information about the construction of the quadrature scheme is printed. } } \value{ A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}) containing the dummy points. } \details{ This function provides a sensible default for the dummy points in a quadrature scheme. A quadrature scheme consists of the original data point pattern, an additional pattern of dummy points, and a vector of quadrature weights for all these points. See \code{\link{quad.object}} for further information about quadrature schemes. If \code{random} is false (the default), then the function creates dummy points in an \code{nd[1]} by \code{nd[2]} rectangular grid. If \code{random} is true, then the frame of the window is divided into an \code{nd[1]} by \code{nd[2]} array of tiles, and one dummy point is generated at random inside each tile. In either case, the four corner points of the frame of the window are added. Then if the window is not rectangular, any dummy points lying outside it are deleted. If \code{nd} is missing, a default value (depending on the data pattern \code{X}) is computed by \code{default.ngrid}. 
Alternative functions for creating dummy patterns include \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}} and \code{\link{spokes}}. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}}, \code{\link{spokes}} } \examples{ data(simdat) P <- simdat D <- default.dummy(P, 100) \dontrun{plot(D)} Q <- quadscheme(P, D, "grid") \dontrun{plot(union.quad(Q))} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/rotate.im.Rd0000644000176000001440000000163612237642734015131 0ustar ripleyusers\name{rotate.im} \alias{rotate.im} \title{Rotate a Pixel Image} \description{ Rotates a pixel image } \usage{ \method{rotate}{im}(X, angle=pi/2, \dots) } \arguments{ \item{X}{A pixel image (object of class \code{"im"}).} \item{angle}{Angle of rotation.} \item{\dots}{Ignored.} } \value{ Another object of class \code{"im"} representing the rotated pixel image. } \details{ The image is rotated about the origin by the angle specified. Angles are measured in radians, anticlockwise. The default is to rotate the image 90 degrees anticlockwise. } \seealso{ \code{\link{affine.im}}, \code{\link{shift.im}}, \code{\link{rotate}} } \examples{ Z <- distmap(letterR) X <- rotate(Z) \dontrun{ plot(X) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/rshift.Rd0000755000176000001440000000320012237642734014516 0ustar ripleyusers\name{rshift} \alias{rshift} \title{Random Shift} \description{ Randomly shifts the points of a point pattern or line segment pattern. Generic. } \usage{ rshift(X, \dots) } \arguments{ \item{X}{Pattern to be subjected to a random shift. 
A point pattern (class \code{"ppp"}), a line segment pattern (class \code{"psp"}) or an object of class \code{"splitppp"}. } \item{\dots}{ Arguments controlling the generation of the random shift vector, or specifying which parts of the pattern will be shifted. } } \value{ An object of the same type as \code{X}. } \details{ This operation applies a random shift (vector displacement) to the points in a point pattern, or to the segments in a line segment pattern. The argument \code{X} may be \itemize{ \item a point pattern (an object of class \code{"ppp"}) \item a line segment pattern (an object of class \code{"psp"}) \item an object of class \code{"splitppp"} (basically a list of point patterns, obtained from \code{\link{split.ppp}}). } The function \code{rshift} is generic, with methods for the three classes \code{"ppp"}, \code{"psp"} and \code{"splitppp"}. See the help pages for these methods, \code{\link{rshift.ppp}}, \code{\link{rshift.psp}} and \code{\link{rshift.splitppp}}, for further information. } \seealso{ \code{\link{rshift.ppp}}, \code{\link{rshift.psp}}, \code{\link{rshift.splitppp}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/rotate.owin.Rd0000755000176000001440000000240412237642734015475 0ustar ripleyusers\name{rotate.owin} \alias{rotate.owin} \title{Rotate a Window} \description{ Rotates a window } \usage{ \method{rotate}{owin}(X, angle=pi/2, \dots, rescue=TRUE) } \arguments{ \item{X}{A window (object of class \code{"owin"}).} \item{angle}{Angle of rotation.} \item{rescue}{ Logical. If \code{TRUE}, the rotated window will be processed by \code{\link{rescue.rectangle}}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} controlling the resolution of the rotated window, if \code{X} is a binary pixel mask. Ignored if \code{X} is not a binary mask. 
} } \value{ Another object of class \code{"owin"} representing the rotated window. } \details{ Rotates the window by the specified angle. Angles are measured in radians, anticlockwise. The default is to rotate the window 90 degrees anticlockwise. The centre of rotation is the origin. } \seealso{ \code{\link{owin.object}} } \examples{ w <- owin(c(0,1),c(0,1)) v <- rotate(w, pi/3) \dontrun{ plot(v) } data(letterR) w <- as.mask(letterR) v <- rotate(w, pi/5) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/pool.Rd0000755000176000001440000000205712237642733014200 0ustar ripleyusers\name{pool} \alias{pool} \title{ Pool Data } \description{ Pool the data from several objects of the same class. } \usage{ pool(...) } \arguments{ \item{\dots}{ Objects of the same type. } } \details{ The function \code{pool} is generic. There are methods for several classes, listed below. \code{pool} is used to combine the data from several objects of the same type, and to compute statistics based on the combined dataset. It may be used to pool the estimates obtained from replicated datasets. It may also be used in high-performance computing applications, when the objects \code{\dots} have been computed on different processors or in different batch runs, and we wish to combine them. } \value{ An object of the same class as the arguments \code{\dots}. 
} \seealso{ \code{\link{pool.envelope}}, \code{\link{pool.fasp}}, \code{\link{pool.rat}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} spatstat/man/BadGey.Rd0000755000176000001440000001013612237642731014355 0ustar ripleyusers\name{BadGey} \alias{BadGey} \title{Hybrid Geyer Point Process Model} \description{ Creates an instance of the Baddeley-Geyer point process model, defined as a hybrid of several Geyer interactions. The model can then be fitted to point pattern data. } \usage{ BadGey(r, sat) } \arguments{ \item{r}{vector of interaction radii} \item{sat}{ vector of saturation parameters, or a single common value of saturation parameter } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is Baddeley's generalisation of the Geyer saturation point process model, described in \code{\link{Geyer}}, to a process with multiple interaction distances. The BadGey point process with interaction radii \eqn{r_1,\ldots,r_k}{r[1], ..., r[k]}, saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],...,s[k]}, intensity parameter \eqn{\beta}{beta} and interaction parameters \eqn{\gamma_1,\ldots,\gamma_k}{gamma[1], ..., gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots \gamma_k^{v_k(x_i,X)} }{ beta gamma[1]^v(1, x_i, X) ... gamma[k]^v(k, x_i, X) } to the probability density of the point pattern, where \deqn{ v_j(x_i, X) = \min( s_j, t_j(x_i,X) ) }{ v(j, x_i, X) = min(s[j], t(j, x_i, X)) } where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the number of points in the pattern \eqn{X} which lie within a distance \eqn{r_j}{r[j]} from the point \eqn{x_i}{x[i]}. \code{BadGey} is used to fit this model to data.
The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant Saturated pairwise interaction is yielded by the function \code{BadGey()}. See the examples below. The argument \code{r} specifies the vector of interaction distances. The entries of \code{r} must be strictly increasing, positive numbers. The argument \code{sat} specifies the vector of saturation parameters that are applied to the point counts \eqn{t_j(x_i, X)}{t(j,x[i],X)}. It should be a vector of the same length as \code{r}, and its entries should be nonnegative numbers. Thus \code{sat[1]} is applied to the count of points within a distance \code{r[1]}, and \code{sat[2]} to the count of points within a distance \code{r[2]}, etc. Alternatively \code{sat} may be a single number, and this saturation value will be applied to every count. Infinite values of the saturation parameters are also permitted; in this case \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x_i, X) = t(j, x_i, X)} and there is effectively no `saturation' for the distance range in question. If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{gamma} obtained from \code{\link{BadGey}} have a complicated relationship to the interaction parameters \eqn{\gamma}{gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{SatPiece}} } \examples{ BadGey(c(0.1,0.2), c(1,1)) # prints a sensible description of itself BadGey(c(0.1,0.2), 1) data(cells) # fit a stationary Baddeley-Geyer model ppm(cells, ~1, BadGey(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend \dontrun{ ppm(cells, ~polynom(x,y,3), BadGey(c(0.07, 0.1, 0.13), 2)) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat/man/rHardcore.Rd0000644000176000001440000000525312237642734015137 0ustar ripleyusers\name{rHardcore} \alias{rHardcore} \title{Perfect Simulation of the Hardcore Process} \description{ Generate a random pattern of points, a simulated realisation of the Hardcore process, using a perfect simulation algorithm. } \usage{ rHardcore(beta, R = 0, W = owin()) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{R}{ hard core distance (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } } \details{ This function generates a realisation of the Hardcore point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Hardcore process is a model for strong spatial inhibition. Two points of the process are forbidden to lie closer than \code{R} units apart. The Hardcore process is the special case of the Strauss process (see \code{\link{rStrauss}}) with interaction parameter \eqn{\gamma}{gamma} equal to zero. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and Moller (2002, 2003). 
This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ A point pattern (object of class \code{"ppp"}). } \references{ Berthelsen, K.K. and Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} based on original code for the Strauss process by Kasper Klitgaard Berthelsen. } \examples{ X <- rHardcore(0.05,1.5,square(141.4)) Z <- rHardcore(100,0.05) } \seealso{ \code{\link{rmh}}, \code{\link{Hardcore}}, \code{\link{rStrauss}}, \code{\link{rDiggleGratton}}. } \keyword{spatial} \keyword{datagen} spatstat/man/is.subset.owin.Rd0000755000176000001440000000265312237642732016122 0ustar ripleyusers\name{is.subset.owin} \alias{is.subset.owin} \title{Determine Whether One Window is Contained In Another} \description{ Tests whether window \code{A} is a subset of window \code{B}. } \usage{ is.subset.owin(A, B) } \arguments{ \item{A}{A window object (see Details).} \item{B}{A window object (see Details).} } \value{ Logical scalar; \code{TRUE} if \code{A} is a sub-window of \code{B}, otherwise \code{FALSE}. 
} \details{ This function tests whether the window \code{A} is a subset of the window \code{B}. The arguments \code{A} and \code{B} must be window objects (either objects of class \code{"owin"}, or data that can be coerced to this class by \code{\link{as.owin}}). Various algorithms are used, depending on the geometrical type of the two windows. Note that if \code{B} is not rectangular, the algorithm proceeds by discretising \code{A}, converting it to a pixel mask using \code{\link{as.mask}}. In this case the resulting answer is only ``approximately correct''. The accuracy of the approximation can be controlled: see \code{\link{as.mask}}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ w1 <- as.owin(c(0,1,0,1)) w2 <- as.owin(c(-1,2,-1,2)) is.subset.owin(w1,w2) # Returns TRUE. is.subset.owin(w2,w1) # Returns FALSE. } \keyword{spatial} \keyword{math} spatstat/man/infline.Rd0000755000176000001440000000602112237642732014645 0ustar ripleyusers\name{infline} \alias{infline} \alias{plot.infline} \alias{print.infline} \title{Infinite Straight Lines} \description{ Define the coordinates of one or more straight lines in the plane } \usage{ infline(a = NULL, b = NULL, h = NULL, v = NULL, p = NULL, theta = NULL) \method{print}{infline}(x, \dots) \method{plot}{infline}(x, \dots) } \arguments{ \item{a,b}{Numeric vectors of equal length giving the intercepts \eqn{a} and slopes \eqn{b} of the lines. Incompatible with \code{h,v,p,theta} } \item{h}{Numeric vector giving the positions of horizontal lines when they cross the \eqn{y} axis. Incompatible with \code{a,b,v,p,theta} } \item{v}{Numeric vector giving the positions of vertical lines when they cross the \eqn{x} axis. Incompatible with \code{a,b,h,p,theta} } \item{p,theta}{Numeric vectors of equal length giving the polar coordinates of the line. 
Incompatible with \code{a,b,h,v} } \item{x}{An object of class \code{"infline"}} \item{\dots}{ Extra arguments passed to \code{\link[base]{print}} for printing or \code{\link[graphics]{abline}} for plotting } } \details{ The class \code{infline} is a convenient way to handle infinite straight lines in the plane. The position of a line can be specified in several ways: \itemize{ \item its intercept \eqn{a} and slope \eqn{b} in the equation \eqn{y = a + b x}{y = a + b * x} can be used unless the line is vertical. \item for vertical lines we can use the position \eqn{v} where the line crosses the \eqn{y} axis \item for horizontal lines we can use the position \eqn{h} where the line crosses the \eqn{x} axis \item the polar coordinates \eqn{p} and \eqn{\theta}{theta} can be used for any line. The line equation is \deqn{ y \cos\theta + x \sin\theta = p }{ y * cos(theta) + x * sin(theta) = p } } The command \code{infline} will accept line coordinates in any of these formats. The arguments \code{a,b,h,v} have the same interpretation as they do in the line-plotting function \code{\link[graphics]{abline}}. The command \code{infline} converts between different coordinate systems (e.g. from \code{a,b} to \code{p,theta}) and returns an object of class \code{"infline"} that contains a representation of the lines in each appropriate coordinate system. This object can be printed and plotted. } \value{ The value of \code{infline} is an object of class \code{"infline"} which is basically a data frame with columns \code{a,b,h,v,p,theta}. Each row of the data frame represents one line. Entries may be \code{NA} if a coordinate is not applicable to a particular line. 
} \examples{ infline(a=10:13,b=1) infline(p=1:3, theta=pi/4) plot(c(-1,1),c(-1,1),type="n",xlab="",ylab="", asp=1) plot(infline(p=0.4, theta=seq(0,pi,length=20))) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/thomas.estK.Rd0000755000176000001440000001377212237642734015436 0ustar ripleyusers\name{thomas.estK} \alias{thomas.estK} \title{Fit the Thomas Point Process by Minimum Contrast} \description{ Fits the Thomas point process to a point pattern dataset by the Method of Minimum Contrast using the K function. } \usage{ thomas.estK(X, startpar=c(kappa=1,sigma2=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the Thomas model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Thomas process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Thomas point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. 
The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Thomas point process to \code{X}, by finding the parameters of the Thomas model which give the closest match between the theoretical \eqn{K} function of the Thomas process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Thomas point process is described in Moller and Waagepetersen (2003, pp. 61--62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and isotropically Normally distributed around the parent point with standard deviation \eqn{\sigma}{sigma}. The theoretical \eqn{K}-function of the Thomas process is \deqn{ K(r) = \pi r^2 + \frac 1 \kappa (1 - \exp(-\frac{r^2}{4\sigma^2})). }{ K(r) = pi r^2 + (1 - exp(-r^2/(4 sigma^2)))/kappa. } The theoretical intensity of the Thomas process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{\sigma^2}{sigma^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. 
If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Thomas process can be simulated, using \code{\link{rThomas}}. Homogeneous or inhomogeneous Thomas process models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. 
} \author{Rasmus Waagepetersen \email{rw@math.auc.dk} Adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{matclust.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rThomas}} to simulate the fitted model. } \examples{ data(redwood) u <- thomas.estK(redwood, c(kappa=10, sigma2=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat/man/shift.Rd0000755000176000001440000000207512237642734014345 0ustar ripleyusers\name{shift} \alias{shift} \title{Apply Vector Translation} \description{ Applies a vector shift of the plane to a geometrical object, such as a point pattern or a window. } \usage{ shift(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} \item{\dots}{Arguments determining the shift vector.} } \value{ Another object of the same type, representing the result of applying the shift. } \details{ This is generic. Methods are provided for point patterns (\code{\link{shift.ppp}}) and windows (\code{\link{shift.owin}}). The object is translated by the vector \code{vec}. } \seealso{ \code{\link{shift.ppp}}, \code{\link{shift.owin}}, \code{\link{rotate}}, \code{\link{affine}}, \code{\link{periodify}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/edges2triangles.Rd0000644000176000001440000000351212237642732016302 0ustar ripleyusers\name{edges2triangles} \alias{edges2triangles} \title{ List Triangles in a Graph } \description{ Given a list of edges between vertices, compile a list of all triangles formed by these edges. 
} \usage{ edges2triangles(iedge, jedge, nvert=max(iedge, jedge), \dots, check=TRUE, friendly=rep(TRUE, nvert)) } \arguments{ \item{iedge,jedge}{ Integer vectors, of equal length, specifying the edges. } \item{nvert}{ Number of vertices in the network. } \item{\dots}{Ignored} \item{check}{Logical. Whether to check validity of input data.} \item{friendly}{ Optional. For advanced use. See Details. } } \details{ This low level function finds all the triangles (cliques of size 3) in a finite graph with \code{nvert} vertices and with edges specified by \code{iedge, jedge}. The interpretation of \code{iedge, jedge} is that each successive pair of entries specifies an edge in the graph. The \eqn{k}th edge joins vertex \code{iedge[k]} to vertex \code{jedge[k]}. Entries of \code{iedge} and \code{jedge} must be integers from 1 to \code{nvert}. To improve efficiency in some applications, the optional argument \code{friendly} can be used. It should be a logical vector of length \code{nvert} specifying a labelling of the vertices, such that two vertices \code{j,k} which are \emph{not} friendly (\code{friendly[j] = friendly[k] = FALSE}) are \emph{never} connected by an edge. } \value{ A 3-column matrix of integers, in which each row represents a triangle. } \seealso{ \code{\link{edges2vees}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ i <- c(1, 2, 5, 5, 1, 4, 2) j <- c(2, 3, 3, 1, 3, 2, 5) edges2triangles(i, j) } \keyword{spatial} \keyword{manip} spatstat/man/runifdisc.Rd0000755000176000001440000000316412237642734015216 0ustar ripleyusers\name{runifdisc} \alias{runifdisc} \title{Generate N Uniform Random Points in a Disc} \description{ Generate a random point pattern containing \eqn{n} independent uniform random points in a circular disc. } \usage{ runifdisc(n, radius=1, centre=c(0,0), ...) } \arguments{ \item{n}{ Number of points. 
} \item{radius}{Radius of the circle.} \item{centre}{Coordinates of the centre of the circle.} \item{\dots}{ Arguments passed to \code{\link{disc}} controlling the accuracy of approximation to the circle. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). } \details{ This function generates \code{n} independent random points, uniformly distributed in a circular disc. It is faster (for a circular window) than the general code used in \code{\link{runifpoint}}. To generate random points in an ellipse, first generate points in a circle using \code{runifdisc}, then transform to an ellipse using \code{\link{affine}}, as shown in the examples. To generate random points in other windows, use \code{\link{runifpoint}}. To generate non-uniform random points, use \code{\link{rpoint}}. } \seealso{ \code{\link{disc}}, \code{\link{runifpoint}}, \code{\link{rpoint}} } \examples{ # 100 random points in the unit disc plot(runifdisc(100)) # 42 random points in the ellipse with major axis 3 and minor axis 1 X <- runifdisc(42) Y <- affine(X, mat=diag(c(3,1))) plot(Y) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/tile.areas.Rd0000755000176000001440000000205512237642734015255 0ustar ripleyusers\name{tile.areas} \alias{tile.areas} \title{Compute Areas of Tiles in a Tessellation} \description{ Computes the area of each tile in a tessellation. } \usage{ tile.areas(x) } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. This command computes the area of each of the tiles that make up the tessellation \code{x}. The result is a numeric vector in the same order as the tiles would be listed by \code{tiles(x)}. 
} \value{ A numeric vector. } \seealso{ \code{\link{tess}}, \code{\link{tiles}} } \examples{ A <- tess(xgrid=0:2,ygrid=0:2) tile.areas(A) v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin()) E <- tess(image=v) tile.areas(E) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/F3est.Rd0000755000176000001440000001232412237642731014207 0ustar ripleyusers\name{F3est} \Rdversion{1.1} \alias{F3est} \title{ Empty Space Function of a Three-Dimensional Point Pattern } \description{ Estimates the empty space function \eqn{F_3(r)}{F3(r)} from a three-dimensional point pattern. } \usage{ F3est(X, ..., rmax = NULL, nrval = 128, vside = NULL, correction = c("rs", "km", "cs"), sphere = c("fudge", "ideal", "digital")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{F_3(r)}{F3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{vside}{ Optional. Side length of the voxels in the discrete approximation. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{sphere}{ Optional. Character string specifying how to calculate the theoretical value of \eqn{F_3(r)}{F3(r)} for a Poisson process. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the empty space function is \deqn{ F_3(r) = P(d(0,\Phi) \le r) }{ F3(r) = P(d(0,Phi) <= r) } where \eqn{d(0,\Phi)}{d(0,Phi)} denotes the distance from a fixed origin \eqn{0} to the nearest point of \eqn{\Phi}{Phi}. 
The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The empty space function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. The box containing the point pattern is discretised into cubic voxels of side length \code{vside}. The distance function \eqn{d(u,\Phi)}{d(u,Phi)} is computed for every voxel centre point \eqn{u} using a three-dimensional version of the distance transform algorithm (Borgefors, 1986). The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{F_3(r)}{F3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"cs"}:}{ the three-dimensional generalisation of the Chiu-Stoyan or Hanisch estimator (Chiu and Stoyan, 1998). } } The result includes a column \code{theo} giving the theoretical value of \eqn{F_3(r)}{F3(r)} for a uniform Poisson process (Complete Spatial Randomness). This value depends on the volume of the sphere of radius \code{r} measured in the discretised distance metric. The argument \code{sphere} determines how this will be calculated. \itemize{ \item If \code{sphere="ideal"} the calculation will use the volume of an ideal sphere of radius \eqn{r} namely \eqn{(4/3) \pi r^3}{(4/3) * pi * r^3}. This is not recommended because the theoretical values of \eqn{F_3(r)}{F3(r)} are inaccurate. \item If \code{sphere="fudge"} then the volume of the ideal sphere will be multiplied by 0.78, which gives the approximate volume of the sphere in the discretised distance metric. \item If \code{sphere="digital"} then the volume of the sphere in the discretised distance metric is computed exactly using another distance transform. 
This takes longer to compute, but is exact. } } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42} (1993) 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Borgefors, G. (1986) Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34}, 344--371. Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions for spatial patterns. \emph{Statistica Neerlandica} \bold{52}, 239--246. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rana Moyeed. } \section{Warnings}{ A small value of \code{vside} and a large value of \code{nrval} are required for reasonable accuracy. The default value of \code{vside} ensures that the total number of voxels is \code{2^22} or about 4 million. To change the default number of voxels, see \code{\link{spatstat.options}("nvoxel")}. } \seealso{ \code{\link{G3est}}, \code{\link{K3est}}, \code{\link{pcf3est}}. } \examples{ \testonly{op <- spatstat.options(nvoxel=2^18)} X <- rpoispp3(42) Z <- F3est(X) if(interactive()) plot(Z) \testonly{spatstat.options(op)} } \keyword{spatial} \keyword{nonparametric} spatstat/man/colourtools.Rd0000755000176000001440000000411512237642732015607 0ustar ripleyusers\name{colourtools} \alias{colourtools} %DoNotExport \alias{paletteindex} \alias{rgb2hex} \alias{col2hex} \alias{paletteindex} \alias{samecolour} \title{ Convert and Compare Colours in Different Formats } \description{ These functions convert between different formats for specifying a colour in \R, and determine whether colours are equivalent. 
} \usage{ col2hex(x) rgb2hex(v) paletteindex(x) samecolour(x,y) } \arguments{ \item{x,y}{ Any valid specification for a colour or sequence of colours accepted by \code{\link{col2rgb}}. } \item{v}{ A numeric vector of length 3, giving the RGB values (0 to 255) of a single colour, or a 3-column matrix giving the RGB values of several colours. } } \details{ \code{col2hex} converts colours specified in any format into their hexadecimal character codes. \code{rgb2hex} converts RGB colour values into their hexadecimal character codes. \code{paletteindex} checks whether the colour or colours specified by \code{x} are available in the default palette returned by \code{\link{palette}()}. If so, it returns the index or indices of the colours in the palette. If not, it returns \code{NA}. \code{samecolour} decides whether two colours \code{x} and \code{y} are equivalent. } \section{Warning}{ \code{paletteindex("green")} returns \code{NA} because the green colour in the default palette is called \code{"green3"}. } \value{ For \code{col2hex} and \code{rgb2hex}, a character vector containing hexadecimal colour codes. For \code{paletteindex}, an integer vector, possibly containing \code{NA} values. For \code{samecolour}, a logical value or logical vector. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{col2rgb}}, \code{\link{palette}} See also the class of colour map objects: \code{\link{colourmap}}, \code{\link{interp.colourmap}}, \code{\link{tweak.colourmap}}. } \examples{ samecolour("grey", "gray") paletteindex("grey") } \keyword{color} spatstat/man/Tstat.Rd0000644000176000001440000000565212237642731014325 0ustar ripleyusers\name{Tstat} \alias{Tstat} \title{ Third order summary statistic } \description{ Computes the third order summary statistic \eqn{T(r)} of a spatial point pattern. 
} \usage{ Tstat(X, ..., r = NULL, rmax = NULL, correction = c("border", "translate"), ratio = FALSE, verbose=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{T(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{T(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{rmax}{ Optional. Numeric. The maximum value of \eqn{r} for which \eqn{T(r)} should be estimated. } \item{correction}{ Optional. A character vector containing any selection of the options \code{"none"}, \code{"border"}, \code{"bord.modif"}, \code{"translate"}, \code{"translation"}, or \code{"best"}. It specifies the edge correction(s) to be applied. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } \item{verbose}{ Logical. If \code{TRUE}, an estimate of the computation time is printed. } } \details{ This command calculates the third-order summary statistic \eqn{T(r)} for a spatial point patterns, defined by Schladitz and Baddeley (2000). The definition of \eqn{T(r)} is similar to the definition of Ripley's \eqn{K} function \eqn{K(r)}, except that \eqn{K(r)} counts pairs of points while \eqn{T(r)} counts triples of points. Essentially \eqn{T(r)} is a rescaled cumulative distribution function of the diameters of triangles in the point pattern. The diameter of a triangle is the length of its longest side. } \section{Computation time}{ If the number of points is large, the algorithm can take a very long time to inspect all possible triangles. A rough estimate of the total computation time will be printed at the beginning of the calculation. 
If this estimate seems very large, stop the calculation using the user interrupt signal, and call \code{Tstat} again, using \code{rmax} to restrict the range of \code{r} values, thus reducing the number of triangles to be inspected. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Schladitz, K. and Baddeley, A. (2000) A third order point process characteristic. \emph{Scandinavian Journal of Statistics} \bold{27} (2000) 657--671. } \seealso{ \code{\link{Kest}} } \examples{ plot(Tstat(redwood)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/quadrat.test.Rd0000755000176000001440000002177012237642733015651 0ustar ripleyusers\name{quadrat.test} \alias{quadrat.test} \alias{quadrat.test.ppp} \alias{quadrat.test.ppm} \alias{quadrat.test.quadratcount} \title{Dispersion Test for Spatial Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for a given point pattern, based on quadrat counts. Alternatively performs a goodness-of-fit test of a fitted inhomogeneous Poisson model. By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{ quadrat.test(X, ...) 
\method{quadrat.test}{ppp}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{ppm}(X, nx=5, ny=nx, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, ..., xbreaks=NULL, ybreaks=NULL, tess=NULL, nsim=1999) \method{quadrat.test}{quadratcount}(X, alternative=c("two.sided", "regular", "clustered"), method=c("Chisq", "MonteCarlo"), conditional=TRUE, ..., nsim=1999) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) to be subjected to the goodness-of-fit test. Alternatively a fitted point process model (object of class \code{"ppm"}) to be tested. Alternatively \code{X} can be the result of applying \code{\link{quadratcount}} to a point pattern. } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{alternative}{ Character string (partially matched) specifying the alternative hypothesis. } \item{method}{ Character string (partially matched) specifying the test to use: either \code{method="Chisq"} for the chi-squared test (the default), or \code{method="MonteCarlo"} for a Monte Carlo test. } \item{conditional}{ Logical. Should the Monte Carlo test be conducted conditionally upon the observed number of points of the pattern? Ignored if \code{method="Chisq"}. } \item{\dots}{Ignored.} \item{xbreaks}{ Optional. Numeric vector giving the \eqn{x} coordinates of the boundaries of the quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Optional. Numeric vector giving the \eqn{y} coordinates of the boundaries of the quadrats. Incompatible with \code{ny}. } \item{tess}{ Tessellation (object of class \code{"tess"}) determining the quadrats. Incompatible with \code{nx, ny, xbreaks, ybreaks}. } \item{nsim}{ The number of simulated samples to generate when \code{method="MonteCarlo"}. 
} } \details{ These functions perform \eqn{\chi^2}{chi^2} tests or Monte Carlo tests of goodness-of-fit for a point process model, based on quadrat counts. The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}), point process models (class \code{"ppm"}) and quadrat count tables (class \code{"quadratcount"}). \itemize{ \item if \code{X} is a point pattern, we test the null hypothesis that the data pattern is a realisation of Complete Spatial Randomness (the uniform Poisson point process). Marks in the point pattern are ignored. \item if \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypotheses of Complete Spatial Randomness. See \code{\link{quadrat.test.splitppp}} for documentation. \item If \code{X} is a fitted point process model, then it should be a Poisson point process model. The data to which this model was fitted are extracted from the model object, and are treated as the data point pattern for the test. We test the null hypothesis that the data pattern is a realisation of the (inhomogeneous) Poisson point process specified by \code{X}. } In all cases, the window of observation is divided into tiles, and the number of data points in each tile is counted, as described in \code{\link{quadratcount}}. The quadrats are rectangular by default, or may be regions of arbitrary shape specified by the argument \code{tess}. The expected number of points in each quadrat is also calculated, as determined by CSR (in the first case) or by the fitted model (in the second case). Then we perform either the \eqn{\chi^2}{chi^2} test of goodness-of-fit to the quadrat counts (if \code{method="Chisq"}) or a Monte Carlo test (if \code{method="MonteCarlo"}). If \code{method="Chisq"} then the \eqn{\chi^2}{chi^2} test of goodness-of-fit is performed. 
The Pearson \eqn{X^2} statistic \deqn{ X^2 = sum((observed - expected)^2/expected) } is computed, and compared to the \eqn{\chi^2}{chi^2} distribution with \eqn{m-k} degrees of freedom, where \code{m} is the number of quadrats and \eqn{k} is the number of fitted parameters (equal to 1 for \code{quadrat.test.ppp}). The default is to compute the \emph{two-sided} \eqn{p}-value, so that the test will be declared significant if \eqn{X^2} is either very large or very small. One-sided \eqn{p}-values can be obtained by specifying the \code{alternative}. An important requirement of the \eqn{\chi^2}{chi^2} test is that the expected counts in each quadrat be greater than 5. If \code{method="MonteCarlo"} then a Monte Carlo test is performed, obviating the need for all expected counts to be at least 5. In the Monte Carlo test, \code{nsim} random point patterns are generated from the null hypothesis (either CSR or the fitted point process model). The Pearson \eqn{X^2} statistic is computed as above. The \eqn{p}-value is determined by comparing the \eqn{X^2} statistic for the observed point pattern, with the values obtained from the simulations. Again the default is to compute the \emph{two-sided} \eqn{p}-value. If \code{conditional} is \code{TRUE} then the simulated samples are generated from the multinomial distribution with the number of \dQuote{trials} equal to the number of observed points and the vector of probabilities equal to the expected counts divided by the sum of the expected counts. Otherwise the simulated samples are independent Poisson counts, with means equal to the expected counts. The return value is an object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The return value also belongs to the special class \code{"quadrat.test"}. Plotting the object will display the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. 
} \seealso{ \code{\link{quadrat.test.splitppp}}, \code{\link{quadratcount}}, \code{\link{quadrats}}, \code{\link{quadratresample}}, \code{\link{chisq.test}}, \code{\link{kstest}}. To test a Poisson point process model against a specific alternative, use \code{\link{anova.ppm}}. } \value{ An object of class \code{"htest"}. See \code{\link{chisq.test}} for explanation. The return value is also an object of the special class \code{"quadrattest"}, and there is a plot method for this class. See the examples. } \examples{ data(simdat) quadrat.test(simdat) quadrat.test(simdat, 4, 3) quadrat.test(simdat, alternative="regular") quadrat.test(simdat, alternative="clustered") # Using Monte Carlo p-values quadrat.test(swedishpines) # Get warning, small expected values. \dontrun{ quadrat.test(swedishpines, method="M", nsim=4999) quadrat.test(swedishpines, method="M", nsim=4999, conditional=FALSE) } \testonly{ quadrat.test(swedishpines, method="M", nsim=19) quadrat.test(swedishpines, method="M", nsim=19, conditional=FALSE) } # quadrat counts qS <- quadratcount(simdat, 4, 3) quadrat.test(qS) # fitted model: inhomogeneous Poisson fitx <- ppm(simdat, ~x, Poisson()) quadrat.test(fitx) te <- quadrat.test(simdat, 4) residuals(te) # Pearson residuals plot(te) plot(simdat, pch="+", cols="green", lwd=2) plot(te, add=TRUE, col="red", cex=1.4, lty=2, lwd=3) sublab <- eval(substitute(expression(p[chi^2]==z), list(z=signif(te$p.value,3)))) title(sub=sublab, cex.sub=3) # quadrats of irregular shape B <- dirichlet(runifpoint(6, simdat$window)) qB <- quadrat.test(simdat, tess=B) plot(simdat, main="quadrat.test(simdat, tess=B)", pch="+") plot(qB, add=TRUE, col="red", lwd=2, cex=1.2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{htest} spatstat/man/quadrats.Rd0000755000176000001440000000567612237642733015065 0ustar ripleyusers\name{quadrats} \alias{quadrats} 
\title{Divide Region into Quadrats} \description{ Divides window into rectangular quadrats and returns the quadrats as a tessellation. } \usage{ quadrats(X, nx = 5, ny = nx, xbreaks = NULL, ybreaks = NULL, keepempty=FALSE) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or anything that can be coerced to a window using \code{\link{as.owin}}, such as a point pattern. } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{xbreaks}{ Numeric vector giving the \eqn{x} coordinates of the boundaries of the quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Numeric vector giving the \eqn{y} coordinates of the boundaries of the quadrats. Incompatible with \code{ny}. } \item{keepempty}{ Logical value indicating whether to delete or retain empty quadrats. See Details. } } \details{ If the window \code{X} is a rectangle, it is divided into an \code{nx * ny} grid of rectangular tiles or `quadrats'. If \code{X} is not a rectangle, then the bounding rectangle of \code{X} is first divided into an \code{nx * ny} grid of rectangular tiles, and these tiles are then intersected with the window \code{X}. The resulting tiles are returned as a tessellation (object of class \code{"tess"}) which can be plotted and used in other analyses. If \code{xbreaks} is given, it should be a numeric vector giving the \eqn{x} coordinates of the quadrat boundaries. If it is not given, it defaults to a sequence of \code{nx+1} values equally spaced over the range of \eqn{x} coordinates in the window \code{X$window}. Similarly if \code{ybreaks} is given, it should be a numeric vector giving the \eqn{y} coordinates of the quadrat boundaries. It defaults to a vector of \code{ny+1} values equally spaced over the range of \eqn{y} coordinates in the window. The lengths of \code{xbreaks} and \code{ybreaks} may be different. 
By default (if \code{keepempty=FALSE}), any rectangular tile which does not intersect the window \code{X} is ignored, and only the non-empty intersections are treated as quadrats, so the tessellation may consist of fewer than \code{nx * ny} tiles. If \code{keepempty=TRUE}, empty intersections are retained, and the tessellation always contains exactly \code{nx * ny} tiles, some of which may be empty. } \value{ A tessellation (object of class \code{"tess"}) as described under \code{\link{tess}}. } \examples{ W <- square(10) Z <- quadrats(W, 4, 5) plot(Z) data(letterR) plot(quadrats(letterR, 5, 7)) } \seealso{ \code{\link{tess}}, \code{\link{quadratcount}}, \code{\link{quadrat.test}}, \code{\link{quadratresample}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{utilities} \keyword{datagen} spatstat/man/logLik.ppm.Rd0000755000176000001440000000607512237642733015247 0ustar ripleyusers\name{logLik.ppm} \alias{logLik.ppm} \alias{extractAIC.ppm} \alias{nobs.ppm} \title{Log Likelihood and AIC for Point Process Model} \description{ Extracts the log likelihood, deviance, and AIC of a fitted Poisson point process model, or analogous quantities based on the pseudolikelihood for a fitted Gibbs point process model. } \usage{ \method{logLik}{ppm}(object, ..., warn=TRUE) \method{extractAIC}{ppm}(fit, scale=0, k=2, \dots) \method{nobs}{ppm}(object, ...) } \arguments{ \item{object,fit}{Fitted point process model. An object of class \code{"ppm"}. } \item{\dots}{Ignored.} \item{warn}{ If \code{TRUE}, a warning is given when the pseudolikelihood is returned instead of the likelihood. } \item{scale}{Ignored.} \item{k}{Numeric value specifying the weight of the equivalent degrees of freedom in the AIC. See Details.} } \details{ These functions are methods for the generic commands \code{\link{logLik}}, \code{\link{extractAIC}} and \code{\link{nobs}} for the class \code{"ppm"}. 
An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{logLik.ppm} computes the maximised value of the log likelihood for the fitted model \code{object} (as approximated by quadrature using the Berman-Turner approximation) is extracted. If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning (if \code{warn=TRUE}). The Akaike Information Criterion AIC for a fitted model is defined as \deqn{ AIC = -2 \log(L) + k \times \mbox{edf} }{ AIC = -2 * log(L) + k * edf } where \eqn{L} is the maximised likelihood of the fitted model, and \eqn{\mbox{edf}}{edf} is the effective degrees of freedom of the model. The method \code{extractAIC.ppm} returns the \emph{analogous} quantity \eqn{AIC*} in which \eqn{L} is replaced by \eqn{L*}, the quadrature approximation to the likelihood (if \code{fit} is a Poisson model) or the pseudolikelihood (if \code{fit} is a Gibbs model). The method \code{nobs.ppm} returns the number of points in the original data point pattern to which the model was fitted. The \R functions \code{\link{AIC}} and \code{\link{step}} use these methods. } \value{ A numerical value. } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{formula.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{terms.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ data(cells) fit <- ppm(cells, ~x) nobs(fit) logLik(fit) extractAIC(fit) AIC(fit) step(fit) } \keyword{spatial} \keyword{models} spatstat/man/anova.mppm.Rd0000644000176000001440000000623112241443111015256 0ustar ripleyusers\name{anova.mppm} \alias{anova.mppm} \title{ANOVA for Fitted Multiple Point Process Models} \description{ Performs analysis of deviance for two or more fitted multiple point process models. } \usage{ \method{anova}{mppm}(object, \dots, test=NULL, override=FALSE) } \arguments{ \item{object}{A fitted multiple point process model (object of class \code{"mppm"}). } \item{\dots}{ One or more fitted multiple point process models. } \item{test}{ Type of hypothesis test to perform. A character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, or \code{NULL}. } \item{override}{ Logical flag indicating whether to proceed even when there is no statistical theory to support the calculation. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for comparing several fitted multiple point process models (objects of class \code{"mppm"}, usually generated by the model-fitting function \code{\link{mppm}}). If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link{anova.glm}}. If some of the fitted models are \emph{not} Poisson point processes, then there is no statistical theory available to support a similar analysis. The function issues a warning, and (by default) returns a \code{NULL} value. 
However if \code{override=TRUE}, then a kind of analysis of deviance table will be printed. The `deviance' differences in this table are equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{mppm}}). At the time of writing, there is no statistical theory to support inferential interpretation of log pseudolikelihood ratios. The \code{override} option is provided for research purposes only! The argument \code{test} determines which hypothesis test, if any, will be performed to compare the models. The argument \code{test} should be a character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, or \code{NULL}. The first option \code{"Chisq"} gives the likelihood ratio test based on the asymptotic chi-squared distribution of the deviance difference. The meaning of the other options is explained in \code{\link{anova.glm}}. For random effects models, only \code{"Chisq"} is available, and again gives the likelihood ratio test. } \seealso{ \code{\link{mppm}} } \examples{ data(waterstriders) H <- hyperframe(X=waterstriders) mod0 <- mppm(X~1, H, Poisson()) modx <- mppm(X~x, H, Poisson()) anova.mppm(mod0, modx, test="Chi") } \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/suffstat.Rd0000755000176000001440000001110412237642734015060 0ustar ripleyusers\name{suffstat} \alias{suffstat} \title{Sufficient Statistic of Point Process Model} \description{ The canonical sufficient statistic of a point process model is evaluated for a given point pattern. } \usage{ suffstat(model, X=data.ppm(model)) } \arguments{ \item{model}{A fitted point process model (object of class \code{"ppm"}). } \item{X}{ A point pattern (object of class \code{"ppp"}). } } \value{ A numeric vector of sufficient statistics. 
The entries correspond to the model coefficients \code{coef(model)}. } \details{ The canonical sufficient statistic of \code{model} is evaluated for the point pattern \code{X}. This computation is useful for various Monte Carlo methods. Here \code{model} should be a point process model (object of class \code{"ppm"}, see \code{\link{ppm.object}}), typically obtained from the model-fitting function \code{\link{ppm}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). Every point process model fitted by \code{\link{ppm}} has a probability density of the form \deqn{f(x) = Z(\theta) \exp(\theta^T S(x))}{f(x) = Z(theta) exp(theta * S(x))} where \eqn{x} denotes a typical realisation (i.e. a point pattern), \eqn{\theta}{theta} is the vector of model coefficients, \eqn{Z(\theta)}{Z(theta)} is a normalising constant, and \eqn{S(x)} is a function of the realisation \eqn{x}, called the ``canonical sufficient statistic'' of the model. For example, the stationary Poisson process has canonical sufficient statistic \eqn{S(x)=n(x)}, the number of points in \eqn{x}. The stationary Strauss process with interaction range \eqn{r} (and fitted with no edge correction) has canonical sufficient statistic \eqn{S(x)=(n(x),s(x))} where \eqn{s(x)} is the number of pairs of points in \eqn{x} which are closer than a distance \eqn{r} to each other. \code{suffstat(model, X)} returns the value of \eqn{S(x)}, where \eqn{S} is the canonical sufficient statistic associated with \code{model}, evaluated when \eqn{x} is the given point pattern \code{X}. The result is a numeric vector, with entries which correspond to the entries of the coefficient vector \code{coef(model)}. The sufficient statistic \eqn{S} does not depend on the fitted coefficients of the model. However it does depend on the irregular parameters which are fixed in the original call to \code{\link{ppm}}, for example, the interaction range \code{r} of the Strauss process. 
The sufficient statistic also depends on the edge correction that was used to fit the model. For example in a Strauss process, \itemize{ \item If the model is fitted with \code{correction="none"}, the sufficient statistic is \eqn{S(x) = (n(x), s(x))} where \eqn{n(x)} is the number of points and \eqn{s(x)} is the number of pairs of points which are closer than \eqn{r} units apart. \item If the model is fitted with \code{correction="periodic"}, the sufficient statistic is the same as above, except that distances are measured in the periodic sense. \item If the model is fitted with \code{correction="translate"}, then \eqn{n(x)} is unchanged but \eqn{s(x)} is replaced by a weighted sum (the sum of the translation correction weights for all pairs of points which are closer than \eqn{r} units apart). \item If the model is fitted with \code{correction="border"} (the default), then points lying less than \eqn{r} units from the boundary of the observation window are treated as fixed. Thus \eqn{n(x)} is replaced by the number \eqn{n_r(x)}{n[r](x)} of points lying at least \eqn{r} units from the boundary of the observation window, and \eqn{s(x)} is replaced by the number \eqn{s_r(x)}{s[r](x)} of pairs of points, which are closer than \eqn{r} units apart, and at least one of which lies more than \eqn{r} units from the boundary of the observation window. } Non-finite values of the sufficient statistic (\code{NA} or \code{-Inf}) may be returned if the point pattern \code{X} is not a possible realisation of the model (i.e. if \code{X} has zero probability of occurring under \code{model} for all values of the canonical coefficients \eqn{\theta}{theta}). 
} \seealso{ \code{\link{ppm}} } \examples{ fitS <- ppm(swedishpines, ~1, Strauss(7)) X <- rpoispp(intensity(swedishpines), win=as.owin(swedishpines)) suffstat(fitS, X) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/MultiStraussHard.Rd0000755000176000001440000000666312237642731016512 0ustar ripleyusers\name{MultiStraussHard} \alias{MultiStraussHard} \title{The Multitype/Hard Core Strauss Point Process Model} \description{ Creates an instance of the multitype/hard core Strauss point process model which can then be fitted to point pattern data. } \usage{ MultiStraussHard(types=NULL, iradii, hradii) } \arguments{ \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{iradii}{Matrix of interaction radii} \item{hradii}{Matrix of hard core radii} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype/hard core Strauss process with interaction radii \eqn{iradii[i,j]} and hard core radii \eqn{hradii[i,j]}. } \details{ This is a hybrid of the multitype Strauss process (see \code{\link{MultiStrauss}}) and the hard core process (case \eqn{\gamma=0}{gamma = 0} of the Strauss process). A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart; if the pair lies more than \eqn{h_{ij}}{h[i,j]} and less than \eqn{r_{ij}}{r[i,j]} units apart, it contributes a factor \eqn{\gamma_{ij}}{gamma[i,j]} to the probability density. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStraussHard interaction is applied, when the user calls \code{\link{ppm}}. 
However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrices \code{iradii} and \code{hradii}. The matrices \code{iradii} and \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no interaction term should be included for this combination of types. Note that only the interaction radii and hardcore radii are specified in \code{MultiStraussHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} and \eqn{\log(\gamma_{ij})}{log(gamma[i,j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiStraussHard()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiHard}}, \code{\link{Strauss}} } \examples{ r <- matrix(3, nrow=2,ncol=2) h <- matrix(c(1,2,2,1), nrow=2,ncol=2) MultiStraussHard(iradii=r,hradii=h) # prints a sensible description of itself r <- 0.04 * matrix(c(1,2,2,1), nrow=2,ncol=2) h <- 0.02 * matrix(c(1,NA,NA,1), nrow=2,ncol=2) X <- amacrine \testonly{ X <- X[owin(c(0,0.8), c(0,1))] } fit <- ppm(X, ~1, MultiStraussHard(,r,h)) # fit stationary multitype hardcore Strauss process to `amacrine' # Note the comma; needed since "types" is not specified. } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype/hard core Strauss model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/duplicated.ppp.Rd0000755000176000001440000000443412242557163016142 0ustar ripleyusers\name{duplicated.ppp} \alias{duplicated.ppp} \alias{duplicated.ppx} \title{Determine Duplicated Points in a Spatial Point Pattern} \description{ Determines which points in a spatial point pattern are duplicates of previous points, and returns a logical vector. } \usage{ \method{duplicated}{ppp}(x, \dots, rule=c("spatstat", "deldir")) \method{duplicated}{ppx}(x, \dots) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"ppx"}). } \item{\dots}{ Ignored. } \item{rule}{ Character string. The rule for determining duplicated points. } } \value{ A logical vector of length equal to the number of points in \code{x}. } \details{ These are methods for the generic function \code{duplicated} for point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}, or class \code{"ppx"}). Two points in a point pattern are deemed to be identical if their \eqn{x,y} coordinates are the same, and their marks are also the same (if they carry marks). The Examples section illustrates how it is possible for a point pattern to contain a pair of identical points. This function determines which points in \code{x} duplicate other points that appeared earlier in the sequence. It returns a logical vector with entries that are \code{TRUE} for duplicated points and \code{FALSE} for unique (non-duplicated) points. If \code{rule="spatstat"} (the default), duplicated points are determined by testing equality of their coordinates and marks using \code{==}. This is the most stringent possible test. 
If \code{rule="deldir"}, duplicated points are determined using the function \code{\link[deldir]{duplicatedxy}} in the package \pkg{deldir}, which currently uses \code{\link{duplicated.data.frame}}. Setting \code{rule="deldir"} will ensure consistency with functions in the \pkg{deldir} package. } \seealso{ \code{\link{ppp.object}}, \code{\link{unique.ppp}}, \code{\link{multiplicity.ppp}} } \examples{ X <- ppp(c(1,1,0.5), c(2,2,1), window=square(3)) duplicated(X) duplicated(X, rule="deldir") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/iplot.Rd0000755000176000001440000000427512237642732014355 0ustar ripleyusers\name{iplot} %DontDeclareMethods \alias{iplot} \alias{iplot.ppp} \alias{iplot.layered} \alias{iplot.default} \title{Point and Click Interface for Displaying Spatial Data} \description{ Plot spatial data with interactive (point-and-click) control over the plot. } \usage{ iplot(x, ...) \method{iplot}{ppp}(x, ..., xname) \method{iplot}{layered}(x, ..., xname) \method{iplot}{default}(x, ..., xname) } \arguments{ \item{x}{ The spatial object to be plotted. An object of class \code{"ppp"}, \code{"psp"}, \code{"im"}, \code{"owin"}, or \code{"layered"}. } \item{\dots}{Ignored.} \item{xname}{ Optional. Character string to use as the title of the dataset. } } \value{ \code{NULL}. } \details{ The function \code{iplot} generates a plot of the spatial dataset \code{x} and allows interactive control over the appearance of the plot using a point-and-click interface. The function \code{iplot} is generic, with methods for point patterns (\code{\link{iplot.ppp}}), layered objects (\code{\link{iplot.layered}}) and a default method. The default method will handle objects of class \code{"psp"}, \code{"im"} and \code{"owin"} at least. A new popup window is launched. 
The spatial dataset \code{x} is displayed in the middle of the window using the appropriate \code{plot} method. The left side of the window contains buttons and sliders allowing the user to change the plot parameters. The right side of the window contains navigation controls for zooming (changing magnification), panning (shifting the field of view relative to the data), redrawing and exiting. If the user clicks in the area where the point pattern is displayed, the field of view will be re-centred at the point that was clicked. } \seealso{ \code{\link{istat}} } \examples{ if(interactive()) { iplot(cells) iplot(amacrine) iplot(lansing) L <- layered(D=distmap(cells), P=cells, plotargs=list(list(ribbon=FALSE), list(pch=16))) iplot(L) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/ppp.Rd0000755000176000001440000001576512237642733014040 0ustar ripleyusers\name{ppp} \alias{ppp} \title{Create a Point Pattern} \description{ Creates an object of class \code{"ppp"} representing a point pattern dataset in the two-dimensional plane. } \usage{ ppp(x,y, \dots, window, marks, check=TRUE) } \arguments{ \item{x}{Vector of \eqn{x} coordinates of data points} \item{y}{Vector of \eqn{y} coordinates of data points} \item{window}{window of observation, an object of class \code{"owin"}} \item{\dots}{arguments passed to \code{\link{owin}} to create the window, if \code{window} is missing} \item{marks}{(optional) mark values for the points. A vector or data frame.} \item{check}{Logical flag indicating whether to check that all the \eqn{(x,y)} points lie inside the specified window. Do not set this to \code{FALSE} unless you are sure that this check is unnecessary. } } \value{ An object of class \code{"ppp"} describing a point pattern in the two-dimensional plane (see \code{\link{ppp.object}}). 
} \details{ In the \pkg{spatstat} library, a point pattern dataset is described by an object of class \code{"ppp"}. This function creates such objects. The vectors \code{x} and \code{y} must be numeric vectors of equal length. They are interpreted as the cartesian coordinates of the points in the pattern. A point pattern dataset is assumed to have been observed within a specific region of the plane called the observation window. An object of class \code{"ppp"} representing a point pattern contains information specifying the observation window. This window must always be specified when creating a point pattern dataset; there is intentionally no default action of ``guessing'' the window dimensions from the data points alone. You can specify the observation window in several (mutually exclusive) ways: \itemize{ \item \code{xrange, yrange} specify a rectangle with these dimensions; \item \code{poly} specifies a polygonal boundary. If the boundary is a single polygon then \code{poly} must be a list with components \code{x,y} giving the coordinates of the vertices. If the boundary consists of several disjoint polygons then \code{poly} must be a list of such lists so that \code{poly[[i]]$x} gives the \eqn{x} coordinates of the vertices of the \eqn{i}th boundary polygon. \item \code{mask} specifies a binary pixel image with entries that are \code{TRUE} if the corresponding pixel is inside the window. \item \code{window} is an object of class \code{"owin"} (see \code{\link{owin.object}}) specifying the window. } The arguments \code{xrange, yrange} or \code{poly} or \code{mask} are passed to the window creator function \code{\link{owin}} for interpretation. See \code{\link{owin}} for further details. The argument \code{window}, if given, must be an object of class \code{"owin"}. 
It is a full description of the window geometry, and could have been obtained from \code{\link{owin}} or \code{\link{as.owin}}, or by just extracting the observation window of another point pattern, or by manipulating such windows. See \code{\link{owin}} or the Examples below. The points with coordinates \code{x} and \code{y} \bold{must} lie inside the specified window, in order to define a valid object of this class. Any points which do not lie inside the window will be removed from the point pattern, and a warning will be issued. See the section on Rejected Points. The name of the unit of length for the \code{x} and \code{y} coordinates can be specified in the dataset, using the argument \code{unitname}, which is passed to \code{\link{owin}}. See the examples below, or the help file for \code{\link{owin}}. The optional argument \code{marks} is given if the point pattern is marked, i.e. if each data point carries additional information. For example, points which are classified into two or more different types, or colours, may be regarded as having a mark which identifies which colour they are. Data recording the locations and heights of trees in a forest can be regarded as a marked point pattern where the mark is the tree height. The argument \code{marks} can be either \itemize{ \item a vector, of the same length as \code{x} and \code{y}, which is interpreted so that \code{marks[i]} is the mark attached to the point \code{(x[i],y[i])}. If the mark is a real number then \code{marks} should be a numeric vector, while if the mark takes only a finite number of possible values (e.g. colours or types) then \code{marks} should be a \code{factor}. \item a data frame, with the number of rows equal to the number of points in the point pattern. The \code{i}th row of the data frame is interpreted as containing the mark values for the \code{i}th point in the point pattern. The columns of the data frame correspond to different mark variables (e.g. 
tree species and tree diameter). } See \code{\link{ppp.object}} for a description of the class \code{"ppp"}. Users would normally invoke \code{ppp} to create a point pattern, but the functions \code{\link{as.ppp}} and \code{scanpp} may sometimes be convenient. } \section{Rejected points}{ The points with coordinates \code{x} and \code{y} \bold{must} lie inside the specified window, in order to define a valid object of class \code{"ppp"}. Any points which do not lie inside the window will be removed from the point pattern, and a warning will be issued. The rejected points are still accessible: they are stored as an attribute of the point pattern called \code{"rejects"} (which is an object of class \code{"ppp"} containing the rejected points in a large window). However, rejected points in a point pattern will be ignored by all other functions except \code{\link{plot.ppp}}. To remove the rejected points altogether, use \code{\link{as.ppp}}. To include the rejected points, you will need to find a larger window that contains them, and use this larger window in a call to \code{ppp}. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{as.ppp}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.owin}} } \examples{ # some arbitrary coordinates in [0,1] x <- runif(20) y <- runif(20) # the following are equivalent X <- ppp(x, y, c(0,1), c(0,1)) X <- ppp(x, y) X <- ppp(x, y, window=owin(c(0,1),c(0,1))) # specify that the coordinates are given in metres X <- ppp(x, y, c(0,1), c(0,1), unitname=c("metre","metres")) \dontrun{plot(X)} # marks m <- sample(1:2, 20, replace=TRUE) m <- factor(m, levels=1:2) X <- ppp(x, y, c(0,1), c(0,1), marks=m) \dontrun{plot(X)} # polygonal window X <- ppp(x, y, poly=list(x=c(0,10,0), y=c(0,0,10))) \dontrun{plot(X)} # copy the window from another pattern data(cells) X <- ppp(x, y, window=cells$window) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/rmh.ppm.Rd0000755000176000001440000001772412237642734014620 0ustar ripleyusers\name{rmh.ppm} \alias{rmh.ppm} \title{Simulate from a Fitted Point Process Model} \description{ Given a point process model fitted to data, generate a random simulation of the model, using the Metropolis-Hastings algorithm. } \usage{ \method{rmh}{ppm}(model, start=NULL, control=default.rmhcontrol(model), \dots, project=TRUE, verbose=TRUE) } \arguments{ \item{model}{A fitted point process model (object of class \code{"ppm"}, see \code{\link{ppm.object}}) which it is desired to simulate. This fitted model is usually the result of a call to \code{\link{ppm}}. See \bold{Details} below. } \item{start}{Data determining the initial state of the Metropolis-Hastings algorithm. See \code{\link{rmhstart}} for description of these arguments. Defaults to \code{list(x.start=data.ppm(model))} } \item{control}{Data controlling the iterative behaviour of the Metropolis-Hastings algorithm. See \code{\link{rmhcontrol}} for description of these arguments. 
} \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}}, or to \code{\link{rmh.default}}, or to covariate functions in the model. } \item{project}{ Logical flag indicating what to do if the fitted model is invalid (in the sense that the values of the fitted coefficients do not specify a valid point process). If \code{project=TRUE} the closest valid model will be simulated; if \code{project=FALSE} an error will occur. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \value{A point pattern (an object of class \code{"ppp"}; see \code{\link{ppp.object}}). } \details{ This function generates simulated realisations from a point process model that has been fitted to point pattern data. It is a method for the generic function \code{\link{rmh}} for the class \code{"ppm"} of fitted point process models. To simulate other kinds of point process models, see \code{\link{rmh}} or \code{\link{rmh.default}}. The argument \code{model} describes the fitted model. It must be an object of class \code{"ppm"} (see \code{\link{ppm.object}}), and will typically be the result of a call to the point process model fitting function \code{\link{ppm}}. The current implementation enables simulation from any fitted model involving the interactions \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}} and \code{\link{Softcore}}, including nonstationary models. See the examples. It is also possible to simulate \emph{hybrids} of several such models. See \code{\link{Hybrid}} and the examples. It is possible that the fitted coefficients of a point process model may be ``illegal'', i.e. that there may not exist a mathematically well-defined point process with the given parameter values. 
For example, a Strauss process with interaction parameter \eqn{\gamma > 1}{gamma > 1} does not exist, but the model-fitting procedure used in \code{\link{ppm}} will sometimes produce values of \eqn{\gamma}{gamma} greater than 1. In such cases, if \code{project=FALSE} then an error will occur, while if \code{project=TRUE} then \code{rmh.ppm} will find the nearest legal model and simulate this model instead. (The nearest legal model is obtained by projecting the vector of coefficients onto the set of valid coefficient vectors. The result is usually the Poisson process with the same fitted intensity.) The arguments \code{start} and \code{control} are lists of parameters determining the initial state and the iterative behaviour, respectively, of the Metropolis-Hastings algorithm. The argument \code{start} is passed directly to \code{\link{rmhstart}}. See \code{\link{rmhstart}} for details of the parameters of the initial state, and their default values. The argument \code{control} is first passed to \code{\link{rmhcontrol}}. Then if any additional arguments \code{\dots} are given, \code{\link{update.rmhcontrol}} is called to update the parameter values. See \code{\link{rmhcontrol}} for details of the iterative behaviour parameters, and \code{\link{default.rmhcontrol}} for their default values. Note that if you specify expansion of the simulation window using the parameter \code{expand} (so that the model will be simulated on a window larger than the original data window) then the model must be capable of extrapolation to this larger window. This is usually not possible for models which depend on external covariates, because the domain of a covariate image is usually the same as the domain of the fitted model. After extracting the relevant information from the fitted model object \code{model}, \code{rmh.ppm} invokes the default \code{rmh} algorithm \code{\link{rmh.default}}, unless the model is Poisson. 
If the model is Poisson then the Metropolis-Hastings algorithm is not needed, and the model is simulated directly, using one of \code{\link{rpoispp}}, \code{\link{rmpoispp}}, \code{\link{rpoint}} or \code{\link{rmpoint}}. See \code{\link{rmh.default}} for further information about the implementation, or about the Metropolis-Hastings algorithm. } \section{Warnings}{ See Warnings in \code{\link{rmh.default}}. } \seealso{ \code{\link{simulate.ppm}}, \code{\link{rmh}}, \code{\link{rmhmodel}}, \code{\link{rmhcontrol}}, \code{\link{default.rmhcontrol}}, \code{\link{update.rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{rmh.default}}, \code{\link{ppp.object}}, \code{\link{ppm}}, Interactions: \code{\link{AreaInter}}, \code{\link{DiggleGratton}}, \code{\link{DiggleGatesStibbard}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}} } \examples{ live <- interactive() op <- spatstat.options() spatstat.options(rmh.nrep=1e5) Nrep <- 1e5 X <- swedishpines if(live) plot(X, main="Swedish Pines data") # Poisson process fit <- ppm(X, ~1, Poisson()) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Poisson model") # Strauss process fit <- ppm(X, ~1, Strauss(r=7)) Xsim <- rmh(fit) if(live) plot(Xsim, main="simulation from fitted Strauss model") \dontrun{ # Strauss process simulated on a larger window # then clipped to original window Xsim <- rmh(fit, control=list(nrep=Nrep, expand=1.1, periodic=TRUE)) Xsim <- rmh(fit, nrep=Nrep, expand=2, periodic=TRUE) } \dontrun{ X <- rSSI(0.05, 100) # piecewise-constant pairwise interaction function fit <- ppm(X, ~1, PairPiece(seq(0.02, 0.1, by=0.01))) Xsim <- rmh(fit) } # marked point pattern Y <- amacrine \dontrun{ # marked Poisson models fit <- ppm(Y) fit <- ppm(Y,~marks) fit <- ppm(Y,~polynom(x,2)) fit <- ppm(Y,~marks+polynom(x,2)) 
fit <- ppm(Y,~marks*polynom(x,y,2)) Ysim <- rmh(fit) } # multitype Strauss models MS <- MultiStrauss(types = levels(Y$marks), radii=matrix(0.07, ncol=2, nrow=2)) \dontrun{ fit <- ppm(Y, ~marks, MS) Ysim <- rmh(fit) } fit <- ppm(Y,~marks*polynom(x,y,2), MS) Ysim <- rmh(fit) if(live) plot(Ysim, main="simulation from fitted inhomogeneous Multitype Strauss") spatstat.options(op) \dontrun{ # Hybrid model fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) Y <- rmh(fit) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{datagen} spatstat/man/nncross.Rd0000755000176000001440000001621312252030012014665 0ustar ripleyusers\name{nncross} \alias{nncross} \alias{nncross.ppp} \alias{nncross.default} \title{Nearest Neighbours Between Two Patterns} \description{ Given two point patterns \code{X} and \code{Y}, finds the nearest neighbour in \code{Y} of each point of \code{X}. Alternatively \code{Y} may be a line segment pattern. } \usage{ nncross(X, Y, \dots) \method{nncross}{ppp}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1, sortby=c("range", "var", "x", "y"), is.sorted.X = FALSE, is.sorted.Y = FALSE) \method{nncross}{default}(X, Y, \dots) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{Y}{Either a point pattern (object of class \code{"ppp"}) or a line segment pattern (object of class \code{"psp"}).} \item{iX, iY}{Optional identifiers, applicable only in the case where \code{Y} is a point pattern, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{k}{ Integer, or integer vector. 
The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{sortby}{ Determines which coordinate to use to sort the point patterns. See Details. } \item{is.sorted.X, is.sorted.Y}{ Logical values attesting whether the point patterns \code{X} and \code{Y} have been sorted. See Details. } \item{\dots}{Ignored.} } \details{ Given two point patterns \code{X} and \code{Y} this function finds, for each point of \code{X}, the nearest point of \code{Y}. The distance between these points is also computed. If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. Alternatively if \code{X} is a point pattern and \code{Y} is a line segment pattern, the function finds the nearest line segment to each point of \code{X}, and computes the distance. The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. The result is a data frame. Note that this function is not symmetric in \code{X} and \code{Y}. To find the nearest neighbour in \code{X} of each point in \code{Y}, where \code{Y} is a point pattern, use \code{nncross(Y,X)}. 
The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. } \section{Sorting data and pre-sorted data}{ Read this section if you care about the speed of computation. For efficiency, the algorithm sorts the point patterns \code{X} and \code{Y} into increasing order of the \eqn{x} coordinate or increasing order of the \eqn{y} coordinate. Sorting is only an intermediate step; it does not affect the output, which is always given in the same order as the original data. By default (if \code{sortby="range"}), the sorting will occur on the coordinate that has the larger range of values (according to the frame of the enclosing window of \code{Y}). If \code{sortby="var"}, sorting will occur on the coordinate that has the greater variance (in the pattern \code{Y}). Setting \code{sortby="x"} or \code{sortby = "y"} will specify that sorting should occur on the \eqn{x} or \eqn{y} coordinate, respectively. If the point pattern \code{X} is already sorted, then the corresponding argument \code{is.sorted.X} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. Similarly if \code{Y} is already sorted, then \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. 
If both \code{X} and \code{Y} are sorted \emph{on the same coordinate axis} then both \code{is.sorted.X} and \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. } \value{ A data frame, or a vector if the data frame would contain only one column. By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"} and \code{k=1}, a vector of nearest neighbour distances. If \code{what="which"} and \code{k=1}, a vector of nearest neighbour indices. If \code{k} is specified, the result is a data frame with columns containing the \code{k}-th nearest neighbour distances and/or nearest neighbour indices. } \seealso{ \code{\link{nndist}} for nearest neighbour distances in a single point pattern. } \examples{ # two different point patterns X <- runifpoint(15) Y <- runifpoint(20) N <- nncross(X,Y)$which # note that length(N) = 15 plot(superimpose(X=X,Y=Y), main="nncross", cols=c("red","blue")) arrows(X$x, X$y, Y[N]$x, Y[N]$y, length=0.15) # third-nearest neighbour NXY <- nncross(X, Y, k=3) NXY[1:3,] # second and third nearest neighbours NXY <- nncross(X, Y, k=2:3) NXY[1:3,] # two patterns with some points in common Z <- runifpoint(50) X <- Z[1:30] Y <- Z[20:50] iX <- 1:30 iY <- 20:50 N <- nncross(X,Y, iX, iY)$which N <- nncross(X,Y, iX, iY, what="which") #faster plot(superimpose(X=X, Y=Y), main="nncross", cols=c("red","blue")) arrows(X$x, X$y, Y[N]$x, Y[N]$y, length=0.15) # point pattern and line segment pattern X <- runifpoint(15) Y <- rpoisline(10) N <- nncross(X,Y) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Rolf Turner \email{r.turner@auckland.ac.nz}, and Jens Oehlschlaegel } \keyword{spatial} \keyword{math} 
spatstat/man/as.rectangle.Rd0000755000176000001440000000354712237642732015601 0ustar ripleyusers\name{as.rectangle} \alias{as.rectangle} \title{Window Frame} \description{ Extract the window frame of a window or other spatial dataset } \usage{ as.rectangle(w, \dots) } \arguments{ \item{w}{A window, or a dataset that has a window. Either a window (object of class \code{"owin"}), a pixel image (object of class \code{"im"}) or other data determining such a window. } \item{\dots}{ Optional. Auxiliary data to help determine the window. If \code{w} does not belong to a recognised class, the arguments \code{w} and \code{\dots} are passed to \code{\link{as.owin}} to determine the window. } } \value{ A window (object of class \code{"owin"}) of type \code{"rectangle"} representing a rectangle. } \details{ This function is the quickest way to determine a bounding rectangle for a spatial dataset. If \code{w} is a window, the function just extracts the outer bounding rectangle of \code{w} as given by its elements \code{xrange,yrange}. The function can also be applied to any spatial dataset that has a window: for example, a point pattern (object of class \code{"ppp"}) or a line segment pattern (object of class \code{"psp"}). The bounding rectangle of the window of the dataset is extracted. Use the function \code{\link{bounding.box}} to compute the \emph{smallest} bounding rectangle of a dataset. 
} \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{bounding.box}} } \examples{ w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7))) r <- as.rectangle(w) # returns a 10 x 10 rectangle data(lansing) as.rectangle(lansing) data(copper) as.rectangle(copper$SouthLines) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/rpoisppx.Rd0000755000176000001440000000252312237642734015112 0ustar ripleyusers\name{rpoisppx} \alias{rpoisppx} \title{ Generate Poisson Point Pattern in Any Dimensions } \description{ Generate a random multi-dimensional point pattern using the homogeneous Poisson process. } \usage{ rpoisppx(lambda, domain) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single positive number. } \item{domain}{ Multi-dimensional box in which the process should be generated. An object of class \code{"boxx"}. } } \details{ This function generates a realisation of the homogeneous Poisson process in multi dimensions, with intensity \code{lambda} (points per unit volume). The realisation is generated inside the multi-dimensional region \code{domain} which currently must be a rectangular box (object of class \code{"boxx"}). } \value{ The simulated multi-dimensional point pattern (an object of class \code{"ppx"}). } \note{ The intensity \code{lambda} is the expected number of points \emph{per unit volume}. 
} \seealso{ \code{\link{runifpointx}}, \code{\link{ppx}}, \code{\link{boxx}} } \examples{ w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3)) X <- rpoisppx(10, w) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/crossdist.default.Rd0000755000176000001440000000537412237642732016673 0ustar ripleyusers\name{crossdist.default} \alias{crossdist.default} \title{Pairwise distances between two different sets of points} \description{ Computes the distances between each pair of points taken from two different sets of points. } \usage{ \method{crossdist}{default}(X, Y, x2, y2, \dots, period=NULL, method="C", squared=FALSE) } \arguments{ \item{X,Y}{ Numeric vectors of equal length specifying the coordinates of the first set of points. } \item{x2,y2}{ Numeric vectors of equal length specifying the coordinates of the second set of points. } \item{\dots}{ Ignored. } \item{period}{ Optional. Dimensions for periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in the first set of points to the \code{j}-th point in the second set of points. } \details{ Given two sets of points, this function computes the Euclidean distance from each point in the first set to each point in the second set, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}}. This function expects \code{X} and \code{Y} to be numeric vectors of equal length specifying the coordinates of the first set of points. The arguments \code{x2},\code{y2} specify the coordinates of the second set of points. 
Alternatively if \code{period} is given, then the distances will be computed in the `periodic' sense (also known as `torus' distance). The points will be treated as if they are in a rectangle of width \code{period[1]} and height \code{period[2]}. Opposite edges of the rectangle are regarded as equivalent. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by a factor of 4. } \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.ppp}}, \code{\link{crossdist.psp}}, \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ d <- crossdist(runif(7), runif(7), runif(12), runif(12)) d <- crossdist(runif(7), runif(7), runif(12), runif(12), period=c(1,1)) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/rcellnumber.Rd0000644000176000001440000000231512237642733015533 0ustar ripleyusers\name{rcellnumber} \alias{rcellnumber} \title{ Generate Random Numbers of Points for Cell Process } \description{ Generates random integers for the Baddeley-Silverman counterexample. } \usage{ rcellnumber(n, N = 10) } \arguments{ \item{n}{ Number of random integers to be generated. } \item{N}{ Distributional parameter: the largest possible value. An integer greater than 1. } } \details{ This function generates random integers which have mean and variance equal to 1, but which do not have a Poisson distribution. The random integers take the values \eqn{0}, \eqn{1} and \eqn{N} with probabilities \eqn{1/N}, \eqn{(N-2)/(N-1)} and \eqn{1/(N(N-1))} respectively. See Baddeley and Silverman (1984). } \value{ An integer vector of length \code{n}. } \references{ Baddeley, A.J. 
and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089-1094. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{rcell}} } \examples{ rcellnumber(30, 3) } \keyword{datagen} spatstat/man/summary.quad.Rd0000644000176000001440000000332012237642734015645 0ustar ripleyusers\name{summary.quad} \alias{summary.quad} \alias{print.summary.quad} \title{Summarizing a Quadrature Scheme} \description{ \code{summary} method for class \code{"quad"}. } \usage{ \method{summary}{quad}(object, \dots, checkdup=FALSE) \method{print}{summary.quad}(x, \dots, dp=3) } \arguments{ \item{object}{A quadrature scheme.} \item{\dots}{Ignored.} \item{checkdup}{ Logical value indicating whether to test for duplicated points. } \item{dp}{Number of significant digits to print.} \item{x}{Object of class \code{"summary.quad"} returned by \code{summary.quad}.} } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"quad"}. An object of class \code{"quad"} describes a quadrature scheme, used to fit a point process model. See \code{\link{quad.object}}) for details of this class. \code{summary.quad} extracts information about the quadrature scheme, and \code{print.summary.quad} prints this information in a comprehensible format. In normal usage, \code{print.summary.quad} is invoked implicitly when the user calls \code{summary.quad} without assigning its value to anything. See the examples. } \value{ \code{summary.quad} returns an object of class \code{"summary.quad"}, while \code{print.summary.quad} returns \code{NULL}. 
} \examples{ # make a quadrature scheme Q <- quadscheme(rpoispp(42)) # summarize it summary(Q) # save the summary s <- summary(Q) # print it print(s) s # extract total quadrature weight s$w$all$sum } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/edges2vees.Rd0000644000176000001440000000323012237642732015251 0ustar ripleyusers\name{edges2vees} \alias{edges2vees} \title{ List Dihedral Triples in a Graph } \description{ Given a list of edges between vertices, compile a list of all \sQuote{vees} or dihedral triples formed by these edges. } \usage{ edges2vees(iedge, jedge, nvert=max(iedge, jedge), \dots, check=TRUE) } \arguments{ \item{iedge,jedge}{ Integer vectors, of equal length, specifying the edges. } \item{nvert}{ Number of vertices in the network. } \item{\dots}{Ignored} \item{check}{Logical. Whether to check validity of input data.} } \details{ Given a finite graph with \code{nvert} vertices and with edges specified by \code{iedge, jedge}, this low-level function finds all \sQuote{vees} or \sQuote{dihedral triples} in the graph, that is, all triples of vertices \code{(i,j,k)} where \code{i} and \code{j} are joined by an edge and \code{i} and \code{k} are joined by an edge. The interpretation of \code{iedge, jedge} is that each successive pair of entries specifies an edge in the graph. The \eqn{k}th edge joins vertex \code{iedge[k]} to vertex \code{jedge[k]}. Entries of \code{iedge} and \code{jedge} must be integers from 1 to \code{nvert}. } \value{ A 3-column matrix of integers, in which each row represents a triple of vertices, with the first vertex joined to the other two vertices. 
} \seealso{ \code{\link{edges2triangles}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ i <- c(1, 2, 5, 5, 1, 4, 2) j <- c(2, 3, 3, 1, 3, 2, 5) edges2vees(i, j) } \keyword{spatial} \keyword{manip} spatstat/man/rho2hat.Rd0000755000176000001440000000731512237642734014601 0ustar ripleyusers\name{rho2hat} \alias{rho2hat} \title{ Smoothed Relative Density of Pairs of Covariate Values } \description{ Given a point pattern and two spatial covariates \eqn{Z_1}{Z1} and \eqn{Z_2}{Z2}, construct a smooth estimate of the relative risk of the pair \eqn{(Z_1,Z_2)}{(Z1, Z2)}. } \usage{ rho2hat(object, cov1, cov2, ..., method=c("ratio", "reweight")) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"}). } \item{cov1,cov2}{ The two covariates. Each argument is either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location, or one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{\dots}{ Additional arguments passed to \code{\link{density.ppp}} to smooth the scatterplots. } \item{method}{ Character string determining the smoothing method. See Details. } } \details{ This is a bivariate version of \code{\link{rhohat}}. If \code{object} is a point pattern, this command produces a smoothed version of the scatterplot of the values of the covariates \code{cov1} and \code{cov2} observed at the points of the point pattern. The covariates \code{cov1,cov2} must have continuous values. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. 
Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z_1(u), Z_2(u)) \kappa(u) }{ lambda(u) = rho(Z1(u), Z2(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}, and \eqn{\rho(z_1,z_2)}{rho(z1, z2)} is a function to be estimated. The algorithm computes a smooth estimate of the function \eqn{\rho}{rho}. The \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z_1, z_2)}{rho(z1, z2)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by the ratio of two density estimates. The numerator is a (rescaled) density estimate obtained by smoothing the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariates \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. \item If \code{method="reweight"}, then \eqn{\rho(z_1, z_2)}{rho(z1,z2)} is estimated by applying density estimation to the points \eqn{(Z_1(y_i), Z_2(y_i))}{(Z1(y[i]), Z2(y[i]))} obtained by evaluating the two covariates \eqn{Z_1, Z_2}{Z1, Z2} at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{(Z_1,Z_2)}{(Z1, Z2)}. } } \value{ A pixel image (object of class \code{"im"}). Also belongs to the special class \code{"rho2hat"} which has a plot method. } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. 
} \author{ Adrian Baddeley } \seealso{ \code{\link{rhohat}}, \code{\link{methods.rho2hat}} } \examples{ data(bei) attach(bei.extra) plot(rho2hat(bei, elev, grad)) fit <- ppm(bei, ~elev, covariates=bei.extra) \dontrun{ plot(rho2hat(fit, elev, grad)) } plot(rho2hat(fit, elev, grad, method="reweight")) } \keyword{spatial} \keyword{models} spatstat/man/gordon.Rd0000644000176000001440000000123512237642732014510 0ustar ripleyusers\name{gordon} \alias{gordon} \docType{data} \title{ People in Gordon Square } \description{ This dataset records the location of people sitting on a grass patch in Gordon Square, London, at 3pm on a sunny afternoon. The dataset \code{gordon} is a point pattern (object of class \code{"ppp"}) containing the spatial coordinates of each person. The grass patch is an irregular polygon with two holes. Coordinates are given in metres. } \usage{data(gordon)} \examples{ data(gordon) plot(gordon) } \source{ Andrew Bevan, University College London. } \references{ Bevan, A. (2012) Manuscript in preparation. } \keyword{datasets} \keyword{spatial} spatstat/man/G3est.Rd0000755000176000001440000000704112237642731014210 0ustar ripleyusers\name{G3est} \Rdversion{1.1} \alias{G3est} \title{ Nearest Neighbour Distance Distribution Function of a Three-Dimensional Point Pattern } \description{ Estimates the nearest-neighbour distance distribution function \eqn{G_3(r)}{G3(r)} from a three-dimensional point pattern. } \usage{ G3est(X, ..., rmax = NULL, nrval = 128, correction = c("rs", "km", "Hanisch")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{G_3(r)}{G3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. 
Character vector specifying the edge correction(s) to be applied. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the nearest-neighbour function is \deqn{ G_3(r) = P(d^\ast(x,\Phi) \le r \mid x \in \Phi) }{ G3(r) = P(d*(x,Phi) <= r | x in Phi) } the cumulative distribution function of the distance \eqn{d^\ast(x,\Phi)}{d*(x,Phi)} from a typical point \eqn{x} in \eqn{\Phi}{Phi} to its nearest neighbour, i.e. to the nearest \emph{other} point of \eqn{\Phi}{Phi}. The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The nearest neighbour function of \eqn{\Phi}{Phi} can then be estimated using techniques described in the References. For each data point, the distance to the nearest neighbour is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is the estimate of \eqn{G_3(r)}{G3(r)}. The available edge corrections are: \describe{ \item{\code{"rs"}:}{ the reduced sample (aka minus sampling, border correction) estimator (Baddeley et al, 1993) } \item{\code{"km"}:}{ the three-dimensional version of the Kaplan-Meier estimator (Baddeley and Gill, 1997) } \item{\code{"Hanisch"}:}{ the three-dimensional generalisation of the Hanisch estimator (Hanisch, 1984). } } } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Baddeley, A.J. and Gill, R.D. (1997) Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25}, 263--292. Hanisch, K.-H. 
(1984) Some remarks on estimators of the distribution function of nearest neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rana Moyeed. } \section{Warnings}{ A large value of \code{nrval} is required in order to avoid discretisation effects (due to the use of histograms in the calculation). } \seealso{ \code{\link{F3est}}, \code{\link{K3est}}, \code{\link{pcf3est}} } \examples{ X <- rpoispp3(42) Z <- G3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} spatstat/man/pcfdot.Rd0000755000176000001440000000764112237642733014512 0ustar ripleyusers\name{pcfdot} \alias{pcfdot} \title{Multitype pair correlation function (i-to-any)} \description{ Calculates an estimate of the multitype pair correlation function (from points of type \code{i} to points of any type) for a multitype point pattern. } \usage{ pcfdot(X, i, ...) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot-type pair correlation function \eqn{g_{i\bullet}(r)}{gdot[i](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{\dots}{ Arguments passed to \code{\link{pcf.ppp}}. } } \details{ This is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. 
For two locations \eqn{x} and \eqn{y} separated by a nonzero distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of any type at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda g_{i\bullet}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda * gdot[i](r) dx dy } where \eqn{\lambda}{lambda} is the intensity of all points, and \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda}{p(r) = lambda[i] * lambda} so \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1}. For a stationary multitype point process, the type-\code{i}-to-any-type pair correlation function between marks \eqn{i} and \eqn{j} is formally defined as \deqn{ g_{i\bullet}(r) = \frac{K_{i\bullet}^\prime(r)}{2\pi r} }{ g(r) = Kdot[i]'(r)/ ( 2 * pi * r) } where \eqn{K_{i\bullet}^\prime}{Kdot[i]'(r)} is the derivative of the type-\code{i}-to-any-type \eqn{K} function \eqn{K_{i\bullet}(r)}{Kdot[i](r)}. of the point process. See \code{\link{Kdot}} for information about \eqn{K_{i\bullet}(r)}{Kdot[i](r)}. The command \code{pcfdot} computes a kernel estimate of the multitype pair correlation function from points of type \eqn{i} to points of any type. It uses \code{\link{pcf.ppp}} to compute kernel estimates of the pair correlation functions for several unmarked point patterns, and uses the bilinear properties of second moments to obtain the multitype pair correlation. See \code{\link{pcf.ppp}} for a list of arguments that control the kernel estimation. The companion function \code{\link{pcfcross}} computes the corresponding analogue of \code{\link{Kcross}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}}{gdot[i]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i\bullet}(r) = 1}{gdot[i](r) = 1} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i\bullet}}{gdot[i]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. Multitype pair correlation \code{\link{pcfcross}}. Pair correlation \code{\link{pcf}},\code{\link{pcf.ppp}}. \code{\link{Kdot}} } \examples{ data(amacrine) p <- pcfdot(amacrine, "on") p <- pcfdot(amacrine, "on", stoyan=0.1) plot(p) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/quadratresample.Rd0000755000176000001440000000477612237642733016423 0ustar ripleyusers\name{quadratresample} \alias{quadratresample} \title{Resample a Point Pattern by Resampling Quadrats} \description{ Given a point pattern dataset, create a resampled point pattern by dividing the window into rectangular quadrats and randomly resampling the list of quadrats. } \usage{ quadratresample(X, nx, ny=nx, ..., replace = FALSE, nsamples = 1, verbose = (nsamples > 1)) } \arguments{ \item{X}{ A point pattern dataset (object of class \code{"ppp"}). } \item{nx,ny}{ Numbers of quadrats in the \eqn{x} and \eqn{y} directions. } \item{\dots}{Ignored.} \item{replace}{ Logical value. Specifies whether quadrats should be sampled with or without replacement. 
} \item{nsamples}{Number of randomised point patterns to be generated.} \item{verbose}{Logical value indicating whether to print progress reports.} } \details{ This command implements a very simple bootstrap resampling procedure for spatial point patterns \code{X}. The dataset \code{X} must be a point pattern (object of class \code{"ppp"}) and its observation window must be a rectangle. The window is first divided into \code{N = nx * ny} rectangular tiles (quadrats) of equal size and shape. To generate one resampled point pattern, a random sample of \code{N} quadrats is selected from the list of \code{N} quadrats, with replacement (if \code{replace=TRUE}) or without replacement (if \code{replace=FALSE}). The \eqn{i}th quadrat in the original dataset is then replaced by the \eqn{i}th sampled quadrat, after the latter is shifted so that it occupies the correct spatial position. The quadrats are then reconstituted into a point pattern inside the same window as \code{X}. If \code{replace=FALSE}, this procedure effectively involves a random permutation of the quadrats. The resulting resampled point pattern has the same number of points as \code{X}. If \code{replace=TRUE}, the number of points in the resampled point pattern is random. } \value{ A point pattern (if \code{nsamples = 1}) or a list of point patterns (if \code{nsamples > 1}). } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{quadrats}}, \code{\link{quadratcount}}. See \code{\link{varblock}} to estimate the variance of a summary statistic by block resampling. 
} \examples{ data(bei) quadratresample(bei, 6, 3) } \keyword{spatial} \keyword{datagen} spatstat/man/Poisson.Rd0000755000176000001440000000375412237642731014664 0ustar ripleyusers\name{Poisson} \alias{Poisson} \title{Poisson Point Process Model} \description{ Creates an instance of the Poisson point process model which can then be fitted to point pattern data. } \usage{ Poisson() } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Poisson point process (namely, there are no interactions). } \details{ The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument \code{interaction} of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Poisson process is provided by the value of the function \code{Poisson}. This works for all types of Poisson processes including multitype and nonstationary Poisson processes. } \seealso{ \code{\link{ppm}}, \code{\link{Strauss}} } \examples{ data(nztrees) ppm(nztrees, ~1, Poisson()) # fit the stationary Poisson process to 'nztrees' # no edge correction needed data(longleaf) \testonly{ longleaf <- longleaf[seq(1, longleaf$n, by=50)] } longadult <- longleaf[longleaf$marks >= 30, ] longadult <- unmark(longadult) ppm(longadult, ~ x, Poisson()) # fit the nonstationary Poisson process # with intensity lambda(x,y) = exp( a + bx) data(lansing) # trees marked by species \testonly{ lansing <- lansing[seq(1,lansing$n, by=30), ] } ppm(lansing, ~ marks, Poisson()) # fit stationary marked Poisson process # with different intensity for each species \dontrun{ ppm(lansing, ~ marks * polynom(x,y,3), Poisson()) } # fit nonstationary marked Poisson process # with different log-cubic trend for each species \testonly{ # equivalent functionality - smaller dataset ppm(amacrine, ~ marks * polynom(x,y,2), Poisson()) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} 
\url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/distcdf.Rd0000644000176000001440000000620712237642732014644 0ustar ripleyusers\name{distcdf} \alias{distcdf} \title{Distribution Function of Interpoint Distance } \description{ Computes the cumulative distribution function of the distance between two independent random points in a given window or windows. } \usage{ distcdf(W, V=W, \dots, dW=1, dV=dW, nr=1024) } \arguments{ \item{W}{ A window (object of class \code{"owin"}) containing the first random point. } \item{V}{ Optional. Another window containing the second random point. Defaults to \code{W}. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution for the calculation. } \item{dV, dW}{ Optional. Probability densities (not necessarily normalised) for the first and second random points respectively. Data in any format acceptable to \code{\link{as.im}}, for example, a \code{function(x,y)} or a pixel image or a numeric value. The default corresponds to a uniform distribution over the window. } \item{nr}{ Integer. The number of values of interpoint distance \eqn{r} for which the CDF will be computed. Should be a large value! } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \details{ This command computes the Cumulative Distribution Function \eqn{ CDF(r) = Prob(T \le r) }{ CDF(r) = Prob(T <= r) } of the Euclidean distance \eqn{T = \|X_1 - X_2\|}{T = |X1-X2|} between two independent random points \eqn{X_1}{X1} and \eqn{X_2}{X2}. In the simplest case, the command \code{distcdf(W)}, the random points are assumed to be uniformly distributed in the same window \code{W}. Alternatively the two random points may be uniformly distributed in two different windows \code{W} and \code{V}. 
In the most general case the first point \eqn{X_1}{X1} is random in the window \code{W} with a probability density proportional to \code{dW}, and the second point \eqn{X_2}{X2} is random in a different window \code{V} with probability density proportional to \code{dV}. The calculation is performed by numerical integration of the set covariance function \code{\link{setcov}} for uniformly distributed points, and by computing the covariance function \code{\link{imcov}} in the general case. The accuracy of the result depends on the pixel resolution used to represent the windows: this is controlled by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. For example use \code{eps=0.1} to specify pixels of size 0.1 units. The arguments \code{W} or \code{V} may also be point patterns (objects of class \code{"ppp"}). The result is the cumulative distribution function of the distance from a randomly selected point in the point pattern, to a randomly selected point in the other point pattern or window. } \seealso{ \code{\link{setcov}}, \code{\link{as.mask}}. } \examples{ # The unit disc B <- disc() plot(distcdf(B)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/plot.splitppp.Rd0000755000176000001440000000677212237642733016067 0ustar ripleyusers\name{plot.splitppp} \alias{plot.splitppp} \title{Plot a List of Point Patterns} \description{ Plots a list of point patterns. } \usage{ \method{plot}{splitppp}(x, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), panel.begin=NULL, panel.end=NULL, panel.args=NULL, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2) } \arguments{ \item{x}{ A named list of point patterns, typically obtained from \code{\link{split.ppp}}. 
} \item{\dots}{ Arguments passed to \code{\link{plot.ppp}} which control the appearance of each plot panel. } \item{main}{ Overall heading for the plot. } \item{arrange}{ Logical flag indicating whether to plot the point patterns side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{main.panel}{ Optional. A character string, or a vector of character strings, giving the headings for each of the point patterns. } \item{mar.panel}{ Optional value of the graphics parameter \code{mar} controlling the size of the margins outside each plot panel. See the help file for \code{\link{par}}. } \item{panel.begin,panel.end}{ Optional. Functions that will be executed before and after each panel is plotted. See Details. } \item{panel.args}{ Internal use only. } \item{plotcommand}{ Optional. Character string containing the name of the command that should be executed to plot each panel. } \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{ Optional. Functions (with no arguments) that will be executed to generate additional plots at the margins (left, right, top and/or bottom, respectively) of the array of plots. } \item{adorn.size}{ Relative width (as a fraction of the other panels' widths) of the margin plots. } } \value{ Null. } \details{ This is the \code{plot} method for the class \code{"splitppp"}. It is typically used to plot the result of the function \code{\link{split.ppp}} but it may also be used to plot any list of point patterns created by the user. The argument \code{x} should be a named list of point patterns (objects of class \code{"ppp"}, see \code{\link{ppp.object}}). Each of these point patterns will be plotted in turn using \code{\link{plot.ppp}}. 
The arguments \code{panel.begin} and \code{panel.end} may be functions that will be executed before and after each panel is plotted. They will be called as \code{panel.begin(i, y, main=main.panel[i])} and \code{panel.end(i, y, add=TRUE)}. Alternatively, \code{panel.begin} and \code{panel.end} may be objects of some class that can be plotted with the generic \code{plot} command. They will be plotted before and after each panel is plotted. } \seealso{ \code{\link{split.ppp}}, \code{\link{plot.ppp}}, \code{\link{ppp.object}} } \examples{ # Multitype point pattern data(amacrine) plot(split(amacrine)) plot(split(amacrine), main="", panel.begin=function(i, y, ...) { plot(density(y), ribbon=FALSE, ...) }) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/anemones.Rd0000755000176000001440000000467512237642732015043 0ustar ripleyusers\name{anemones} \alias{anemones} \docType{data} \title{ Beadlet Anemones Data } \description{ These data give the spatial locations and diameters of sea anemones (beadlet anemone \emph{Actinia equina}) in a sample plot on the north face of a boulder, well above low tide level, at Quiberon (Bretagne, France) in May 1976. The data were originally described and discussed by Kooijman (1979a). Kooijman (1979b) shows a hand-drawn plot of the original data. The data are discussed by Upton and Fingleton (1985) as Example 1.8 on pages 64--67. The \code{anemones} dataset is taken directly from Table 1.11 of Upton and Fingleton (1985). The coordinates and diameters are integer multiples of an idiosyncratic unit of length. The boundary is a rectangle 280 by 180 units. } \section{Units}{ There is some confusion about the correct physical scale for these data. According to Upton and Fingleton (1985), one unit in the dataset is approximately 0.475 cm. 
According to Kooijman (1979a, 1979b) and also quoted by Upton and Fingleton (1985), the physical size of the sample plot was 14.5 by 9.75 cm. However if the data are plotted at this scale, they are too small for a rectangle of this size, and the appearance of the plot does not match the original hand-drawn plot in Kooijman (1979b). To avoid confusion, we have not assigned a unit scale to this dataset. } \format{ \code{anemones} is an object of class \code{"ppp"} representing the point pattern of anemone locations. It is a marked point pattern with numeric marks representing anemone diameter. See \code{\link{ppp.object}} for details of the format. } \usage{data(anemones)} \examples{ data(anemones) # plot diameters on same scale as x, y plot(anemones, markscale=0.5) } \source{ Table 1.11 on pages 62--63 of Upton and Fingleton (1985), who acknowledge Kooijman (1979a) as the source. } \references{ Kooijman, S.A.L.M. (1979a) The description of point patterns. In \emph{Spatial and temporal analysis in ecology} (ed. R.M. Cormack and J.K. Ord), International Cooperative Publishing House, Fairland, Maryland, USA. Pages 305--332. Kooijman, S.A.L.M. (1979b) Inference about dispersal patterns. \emph{Acta Biotheoretica} \bold{28}, 149--189. Upton, G.J.G. and Fingleton, B. (1985) \emph{Spatial data analysis by example}. Volume 1: Point pattern and quantitative data. John Wiley and Sons, Chichester. } \keyword{datasets} \keyword{spatial} spatstat/man/selfcrossing.psp.Rd0000755000176000001440000000217112237642734016527 0ustar ripleyusers\name{selfcrossing.psp} \alias{selfcrossing.psp} \title{Crossing Points in a Line Segment Pattern} \description{ Finds any crossing points between the line segments in a line segment pattern. } \usage{ selfcrossing.psp(A) } \arguments{ \item{A}{ Line segment pattern (object of class \code{"psp"}). } } \value{ Point pattern (object of class \code{"ppp"}). 
} \details{ This function finds any crossing points between different line segments in the line segment pattern \code{A}. A crossing point occurs whenever one of the line segments in \code{A} intersects another line segment in \code{A}, at a nonzero angle of intersection. } \seealso{ \code{\link{crossing.psp}}, \code{\link{psp.object}}, \code{\link{ppp.object}}. } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(a, col="green", main="selfcrossing.psp") P <- selfcrossing.psp(a) plot(P, add=TRUE, col="red") } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/plot.fv.Rd0000755000176000001440000002011312241443111014570 0ustar ripleyusers\name{plot.fv} \alias{plot.fv} \title{Plot Function Values} \description{ Plot method for the class \code{"fv"}. } \usage{ \method{plot}{fv}(x, fmla, \dots, subset=NULL, lty=NULL, col=NULL, lwd=NULL, xlim=NULL, ylim=NULL, xlab=NULL, ylab=NULL, ylim.covers=NULL, legend=!add, legendpos="topleft", legendavoid=missing(legendpos), legendmath=TRUE, legendargs=list(), shade=NULL, shadecol="grey", add=FALSE, log="", limitsonly=FALSE) } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{fmla}{ an R language formula determining which variables or expressions are plotted. Either a formula object, or a string that can be parsed as a formula. See Details. } \item{subset}{ (optional) subset of rows of the data frame that will be plotted. } \item{lty}{ (optional) numeric vector of values of the graphical parameter \code{lty} controlling the line style of each plot. } \item{col}{ (optional) numeric vector of values of the graphical parameter \code{col} controlling the colour of each plot. 
} \item{lwd}{ (optional) numeric vector of values of the graphical parameter \code{lwd} controlling the line width of each plot. } \item{xlim}{ (optional) range of x axis } \item{ylim}{ (optional) range of y axis } \item{xlab}{ (optional) label for x axis } \item{ylab}{ (optional) label for y axis } \item{\dots}{ Extra arguments passed to \code{plot.default}. } \item{ylim.covers}{ Optional vector of \eqn{y} values that must be included in the \eqn{y} axis. For example \code{ylim.covers=0} will ensure that the \eqn{y} axis includes the origin. } \item{legend}{ Logical flag or \code{NULL}. If \code{legend=TRUE}, the algorithm plots a legend in the top left corner of the plot, explaining the meaning of the different line types and colours. } \item{legendpos}{ The position of the legend. Either a character string keyword (see \code{\link[graphics]{legend}} for keyword options) or a pair of coordinates in the format \code{list(x,y)}. Alternatively if \code{legendpos="float"}, a location will be selected inside the plot region, avoiding the graphics. } \item{legendavoid}{ Whether to avoid collisions between the legend and the graphics. Logical value. If \code{TRUE}, the code will check for collisions between the legend box and the graphics, and will override \code{legendpos} if a collision occurs. If \code{FALSE}, the value of \code{legendpos} is always respected. } \item{legendmath}{ Logical. If \code{TRUE}, the legend will display the mathematical notation for each curve. If \code{FALSE}, the legend text is the identifier (column name) for each curve. } \item{legendargs}{ Named list containing additional arguments to be passed to \code{\link{legend}} controlling the appearance of the legend. } \item{shade}{ An index that identifies two columns of \code{x}. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. Often used for displaying simulation envelopes, by setting \code{shade=c("hi", "lo")}. 
} \item{shadecol}{ The colour to be used in the \code{shade} plot. A character string or an integer specifying a colour. } \item{add}{ Logical. Whether the plot should be added to an existing plot } \item{log}{ A character string which contains \code{"x"} if the x axis is to be logarithmic, \code{"y"} if the y axis is to be logarithmic and \code{"xy"} or \code{"yx"} if both axes are to be logarithmic. } \item{limitsonly}{ Logical. If \code{FALSE}, plotting is performed normally. If \code{TRUE}, no plotting is performed at all; just the \eqn{x} and \eqn{y} limits of the plot are computed and returned. } } \value{ Either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"fv"}. The use of the argument \code{fmla} is like \code{plot.formula}, but offers some extra functionality. The left and right hand sides of \code{fmla} are evaluated, and the results are plotted against each other (the left side on the \eqn{y} axis against the right side on the \eqn{x} axis). The left and right hand sides of \code{fmla} may be the names of columns of the data frame \code{x}, or expressions involving these names. If a variable in \code{fmla} is not the name of a column of \code{x}, the algorithm will search for an object of this name in the environment where \code{plot.fv} was called, and then in the enclosing environment, and so on. Multiple curves may be specified by a single formula of the form \code{cbind(y1,y2,\dots,yn) ~ x}, where \code{x,y1,y2,\dots,yn} are expressions involving the variables in the data frame. Each of the variables \code{y1,y2,\dots,yn} in turn will be plotted against \code{x}. See the examples. 
Convenient abbreviations which can be used in the formula are \itemize{ \item the symbol \code{.} which represents all the columns in the data frame that will be plotted by default; \item the symbol \code{.x} which represents the function argument; \item the symbol \code{.y} which represents the recommended value of the function. } For further information, see \code{\link{fvnames}}. The value returned by this plot function indicates the meaning of the line types and colours in the plot. It can be used to make a suitable legend for the plot if you want to do this by hand. See the examples. The argument \code{shade} can be used to display critical bands or confidence intervals. If it is not \code{NULL}, then it should be a subset index for the columns of \code{x}, that identifies exactly 2 columns. When the corresponding curves are plotted, the region between the curves will be shaded in light grey. See the Examples. The default values of \code{lty}, \code{col} and \code{lwd} can be changed using \code{\link{spatstat.options}("plot.fv")}. Use \code{type = "n"} to create the plot region and draw the axes without plotting any data. Use \code{limitsonly=TRUE} to suppress all plotting and just compute the \eqn{x} and \eqn{y} limits. This can be used to calculate common \eqn{x} and \eqn{y} scales for several plots. } \examples{ K <- Kest(cells) # K is an object of class "fv" plot(K, iso ~ r) # plots iso against r plot(K, sqrt(iso/pi) ~ r) # plots sqrt(iso/pi) against r plot(K, cbind(iso,theo) ~ r) # plots iso against r AND theo against r plot(K, . ~ r) # plots all available estimates of K against r plot(K, sqrt(./pi) ~ r) # plots all estimates of L-function # L(r) = sqrt(K(r)/pi) plot(K, cbind(iso,theo) ~ r, col=c(2,3)) # plots iso against r in colour 2 # and theo against r in colour 3 plot(K, iso ~ r, subset=quote(r < 0.2)) # plots iso against r for r < 0.2 # Can't remember the names of the columns? No problem.. 
plot(K, sqrt(./pi) ~ .x) # making a legend by hand v <- plot(K, . ~ r, legend=FALSE) legend("topleft", legend=v$meaning, lty=v$lty, col=v$col) # significance bands KE <- envelope(cells, Kest, nsim=19) plot(KE, shade=c("hi", "lo")) # how to display two functions on a common scale Kr <- Kest(redwood) a <- plot(K, limitsonly=TRUE) b <- plot(Kr, limitsonly=TRUE) xlim <- range(a$xlim, b$xlim) ylim <- range(a$ylim, b$ylim) opa <- par(mfrow=c(1,2)) plot(K, xlim=xlim, ylim=ylim) plot(Kr, xlim=xlim, ylim=ylim) par(opa) } \seealso{ \code{\link{fv.object}}, \code{\link{Kest}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/plot.influence.ppm.Rd0000755000176000001440000000235112237642733016744 0ustar ripleyusers\name{plot.influence.ppm} \alias{plot.influence.ppm} \title{ Plot Influence Measure } \description{ Plots an influence measure that has been computed by \code{\link{influence.ppm}}. } \usage{ \method{plot}{influence.ppm}(x, ...) } \arguments{ \item{x}{ Influence measure (object of class \code{"influence.ppm"}) computed by \code{\link{influence.ppm}}. } \item{\dots}{ Arguments passed to \code{\link[spatstat]{plot.ppp}} to control the plotting. } } \details{ This is the plot method for objects of class \code{"influence.ppm"}. These objects are computed by the command \code{\link{influence.ppm}}. The display shows circles centred at the data points with radii proportional to the influence values. } \value{ None. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) plot(influence(fit)) } \keyword{spatial} \keyword{models} spatstat/man/rSSI.Rd0000755000176000001440000001017412237642734014047 0ustar ripleyusers\name{rSSI} \alias{rSSI} \title{Simulate Simple Sequential Inhibition} \description{ Generate a random point pattern, a realisation of the Simple Sequential Inhibition (SSI) process. } \usage{ rSSI(r, n=Inf, win = square(1), giveup = 1000, x.init=NULL) } \arguments{ \item{r}{ Inhibition distance. } \item{n}{ Maximum number of points allowed. If \code{n} is finite, stop when the \emph{total} number of points in the point pattern reaches \code{n}. If \code{n} is infinite (the default), stop only when it is apparently impossible to add any more points. See \bold{Details}. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. The default window is the unit square, unless \code{x.init} is specified, when the default window is the window of \code{x.init}. } \item{giveup}{ Number of rejected proposals after which the algorithm should terminate. } \item{x.init}{ Optional. Initial configuration of points. A point pattern (object of class \code{"ppp"}). The pattern returned by \code{rSSI} consists of this pattern together with the points added via simple sequential inhibition. See \bold{Details}. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). } \details{ This algorithm generates a realisation of the Simple Sequential Inhibition point process inside the window \code{win}. Starting with an empty window (or with the point pattern \code{x.init} if specified), the algorithm adds points one-by-one. Each new point is generated uniformly in the window and independently of preceding points. 
If the new point lies closer than \code{r} units from an existing point, then it is rejected and another random point is generated. The algorithm terminates when either \describe{ \item{(a)}{ the desired number \code{n} of points is reached, or } \item{(b)}{ the current point configuration has not changed for \code{giveup} iterations, suggesting that it is no longer possible to add new points. } } If \code{n} is infinite (the default) then the algorithm terminates only when (b) occurs. The result is sometimes called a \emph{Random Sequential Packing}. Note that argument \code{n} specifies the maximum permitted \bold{total} number of points in the pattern returned by \code{rSSI()}. If \code{x.init} is not \code{NULL} then the number of points that are \emph{added} is at most \code{n - npoints(x.init)} if \code{n} is finite. Thus if \code{x.init} is not \code{NULL} then argument \code{n} must be at least as large as \code{npoints(x.init)}, otherwise an error is given. If \code{n==npoints(x.init)} then a warning is given and the call to \code{rSSI()} has no real effect; \code{x.init} is returned. There is no requirement that the points of \code{x.init} be at a distance at least \code{r} from each other. All of the \emph{added} points will be at a distance at least \code{r} from each other and from any point of \code{x.init}. The points will be generated inside the window \code{win} and the result will be a point pattern in the same window. The default window is the unit square, \code{win = square(1)}, unless \code{x.init} is specified, when the default is \code{win=as.owin(x.init)}, the window of \code{x.init}. If both \code{win} and \code{x.init} are specified, and if the two windows are different, then a warning will be issued. Any points of \code{x.init} lying outside \code{win} will be removed, with a warning. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMaternI}}, \code{\link{rMaternII}}. 
} \examples{ Vinf <- rSSI(0.05) V100 <- rSSI(0.05, 100) X <- runifpoint(100) Y <- rSSI(0.03,142,x.init=X) # Y consists of X together with # 42 added points. \dontrun{ plot(Y) plot(X,add=TRUE,chars=20,cols="red") } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/pcf.fasp.Rd0000755000176000001440000001127112237642733014725 0ustar ripleyusers\name{pcf.fasp} \alias{pcf.fasp} \title{Pair Correlation Function obtained from array of K functions} \description{ Estimates the (bivariate) pair correlation functions of a point pattern, given an array of (bivariate) K functions. } \usage{ \method{pcf}{fasp}(X, \dots, method="c") } \arguments{ \item{X}{ An array of multitype \eqn{K} functions (object of class \code{"fasp"}). } \item{\dots}{ Arguments controlling the smoothing spline function \code{smooth.spline}. } \item{method}{ Letter \code{"a"}, \code{"b"}, \code{"c"} or \code{"d"} indicating the method for deriving the pair correlation function from the \code{K} function. } } \value{ A function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. This can be thought of as a matrix \code{Y} each of whose entries \code{Y[i,j]} is a function value table (class \code{"fv"}) representing the pair correlation function between points of type \code{i} and points of type \code{j}. } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. 
Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} from an array of estimates of \eqn{K(r)} or its variants, using smoothing splines to approximate the derivatives. It is a method for the generic function \code{\link{pcf}}. The argument \code{X} should be a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) containing several estimates of \eqn{K} functions. This should have been obtained from \code{\link{alltypes}} with the argument \code{fun="K"}. The smoothing spline operations are performed by \code{\link{smooth.spline}} and \code{\link{predict.smooth.spline}} from the \pkg{stats} package. Four numerical methods are available: \itemize{ \item \bold{"a"} apply smoothing to \eqn{K(r)}, estimate its derivative, and plug in to the formula above; \item \bold{"b"} apply smoothing to \eqn{Y(r) = \frac{K(r)}{2 \pi r}}{Y(r) = K(r)/(2 * pi * r)} constraining \eqn{Y(0) = 0}, estimate the derivative of \eqn{Y}, and solve; \item \bold{"c"} apply smoothing to \eqn{Z(r) = \frac{K(r)}{\pi r^2}}{Z(r) = K(r)/(pi * r^2)} constraining \eqn{Z(0)=1}, estimate its derivative, and solve. \item \bold{"d"} apply smoothing to \eqn{V(r) = \sqrt{K(r)}}{V(r) = sqrt(K(r))}, estimate its derivative, and solve. } Method \code{"c"} seems to be the best at suppressing variability for small values of \eqn{r}. However it effectively constrains \eqn{g(0) = 1}. If the point pattern seems to have inhibition at small distances, you may wish to experiment with method \code{"b"} which effectively constrains \eqn{g(0)=0}. 
Method \code{"a"} seems comparatively unreliable. Useful arguments to control the splines include the smoothing tradeoff parameter \code{spar} and the degrees of freedom \code{df}. See \code{\link{smooth.spline}} for details. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}}, \code{\link{smooth.spline}}, \code{\link{predict.smooth.spline}} } \examples{ # multitype point pattern KK <- alltypes(amacrine, "K") p <- pcf.fasp(KK, spar=0.5, method="b") plot(p) # strong inhibition between points of the same type } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/cut.ppp.Rd0000755000176000001440000001040012237642732014606 0ustar ripleyusers\name{cut.ppp} \alias{cut.ppp} \title{Classify Points in a Point Pattern} \description{ Classifies the points in a point pattern into distinct types according to the numerical marks in the pattern, or according to another variable. } \usage{ \method{cut}{ppp}(x, z=marks(x), ...) } \arguments{ \item{x}{ A two-dimensional point pattern. An object of class \code{"ppp"}. } \item{z}{ Data determining the classification. A numeric vector, a factor, a pixel image, a tessellation, or a string giving the name of a column of marks. } \item{\dots}{ Arguments passed to \code{\link{cut.default}}. They determine the breakpoints for the mapping from numerical values in \code{z} to factor values in the output. See \code{\link{cut.default}}. 
} } \value{ A multitype point pattern, that is, a point pattern object (of class \code{"ppp"}) with a \code{marks} vector that is a factor. } \details{ This function has the effect of classifying each point in the point pattern \code{x} into one of several possible types. The classification is based on the dataset \code{z}, which may be either \itemize{ \item a factor (of length equal to the number of points in \code{x}) determining the classification of each point in \code{x}. Levels of the factor determine the classification. \item a numeric vector (of length equal to the number of points in \code{x}). The range of values of \code{z} will be divided into bands (the number of bands is determined by \code{\dots}) and \code{z} will be converted to a factor using \code{\link{cut.default}}. \item a pixel image (object of class \code{"im"}). The value of \code{z} at each point of \code{x} will be used as the classifying variable. \item a tessellation (object of class \code{"tess"}, see \code{\link{tess}}). Each point of \code{x} will be classified according to the tile of the tessellation into which it falls. \item a character string, giving the name of one of the columns of \code{marks(x)}, if this is a data frame. } The default is to take \code{z} to be the vector of marks in \code{x} (or the first column in the data frame of marks of \code{x}, if it is a data frame). If the marks are numeric, then the range of values of the numerical marks is divided into several intervals, and each interval is associated with a level of a factor. The result is a marked point pattern, with the same window and point locations as \code{x}, but with the numeric mark of each point discretised by replacing it by the factor level. This is a convenient way to transform a marked point pattern which has numeric marks into a multitype point pattern, for example to plot it or analyse it. See the examples. 
To select some points from a point pattern, use the subset operator \code{\link{[.ppp}} instead. } \seealso{ \code{\link{cut}}, \code{\link{ppp.object}}, \code{\link{tess}} } \examples{ # (1) cutting based on numeric marks of point pattern data(longleaf) # Longleaf Pines data # the marks are positive real numbers indicating tree diameters. \testonly{ # smaller dataset longleaf <- longleaf[seq(1, longleaf$n, by=80)] } \dontrun{ plot(longleaf) } # cut the range of tree diameters into three intervals long3 <- cut(longleaf, breaks=3) \dontrun{ plot(long3) } # adult trees defined to have diameter at least 30 cm long2 <- cut(longleaf, breaks=c(0,30,100), labels=c("Sapling", "Adult")) plot(long2) plot(long2, cols=c("green","blue")) # (2) cutting based on another numeric vector # Divide Swedish Pines data into 3 classes # according to nearest neighbour distance data(swedishpines) plot(cut(swedishpines, nndist(swedishpines), breaks=3)) # (3) cutting based on tessellation # Divide Swedish Pines study region into a 4 x 4 grid of rectangles # and classify points accordingly tes <- tess(xgrid=seq(0,96,length=5),ygrid=seq(0,100,length=5)) plot(cut(swedishpines, tes)) plot(tes, lty=2, add=TRUE) # (4) multivariate marks data(finpines) cut(finpines, "height", breaks=4) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/unnormdensity.Rd0000644000176000001440000000454212237642734016144 0ustar ripleyusers\name{unnormdensity} \alias{unnormdensity} \title{ Weighted kernel smoother } \description{ An unnormalised version of kernel density estimation where the weights are not required to sum to 1. The weights may be positive, negative or zero. } \usage{ unnormdensity(x, ..., weights = NULL) } \arguments{ \item{x}{ Numeric vector of data } \item{\dots}{ Arguments passed to \code{\link{density.default}}. Arguments must be \emph{named}. 
} \item{weights}{ Optional numeric vector of weights for the data. } } \details{ This is an alternative to the standard \R kernel density estimation function \code{\link{density.default}}. The standard \code{\link{density.default}} requires the \code{weights} to be nonnegative numbers that add up to 1, and returns a probability density (a function that integrates to 1). This function \code{unnormdensity} does not impose any requirement on the \code{weights} except that they be finite. Individual weights may be positive, negative or zero. The result is a function that does not necessarily integrate to 1 and may be negative. The result is the convolution of the kernel \eqn{k} with the weighted data, \deqn{ f(x) = \sum_i w_i k(x- x_i) }{ f(x) = sum of w[i] * k(x - x[i]) } where \eqn{x_i}{x[i]} are the data points and \eqn{w_i}{w[i]} are the weights. The algorithm first selects the kernel bandwidth by applying \code{\link{density.default}} to the data \code{x} with normalised, positive weight vector \code{w = abs(weights)/sum(abs(weights))} and extracting the selected bandwidth. Then the result is computed by applying \code{\link{density.default}} to \code{x} twice using the normalised positive and negative parts of the weights. Note that the arguments \code{\dots} must be passed by name, i.e. in the form (\code{name=value}). Arguments that do not match an argument of \code{\link{density.default}} will be ignored \emph{silently}. } \value{ Object of class \code{"density"} as described in \code{\link{density.default}}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{density.default}} } \examples{ d <- unnormdensity(1:3, weights=c(-1,0,1)) if(interactive()) plot(d) } \keyword{smooth} spatstat/man/pairwise.family.Rd0000755000176000001440000000313012237642733016323 0ustar ripleyusers\name{pairwise.family} \alias{pairwise.family} \title{Pairwise Interaction Process Family} \description{ An object describing the family of all pairwise interaction Gibbs point processes. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the pairwise interaction family of point process models. If you need to create a specific pairwise interaction model for use in modelling, use the function \code{\link{Pairwise}} or one of the existing functions listed below. Anyway, \code{pairwise.family} is an object of class \code{"isf"} containing a function \code{pairwise.family$eval} for evaluating the sufficient statistics of any pairwise interaction point process model taking an exponential family form. } \seealso{ Other families: \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. Pairwise interactions: \code{\link{Poisson}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Fiksel}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}}. Other interactions: \code{\link{AreaInter}}, \code{\link{Geyer}}, \code{\link{Saturated}}, \code{\link{Ord}}, \code{\link{OrdThresh}}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/markvario.Rd0000755000176000001440000000732612237642733015226 0ustar ripleyusers\name{markvario} \alias{markvario} \title{Mark Variogram} \description{ Estimate the mark variogram of a marked point pattern. } \usage{ markvario(X, correction = c("isotropic", "Ripley", "translate"), r = NULL, method = "density", ..., normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. It must have marks which are numeric. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} should be evaluated. There is a sensible default. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{normalise}{If \code{TRUE}, normalise the variogram by dividing it by the estimated mark variance. } } \details{ The mark variogram \eqn{\gamma(r)}{gamma(r)} of a marked point process \eqn{X} is a measure of the dependence between the marks of two points of the process a distance \eqn{r} apart. It is informally defined as \deqn{ \gamma(r) = E[\frac 1 2 (M_1 - M_2)^2] }{ gamma(r) = E[(1/2) * (M1 - M2)^2 ] } where \eqn{E[ ]} denotes expectation and \eqn{M_1,M_2}{M1,M2} are the marks attached to two points of the process a distance \eqn{r} apart. 
The mark variogram of a marked point process is analogous, but \bold{not equivalent}, to the variogram of a random field in geostatistics. See Waelder and Stoyan (1996). } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark variogram \eqn{\gamma(r)}{gamma(r)} has been estimated } \item{theo}{the theoretical value of \eqn{\gamma(r)}{gamma(r)} when the marks attached to different points are independent; equal to the sample variance of the marks } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{\gamma(r)}{gamma(r)} obtained by the edge corrections named. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Mase, S. (1996) The threshold method for estimating annual rainfall. \emph{Annals of the Institute of Statistical Mathematics} \bold{48} (1996) 201-213. Waelder, O. and Stoyan, D. (1996) On variograms in point process statistics. \emph{Biometrical Journal} \bold{38} (1996) 895-905. } \seealso{ Mark correlation function \code{\link{markcorr}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. 
} \examples{ # Longleaf Pine data # marks represent tree diameter data(longleaf) # Subset of this large pattern swcorner <- owin(c(0,100),c(0,100)) sub <- longleaf[ , swcorner] # mark correlation function mv <- markvario(sub) plot(mv) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/nncorr.Rd0000755000176000001440000001736512237642733014540 0ustar ripleyusers\name{nncorr} \alias{nncorr} \alias{nnmean} \alias{nnvario} \title{Nearest-Neighbour Correlation Indices of Marked Point Pattern} \description{ Computes nearest-neighbour correlation indices of a marked point pattern, including the nearest-neighbour mark product index (default case of \code{nncorr}), the nearest-neighbour mark index (\code{nnmean}), and the nearest-neighbour variogram index (\code{nnvario}). } \usage{ nncorr(X, f = function(m1, m2) { m1 * m2 }, \dots, use = "all.obs", method = c("pearson", "kendall", "spearman"), denominator=NULL) nnmean(X) nnvario(X) } \arguments{ \item{X}{ The observed point pattern. An object of class \code{"ppp"}. } \item{f}{ Function \eqn{f} used in the definition of the nearest neighbour correlation. There is a sensible default that depends on the type of marks of \code{X}. } \item{\dots}{ Extra arguments passed to \code{f}. } \item{use,method}{ Arguments passed to the standard correlation function \code{\link{cor}}. } \item{denominator}{ Internal use only. } } \details{ The nearest neighbour correlation index \eqn{\bar n_f}{nbar} of a marked point process \eqn{X} is a number measuring the dependence between the mark of a typical point and the mark of its nearest neighbour. The command \code{nncorr} computes the nearest neighbour correlation index based on any test function \code{f} provided by the user. The default behaviour of \code{nncorr} is to compute the nearest neighbour mark product index. 
The commands \code{nnmean} and \code{nnvario} are convenient abbreviations for other special choices of \code{f}. In the default case, \code{nncorr(X)} computes three different versions of the nearest-neighbour correlation index: the unnormalised, normalised, and classical correlations. \describe{ \item{unnormalised:}{ The \bold{unnormalised} nearest neighbour correlation (Stoyan and Stoyan, 1994, section 14.7) is defined as \deqn{\bar n_f = E[f(M, M^\ast)]}{nbar[f] = E[f(M, M*)]} where \eqn{E[]} denotes mean value, \eqn{M} is the mark attached to a typical point of the point process, and \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour (i.e. the nearest other point of the point process). Here \eqn{f} is any function \eqn{f(m_1,m_2)}{f(m1,m2)} with two arguments which are possible marks of the pattern, and which returns a nonnegative real value. Common choices of \eqn{f} are: for continuous real-valued marks, \deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2} for discrete marks (multitype point patterns), \deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)} and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)}, \deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2).} For example, in the second case, the unnormalised nearest neighbour correlation \eqn{\bar n_f}{nbar[f]} equals the proportion of points in the pattern which have the same mark as their nearest neighbour. Note that \eqn{\bar n_f}{nbar[f]} is not a ``correlation'' in the usual statistical sense. It can take values greater than 1. } \item{normalised:}{ We can define a \bold{normalised} nearest neighbour correlation by \deqn{\bar m_f = \frac{E[f(M,M^\ast)]}{E[f(M,M')]}}{mbar[f] = E[f(M,M*)]/E[f(M,M')]} where again \eqn{M} is the mark attached to a typical point, \eqn{M^\ast}{M*} is the mark attached to its nearest neighbour, and \eqn{M'} is an independent copy of \eqn{M} with the same distribution. 
This normalisation is also not a ``correlation'' in the usual statistical sense, but is normalised so that the value 1 suggests ``lack of correlation'': if the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{\bar m_f = 1}{mbar[f] = 1}. The interpretation of values larger or smaller than 1 depends on the choice of function \eqn{f}. } \item{classical:}{ Finally if the marks of \code{X} are real numbers, we can also compute the \bold{classical} correlation, that is, the correlation coefficient of the two random variables \eqn{M} and \eqn{M^\ast}{M*}. The classical correlation has a value between \eqn{-1} and \eqn{1}. Values close to \eqn{-1} or \eqn{1} indicate strong dependence between the marks. } } In the default case where \code{f} is not given, \code{nncorr(X)} computes \itemize{ \item If the marks of \code{X} are real numbers, the unnormalised and normalised versions of the nearest-neighbour product index \eqn{E[M \, M^\ast]}{E[M * M*]}, and the classical correlation between \eqn{M} and \eqn{M^\ast}{M*}. \item If the marks of \code{X} are factor valued, the unnormalised and normalised versions of the nearest-neighbour equality index \eqn{P[M = M^\ast]}{P[M = M*]}. } The wrapper functions \code{nnmean} and \code{nnvario} compute the correlation indices for two special choices of the function \eqn{f(m_1,m_2)}{f(m1,m2)}. \itemize{ \item \code{nnmean} computes the correlation indices for \eqn{f(m_1,m_2) = m_1}{f(m1,m2) = m1}. The unnormalised index is simply the mean value of the mark of the neighbour of a typical point, \eqn{E[M^\ast]}{E[M*]}, while the normalised index is \eqn{E[M^\ast]/E[M]}{E[M*]/E[M]}, the ratio of the mean mark of the neighbour of a typical point to the mean mark of a typical point. \item \code{nnvario} computes the correlation indices for \eqn{f(m_1,m_2) = (1/2) (m_1-m_2)^2}{f(m1,m2) = (1/2) * (m1-m2)^2}. 
} The argument \code{X} must be a point pattern (object of class \code{"ppp"}) and must be a marked point pattern. (The marks may be a data frame, containing several columns of mark variables; each column is treated separately.) If the argument \code{f} is given, it must be a function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative. The arguments \code{use} and \code{method} control the calculation of the classical correlation using \code{\link{cor}}, as explained in the help file for \code{\link{cor}}. Other arguments may be passed to \code{f} through the \code{...} argument. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated using the \sQuote{border method} edge correction. } \value{ Labelled vector of length 2 or 3 containing the unnormalised and normalised nearest neighbour correlations, and the classical correlation if appropriate. Alternatively a matrix with 2 or 3 rows, containing this information for each mark variable. } \examples{ data(finpines) nncorr(finpines) # heights of neighbouring trees are slightly negatively correlated data(amacrine) nncorr(amacrine) # neighbouring cells are usually of different type } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/is.ppp.Rd0000755000176000001440000000147712237642732014444 0ustar ripleyusers\name{is.ppp} \alias{is.ppp} \title{Test Whether An Object Is A Point Pattern} \description{ Checks whether its argument is a point pattern (object of class \code{"ppp"}). } \usage{ is.ppp(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the object \code{x} is a point pattern object of class \code{"ppp"}. See \code{\link{ppm.object}} for details of this class. The result is determined to be \code{TRUE} if \code{x} inherits from \code{"ppp"}, i.e. if \code{x} has \code{"ppp"} amongst its classes. } \value{ \code{TRUE} if \code{x} is a point pattern, otherwise \code{FALSE}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/Ord.Rd0000755000176000001440000000371712237642731013755 0ustar ripleyusers\name{Ord} \alias{Ord} \title{Generic Ord Interaction model} \description{ Creates an instance of an Ord-type interaction point process model which can then be fitted to point pattern data. } \usage{ Ord(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. 
Ord (1977) proposed fitting this model to forestry data when \eqn{g(a)} has a simple ``threshold'' form. That model is implemented in our function \code{\link{OrdThresh}}. The present function \code{Ord} implements the case of a completely general Ord potential \eqn{g(a)} specified as an S language function \code{pot}. This is experimental. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{OrdThresh}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/Jdot.Rd0000755000176000001440000001661612237642731014133 0ustar ripleyusers\name{Jdot} \alias{Jdot} \title{ Multitype J Function (i-to-any) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between the type \eqn{i} points and the points of any type. } \usage{ Jdot(X, i, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{i\bullet}(r)}{Ji.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). 
Defaults to the first level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{J}{the recommended estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)}, currently the Kaplan-Meier estimator. } \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{i\bullet}(r)}{Ji.(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{i\bullet}(r)}{Ji.(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{i\bullet}(r)}{1 - Gi.(r)} and \eqn{1 - F_{\bullet}(r)}{1 - F.(r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{i\bullet}(r)}{Ji.(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gdot}} and \code{\link{Fest}} for the point pattern. 
} \details{ This function \code{Jdot} and its companions \code{\link{Jcross}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) The ``type \eqn{i} to any type'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van Lieshout and Baddeley (1999). It is defined by \deqn{J_{i\bullet}(r) = \frac{1 - G_{i\bullet}(r)}{1 - F_{\bullet}(r)}}{Ji.(r) = (1 - Gi.(r))/(1-F.(r))} where \eqn{G_{i\bullet}(r)}{Gi.(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest other point of the pattern, and \eqn{F_{\bullet}(r)}{F.(r)} is the distribution function of the distance from a fixed point in space to the nearest point of the pattern. An estimate of \eqn{J_{i\bullet}(r)}{Ji.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the pattern is a marked Poisson point process, then \eqn{J_{i\bullet}(r) \equiv 1}{Ji.(r) = 1}. 
If the subprocess of type \eqn{i} points is independent of the subprocess of points of all types not equal to \eqn{i}, then \eqn{J_{i\bullet}(r)}{Ji.(r)} equals \eqn{J_{ii}(r)}{Jii(r)}, the ordinary \eqn{J} function (see \code{\link{Jest}} and Van Lieshout and Baddeley (1996)) of the points of type \eqn{i}. Hence deviations from zero of the empirical estimate of \eqn{J_{i\bullet} - J_{ii}}{Ji.-Jii} may suggest dependence between types. This algorithm estimates \eqn{J_{i\bullet}(r)}{Ji.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{i\bullet}(r)}{Ji.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. 
} \seealso{ \code{\link{Jcross}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees data(lansing) \testonly{ lansing <- lansing[seq(1,lansing$n, by=30), ] } Jh. <- Jdot(lansing, "hickory") plot(Jh.) # diagnostic plot for independence between hickories and other trees Jhh <- Jest(lansing[lansing$marks == "hickory", ]) plot(Jhh, add=TRUE, legendpos="bottom") \dontrun{ # synthetic example with two marks "a" and "b" pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) J <- Jdot(pp, "a") } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/midpoints.psp.Rd0000755000176000001440000000146112237642733016034 0ustar ripleyusers\name{midpoints.psp} \alias{midpoints.psp} \title{Midpoints of Line Segment Pattern} \description{ Computes the midpoints of each line segment in a line segment pattern. } \usage{ midpoints.psp(x) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } } \value{ Point pattern (object of class \code{"ppp"}). } \details{ The midpoint of each line segment is computed. } \seealso{ \code{\link{summary.psp}}, \code{\link{lengths.psp}}, \code{\link{angles.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- midpoints.psp(a) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/stieltjes.Rd0000755000176000001440000000400512237642734015231 0ustar ripleyusers\name{stieltjes} \alias{stieltjes} \title{Compute Integral of Function Against Cumulative Distribution} \description{ Computes the Stieltjes integral of a function \eqn{f} with respect to a function \eqn{M}. } \usage{ stieltjes(f, M, ...) } \arguments{ \item{f}{ The integrand. 
A function in the \R language. } \item{M}{ The cumulative function against which \code{f} will be integrated. An object of class \code{"fv"}. } \item{\dots}{ Additional arguments passed to \code{f}. } } \details{ This command computes the Stieltjes integral \deqn{I = \int f(x) dM(x)}{I = integral f(x) dM(x)} of a real-valued function \eqn{f(x)} with respect to a nondecreasing function \eqn{M(x)}. One common use of the Stieltjes integral is to find the mean value of a random variable from its cumulative distribution function \eqn{F(x)}. The mean value is the Stieltjes integral of \eqn{f(x)=x} with respect to \eqn{F(x)}. The argument \code{f} should be a \code{function} in the \R language. It should accept a numeric vector argument \code{x} and should return a numeric vector of the same length. The argument \code{M} should be a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). Such objects are returned by the commands \code{\link{Kest}}, \code{\link{Gest}}, etc. } \value{ A list containing the value of the Stieltjes integral computed using each of the versions of the function \code{M}. 
} \seealso{ \code{\link{fv.object}}, \code{\link{Gest}} } \examples{ data(redwood) # estimate cdf of nearest neighbour distance G <- Gest(redwood) # compute estimate of mean nearest neighbour distance stieltjes(function(x){x}, G) # estimated probability of a distance in the interval [0.1,0.2] stieltjes(function(x,a,b){ (x >= a) & (x <= b)}, G, a=0.1, b=0.2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/predict.ppm.Rd0000755000176000001440000003077612237642733015465 0ustar ripleyusers\name{predict.ppm} \alias{predict.ppm} \title{Prediction from a Fitted Point Process Model} \description{ Given a fitted point process model obtained by \code{\link{ppm}}, evaluate the spatial trend or the conditional intensity of the model at new locations. } \usage{ \method{predict}{ppm}(object, window=NULL, ngrid=NULL, locations=NULL, covariates=NULL, type="trend", X=data.ppm(object), correction, \dots, new.coef=NULL, check=TRUE, repair=TRUE) } \arguments{ \item{object}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"} (see \code{\link{ppm.object}}). } \item{window}{ Optional. A window (object of class \code{"owin"}) delimiting the locations where predictions should be computed. Defaults to the window of the original data used to fit the model \code{object}. } \item{ngrid}{ Optional. Dimensions of a rectangular grid of locations inside \code{window} where the predictions should be computed. An integer, or an integer vector of length 2, specifying the number of grid points in the \eqn{y} and \eqn{x} directions. (Incompatible with \code{locations}) } \item{locations}{ Optional. Data giving the \eqn{x,y} coordinates (and marks, if required) of locations at which predictions should be computed. 
Either a point pattern, or a data frame with columns named \code{x} and \code{y}, or a binary image mask. (Incompatible with \code{ngrid}) } \item{covariates}{ Values of external covariates required by the model. Either a data frame or a list of images. See Details. } \item{type}{ Character string. Indicates which property of the fitted model should be predicted. Options are \code{"trend"} for the spatial trend, \code{"cif"} or \code{"lambda"} for the conditional intensity, and \code{"se"} for the standard error of the fitted spatial trend. } \item{X}{ Optional. A point pattern (object of class \code{"ppp"}) to be taken as the data point pattern when calculating the conditional intensity. The default is to use the original data to which the model was fitted. } \item{correction}{ Name of the edge correction to be used in calculating the conditional intensity. Options include \code{"border"} and \code{"none"}. Other options may include \code{"periodic"}, \code{"isotropic"} and \code{"translate"} depending on the model. The default correction is the one that was used to fit \code{object}. } \item{\dots}{ Ignored. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } } \value{ \emph{If \code{locations} is given and is a data frame:} a vector of predicted values for the spatial locations (and marks, if required) given in \code{locations}. 
\emph{If \code{ngrid} is given, or if \code{locations} is given and is a binary image mask:} If \code{object} is an unmarked point process, the result is a pixel image object (of class \code{"im"}, see \code{\link{im.object}}) containing the predictions. If \code{object} is a multitype point process, the result is a list of pixel images, containing the predictions for each type at the same grid of locations. The ``predicted values'' are either values of the spatial trend (if \code{type="trend"}), values of the conditional intensity (if \code{type="cif"} or \code{type="lambda"}), or estimates of standard error for the fitted spatial trend (if \code{type="se"}). } \details{ This function computes the spatial trend and the conditional intensity of a fitted spatial point process model, and the standard error of the estimate of spatial trend. See Baddeley and Turner (2000) for explanation and examples. Given a point pattern dataset, we may fit a point process model to the data using the model-fitting algorithm \code{\link{ppm}}. This returns an object of class \code{"ppm"} representing the fitted point process model (see \code{\link{ppm.object}}). The parameter estimates in this fitted model can be read off simply by printing the \code{ppm} object. The spatial trend and conditional intensity of the fitted model are evaluated using this function \code{predict.ppm}. The default action is to create a rectangular grid of points in the observation window of the data point pattern, and evaluate the spatial trend at these locations. The argument \code{type} specifies the values that are computed: \describe{ \item{If \code{type="trend"}:}{ the ``spatial trend'' of the fitted model is evaluated at each required spatial location \eqn{u}. } \item{If \code{type="cif"}:}{ the conditional intensity \eqn{\lambda(u, X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location \eqn{u}, with respect to the data point pattern \eqn{X}. 
} \item{If \code{type="se"}:}{ the estimated (asymptotic) standard error of the fitted spatial trend is evaluated at each required spatial location \eqn{u}. This is available only for Poisson point process models. } } Note that the ``spatial trend'' is the same as the intensity function if the fitted model \code{object} is a Poisson point process. However, if the model is not a Poisson process, then the ``spatial trend'' is the (exponentiated) first order potential and not the intensity of the process. [For example if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}, while the intensity is a smaller value that is not easy to compute. ] The spatial locations where predictions are required, are determined by the (incompatible) arguments \code{ngrid} and \code{locations}. \itemize{ \item If the argument \code{ngrid} is present, then predictions are performed at a rectangular grid of locations in the window \code{window}. The result of prediction will be a pixel image or images. \item If \code{locations} is present, then predictions will be performed at the spatial locations given by this dataset. These may be an arbitrary list of spatial locations, or they may be a rectangular grid. The result of prediction will be either a numeric vector or a pixel image or images. \item If neither \code{ngrid} nor \code{locations} is given, then \code{ngrid} is assumed. The value of \code{ngrid} defaults to \code{\link{spatstat.options}("npixel")}, which is initialised to 128 when \pkg{spatstat} is loaded. } The argument \code{locations} may be a point pattern, a data frame or a list specifying arbitrary locations; or it may be a binary image mask (an object of class \code{"owin"} with type \code{"mask"}) specifying (a subset of) a rectangular grid of locations. 
If \code{locations} is a point pattern (object of class \code{"ppp"}), then prediction will be performed at the points of the point pattern. The result of prediction will be a vector of predicted values, one value for each point. If the model is a marked point process, then \code{locations} should be a marked point pattern, with marks of the same kind as the model; prediction will be performed at these marked points. The result of prediction will be a vector of predicted values, one value for each (marked) point. If \code{locations} is a data frame or list, then it must contain vectors \code{locations$x} and \code{locations$y} specifying the \eqn{x,y} coordinates of the prediction locations. Additionally, if the model is a marked point process, then \code{locations} must also contain a factor \code{locations$marks} specifying the marks of the prediction locations. These vectors must have equal length. The result of prediction will be a vector of predicted values, of the same length. If \code{locations} is a binary image mask, then prediction will be performed at each pixel in this binary image where the pixel value is \code{TRUE} (in other words, at each pixel that is inside the window). If the fitted model is an unmarked point process, then the result of prediction will be an image. If the fitted model is a marked point process, then prediction will be performed for each possible value of the mark at each such location, and the result of prediction will be a list of images, one for each mark value. The argument \code{covariates} gives the values of any spatial covariates at the prediction locations. If the trend formula in the fitted model involves spatial covariates (other than the Cartesian coordinates \code{x}, \code{y}) then \code{covariates} is required. The format and use of \code{covariates} are analogous to those of the argument of the same name in \code{\link{ppm}}. It is either a data frame or a list of images. 
If \code{covariates} is a list of images, then the names of the entries should correspond to the names of covariates in the model formula \code{trend}. Each entry in the list must be an image object (of class \code{"im"}, see \code{\link{im.object}}). The software will look up the pixel values of each image at the quadrature points. If \code{covariates} is a data frame, then the \code{i}th row of \code{covariates} is assumed to contain covariate data for the \code{i}th location. When \code{locations} is a data frame, this just means that each row of \code{covariates} contains the covariate data for the location specified in the corresponding row of \code{locations}. When \code{locations} is a binary image mask, the row \code{covariates[i,]} must correspond to the location \code{x[i],y[i]} where \code{x = as.vector(raster.x(locations))} and \code{y = as.vector(raster.y(locations))}. Note that if you only want to use prediction in order to generate a plot of the predicted values, it may be easier to use \code{\link{plot.ppm}} which calls this function and plots the results. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{plot.ppm}}, \code{\link{print.ppm}}, \code{\link{fitted.ppm}}, \code{\link{spatstat.options}} } \section{Warnings}{ The current implementation invokes \code{\link{predict.glm}} so that \bold{prediction is wrong} if the trend formula in \code{object} involves terms in \code{ns()}, \code{bs()} or \code{poly()}. This is a weakness of \code{\link{predict.glm}} itself! Error messages may be very opaque, as they tend to come from deep in the workings of \code{\link{predict.glm}}. 
If you are passing the \code{covariates} argument and the function crashes, it is advisable to start by checking that all the conditions listed above are satisfied. } \examples{ data(cells) \testonly{op <- spatstat.options(npixel=32)} m <- ppm(cells, ~ polynom(x,y,2), Strauss(0.05)) trend <- predict(m, type="trend") \dontrun{ image(trend) points(cells) } cif <- predict(m, type="cif") \dontrun{ persp(cif) } data(japanesepines) mj <- ppm(japanesepines, ~harmonic(x,y,2)) se <- predict(mj, type="se") # prediction at arbitrary locations predict(mj, locations=data.frame(x=0.3, y=0.4)) X <- runifpoint(5, as.owin(japanesepines)) predict(mj, locations=X) predict(mj, locations=X, type="se") # multitype data(amacrine) ma <- ppm(amacrine, ~marks, MultiStrauss(c("off","on"),matrix(0.06, 2, 2))) Z <- predict(ma) Z <- predict(ma, type="cif") predict(ma, locations=data.frame(x=0.8, y=0.5,marks="on"), type="cif") \testonly{spatstat.options(op)} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/envelope.Rd0000755000176000001440000006362512237642732015053 0ustar ripleyusers\name{envelope} \alias{envelope} \alias{envelope.ppp} \alias{envelope.ppm} \alias{envelope.kppm} \title{Simulation Envelopes of Summary Function} \description{ Computes simulation envelopes of a summary function. } \usage{ envelope(Y, fun, ...) 
\method{envelope}{ppp}(Y, fun=Kest, nsim=99, nrank=1, \dots, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{ppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, simulate=NULL, verbose=TRUE, clipdata=TRUE, start=NULL, control=update(default.rmhcontrol(Y), nrep=nrep), nrep=1e5, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{kppm}(Y, fun=Kest, nsim=99, nrank=1, \dots, simulate=NULL, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ Object containing point pattern data. A point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"}). } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. 
Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{clipdata}{ Logical flag indicating whether the data point pattern should be clipped to the same window as the simulated patterns, before the summary function for the data is computed. This should usually be \code{TRUE} to ensure that the data and simulations are properly comparable. } \item{start,control}{ Optional. These specify the arguments \code{start} and \code{control} of \code{rmh}, giving complete control over the simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. } \item{nrep}{ Number of iterations in the Metropolis-Hastings simulation algorithm. Applicable only when \code{Y} is a fitted model of class \code{"ppm"}. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. 
} \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields an error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be printed and plotted directly. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the summary function \code{fun} has been estimated } \item{obs}{ values of the summary function for the data point pattern } \item{lo}{ lower envelope of simulations } \item{hi}{ upper envelope of simulations } and \emph{either} \item{theo}{ theoretical value of the summary function under CSR (Complete Spatial Randomness, a uniform Poisson point process) if the simulations were generated according to CSR } \item{mmean}{ estimated theoretical value of the summary function, computed by averaging simulated values, if the simulations were not generated according to CSR. 
} Additionally, if \code{savepatterns=TRUE}, the return value has an attribute \code{"simpatterns"} which is a list containing the \code{nsim} simulated patterns. If \code{savefuns=TRUE}, the return value has an attribute \code{"simfuns"} which is an object of class \code{"fv"} containing the summary functions computed for each of the \code{nsim} simulated patterns. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. For the most basic use, if you have a point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, Kest,nsim=39))} to see the \eqn{K} function for \code{X} plotted together with the envelopes of the \eqn{K} function for 39 simulations of CSR. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"} described here. There is also a method for the class \code{"pp3"} which is described separately as \code{\link{envelope.pp3}}. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{Y} is a point pattern (an object of class \code{"ppp"}) and \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. \code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. (If \code{Y} is a multitype point pattern, then the simulated patterns are also given independent random marks; the probability distribution of the random marks is determined by the relative frequencies of marks in \code{Y}.) 
\item If \code{Y} is a fitted point process model (an object of class \code{"ppm"} or \code{"kppm"}) and \code{simulate=NULL}, then this routine generates \code{nsim} simulated realisations of that model. \item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. It may be either \itemize{ \item an expression in the R language, typically containing a call to a random generator. This expression will be evaluated \code{nsim} times to yield \code{nsim} point patterns. For example if \code{simulate=expression(runifpoint(100))} then each simulated pattern consists of exactly 100 independent uniform random points. \item a list of point patterns. The entries in this list will be taken as the simulated patterns. \item an object of class \code{"envelope"}. This should have been produced by calling \code{envelope} with the argument \code{savepatterns=TRUE}. The simulated point patterns that were saved in this object will be extracted and used as the simulated patterns for the new envelope computation. This makes it possible to plot envelopes for two different summary functions based on exactly the same set of simulated point patterns. } } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{Kest}, \code{Gest}, \code{Fest}, \code{Jest}, \code{pcf}, \code{Kcross}, \code{Kdot}, \code{Gcross}, \code{Gdot}, \code{Jcross}, \code{Jdot}, \code{Kmulti}, \code{Gmulti}, \code{Jmulti} or \code{Kinhom}. It may also be a character string containing the name of one of these functions. The statistic \code{fun} can also be a user-supplied function; if so, then it must have arguments \code{X} and \code{r} like those in the functions listed above, and it must return an object of class \code{"fv"}. Upper and lower critical envelopes are computed in one of the following ways: \describe{ \item{pointwise:}{by default, envelopes are calculated pointwise (i.e. 
for each value of the distance argument \eqn{r}), by sorting the \code{nsim} simulated values, and taking the \code{m}-th lowest and \code{m}-th highest values, where \code{m = nrank}. For example if \code{nrank=1}, the upper and lower envelopes are the pointwise maximum and minimum of the simulated values. The pointwise envelopes are \bold{not} \dQuote{confidence bands} for the true value of the function! Rather, they specify the critical points for a Monte Carlo test (Ripley, 1981). The test is constructed by choosing a \emph{fixed} value of \eqn{r}, and rejecting the null hypothesis if the observed function value lies outside the envelope \emph{at this value of} \eqn{r}. This test has exact significance level \code{alpha = 2 * nrank/(1 + nsim)}. } \item{simultaneous:}{if \code{global=TRUE}, then the envelopes are determined as follows. First we calculate the theoretical mean value of the summary statistic (if we are testing CSR, the theoretical value is supplied by \code{fun}; otherwise we perform a separate set of \code{nsim2} simulations, compute the average of all these simulated values, and take this average as an estimate of the theoretical mean value). Then, for each simulation, we compare the simulated curve to the theoretical curve, and compute the maximum absolute difference between them (over the interval of \eqn{r} values specified by \code{ginterval}). This gives a deviation value \eqn{d_i}{d[i]} for each of the \code{nsim} simulations. Finally we take the \code{m}-th largest of the deviation values, where \code{m=nrank}, and call this \code{dcrit}. Then the simultaneous envelopes are of the form \code{lo = expected - dcrit} and \code{hi = expected + dcrit} where \code{expected} is either the theoretical mean value \code{theo} (if we are testing CSR) or the estimated theoretical value \code{mmean} (if we are testing another model). The simultaneous critical envelopes have constant width \code{2 * dcrit}. 
The simultaneous critical envelopes allow us to perform a different Monte Carlo test (Ripley, 1981). The test rejects the null hypothesis if the graph of the observed function lies outside the envelope \bold{at any value of} \eqn{r}. This test has exact significance level \code{alpha = nrank/(1 + nsim)}. This test can also be performed using \code{\link{mad.test}}. } \item{based on sample moments:}{if \code{VARIANCE=TRUE}, the algorithm calculates the (pointwise) sample mean and sample variance of the simulated functions. Then the envelopes are computed as mean plus or minus \code{nSD} standard deviations. These envelopes do not have an exact significance interpretation. They are a naive approximation to the critical points of the Neyman-Pearson test assuming the summary statistic is approximately Normally distributed. } } The return value is an object of class \code{"fv"} containing the summary function for the data point pattern, the upper and lower simulation envelopes, and the theoretical expected value (exact or estimated) of the summary function for the model being tested. It can be plotted using \code{\link{plot.envelope}}. If \code{VARIANCE=TRUE} then the return value also includes the sample mean, sample variance and other quantities. Arguments can be passed to the function \code{fun} through \code{...}. In particular, the argument \code{correction} determines the edge correction to be used to calculate the summary statistic. See the section on Edge Corrections, and the Examples. If \code{Y} is a fitted cluster point process model (object of class \code{"kppm"}), and \code{simulate=NULL}, then the model is simulated directly using \code{\link{simulate.kppm}}. If \code{Y} is a fitted Gibbs point process model (object of class \code{"ppm"}), and \code{simulate=NULL}, then the model is simulated by running the Metropolis-Hastings algorithm \code{\link{rmh}}. 
Complete control over this algorithm is provided by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. For simultaneous critical envelopes (\code{global=TRUE}) the following options are also useful: \describe{ \item{\code{ginterval}}{determines the interval of \eqn{r} values over which the deviation between curves is calculated. It should be a numeric vector of length 2. There is a sensible default (namely, the recommended plotting interval for \code{fun(X)}, or the range of \code{r} values if \code{r} is explicitly specified). } \item{\code{transform}}{specifies a transformation of the summary function \code{fun} that will be carried out before the deviations are computed. It must be an expression object using the symbol \code{.} to represent the function value. For example, the conventional way to normalise the \eqn{K} function (Ripley, 1981) is to transform it to the \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)} and this is implemented by setting \code{transform=expression(sqrt(./pi))}. Such transforms are useful if \code{global=TRUE}. } } It is also possible to extract the summary functions for each of the individual simulated point patterns, by setting \code{savefuns=TRUE}. Then the return value also has an attribute \code{"simfuns"} containing all the summary functions for the individual simulated patterns. It is an \code{"fv"} object containing functions named \code{sim1, sim2, ...} representing the \code{nsim} summary functions. It is also possible to save the simulated point patterns themselves, by setting \code{savepatterns=TRUE}. Then the return value also has an attribute \code{"simpatterns"} which is a list of length \code{nsim} containing all the simulated point patterns. See \code{\link{plot.envelope}} and \code{\link{plot.fv}} for information about how to plot the envelopes. Different envelopes can be recomputed from the same data using \code{\link{envelope.envelope}}. 
Envelopes can be combined using \code{\link{pool.envelope}}. } \section{Errors and warnings}{ An error may be generated if one of the simulations produces a point pattern that is empty, or is otherwise unacceptable to the function \code{fun}. The upper envelope may be \code{NA} (plotted as plus or minus infinity) if some of the function values computed for the simulated point patterns are \code{NA}. Whether this occurs will depend on the function \code{fun}, but it usually happens when the simulated point pattern does not contain enough points to compute a meaningful value. } \section{Confidence intervals}{ Simulation envelopes do \bold{not} compute confidence intervals; they generate significance bands. If you really need a confidence interval for the true summary function of the point process, use \code{\link{lohboot}}. See also \code{\link{varblock}}. } \section{Edge corrections}{ It is common to apply a correction for edge effects when calculating a summary function such as the \eqn{K} function. Typically the user has a choice between several possible edge corrections. In a call to \code{envelope}, the user can specify the edge correction to be applied in \code{fun}, using the argument \code{correction}. See the Examples below. \describe{ \item{Summary functions in \pkg{spatstat}}{ Summary functions that are available in \pkg{spatstat}, such as \code{\link{Kest}}, \code{\link{Gest}} and \code{\link{pcf}}, have a standard argument called \code{correction} which specifies the name of one or more edge corrections. The list of available edge corrections is different for each summary function, and may also depend on the kind of window in which the point pattern is recorded. In the case of \code{Kest} (the default and most frequently used value of \code{fun}) the best edge correction is Ripley's isotropic correction if the window is rectangular or polygonal, and the translation correction if the window is a binary mask. 
See the help files for the individual functions for more information. All the summary functions in \pkg{spatstat} recognise the option \code{correction="best"} which gives the \dQuote{best} (most accurate) available edge correction for that function. In a call to \code{envelope}, if \code{fun} is one of the summary functions provided in \pkg{spatstat}, then the default is \code{correction="best"}. This means that \emph{by default, the envelope will be computed using the \dQuote{best} available edge correction}. The user can override this default by specifying the argument \code{correction}. For example the computation can be accelerated by choosing another edge correction which is less accurate than the \dQuote{best} one, but faster to compute. } \item{User-written summary functions}{ If \code{fun} is a function written by the user, then \code{envelope} has to guess what to do. If \code{fun} has an argument called \code{correction}, or has \code{\dots} arguments, then \code{envelope} assumes that the function can handle a correction argument. To compute the envelope, \code{fun} will be called with a \code{correction} argument. The default is \code{correction="best"}, unless overridden in the call to \code{envelope}. Otherwise, if \code{fun} does not have an argument called \code{correction} and does not have \code{\dots} arguments, then \code{envelope} assumes that the function \emph{cannot} handle a correction argument. To compute the envelope, \code{fun} is called without a correction argument. } } } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs}, to appear. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Arnold, 2003. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Ripley, B.D. 
\emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{dclf.test}}, \code{\link{mad.test}} for envelope-based tests. \code{\link{fv.object}}, \code{\link{plot.envelope}}, \code{\link{plot.fv}}, \code{\link{envelope.envelope}}, \code{\link{pool.envelope}} for handling envelopes. \code{\link{Kest}}, \code{\link{Gest}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{pcf}}, \code{\link{ppp}}, \code{\link{ppm}}, \code{\link{default.expand}} } \examples{ X <- simdat # Envelope of K function under CSR \dontrun{ plot(envelope(X)) } \testonly{ plot(envelope(X, nsim=3)) } # Translation edge correction (this is also FASTER): \dontrun{ plot(envelope(X, correction="translate")) } \testonly{ E <- envelope(X, nsim=3, correction="translate") } # Envelope of K function for simulations from Gibbs model \dontrun{ fit <- ppm(cells, ~1, Strauss(0.05)) plot(envelope(fit)) plot(envelope(fit), global=TRUE) } \testonly{ fit <- ppm(cells, ~1, Strauss(0.05), nd=20) E <- envelope(fit, nsim=3, correction="border", nrep=100) E <- envelope(fit, nsim=3, correction="border", global=TRUE, nrep=100) } # Envelope of K function for simulations from cluster model fit <- kppm(redwood, ~1, "Thomas") \dontrun{ plot(envelope(fit, Gest)) plot(envelope(fit, Gest, global=TRUE)) } \testonly{ E <- envelope(fit, Gest, correction="rs", nsim=3, global=TRUE, nrep=100) } # Envelope of G function under CSR \dontrun{ plot(envelope(X, Gest)) } \testonly{ E <- envelope(X, Gest, correction="rs", nsim=3) } # Envelope of L function under CSR # L(r) = sqrt(K(r)/pi) \dontrun{ E <- envelope(X, Kest) plot(E, sqrt(./pi) ~ r) } \testonly{ E <- envelope(X, Kest, correction="border", nsim=3) plot(E, sqrt(./pi) ~ r) } # Simultaneous critical envelope for L function # (alternatively, use Lest) \dontrun{ plot(envelope(X, Kest, 
transform=expression(sqrt(./pi)), global=TRUE)) } \testonly{ E <- envelope(X, Kest, nsim=3, correction="border", transform=expression(sqrt(./pi)), global=TRUE) } # How to pass arguments needed to compute the summary functions: # We want envelopes for Jcross(X, "A", "B") # where "A" and "B" are types of points in the dataset 'demopat' data(demopat) \dontrun{ plot(envelope(demopat, Jcross, i="A", j="B")) } \testonly{ plot(envelope(demopat, Jcross, correction="rs", i="A", j="B", nsim=3)) } # Use of `simulate' \dontrun{ plot(envelope(cells, Gest, simulate=expression(runifpoint(42)))) plot(envelope(cells, Gest, simulate=expression(rMaternI(100,0.02)))) } \testonly{ plot(envelope(cells, Gest, correction="rs", simulate=expression(runifpoint(42)), nsim=3)) plot(envelope(cells, Gest, correction="rs", simulate=expression(rMaternI(100, 0.02)), nsim=3, global=TRUE)) } # Envelope under random toroidal shifts data(amacrine) \dontrun{ plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.25)))) } # Envelope under random shifts with erosion \dontrun{ plot(envelope(amacrine, Kcross, i="on", j="off", simulate=expression(rshift(amacrine, radius=0.1, edge="erode")))) } # Envelope of INHOMOGENEOUS K-function with fitted trend # Note that the principle of symmetry, essential to the validity of # simulation envelopes, requires that both the observed and # simulated patterns be subjected to the same method of intensity # estimation. In the following example it would be incorrect to set the # argument 'lambda=red.dens' in the envelope command, because this # would mean that the inhomogeneous K functions of the simulated # patterns would be computed using the intensity function estimated # from the original redwood data, violating the symmetry. There is # still a concern about the fact that the simulations are generated # from a model that was fitted to the data; this is only a problem in # small datasets. 
\dontrun{ red.dens <- density(redwood, sigma=bw.diggle) plot(envelope(redwood, Kinhom, sigma=bw.diggle, simulate=expression(rpoispp(red.dens)))) } # Precomputed list of point patterns \dontrun{ nX <- npoints(X) PatList <- list() for(i in 1:19) PatList[[i]] <- runifpoint(nX) E <- envelope(X, Kest, nsim=19, simulate=PatList) } \testonly{ PatList <- list() for(i in 1:3) PatList[[i]] <- runifpoint(10) E <- envelope(X, Kest, nsim=3, simulate=PatList) } # re-using the same point patterns \dontrun{ EK <- envelope(X, Kest, savepatterns=TRUE) EG <- envelope(X, Gest, simulate=EK) } \testonly{ EK <- envelope(X, Kest, nsim=3, savepatterns=TRUE) EG <- envelope(X, Gest, nsim=3, simulate=EK) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/methods.objsurf.Rd0000644000176000001440000000264412240447360016333 0ustar ripleyusers\name{methods.objsurf} \Rdversion{1.1} \alias{methods.objsurf} %DoNotExport \alias{print.objsurf} \alias{plot.objsurf} \alias{persp.objsurf} \alias{image.objsurf} \alias{contour.objsurf} \title{ Methods for Objective Function Surfaces } \description{ Methods for printing and plotting an objective function surface. } \usage{ \method{print}{objsurf}(x, ...) \method{plot}{objsurf}(x, ...) \method{image}{objsurf}(x, ...) \method{contour}{objsurf}(x, ...) \method{persp}{objsurf}(x, ...) } \arguments{ \item{x}{ Object of class \code{"objsurf"} representing an objective function surface. } \item{\dots}{ Additional arguments passed to plot methods. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{plot}}, \code{\link{image}}, \code{\link{contour}} and \code{\link{persp}} for the class \code{"objsurf"}. } \value{ For \code{print.objsurf}, \code{plot.objsurf} and \code{image.objsurf} the value is \code{NULL}. 
For \code{contour.objsurf} and \code{persp.objsurf} the value is described in the help for \code{\link{contour.default}} and \code{\link{persp.default}} respectively. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Ege Rubak. } \seealso{ \code{\link{objsurf}} } \examples{ example(objsurf) os plot(os) contour(os, add=TRUE) persp(os) } \keyword{spatial} \keyword{hplot} spatstat/man/npfun.Rd0000755000176000001440000000156712237642733014362 0ustar ripleyusers\name{npfun} \alias{npfun} \title{ Dummy Function Returns Number of Points } \description{ Returns a summary function which is constant with value equal to the number of points in the point pattern. } \usage{ npfun(X, ..., r) } \arguments{ \item{X}{ Point pattern. } \item{\dots}{ Ignored. } \item{r}{ Vector of values of the distance argument \eqn{r}. } } \details{ This function is normally not called by the user. Instead it is passed as an argument to the function \code{\link{psst}}. } \value{ Object of class \code{"fv"} representing a constant function. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} Ege Rubak and Jesper Moller. } \seealso{ \code{\link{psst}} } \examples{ fit0 <- ppm(cells, ~1, nd=10) v <- psst(fit0, npfun) } \keyword{spatial} \keyword{nonparametric} spatstat/man/connected.ppp.Rd0000644000176000001440000000361612237642732015765 0ustar ripleyusers\name{connected.ppp} %DontDeclareMethods \Rdversion{1.1} \alias{connected.ppp} \title{ Connected components of a point pattern } \description{ Finds the topologically-connected components of a point pattern, when all pairs of points closer than a threshold distance are joined. } \usage{ \method{connected}{ppp}(X, R, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{R}{ Threshold distance. Pairs of points closer than \code{R} units apart will be joined together. 
} \item{\dots}{ Other arguments, not recognised by these methods. } } \details{ This function can be used to identify clumps of points in a point pattern. The function \code{connected} is generic. This is the method for point patterns (objects of class \code{"ppp"}). The point pattern \code{X} is first converted into an abstract graph by joining every pair of points that lie closer than \code{R} units apart. Then the connected components of this graph are identified. Two points in \code{X} belong to the same connected component if they can be reached by a series of steps between points of \code{X}, each step being shorter than \code{R} units in length. The result is a vector of labels for the points of \code{X} where all the points in a connected component have the same label. } \value{ A point pattern, equivalent to \code{X} except that the points have factor-valued marks, with levels corresponding to the connected components. } \seealso{ \code{\link{connected.im}}, \code{\link{im.object}}, \code{\link{tess}} } \examples{ Y <- connected(redwood, 0.13) if(interactive()) { plot(Y, cols=1:length(levels(f))) plot(split(Y)) } } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/nnwhich.pp3.Rd0000755000176000001440000000512312237642733015363 0ustar ripleyusers\name{nnwhich.pp3} \alias{nnwhich.pp3} \title{Nearest neighbours in three dimensions} \description{ Finds the nearest neighbour of each point in a three-dimensional point pattern. } \usage{ \method{nnwhich}{pp3}(X, \dots, k=1) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. 
} } \value{ Numeric vector or matrix giving, for each point, the index of its nearest neighbour (or \code{k}th nearest neighbour). If \code{k = 1} (the default), the return value is a numeric vector \code{v} giving the indices of the nearest neighbours (the nearest neighbout of the \code{i}th point is the \code{j}th point where \code{j = v[i]}). If \code{k} is a single integer, then the return value is a numeric vector giving the indices of the \code{k}th nearest neighbours. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the index of the \code{k[j]}th nearest neighbour for the \code{i}th data point. } \details{ For each point in the given three-dimensional point pattern, this function finds its nearest neighbour (the nearest other point of the pattern). By default it returns a vector giving, for each point, the index of the point's nearest neighbour. If \code{k} is specified, the algorithm finds each point's \code{k}th nearest neighbour. The function \code{nnwhich} is generic. This is the method for the class \code{"pp3"}. If there are no points in the pattern, a numeric vector of length zero is returned. If there is only one point, then the nearest neighbour is undefined, and a value of \code{NA} is returned. In general if the number of points is less than or equal to \code{k}, then a vector of \code{NA}'s is returned. To evaluate the \emph{distance} between a point and its nearest neighbour, use \code{\link{nndist}}. To find the nearest neighbours from one point pattern to another point pattern, use \code{\link{nncross}}. } \section{Warnings}{ A value of \code{NA} is returned if there is only one point in the point pattern. 
} \seealso{ \code{\link{nnwhich}}, \code{\link{nndist}}, \code{\link{nncross}} } \examples{ X <- runifpoint3(30) m <- nnwhich(X) m2 <- nnwhich(X, k=2) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} based on two-dimensional code by Pavel Grabarnik } \keyword{spatial} \keyword{math} spatstat/man/SatPiece.Rd0000755000176000001440000001115712237642731014723 0ustar ripleyusers\name{SatPiece} \alias{SatPiece} \title{Piecewise Constant Saturated Pairwise Interaction Point Process Model} \description{ Creates an instance of a saturated pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ SatPiece(r, sat) } \arguments{ \item{r}{vector of jump points for the potential function} \item{sat}{ vector of saturation values, or a single saturation value } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is a generalisation of the Geyer saturation point process model, described in \code{\link{Geyer}}, to the case of multiple interaction distances. It can also be described as the saturated analogue of a pairwise interaction process with piecewise-constant pair potential, described in \code{\link{PairPiece}}. The saturated point process with interaction radii \eqn{r_1,\ldots,r_k}{r[1], ..., r[k]}, saturation thresholds \eqn{s_1,\ldots,s_k}{s[1],...,s[k]}, intensity parameter \eqn{\beta}{beta} and interaction parameters \eqn{\gamma_1,\ldots,gamma_k}{gamma[1], ..., gamma[k]}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma_1^{v_1(x_i, X)} \ldots gamma_k^{v_k(x_i,X)} }{ beta gamma[1]^v(1, x_i, X) ... 
gamma[k]^v(k, x_i, X) } to the probability density of the point pattern, where \deqn{ v_j(x_i, X) = \min( s_j, t_j(x_i,X) ) }{ v(j, x_i, X) = min(s[j], t(j, x_i, X)) } where \eqn{t_j(x_i, X)}{t(j,x[i],X)} denotes the number of points in the pattern \eqn{X} which lie at a distance between \eqn{r_{j-1}}{r[j-1]} and \eqn{r_j}{r[j]} from the point \eqn{x_i}{x[i]}. We take \eqn{r_0 = 0}{r[0] = 0} so that \eqn{t_1(x_i,X)}{t(1, x[i], X)} is the number of points of \eqn{X} that lie within a distance \eqn{r_1}{r[1]} of the point \eqn{x_i}{x[i]}. \code{SatPiece} is used to fit this model to data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant Saturated pairwise interaction is yielded by the function \code{SatPiece()}. See the examples below. Simulation of this point process model is not yet implemented. This model is not locally stable (the conditional intensity is unbounded). The argument \code{r} specifies the vector of interaction distances. The entries of \code{r} must be strictly increasing, positive numbers. The argument \code{sat} specifies the vector of saturation parameters. It should be a vector of the same length as \code{r}, and its entries should be nonnegative numbers. Thus \code{sat[1]} corresponds to the distance range from \code{0} to \code{r[1]}, and \code{sat[2]} to the distance range from \code{r[1]} to \code{r[2]}, etc. Alternatively \code{sat} may be a single number, and this saturation value will be applied to every distance range. Infinite values of the saturation parameters are also permitted; in this case \eqn{v_j(x_i,X) = t_j(x_i,X)}{v(j, x_i, X) = t(j, x_i, X)} and there is effectively no `saturation' for the distance range in question. 
If all the saturation parameters are set to \code{Inf} then the model is effectively a pairwise interaction process, equivalent to \code{\link{PairPiece}} (however the interaction parameters \eqn{\gamma}{gamma} obtained from \code{\link{SatPiece}} are the square roots of the parameters \eqn{\gamma}{gamma} obtained from \code{\link{PairPiece}}). If \code{r} is a single number, this model is virtually equivalent to the Geyer process, see \code{\link{Geyer}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{PairPiece}}, \code{\link{BadGey}}. } \examples{ SatPiece(c(0.1,0.2), c(1,1)) # prints a sensible description of itself SatPiece(c(0.1,0.2), 1) data(cells) ppm(cells, ~1, SatPiece(c(0.07, 0.1, 0.13), 2)) # fit a stationary piecewise constant Saturated pairwise interaction process \dontrun{ ppm(cells, ~polynom(x,y,3), SatPiece(c(0.07, 0.1, 0.13), 2)) # nonstationary process with log-cubic polynomial trend } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} in collaboration with Hao Wang and Jeff Picka } \keyword{spatial} \keyword{models} spatstat/man/area.owin.Rd0000755000176000001440000000331712237642732015111 0ustar ripleyusers\name{area.owin} \alias{area.owin} \alias{volume.owin} \title{Area of a Window} \description{ Computes the area of a window } \usage{ area.owin(w) \method{volume}{owin}(x) } \arguments{ \item{w}{A window, whose area will be computed. This should be an object of class \code{\link{owin}}, or can be given in any format acceptable to \code{\link{as.owin}()}. } \item{x}{Object of class \code{\link{owin}}} } \value{ A numerical value giving the area of the window. } \details{ If the window \code{w} is of type \code{"rectangle"} or \code{"polygonal"}, the area of this rectangular window is computed by analytic geometry. 
If \code{w} is of type \code{"mask"} the area of the discrete raster approximation of the window is computed by summing the binary image values and adjusting for pixel size. The function \code{volume.owin} is identical to \code{area.owin} except for the argument name. It is a method for the generic function \code{volume}. } \seealso{ \code{\link{perimeter}}, \code{\link{diameter.owin}}, \code{\link{owin.object}}, \code{\link{as.owin}} } \examples{ w <- unit.square() area.owin(w) # returns 1.00000 k <- 6 theta <- 2 * pi * (0:(k-1))/k co <- cos(theta) si <- sin(theta) mas <- owin(c(-1,1), c(-1,1), poly=list(x=co, y=si)) area.owin(mas) # returns approx area of k-gon mas <- as.mask(square(2), eps=0.01) X <- raster.x(mas) Y <- raster.y(mas) mas$m <- ((X - 1)^2 + (Y - 1)^2 <= 1) area.owin(mas) # returns 3.14 approx } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/tiles.Rd0000755000176000001440000000174312237642734014351 0ustar ripleyusers\name{tiles} \alias{tiles} \title{Extract List of Tiles in a Tessellation} \description{ Extracts a list of the tiles that make up a tessellation. } \usage{ tiles(x) } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. The tiles that make up the tessellation \code{x} are returned in a list. } \value{ A list of windows (objects of class \code{"owin"}). 
} \seealso{ \code{\link{tess}}, \code{\link{tilenames}}, \code{\link{tile.areas}} } \examples{ A <- tess(xgrid=0:2,ygrid=0:2) tiles(A) v <- as.im(function(x,y){factor(round(x^2 + y^2))}, W=owin()) E <- tess(image=v) tiles(E) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/unmark.Rd0000755000176000001440000000257712237642734014534 0ustar ripleyusers\name{unmark} \alias{unmark} \alias{unmark.ppp} \alias{unmark.splitppp} \alias{unmark.psp} \alias{unmark.ppx} \title{Remove Marks} \description{ Remove the mark information from a spatial dataset. } \usage{ unmark(X) \method{unmark}{ppp}(X) \method{unmark}{splitppp}(X) \method{unmark}{psp}(X) \method{unmark}{ppx}(X) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}), a split point pattern (object of class \code{"splitppp"}), a line segment pattern (object of class \code{"psp"}) or a multidimensional space-time point pattern (object of class \code{"ppx"}). } } \value{ An object of the same class as \code{X} with any mark information deleted. } \details{ A `mark' is a value attached to each point in a spatial point pattern, or attached to each line segment in a line segment pattern, etc. The function \code{unmark} is a simple way to remove the marks from such a dataset. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{psp.object}} } \examples{ data(lansing) hicks <- lansing[lansing$marks == "hickory", ] \dontrun{ plot(hicks) # still a marked point pattern, but only 1 value of marks plot(unmark(hicks)) # unmarked } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/sharpen.Rd0000755000176000001440000000631112237642734014665 0ustar ripleyusers\name{sharpen} %DontDeclareMethods \alias{sharpen} \alias{sharpen.ppp} \title{Data Sharpening of Point Pattern} \description{ Performs Choi-Hall data sharpening of a spatial point pattern. } \usage{ sharpen(X, ...) \method{sharpen}{ppp}(X, sigma=NULL, ..., varcov=NULL, edgecorrect=FALSE) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } \item{edgecorrect}{ Logical value indicating whether to apply edge effect bias correction. } \item{\dots}{Arguments passed to \code{\link{density.ppp}} to control the pixel resolution of the result.} } \details{ Choi and Hall (2001) proposed a procedure for \emph{data sharpening} of spatial point patterns. This procedure is appropriate for earthquake epicentres and other point patterns which are believed to exhibit strong concentrations of points along a curve. Data sharpening causes such points to concentrate more tightly along the curve. If the original data points are \eqn{X_1, \ldots, X_n}{X[1],..., X[n]} then the sharpened points are \deqn{ \hat X_i = \frac{\sum_j X_j k(X_j-X_i)}{\sum_j k(X_j - X_i)} }{ X^[i] = (sum[j] X[j] * k(X[j] - X[i]))/(sum[j] k(X[j] - X[i])) } where \eqn{k} is a smoothing kernel in two dimensions. 
Thus, the new point \eqn{\hat X_i}{X^[i]} is a vector average of the nearby points \eqn{X[j]}. The function \code{sharpen} is generic. It currently has only one method, for two-dimensional point patterns (objects of class \code{"ppp"}). If \code{sigma} is given, the smoothing kernel is the isotropic two-dimensional Gaussian density with standard deviation \code{sigma} in each axis. If \code{varcov} is given, the smoothing kernel is the Gaussian density with variance-covariance matrix \code{varcov}. The data sharpening procedure tends to cause the point pattern to contract away from the boundary of the window. That is, points \code{X_i}{X[i]} that lie `quite close to the edge of the window of the point pattern tend to be displaced inward. If \code{edgecorrect=TRUE} then the algorithm is modified to correct this vector bias. } \value{ A point pattern (object of class \code{"ppp"}) in the same window as the original pattern \code{X}, and with the same marks as \code{X}. } \seealso{ \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}. } \examples{ data(shapley) X <- unmark(shapley) \dontshow{ if(!(interactive())) X <- rthin(X, 0.05) } Y <- sharpen(X, sigma=0.5) } \references{ Choi, E. and Hall, P. (2001) Nonparametric analysis of earthquake point-process data. In M. de Gunst, C. Klaassen and A. van der Vaart (eds.) \emph{State of the art in probability and statistics: Festschrift for Willem R. van Zwet}, Institute of Mathematical Statistics, Beachwood, Ohio. Pages 324--344. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/nztrees.Rd0000755000176000001440000000246712237642733014726 0ustar ripleyusers\name{nztrees} \alias{nztrees} \docType{data} \title{ New Zealand Trees Point Pattern } \description{ The data give the locations of trees in a forest plot. 
They were collected by Mark and Esler (1970) and were extracted and analysed by Ripley (1981, pp. 169-175). They represent the positions of 86 trees in a forest plot approximately 140 by 85 feet. Ripley discarded from his analysis the eight trees at the right-hand edge of the plot (which appear to be part of a planted border) and trimmed the window by a 5-foot margin accordingly. } \format{ An object of class \code{"ppp"} representing the point pattern of tree locations. The Cartesian coordinates are in feet. See \code{\link{ppp.object}} for details of the format of a point pattern object. } \usage{data(nztrees)} \source{Mark and Esler (1970), Ripley (1981).} \section{Note}{ To trim a 5-foot margin off the window, type \code{nzsub <- nztrees[ , owin(c(0,148),c(0,95)) ]} } \references{ Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Mark, A.F. and Esler, A.E. (1970) An assessment of the point-centred quarter method of plotless sampling in some New Zealand forests. \emph{Proceedings of the New Zealand Ecological Society} \bold{17}, 106--110. } \keyword{datasets} \keyword{spatial} spatstat/man/mincontrast.Rd0000755000176000001440000001435212237642733015571 0ustar ripleyusers\name{mincontrast} \alias{mincontrast} \title{Method of Minimum Contrast} \description{ A general low-level algorithm for fitting theoretical point process models to point pattern data by the Method of Minimum Contrast. } \usage{ mincontrast(observed, theoretical, startpar, \dots, ctrl=list(q = 1/4, p = 2, rmin=NULL, rmax=NULL), fvlab=list(label=NULL, desc="minimum contrast fit"), explain=list(dataname=NULL, modelname=NULL, fname=NULL)) } \arguments{ \item{observed}{ Summary statistic, computed for the data. An object of class \code{"fv"}. } \item{theoretical}{ An R language function that calculates the theoretical expected value of the summary statistic, given the model parameters. See Details. 
} \item{startpar}{ Vector of initial values of the parameters of the point process model (passed to \code{theoretical}). } \item{\dots}{ Additional arguments passed to the function \code{theoretical} and to the optimisation algorithm \code{\link[stats]{optim}}. } \item{ctrl}{ Optional. List of arguments controlling the optimisation. See Details. } \item{fvlab}{ Optional. List containing some labels for the return value. See Details. } \item{explain}{ Optional. List containing strings that give a human-readable description of the model, the data and the summary statistic. } } \details{ This function is a general algorithm for fitting point process models by the Method of Minimum Contrast. If you want to fit the Thomas process, see \code{\link{thomas.estK}}. If you want to fit a log-Gaussian Cox process, see \code{\link{lgcp.estK}}. If you want to fit the Matern cluster process, see \code{\link{matclust.estK}}. The Method of Minimum Contrast (Diggle and Gratton, 1984) is a general technique for fitting a point process model to point pattern data. First a summary function (typically the \eqn{K} function) is computed from the data point pattern. Second, the theoretical expected value of this summary statistic under the point process model is derived (if possible, as an algebraic expression involving the parameters of the model) or estimated from simulations of the model. Then the model is fitted by finding the optimal parameter values for the model to give the closest match between the theoretical and empirical curves. The argument \code{observed} should be an object of class \code{"fv"} (see \code{\link{fv.object}}) containing the values of a summary statistic computed from the data point pattern. Usually this is the function \eqn{K(r)} computed by \code{\link{Kest}} or one of its relatives. The argument \code{theoretical} should be a user-supplied function that computes the theoretical expected value of the summary statistic. 
It must have an argument named \code{par} that will be the vector of parameter values for the model (the length and format of this vector are determined by the starting values in \code{startpar}). The function \code{theoretical} should also expect a second argument (the first argument other than \code{par}) containing values of the distance \eqn{r} for which the theoretical value of the summary statistic \eqn{K(r)} should be computed. The value returned by \code{theoretical} should be a vector of the same length as the given vector of \eqn{r} values. The argument \code{ctrl} determines the contrast criterion (the objective function that will be minimised). The algorithm minimises the criterion \deqn{ D(\theta)= \int_{r_{\mbox{\scriptsize min}}}^{r_{\mbox{\scriptsize max}}} |\hat F(r)^q - F_\theta(r)^q|^p \, {\rm d}r }{ D(theta) = integral from rmin to rmax of abs(Fhat(r)^q - F(theta,r)^q)^p } where \eqn{\theta}{theta} is the vector of parameters of the model, \eqn{\hat F(r)}{Fhat(r)} is the observed value of the summary statistic computed from the data, \eqn{F_\theta(r)}{F(theta,r)} is the theoretical expected value of the summary statistic, and \eqn{p,q} are two exponents. The default is \code{q = 1/4}, \code{p=2} so that the contrast criterion is the integrated squared difference between the fourth roots of the two functions (Waagepetersen, 2006). The other arguments just make things print nicely. The argument \code{fvlab} contains labels for the component \code{fit} of the return value. The argument \code{explain} contains human-readable strings describing the data, the model and the summary statistic. The \code{"..."} argument of \code{mincontrast} can be used to pass extra arguments to the function \code{theoretical} and/or to the optimisation function \code{\link[stats]{optim}}. In this case, the function \code{theoretical} should also have a \code{"..."} argument and should ignore it (so that it ignores arguments intended for \code{\link[stats]{optim}}). 
} \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } \item{opt }{The return value from the optimizer \code{\link{optim}}.} \item{crtl }{The control parameters of the algorithm.} \item{info }{List of explanatory strings.} } \references{ Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2006). An estimation function approach to inference for inhomogeneous Neyman-Scott processes. Submitted. } \author{Rasmus Waagepetersen \email{rw@math.auc.dk}, adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{matclust.estK}}, \code{\link{thomas.estK}}, } \keyword{spatial} \keyword{models} spatstat/man/Pairwise.Rd0000755000176000001440000000714212237642731015010 0ustar ripleyusers\name{Pairwise} \alias{Pairwise} \title{Generic Pairwise Interaction model} \description{ Creates an instance of a pairwise interaction point process model which can then be fitted to point pattern data. 
} \usage{ Pairwise(pot, name, par, parnames, printfun) } \arguments{ \item{pot}{An R language function giving the user-supplied pairwise interaction potential.} \item{name}{Character string.} \item{par}{List of numerical values for irregular parameters} \item{parnames}{Vector of names of irregular parameters} \item{printfun}{Do not specify this argument: for internal use only.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This code constructs a member of the pairwise interaction family \code{\link{pairwise.family}} with arbitrary pairwise interaction potential given by the user. Each pair of points in the point pattern contributes a factor \eqn{h(d)} to the probability density, where \eqn{d} is the distance between the two points. The factor term \eqn{h(d)} is \deqn{h(d) = \exp(-\theta \mbox{pot}(d))}{h(d) = exp(-theta * pot(d))} provided \eqn{\mbox{pot}(d)}{pot(d)} is finite, where \eqn{\theta}{theta} is the coefficient vector in the model. The function \code{pot} must take as its first argument a matrix of interpoint distances, and evaluate the potential for each of these distances. The result must be either a matrix with the same dimensions as its input, or an array with its first two dimensions the same as its input (the latter case corresponds to a vector-valued potential). If irregular parameters are present, then the second argument to \code{pot} should be a vector of the same type as \code{par} giving those parameter values. The values returned by \code{pot} may be finite numeric values, or \code{-Inf} indicating a hard core (that is, the corresponding interpoint distance is forbidden). We define \eqn{h(d) = 0} if \eqn{\mbox{pot}(d) = -\infty}{pot(d) = -Inf}. Thus, a potential value of minus infinity is \emph{always} interpreted as corresponding to \eqn{h(d) = 0}, regardless of the sign and magnitude of \eqn{\theta}{theta}. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ #This is the same as StraussHard(r=0.7,h=0.05) strpot <- function(d,par) { r <- par$r h <- par$h value <- (d <= r) value[d < h] <- -Inf value } mySH <- Pairwise(strpot, "StraussHard process", list(r=0.7,h=0.05), c("interaction distance r", "hard core distance h")) data(cells) ppm(cells, ~ 1, mySH, correction="isotropic") # Fiksel (1984) double exponential interaction # see Stoyan, Kendall, Mecke 1987 p 161 fikspot <- function(d, par) { r <- par$r h <- par$h zeta <- par$zeta value <- exp(-zeta * d) value[d < h] <- -Inf value[d > r] <- 0 value } Fiksel <- Pairwise(fikspot, "Fiksel double exponential process", list(r=3.5, h=1, zeta=1), c("interaction distance r", "hard core distance h", "exponential coefficient zeta")) data(spruces) fit <- ppm(unmark(spruces), ~1, Fiksel, rbord=3.5) fit plot(fitin(fit), xlim=c(0,4)) coef(fit) # corresponding values obtained by Fiksel (1984) were -1.9 and -6.0 } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/hyperframe.Rd0000755000176000001440000000670312237642732015372 0ustar ripleyusers\name{hyperframe} \alias{hyperframe} \title{Hyper Data Frame} \description{ Create a hyperframe: a two-dimensional array in which each column consists of values of the same atomic type (like the columns of a data frame) or objects of the same class. } \usage{ hyperframe(..., row.names=NULL, check.rows=FALSE, check.names=TRUE, stringsAsFactors=default.stringsAsFactors()) } \arguments{ \item{\dots}{ Arguments of the form \code{value} or \code{tag=value}. Each \code{value} is either an atomic vector, or a list of objects of the same class, or a single atomic value, or a single object. Each \code{value} will become a column of the array. The \code{tag} determines the name of the column. See Details. 
} \item{row.names,check.rows,check.names,stringsAsFactors}{ Arguments passed to \code{\link{data.frame}} controlling the names of the rows, whether to check that rows are consistent, whether to check validity of the column names, and whether to convert character columns to factors. } } \details{ A hyperframe is like a data frame, except that its entries can be objects of any kind. A hyperframe is a two-dimensional array in which each column consists of values of one atomic type (as in a data frame) or consists of objects of one class. The arguments \code{\dots} are any number of arguments of the form \code{value} or \code{tag=value}. Each \code{value} will become a column of the array. The \code{tag} determines the name of the column. Each \code{value} can be either \itemize{ \item an atomic vector or factor (i.e. numeric vector, integer vector, character vector, logical vector, complex vector or factor) \item a list of objects which are all of the same class \item one atomic value, which will be replicated to make an atomic vector or factor \item one object, which will be replicated to make a list of objects. } All columns (vectors, factors and lists) must be of the same length, if their length is greater than 1. } \section{Methods for Hyperframes}{ There are methods for \code{print}, \code{plot}, \code{summary}, \code{with}, \code{[}, \code{[<-}, \code{$}, \code{$<-}, \code{names}, \code{as.data.frame}, \code{as.list}, \code{cbind} and \code{rbind} for the class of hyperframes. There is also \code{is.hyperframe} and \code{\link{as.hyperframe}}. } \value{ An object of class \code{"hyperframe"}.
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{as.hyperframe}}, \code{\link{as.hyperframe.ppx}}, \code{\link{plot.hyperframe}}, \code{\link{with.hyperframe}}, \code{\link{as.data.frame.hyperframe}}, \code{\link{cbind.hyperframe}}, \code{\link{rbind.hyperframe}} } \examples{ # equivalent to a data frame hyperframe(X=1:10, Y=3) # list of functions hyperframe(f=list(sin, cos, tan)) # table of functions and matching expressions hyperframe(f=list(sin, cos, tan), e=list(expression(sin(x)), expression(cos(x)), expression(tan(x)))) hyperframe(X=1:10, Y=letters[1:10], Z=factor(letters[1:10]), stringsAsFactors=FALSE) lambda <- runif(4, min=50, max=100) X <- lapply(as.list(lambda), function(x) { rpoispp(x) }) h <- hyperframe(lambda=lambda, X=X) h h$lambda2 <- lambda^2 h[, "lambda3"] <- lambda^3 h[, "Y"] <- X } \keyword{spatial} \keyword{manip} spatstat/man/incircle.Rd0000755000176000001440000000230312237642732015010 0ustar ripleyusers\name{incircle} \alias{incircle} \title{Find Largest Circle Inside Window} \description{ Find the largest circle contained in a given window. } \usage{ incircle(W) } \arguments{ \item{W}{A window (object of class \code{"owin"}).} } \details{ Given a window \code{W} of any type and shape, this function determines the largest circle that is contained inside \code{W}. For non-rectangular windows, the incircle is computed approximately by finding the maximum of the distance map (see \code{\link{distmap}}) of the complement of the window. } \value{ A list with entries \code{x,y,r} giving the location \code{(x,y)} and radius \code{r} of the incircle. 
} \seealso{ \code{\link{centroid.owin}} } \examples{ W <- square(1) Wc <- incircle(W) plot(W) plot(disc(Wc$r, c(Wc$x, Wc$y)), add=TRUE) data(letterR) plot(letterR) Rc <- incircle(letterR) plot(disc(Rc$r, c(Rc$x, Rc$y)), add=TRUE) W <- as.mask(letterR) plot(W) Rc <- incircle(W) plot(disc(Rc$r, c(Rc$x, Rc$y)), add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/is.hybrid.Rd0000644000176000001440000000371712237642732015122 0ustar ripleyusers\name{is.hybrid} %DontDeclareMethods \alias{is.hybrid} \alias{is.hybrid.ppm} \alias{is.hybrid.interact} \title{ Test Whether Object is a Hybrid } \description{ Tests where a point process model or point process interaction is a hybrid of several interactions. } \usage{ is.hybrid(x) \method{is.hybrid}{ppm}(x) \method{is.hybrid}{interact}(x) } \arguments{ \item{x}{ A point process model (object of class \code{"ppm"}) or a point process interaction structure (object of class \code{"interact"}). } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2012) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. The function \code{is.hybrid} is generic, with methods for point process models (objects of class \code{"ppm"}) and point process interactions (objects of class \code{"interact"}). These functions return \code{TRUE} if the object \code{x} is a hybrid, and \code{FALSE} if it is not a hybrid. Hybrids of two or more interpoint interactions are created by the function \code{\link{Hybrid}}. Such a hybrid interaction can then be fitted to point pattern data using \code{\link{ppm}}. } \value{ \code{TRUE} if the object is a hybrid, and \code{FALSE} otherwise. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. 
(2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \url{http://www.jstatsoft.org/v55/i11/} } \seealso{ \code{\link{Hybrid}} } \examples{ S <- Strauss(0.1) is.hybrid(S) H <- Hybrid(Strauss(0.1), Geyer(0.2, 3)) is.hybrid(H) data(redwood) fit <- ppm(redwood, ~1, H) is.hybrid(fit) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/diagnose.ppm.Rd0000755000176000001440000003655712237642732015626 0ustar ripleyusers\name{diagnose.ppm} \alias{diagnose.ppm} \alias{plot.diagppm} \title{ Diagnostic Plots for Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, produce diagnostic plots based on residuals. } \usage{ diagnose.ppm(object, \dots, type="raw", which="all", sigma=NULL, rbord=reach(object), cumulative=TRUE, plot.it=TRUE, rv = NULL, compute.sd=TRUE, compute.cts=TRUE, typename, check=TRUE, repair=TRUE, oldstyle=FALSE) \method{plot}{diagppm}(x, \dots, which, plot.neg="image", plot.smooth="imagecontour", plot.sd=TRUE, spacing=0.1, srange=NULL, monochrome=FALSE, main=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. } \item{type}{ String indicating the type of residuals or weights to be used. Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. } \item{which}{ Character string or vector indicating the choice(s) of plots to be generated. Options are \code{"all"}, \code{"marks"}, \code{"smooth"}, \code{"x"}, \code{"y"} and \code{"sum"}. 
Multiple choices may be given but must be matched exactly. See Details. } \item{sigma}{ Bandwidth for kernel smoother in \code{"smooth"} option. } \item{rbord}{ Width of border to avoid edge effects. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. } \item{cumulative}{ Logical flag indicating whether the lurking variable plots for the \eqn{x} and \eqn{y} coordinates will be the plots of cumulative sums of marks (\code{cumulative=TRUE}) or the plots of marginal integrals of the smoothed residual field (\code{cumulative=FALSE}). } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, the computed diagnostic quantities are returned without plotting them. } \item{plot.neg}{ One of \code{"discrete"} or \code{"image"} indicating how the density part of the residual measure should be plotted. } \item{plot.smooth}{ One of \code{"image"}, \code{"persp"}, \code{"contour"} or \code{"imagecontour"} indicating how the smoothed residual field should be plotted. } \item{compute.sd,plot.sd}{ Logical values indicating whether error bounds should be computed and added to the \code{"x"} and \code{"y"} plots. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. } \item{rv}{ Usually absent. Advanced use only. If this argument is present, the values of the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from \code{rv}. } \item{spacing}{ The spacing between plot panels (when a four-panel plot is generated) expressed as a fraction of the width of the window of the point pattern. } \item{srange}{ Vector of length 2 that will be taken as giving the range of values of the smoothed residual field, when generating an image plot of this field. 
This is useful if you want to generate diagnostic plots for two different fitted models using the same colour map. } \item{monochrome}{ Flag indicating whether images should be displayed in greyscale (suitable for publication) or in colour (suitable for the screen). The default is to display in colour. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{x}{The value returned from a previous call to \code{diagnose.ppm}. An object of class \code{"diagppm"}. } \item{typename}{String to be used as the name of the residuals.} \item{main}{Main title for the plot.} \item{\dots}{ Extra arguments, controlling either the resolution of the smoothed image (passed from \code{diagnose.ppm} to \code{\link{density.ppp}}) or the appearance of the plots (passed from \code{diagnose.ppm} to \code{plot.diagppm} and from \code{plot.diagppm} to \code{\link{plot.default}}). } \item{compute.cts}{Advanced use only.} } \value{ An object of class \code{"diagppm"} which contains the coordinates needed to reproduce the selected plots. This object can be plotted using \code{plot.diagppm} and printed using \code{print.diagppm}. } \details{ This function generates several diagnostic plots for a fitted point process model. The plots display the residuals from the fitted model (Baddeley et al, 2005) or alternatively the `exponential energy marks' (Stoyan and Grabarnik, 1991). 
These plots can be used to assess goodness-of-fit, to identify outliers in the data, and to reveal departures from the fitted model. See also the companion function \code{\link{qqplot.ppm}}. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}) typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}. The argument \code{type} selects the type of residual or weight that will be computed. Current options are: \describe{ \item{\code{"eem"}:}{ exponential energy marks (Stoyan and Grabarnik, 1991) computed by \code{\link{eem}}. These are positive weights attached to the data points (i.e. the points of the point pattern dataset to which the model was fitted). If the fitted model is correct, then the sum of these weights for all data points in a spatial region \eqn{B} has expected value equal to the area of \eqn{B}. See \code{\link{eem}} for further explanation. } \item{\code{"raw"}, \code{"inverse"} or \code{"pearson"}:}{ point process residuals (Baddeley et al, 2005) computed by the function \code{\link{residuals.ppm}}. These are residuals attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals in a spatial region \eqn{B} has mean zero. The options are \itemize{ \item \code{"raw"}: the raw residuals; \item \code{"inverse"}: the `inverse-lambda' residuals, a counterpart of the exponential energy weights; \item \code{"pearson"}: the Pearson residuals. } See \code{\link{residuals.ppm}} for further explanation. } } The argument \code{which} selects the type of plot that is produced. Options are: \describe{ \item{\code{"marks"}:}{ plot the residual measure. For the exponential energy weights (\code{type="eem"}) this displays circles centred at the points of the data pattern, with radii proportional to the exponential energy weights.
For the residuals (\code{type="raw"}, \code{type="inverse"} or \code{type="pearson"}) this again displays circles centred at the points of the data pattern with radii proportional to the (positive) residuals, while the plotting of the negative residuals depends on the argument \code{plot.neg}. If \code{plot.neg="image"} then the negative part of the residual measure, which is a density, is plotted as a colour image. If \code{plot.neg="discrete"} then the discretised negative residuals (obtained by approximately integrating the negative density using the quadrature scheme of the fitted model) are plotted as squares centred at the dummy points with side lengths proportional to the (negative) residuals. [To control the size of the circles and squares, use the argument \code{maxsize}.] } \item{\code{"smooth"}:}{ plot a kernel-smoothed version of the residual measure. Each data or dummy point is taken to have a `mass' equal to its residual or exponential energy weight. (Note that residuals can be negative). This point mass is then replaced by a bivariate isotropic Gaussian density with standard deviation \code{sigma}. The value of the smoothed residual field at any point in the window is the sum of these weighted densities. If the fitted model is correct, this smoothed field should be flat, and its height should be close to 0 (for the residuals) or 1 (for the exponential energy weights). The field is plotted either as an image, contour plot or perspective view of a surface, according to the argument \code{plot.smooth}. The range of values of the smoothed field is printed if the option \code{which="sum"} is also selected. } \item{\code{"x"}:}{ produce a `lurking variable' plot for the \eqn{x} coordinate. This is a plot of \eqn{h(x)} against \eqn{x} (solid lines) and of \eqn{E(h(x))} against \eqn{x} (dashed lines), where \eqn{h(x)} is defined below, and \eqn{E(h(x))} denotes the expectation of \eqn{h(x)} assuming the fitted model is true. 
\itemize{ \item if \code{cumulative=TRUE} then \eqn{h(x)} is the cumulative sum of the weights or residuals for all points which have \eqn{X} coordinate less than or equal to \eqn{x}. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } area of the subset of the window to the left of the line \eqn{X=x}. \item if \code{cumulative=FALSE} then \eqn{h(x)} is the marginal integral of the smoothed residual field (see the case \code{which="smooth"} described above) on the \eqn{x} axis. This is approximately the derivative of the plot for \code{cumulative=TRUE}. The value of \eqn{h(x)} is computed by summing the values of the smoothed residual field over all pixels with the given \eqn{x} coordinate. For the residuals \eqn{E(h(x)) = 0}, and for the exponential energy weights \eqn{E(h(x)) = } length of the intersection between the observation window and the line \eqn{X=x}. } If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{h(x)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. } \item{\code{"y"}:}{ produce a similar lurking variable plot for the \eqn{y} coordinate. } \item{\code{"sum"}:}{ print the sum of the weights or residuals for all points in the window (clipped by a margin \code{rbord} if required) and the area of the same window. If the fitted model is correct the sum of the exponential energy weights should equal the area of the window, while the sum of the residuals should equal zero. Also print the range of values of the smoothed field displayed in the \code{"smooth"} case. } \item{\code{"all"}:}{ All four of the diagnostic plots listed above are plotted together in a two-by-two display. Top left panel is \code{"marks"} plot. Bottom right panel is \code{"smooth"} plot. Bottom left panel is \code{"x"} plot. 
Top right panel is \code{"y"} plot, rotated 90 degrees. } } The argument \code{rbord} ensures there are no edge effects in the computation of the residuals. The diagnostic calculations will be confined to those points of the data pattern which are at least \code{rbord} units away from the edge of the window. The value of \code{rbord} should be greater than or equal to the range of interaction permitted in the model. By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2006). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. The argument \code{rv} would normally be used only by experts. It enables the user to substitute arbitrary values for the residuals or marks, overriding the usual calculations. If \code{rv} is present, then instead of calculating the residuals from the fitted model, the algorithm takes the residuals from the object \code{rv}, and plots them in the manner appropriate to the type of residual or mark selected by \code{type}. If \code{type ="eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector of length equal to the number of points in the original data point pattern. Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, it should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure. The return value of \code{diagnose.ppm} is an object of class \code{"diagppm"}. There are methods for \code{plot} and \code{print} for such objects. See the Examples. 
See also the companion functions \code{\link{qqplot.ppm}}, which produces a Q-Q plot of the residuals, and \code{\link{lurking}}, which produces lurking variable plots for any spatial covariate. } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. } \seealso{ \code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{qqplot.ppm}}, \code{\link{lurking}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~x, Strauss(r=0.15)) diagnose.ppm(fit) \dontrun{ diagnose.ppm(fit, type="pearson") } diagnose.ppm(fit, which="marks") diagnose.ppm(fit, type="raw", plot.neg="discrete") # save the diagnostics and plot them later u <- diagnose.ppm(fit, rbord=0.15, plot.it=FALSE) \dontrun{ plot(u) plot(u, which="marks") } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{hplot} spatstat/man/linearpcfcross.inhom.Rd0000644000176000001440000001054112237642732017346 0ustar ripleyusers\name{linearpcfcross.inhom} \alias{linearpcfcross.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype pair correlation function from points of type \eqn{i} to points of type \eqn{j}. 
} \usage{ linearpcfcross.inhom(X, i, j, lambdaI, lambdaJ, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{ij}(r)}{g[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdaJ}{ Intensity values for the points of type \code{j}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. 
If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link[spatstat]{pcfcross.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor; if \code{j} is missing, it defaults to the second level. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfdot}}, \code{\link[spatstat]{linearpcf}}, \code{\link[spatstat]{pcfcross.inhom}}. 
} \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) } g <- linearpcfcross.inhom(chicago, "assault", "robbery", lamI, lamJ) \dontrun{ fit <- lppm(chicago, ~marks + x) linearpcfcross.inhom(chicago, "assault", "robbery", fit, fit) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/kstest.Rd0000755000176000001440000002155712237642732014551 0ustar ripleyusers\name{kstest.ppm} %DontDeclareMethods \alias{kstest} \alias{kstest.ppm} \alias{kstest.lppm} \alias{kstest.lpp} \alias{kstest.ppp} \alias{kstest.slrm} \title{Kolmogorov-Smirnov Test for Point Pattern or Point Process Model} \description{ Performs a Kolmogorov-Smirnov test of goodness-of-fit of a Poisson point process model. The test compares the observed and predicted distributions of the values of a spatial covariate. } \usage{ kstest(...) \method{kstest}{ppp}(X, covariate, ..., jitter=TRUE) \method{kstest}{ppm}(model, covariate, ..., jitter=TRUE) \method{kstest}{lpp}(X, covariate, ..., jitter=TRUE) \method{kstest}{lppm}(model, covariate, ..., jitter=TRUE) \method{kstest}{slrm}(model, covariate, ..., modelname=NULL, covname=NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) or fitted spatial logistic regression (object of class \code{"slrm"}). } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image (object of class \code{"im"}), a list of pixel images, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. } \item{\dots}{ Arguments passed to \code{\link{ks.test}} to control the test. } \item{jitter}{ Logical flag. 
If \code{jitter=TRUE}, values of the covariate will be slightly perturbed at random, to avoid tied values in the test. } \item{modelname,covname}{ Character strings giving alternative names for \code{model} and \code{covariate} to be used in labelling plot axes. } } \details{ These functions perform a goodness-of-fit test of a Poisson point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov test. The function \code{kstest} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}), point process models (\code{"ppm"} or \code{"lppm"}) and spatial logistic regression models (\code{"slrm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"}), then \code{kstest(X, ...)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. For a multitype point pattern, the uniform intensity is assumed to depend on the type of point (sometimes called Complete Spatial Randomness and Independence, CSRI). \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{kstest(model, ...)} performs a test of goodness-of-fit for this fitted model. In this case, \code{model} should be a Poisson point process. \item If \code{model} is a fitted spatial logistic regression (object of class \code{"slrm"}) then \code{kstest(model, ...)} performs a test of goodness-of-fit for this fitted model. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model, using the classical Kolmogorov-Smirnov test. Thus, you must nominate a spatial covariate for this test. 
If \code{X} is a point pattern that does not have marks, the argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) containing the values of a spatial function, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. If \code{X} is a multitype point pattern, the argument \code{covariate} can be either a \code{function(x,y,marks)}, or a pixel image, or a list of pixel images corresponding to each possible mark value, or one of the characters \code{"x"} or \code{"y"} indicating the Cartesian coordinates. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link{ewcdf}}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. The Kolmogorov-Smirnov test of uniformity is applied using \code{\link{ks.test}}. This test was apparently first described (in the context of spatial data) by Berman (1986). See also Baddeley et al (2005). 
The return value is an object of class \code{"htest"} containing the results of the hypothesis test. The print method for this class gives an informative summary of the test outcome. The return value also belongs to the class \code{"kstest"} for which there is a plot method \code{\link{plot.kstest}}. The plot method displays the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, plotted against the value of the covariate. The argument \code{jitter} controls whether covariate values are randomly perturbed, in order to avoid ties. If the original data contains any ties in the covariate (i.e. points with equal values of the covariate), and if \code{jitter=FALSE}, then the Kolmogorov-Smirnov test implemented in \code{\link{ks.test}} will issue a warning that it cannot calculate the exact \eqn{p}-value. To avoid this, if \code{jitter=TRUE} each value of the covariate will be perturbed by adding a small random value. The perturbations are normally distributed with standard deviation equal to one hundredth of the range of values of the covariate. This prevents ties, and the \eqn{p}-value is still correct. There is a very slight loss of power. } \value{ An object of class \code{"htest"} containing the results of the test. See \code{\link{ks.test}} for details. The return value can be printed to give an informative summary of the test. The value also belongs to the class \code{"kstest"} for which there is a plot method. } \section{Warning}{ The outcome of the test involves a small amount of random variability, because (by default) the coordinates are randomly perturbed to avoid tied values. Hence, if \code{kstest} is executed twice, the \eqn{p}-values will not be exactly the same. To avoid this behaviour, set \code{jitter=FALSE}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{plot.kstest}}, \code{\link{quadrat.test}}, \code{\link{bermantest}}, \code{\link{ks.test}}, \code{\link{ppm}} } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \examples{ # test of CSR using x coordinate kstest(nztrees, "x") # test of CSR using a function of x and y fun <- function(x,y){2* x + y} kstest(nztrees, fun) # test of CSR using an image covariate funimage <- as.im(fun, W=as.owin(nztrees)) kstest(nztrees, funimage) # fit inhomogeneous Poisson model and test model <- ppm(nztrees, ~x) kstest(model, "x") if(interactive()) { # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X, ~1) # fit correct nonuniform Poisson process fit1 <- ppm(X, ~x) # test wrong model kstest(fit0, "x") # test right model kstest(fit1, "x") } # multitype point pattern kstest(amacrine, "x") yimage <- as.im(function(x,y){y}, W=as.owin(amacrine)) kstest(ppm(amacrine, ~marks+y), yimage) } \keyword{htest} \keyword{spatial} spatstat/man/summary.listof.Rd0000755000176000001440000000167412237642734016230 0ustar ripleyusers\name{summary.listof} \alias{summary.listof} \title{Summary of a List of Things} \description{ Prints a useful summary of each item in a list of things. } \usage{ \method{summary}{listof}(object, \dots) } \arguments{ \item{object}{ An object of class \code{"listof"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{summary}}. 
An object of the class \code{"listof"} is effectively a list of things which are all of the same class. This function extracts a useful summary of each of the items in the list. } \seealso{ \code{\link{summary}}, \code{\link{plot.listof}} } \examples{ x <- list(A=runif(10), B=runif(10), C=runif(10)) class(x) <- c("listof", class(x)) summary(x) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/box3.Rd0000755000176000001440000000320112237642732014071 0ustar ripleyusers\name{box3} \Rdversion{1.1} \alias{box3} \title{ Three-Dimensional Box } \description{ Creates an object representing a three-dimensional box. } \usage{ box3(xrange = c(0, 1), yrange = xrange, zrange = yrange, unitname = NULL) } \arguments{ \item{xrange, yrange, zrange}{ Dimensions of the box in the \eqn{x,y,z} directions. Each of these arguments should be a numeric vector of length 2. } \item{unitname}{ Optional. Name of the unit of length. See Details. } } \details{ This function creates an object representing a three-dimensional rectangular parallelepiped (box) with sides parallel to the coordinate axes. The object can be used to specify the domain of a three-dimensional point pattern (see \code{\link{pp3}}) and in various geometrical calculations (see \code{\link{volume.box3}}, \code{\link{diameter.box3}}, \code{\link{eroded.volumes}}). The optional argument \code{unitname} specifies the name of the unit of length. See \code{\link{unitname}} for valid formats. The function \code{\link{as.box3}} can be used to convert other kinds of data to this format. } \value{ An object of class \code{"box3"}. There is a print method for this class. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{as.box3}}, \code{\link{pp3}}, \code{\link{volume.box3}}, \code{\link{diameter.box3}}, \code{\link{eroded.volumes}}. } \examples{ box3() box3(c(0,10),c(0,10),c(0,5), unitname=c("metre","metres")) box3(c(-1,1)) } \keyword{spatial} \keyword{datagen} spatstat/man/gorillas.Rd0000644000176000001440000001103512237642732015033 0ustar ripleyusers\name{gorillas} \alias{gorillas} \alias{gorillas.extra} \docType{data} \title{ Gorilla Nesting Sites } \description{ Locations of nesting sites of gorillas, and associated covariates, in a National Park in Cameroon. } \usage{data(gorillas)} \format{ \code{gorillas} is a marked point pattern (object of class \code{"ppp"}) representing nest site locations. \code{gorillas.extra} is a named list of 7 pixel images (objects of class \code{"im"}) containing spatial covariates. It also belongs to the class \code{"listof"}. All spatial coordinates are in metres. The coordinate reference system is \code{WGS_84_UTM_Zone_32N}. } \details{ These data come from a study of gorillas in the Kagwene Gorilla Sanctuary, Cameroon, by the Wildlife Conservation Society Takamanda-Mone Landscape Project (WCS-TMLP). A detailed description and analysis of the data is reported in Funwi-Gabga and Mateu (2012). The dataset \code{gorillas} is a marked point pattern (object of class \code{"ppp"}) giving the spatial locations of 647 nesting sites of gorilla groups observed in the sanctuary over time. Locations are given as UTM (Zone 32N) coordinates in metres. The observation window is the boundary of the sanctuary, represented as a polygon. Marks attached to the points are: \describe{ \item{group}{Identifier of the gorilla group that constructed the nest site: a categorical variable with values \code{major} or \code{minor}. 
} \item{season}{Season in which data were collected: categorical, either \code{rainy} or \code{dry}. } \item{date}{ Day of observation. A value of class \code{"Date"}. } } Note that the data contain duplicated points (two points at the same location). To determine which points are duplicates, use \code{\link{duplicated.ppp}}. To remove the duplication, use \code{\link{unique.ppp}}. The accompanying dataset \code{gorillas.extra} contains spatial covariate information. It is a named list containing seven pixel images (objects of class \code{"im"}) giving the values of seven covariates over the study region. It also belongs to the class \code{"listof"} so that it can be plotted. The component images are: \describe{ \item{aspect}{ Compass direction of the terrain slope. Categorical, with levels \code{N}, \code{NE}, \code{E}, \code{SE}, \code{S}, \code{SW}, \code{W} and \code{NW}. } \item{elevation}{ Digital elevation of terrain, in metres. } \item{heat}{ Heat Load Index at each point on the surface (Beer's aspect), discretised. Categorical with values \code{Warmest} (Beer's aspect between 0 and 0.999), \code{Moderate} (Beer's aspect between 1 and 1.999), \code{Coolest} (Beer's aspect equals 2). } \item{slopeangle}{ Terrain slope, in degrees. } \item{slopetype}{ Type of slope. Categorical, with values \code{Valley}, \code{Toe} (toe slope), \code{Flat}, \code{Midslope}, \code{Upper} and \code{Ridge}. } \item{vegetation}{ Vegetation or cover type. Categorical, with values \code{Disturbed} (highly disturbed forest), \code{Colonising} (colonising forest), \code{Grassland} (savannah), \code{Primary} (primary forest), \code{Secondary} (secondary forest), and \code{Transition} (transitional vegetation). } \item{waterdist}{ Euclidean distance from nearest water body, in metres. } } For further information see Funwi-Gabga and Mateu (2012). } \source{ Field data collector: Wildlife Conservation Society Takamanda-Mone Landscape Project (WCS-TMLP). 
\emph{Please acknowledge WCS-TMLP in any use of these data.} Data kindly provided by Funwi-Gabga Neba, Data Coordinator of A.P.E.S. Database Project, Department of Primatology, Max Planck Institute for Evolutionary Anthropology, Leipzig, Germany. The collaboration of Prof Jorge Mateu, Universitat Jaume I, Castellon, Spain is gratefully acknowledged. } \references{ Funwi-Gabga, N. (2008) \emph{A pastoralist survey and fire impact assessment in the Kagwene Gorilla Sanctuary, Cameroon}. M.Sc. thesis, Geology and Environmental Science, University of Buea. Funwi-Gabga, N. and Mateu, J. (2012) Understanding the nesting spatial behaviour of gorillas in the Kagwene Sanctuary, Cameroon. \emph{Stochastic Environmental Research and Risk Assessment}, in press. } \examples{ summary(gorillas) plot(gorillas) plot(gorillas.extra) } \keyword{datasets} spatstat/man/identify.psp.Rd0000755000176000001440000000353612237642732015645 0ustar ripleyusers\name{identify.psp} \alias{identify.psp} \title{Identify Segments in a Line Segment Pattern} \description{ If a line segment pattern is plotted in the graphics window, this function will find the segment which is nearest to the mouse position, and print its serial number. } \usage{ \method{identify}{psp}(x, \dots, labels=seq_len(nsegments(x)), n=nsegments(x), plot=TRUE) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{labels}{ Labels associated with the segments, to be plotted when the segments are identified. A character vector or numeric vector of length equal to the number of segments in \code{x}. } \item{n}{ Maximum number of segments to be identified. } \item{plot}{ Logical. Whether to plot the labels when a segment is identified. } } \value{ Vector containing the serial numbers of the segments in the pattern \code{x} that were identified. } \details{ This is a method for the generic function \code{\link{identify}} for line segment pattern objects. 
The line segment pattern \code{x} should first be plotted using \code{\link{plot.psp}}. Then \code{identify(x)} reads the position of the graphics pointer each time the left mouse button is pressed. It then finds the segment in the pattern \code{x} that is closest to the mouse position. This segment's index will be returned as part of the value of the call. Each time a segment is identified, text will be displayed next to the point, showing its serial number (or the relevant entry of \code{labels}). } \seealso{ \code{\link{identify}}, \code{\link{identify.ppp}}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{iplot} spatstat/man/periodify.Rd0000755000176000001440000000753512237642733015227 0ustar ripleyusers\name{periodify} %DontDeclareMethods \alias{periodify} \alias{periodify.owin} \alias{periodify.ppp} \alias{periodify.psp} \title{ Make Periodic Copies of a Spatial Pattern } \description{ Given a spatial pattern (point pattern, line segment pattern, window, etc) make shifted copies of the pattern and optionally combine them to make a periodic pattern. } \usage{ periodify(X, ...) \method{periodify}{ppp}(X, nx = 1, ny = 1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) \method{periodify}{psp}(X, nx = 1, ny = 1, ..., combine=TRUE, warn=TRUE, check=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) \method{periodify}{owin}(X, nx = 1, ny = 1, ..., combine=TRUE, warn=TRUE, ix=(-nx):nx, iy=(-ny):ny, ixy=expand.grid(ix=ix,iy=iy)) } \arguments{ \item{X}{ An object representing a spatial pattern (point pattern, line segment pattern or window). } \item{nx,ny}{ Integers. Numbers of additional copies of \code{X} in each direction. The result will be a grid of \code{2 * nx + 1} by \code{2 * ny + 1} copies of the original object. (Overruled by \code{ix, iy, ixy}). 
} \item{\dots}{ Ignored. } \item{combine}{ Logical flag determining whether the copies should be superimposed to make an object like \code{X} (if \code{combine=TRUE}) or simply returned as a list of objects (\code{combine=FALSE}). } \item{warn}{ Logical flag determining whether to issue warnings. } \item{check}{ Logical flag determining whether to check the validity of the combined pattern. } \item{ix, iy}{ Integer vectors determining the grid positions of the copies of \code{X}. (Overruled by \code{ixy}). } \item{ixy}{ Matrix or data frame with two columns, giving the grid positions of the copies of \code{X}. } } \details{ Given a spatial pattern (point pattern, line segment pattern, etc) this function makes a number of shifted copies of the pattern and optionally combines them. The function \code{periodify} is generic, with methods for various kinds of spatial objects. The default is to make a 3 by 3 array of copies of \code{X} and combine them into a single pattern of the same kind as \code{X}. This can be used (for example) to compute toroidal or periodic edge corrections for various operations on \code{X}. If the arguments \code{nx}, \code{ny} are given and other arguments are missing, the original object will be copied \code{nx} times to the right and \code{nx} times to the left, then \code{ny} times upward and \code{ny} times downward, making \code{(2 * nx + 1) * (2 * ny + 1)} copies altogether, arranged in a grid, centred on the original object. If the arguments \code{ix}, \code{iy} or \code{ixy} are specified, then these determine the grid positions of the copies of \code{X} that will be made. For example \code{(ix,iy) = (1, 2)} means a copy of \code{X} shifted by the vector \code{(ix * w, iy * h)} where \code{w,h} are the width and height of the bounding rectangle of \code{X}. If \code{combine=TRUE} (the default) the copies of \code{X} are superimposed to create an object of the same kind as \code{X}. 
If \code{combine=FALSE} the copies of \code{X} are returned as a list. } \value{ If \code{combine=TRUE}, an object of the same class as \code{X}. If \code{combine=FALSE}, a list of objects of the same class as \code{X}. } \seealso{ \code{\link{shift}} } \examples{ data(cells) plot(periodify(cells)) a <- lapply(periodify(cells$window, combine=FALSE), plot, add=TRUE,lty=2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/areaLoss.Rd0000755000176000001440000000421512237642732014775 0ustar ripleyusers\name{areaLoss} \alias{areaLoss} \title{Difference of Disc Areas} \description{ Computes the area of that part of a disc that is not covered by other discs. } \usage{ areaLoss(X, r, ..., W=as.owin(X), subset=NULL, exact=FALSE, ngrid=spatstat.options("ngrid.disc")) } \arguments{ \item{X}{ Locations of the centres of discs. A point pattern (object of class \code{"ppp"}). } \item{r}{ Disc radius, or vector of disc radii. } \item{\dots}{Ignored.} \item{W}{ Optional. Window (object of class \code{"owin"}) inside which the area should be calculated. } \item{subset}{ Optional. Index identifying a subset of the points of \code{X} for which the area difference should be computed. } \item{exact}{ Choice of algorithm. If \code{exact=TRUE}, areas are computed exactly using analytic geometry. If \code{exact=FALSE} then a faster algorithm is used to compute a discrete approximation to the areas. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the discrete approximation, when \code{exact=FALSE}. } } \value{ A matrix with one row for each point in \code{X} (or \code{X[subset]}) and one column for each value in \code{r}. 
} \details{ This function computes, for each point \code{X[i]} in \code{X} and for each radius \code{r}, the area of that part of the disc of radius \code{r} centred at the location \code{X[i]} that is \emph{not} covered by any of the other discs of radius \code{r} centred at the points \code{X[j]} for \code{j} not equal to \code{i}. This area is important in some calculations related to the area-interaction model \code{\link{AreaInter}}. The result is a matrix, with one row for each point in \code{X} and one column for each entry of \code{r}. } \seealso{ \code{\link{AreaInter}}, \code{\link{areaGain}}, \code{\link{dilated.areas}} } \examples{ data(cells) areaLoss(cells, 0.1) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/Gcom.Rd0000755000176000001440000002200412237642731014104 0ustar ripleyusers\name{Gcom} \Rdversion{1.1} \alias{Gcom} \title{ Model Compensator of Nearest Neighbour Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the nearest neighbour distance distribution function \eqn{G} based on the fitted model (as well as the usual nonparametric estimates of \eqn{G} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{G} functions serves as a diagnostic for the model. } \usage{ Gcom(object, r = NULL, breaks = NULL, ..., correction = c("border", "Hanisch"), conditional = !is.poisson(object), restrict=FALSE, trend = ~1, interaction = Poisson(), rbord = reach(interaction), ppmcorrection="border", truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. 
Vector of values of the argument \eqn{r} at which the function \eqn{G(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{correction}{ Edge correction(s) to be employed in calculating the compensator. Options are \code{"border"}, \code{"Hanisch"} and \code{"best"}. } \item{conditional}{ Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details. } \item{restrict}{ Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{\dots}{ Extra arguments passed to \code{\link{ppm}}. } \item{ppmcorrection}{ The \code{correction} argument to \code{\link{ppm}}. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. 
It computes different estimates of the nearest neighbour distance distribution function \eqn{G} of the dataset, which should be approximately equal if the model is a good fit to the data. The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{G} function. It then also computes the \emph{model-compensated} \eqn{G} function. The different functions are returned as columns in a data frame (of class \code{"fv"}). The interpretation of the columns is as follows (ignoring edge corrections): \describe{ \item{\code{bord}:}{ the nonparametric border-correction estimate of \eqn{G(r)}, \deqn{ \hat G(r) = \frac{\sum_i I\{ d_i \le r\} I\{ b_i > r \}}{\sum_i I\{ b_i > r\}} }{ G(r) = (sum[i] I(d[i] <= r) I(b[i] > r))/(sum[i] I(b[i] > r)) } where \eqn{d_i}{d[i]} is the distance from the \eqn{i}-th data point to its nearest neighbour, and \eqn{b_i}{b[i]} is the distance from the \eqn{i}-th data point to the boundary of the window \eqn{W}. 
} \item{\code{bcom}:}{ the model compensator of the border-correction estimate \deqn{ {\bf C}\, \hat G(r) = \frac{\int \lambda(u,x) I\{ b(u) > r\} I\{ d(u,x) \le r\}}{ 1 + \sum_i I\{ b_i > r\} } }{ C G(r) = (integral[u] lambda(u,x) I(b(u) > r) I( d(u,x) <= r ))/(1 + sum[i] I(b[i] > r)) } where \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{d(u,x)} denotes the distance from \eqn{u} to the nearest point in \eqn{x}, while \eqn{b(u)} denotes the distance from \eqn{u} to the boundary of the window\eqn{W}. } \item{\code{han}:}{ the nonparametric Hanisch estimate of \eqn{G(r)} \deqn{ \hat G(r) = \frac{D(r)}{D(\infty)} }{ G(r) = D(r)/D(infty) } where \deqn{ D(r) = \sum_i \frac{ I\{x_i \in W_{\ominus d_i}\} I\{d_i \le r\} }{ \mbox{area}(W_{\ominus d_i}) } }{ D(r) = sum[i] I(x[i] in W[-r]) I(d[i] <= r)/area(W[-d[i]]) } in which \eqn{W_{\ominus r}}{W[-r]} denotes the erosion of the window \eqn{W} by a distance \eqn{r}. } \item{\code{hcom}:}{ the corresponding model-compensated function \deqn{ {\bf C} \, G(r) = \int_W \frac{ \lambda(u,x) I(u \in W_{\ominus d(u)}) I(d(u) \le r) }{ \hat D(\infty) \mbox{area}(W_{\ominus d(u)}) + 1 } }{ C G(r) = integral[u] lambda(u,x) I(u in W[-d(u)]) I(d(u) <= r)/ (1 + D(infty) area(W[-d(u)])) } where \eqn{d(u) = d(u, x)} is the (`empty space') distance from location \eqn{u} to the nearest point of \eqn{x}. } } If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects. The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified. 
If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix E of Baddeley, Rubak and Moller (2011). Thus, by default, the reweighting estimator is computed for non-Poisson models. The border-corrected and Hanisch-corrected estimates of \eqn{G(r)} are approximately unbiased estimates of the \eqn{G}-function, assuming the point process is stationary. The model-compensated functions are unbiased estimates \emph{of the mean value of the corresponding nonparametric estimate}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric and model-compensated estimates is approximately zero. To compute the difference between the nonparametric and model-compensated functions, use \code{\link{Gres}}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} Ege Rubak and Jesper Moller. } \seealso{ Related functions: \code{\link{Gest}}, \code{\link{Gres}}. Alternative functions: \code{\link{Kcom}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}. Model fitting: \code{\link{ppm}}. 
} \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson G0 <- Gcom(fit0) G0 plot(G0) # uniform Poisson is clearly not correct # Hanisch estimates only plot(Gcom(fit0), cbind(han, hcom) ~ r) fit1 <- ppm(cells, ~1, Strauss(0.08)) plot(Gcom(fit1), cbind(han, hcom) ~ r) # Try adjusting interaction distance fit2 <- update(fit1, Strauss(0.10)) plot(Gcom(fit2), cbind(han, hcom) ~ r) G3 <- Gcom(cells, interaction=Strauss(0.12)) plot(G3, cbind(han, hcom) ~ r) } \keyword{spatial} \keyword{models} spatstat/man/quad.object.Rd0000755000176000001440000000606612237642733015432 0ustar ripleyusers\name{quad.object} \alias{quad.object} %DoNotExport \title{Class of Quadrature Schemes} \description{ A class \code{"quad"} to represent a quadrature scheme. } \details{ A (finite) quadrature scheme is a list of quadrature points \eqn{u_j}{u[j]} and associated weights \eqn{w_j}{w[j]} which is used to approximate an integral by a finite sum: \deqn{ \int f(x) dx \approx \sum_j f(u_j) w_j }{ integral(f(x) dx) ~= sum( f(u[j]) w[j] ) } Given a point pattern dataset, a \emph{Berman-Turner} quadrature scheme is one which includes all these data points, as well as a nonzero number of other (``dummy'') points. These quadrature schemes are used to approximate the pseudolikelihood of a point process, in the method of Baddeley and Turner (2000) (see Berman and Turner (1992)). Accuracy and computation time both increase with the number of points in the quadrature scheme. An object of class \code{"quad"} represents a Berman-Turner quadrature scheme. It can be passed as an argument to the model-fitting function \code{\link{ppm}}, which requires a quadrature scheme. 
An object of this class contains at least the following elements: \tabular{ll}{ \code{data}: \tab an object of class \code{"ppp"} \cr \tab giving the locations (and marks) of the data points.\cr \code{dummy}: \tab an object of class \code{"ppp"} \cr \tab giving the locations (and marks) of the dummy points.\cr \code{w}: \tab vector of nonnegative weights for the quadrature points\cr } Users are strongly advised not to manipulate these entries directly. The domain of quadrature is specified by \code{dummy$window} while the observation window (if this needs to be specified separately) is taken to be \code{data$window}. The weights vector \code{w} may also have an attribute \code{attr(w, "zeroes")} equivalent to the logical vector \code{(w == 0)}. If this is absent then all points are known to have positive weights. To create an object of class \code{"quad"}, users would typically call the high level function \code{\link{quadscheme}}. (They are actually created by the low level function \code{quad}.) Entries are extracted from a \code{"quad"} object by the functions \code{x.quad}, \code{y.quad}, \code{w.quad} and \code{marks.quad}, which extract the \eqn{x} coordinates, \eqn{y} coordinates, weights, and marks, respectively. The function \code{n.quad} returns the total number of quadrature points (dummy plus data). An object of class \code{"quad"} can be converted into an ordinary point pattern by the function \code{\link{union.quad}} which simply takes the union of the data and dummy points. Quadrature schemes can be plotted using \code{\link{plot.quad}} (a method for the generic \code{\link{plot}}). 
} \seealso{ \code{\link{quadscheme}}, \code{\link{ppm}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{attribute} spatstat/man/smooth.msr.Rd0000755000176000001440000000323012237642734015333 0ustar ripleyusers\name{smooth.msr} \alias{smooth.msr} \alias{Smooth.msr} \title{ Smooth a Signed or Vector-Valued Measure } \description{ Apply kernel smoothing to a signed measure or vector-valued measure. } \usage{ smooth.msr(X, ...) \method{Smooth}{msr}(X, ...) } \arguments{ \item{X}{ Object of class \code{"msr"} representing a signed measure or vector-valued measure. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} controlling the smoothing bandwidth and the pixel resolution. } } \details{ This function applies kernel smoothing to a signed measure or vector-valued measure \code{X}. The Gaussian kernel is used. The object \code{X} would typically have been created by \code{\link{residuals.ppm}} or \code{\link{msr}}. } \value{ For signed measures, a pixel image (object of class \code{"im"}). For vector-valued measures, a list of pixel images; the list also belongs to the class \code{"listof"} so that it can be printed and plotted. } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{Smooth}}, \code{\link{msr}}, \code{\link{plot.msr}} } \examples{ example(msr) plot(Smooth(rp)) plot(Smooth(rs)) } \keyword{spatial} \keyword{models} spatstat/man/Kmodel.Rd0000755000176000001440000000343012237642731014434 0ustar ripleyusers\name{Kmodel} \alias{Kmodel} \alias{pcfmodel} \alias{Kmodel.kppm} \alias{pcfmodel.kppm} \title{K function of a model} \description{ Returns the theoretical \eqn{K} function or the pair correlation function of a point process model. } \usage{ Kmodel(model, \dots) pcfmodel(model, \dots) \method{Kmodel}{kppm}(model, \dots) \method{pcfmodel}{kppm}(model, \dots) } \arguments{ \item{model}{ A fitted cluster point process model, typically obtained from the model-fitting algorithm \code{\link{kppm}}. An object of class \code{"kppm"}. } \item{\dots}{ Ignored. } } \value{ A \code{function} in the \R language, which takes one argument \code{r}. } \details{ For certain types of point process models, it is possible to write down a mathematical expression for the \eqn{K} function or the pair correlation function of the model. In particular this is possible for a fitted cluster point process model (object of class \code{"kppm"} obtained from \code{\link{kppm}}). The functions \code{Kmodel} and \code{pcfmodel} are generic. Currently the only method is for the class \code{"kppm"}. The return value is a \code{function} in the \R language, which takes one argument \code{r}. Evaluation of this function, on a numeric vector \code{r}, yields values of the desired \eqn{K} function or pair correlation function at these distance values. 
} \seealso{ \code{\link{kppm}}, \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(redwood) fit <- kppm(redwood, ~x, "MatClust") K <- Kmodel(fit) K(c(0.1, 0.2)) curve(K(x), from=0, to=0.25) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/model.frame.ppm.Rd0000755000176000001440000000432012237642733016206 0ustar ripleyusers\name{model.frame.ppm} \alias{model.frame.ppm} \alias{model.frame.kppm} \alias{model.frame.lppm} \title{ Extract the Variables in a Point Process Model } \description{ Given a fitted point process model, this function returns a data frame containing all the variables needed to fit the model using the Berman-Turner device. } \usage{ \method{model.frame}{ppm}(formula, ...) \method{model.frame}{kppm}(formula, ...) \method{model.frame}{lppm}(formula, ...) } \arguments{ \item{formula}{ A fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}. } \item{\dots}{ Additional arguments passed to \code{\link{model.frame.glm}}. } } \details{ The function \code{\link{model.frame}} is generic. These functions are method for \code{\link{model.frame}} for fitted point process models (objects of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). The first argument should be a fitted point process model; it has to be named \code{formula} for consistency with the generic function. The result is a data frame containing all the variables used in fitting the model. The data frame has one row for each quadrature point used in fitting the model. The quadrature scheme can be extracted using \code{\link{quad.ppm}}. } \value{ A \code{data.frame} containing all the variables used in the fitted model, plus additional variables specified in \code{\dots}. It has an additional attribute \code{"terms"} containing information about the model formula. 
For details see \code{\link{model.frame.glm}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \seealso{ \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{lppm}}, \code{\link{model.frame}}, \code{\link{model.matrix.ppm}} } \examples{ fit <- ppm(cells, ~x) mf <- model.frame(fit) kfit <- kppm(redwood, ~x, "Thomas") kmf <- model.frame(kfit) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/humberside.Rd0000755000176000001440000000633312237642732015356 0ustar ripleyusers\name{humberside} \alias{humberside} \alias{humberside.convex} \docType{data} \title{Humberside Data on Childhood Leukaemia and Lymphoma} \description{ Spatial locations of cases of childhood leukaemia and lymphoma, and randomly-selected controls, in North Humberside. A marked point pattern. } \format{ The dataset \code{humberside} is an object of class \code{"ppp"} representing a marked point pattern. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of home address \cr \code{y} \tab Cartesian \eqn{y}-coordinate of home address \cr \code{marks} \tab factor with levels \code{case} and \code{control} \cr \tab indicating whether this is a disease case\cr \tab or a control. } See \code{\link{ppp.object}} for details of the format. The dataset \code{humberside.convex} is an object of the same format, representing the same point pattern data, but contained in a larger, 5-sided convex polygon. } \usage{data(humberside)} \examples{ humberside summary(humberside) plot(humberside) plot(humberside.convex$window, add=TRUE, lty=2) } \source{ Dr Ray Cartwright and Dr Freda Alexander. Published and analysed in Cuzick and Edwards (1990), see Table 1. 
Pentagonal boundary from Diggle and Chetwynd (1991), Figure 1. Point coordinates and pentagonal boundary supplied by Andrew Lawson. Detailed region boundary was digitised by Adrian Baddeley, 2005, from a reprint of Cuzick and Edwards (1990). } \section{Notes}{ Cuzick and Edwards (1990) first presented and analysed these data. The data record 62 cases of childhood leukaemia and lymphoma diagnosed in the North Humberside region of England between 1974 and 1986, together with 141 controls selected at random from the birth register for the same period. The data are represented as a marked point pattern, with the points giving the spatial location of each individual's home address (actually, the centroid for the postal code) and the marks identifying cases and controls. Coordinates are expressed in units of 100 metres, and the resolution is 100 metres. At this resolution, there are some duplicated points. To determine which points are duplicates, use \code{\link{duplicated.ppp}}. To remove the duplication, use \code{\link{unique.ppp}}. Two versions of the dataset are supplied, both containing the same point coordinates, but using different windows. The dataset \code{humberside} has a polygonal window with 102 edges which closely approximates the Humberside region, while \code{humberside.convex} has a convex 5-sided polygonal window originally used by Diggle and Chetwynd (1991) and shown in Figure 1 of that paper. (This pentagon has been modified slightly from the original data, by shifting two vertices horizontally by 1 unit, so that the pentagon contains all the data points.) } \references{ J. Cuzick and R. Edwards (1990) Spatial clustering for inhomogeneous populations. \emph{Journal of the Royal Statistical Society, series B}, \bold{52} (1990) 73-104. P.J. Diggle and A.G. Chetwynd (1991) Second-order analysis of spatial clustering for inhomogeneous populations. \emph{Biometrics} 47 (1991) 1155-1163. 
} \keyword{datasets} \keyword{spatial} spatstat/man/as.im.Rd0000755000176000001440000001552712237642732014243 0ustar ripleyusers\name{as.im} %DontDeclareMethods \alias{as.im} \alias{as.im.im} \alias{as.im.leverage.ppm} \alias{as.im.owin} \alias{as.im.matrix} \alias{as.im.tess} \alias{as.im.function} \alias{as.im.distfun} \alias{as.im.nnfun} \alias{as.im.default} \title{Convert to Pixel Image} \description{ Converts various kinds of data to a pixel image } \usage{ as.im(X, \dots) \method{as.im}{im}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{owin}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL, value=1) \method{as.im}{matrix}(X, W=NULL, \dots) \method{as.im}{tess}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{function}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{distfun}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{nnfun}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) \method{as.im}{leverage.ppm}(X, \dots) \method{as.im}{default}(X, W=NULL, \dots, eps=NULL, dimyx=NULL, xy=NULL, na.replace=NULL) } \arguments{ \item{X}{Data to be converted to a pixel image.} \item{W}{Window object which determines the spatial domain and pixel array geometry. } \item{\dots}{Additional arguments passed to \code{X} when \code{X} is a function.} \item{eps,dimyx,xy}{ Optional parameters passed to \code{\link{as.mask}} which determine the pixel array geometry. See \code{\link{as.mask}}. } \item{na.replace}{Optional value to replace \code{NA} entries in the output image. } \item{value}{Optional. The value to be assigned to pixels inside the window, if \code{X} is a window. } } \details{ This function converts the data \code{X} into a pixel image object of class \code{"im"} (see \code{\link{im.object}}). The function \code{as.im} is generic, with methods for the classes listed above. 
Currently \code{X} may be any of the following: \itemize{ \item a pixel image object, of class \code{"im"}. \item a window object, of class \code{"owin"} (see \code{\link{owin.object}}). The result is an image with all pixel entries equal to \code{value} inside the window \code{X}, and \code{NA} outside. \item a matrix. \item a tessellation (object of class \code{"tess"}). The result is a factor-valued image, with one factor level corresponding to each tile of the tessellation. Pixels are classified according to the tile of the tessellation into which they fall. \item a single number (or a single logical, complex, factor or character value). The result is an image with all pixel entries equal to this constant value inside the window \code{W} (and \code{NA} outside, unless the argument \code{na.replace} is given). Argument \code{W} is required. \item a function of the form \code{function(x, y, ...)} which is to be evaluated to yield the image pixel values. In this case, the additional argument \code{W} must be present. This window will be converted to a binary image mask. Then the function \code{X} will be evaluated in the form \code{X(x, y, ...)} where \code{x} and \code{y} are \bold{vectors} containing the \eqn{x} and \eqn{y} coordinates of all the pixels in the image mask, and \code{...} are any extra arguments given. This function must return a vector or factor of the same length as the input vectors, giving the pixel values. \item an object of class \code{"distfun"} representing a distance function (created by the command \code{\link{distfun}}). \item an object of class \code{"nnfun"} representing a nearest neighbour function (created by the command \code{\link{nnfun}}). \item a list with entries \code{x, y, z} in the format expected by the standard \code{R} functions \code{\link{image.default}} and \code{\link{contour.default}}. 
That is, \code{z} is a matrix of pixel values, \code{x} and \code{y} are vectors of \eqn{x} and \eqn{y} coordinates respectively, and \code{z[i,j]} is the pixel value for the location \code{(x[i],y[j])}. \item a point pattern (object of class \code{"ppp"}). See the separate documentation for \code{\link{as.im.ppp}}. } The spatial domain (enclosing rectangle) of the pixel image is determined by the argument \code{W}. If \code{W} is absent, the spatial domain is determined by \code{X}. When \code{X} is a function, a matrix, or a single numerical value, \code{W} is required. The pixel array dimensions of the final resulting image are determined by (in priority order) \itemize{ \item the argument \code{eps}, \code{dimyx} or \code{xy} if present; \item the pixel dimensions of the window \code{W}, if it is present and if it is a binary mask; \item the pixel dimensions of \code{X} if it is an image, a binary mask, or a \code{list(x,y,z)}; \item the default pixel dimensions, controlled by \code{\link{spatstat.options}}. } Note that if \code{eps}, \code{dimyx} or \code{xy} is given, this will override the pixel dimensions of \code{X} if it has them. Thus, \code{as.im} can be used to change an image's pixel dimensions. If the argument \code{na.replace} is given, then all \code{NA} entries in the image will be replaced by this value. The resulting image is then defined everwhere on the full rectangular domain, instead of a smaller window. Here \code{na.replace} should be a single value, of the same type as the other entries in the image. If \code{X} is a pixel image that was created by an older version of \pkg{spatstat}, the command \code{X <- as.im(X)} will repair the internal format of \code{X} so that it conforms to the current version of \pkg{spatstat}. } \value{ An image object of class \code{"im"}. 
} \seealso{ Separate documentation for \code{\link{as.im.ppp}} } \examples{ data(demopat) # window object W <- demopat$window plot(W) Z <- as.im(W) image(Z) # function Z <- as.im(function(x,y) {x^2 + y^2}, unit.square()) image(Z) # function with extra arguments f <- function(x, y, x0, y0) { sqrt((x - x0)^2 + (y-y0)^2) } Z <- as.im(f, unit.square(), x0=0.5, y0=0.5) image(Z) # Revisit the Sixties data(letterR) Z <- as.im(f, letterR, x0=2.5, y0=2) image(Z) # usual convention in S stuff <- list(x=1:10, y=1:10, z=matrix(1:100, nrow=10)) Z <- as.im(stuff) # convert to finer grid Z <- as.im(Z, dimyx=256) # pixellate the Dirichlet tessellation Di <- dirichlet(runifpoint(10)) plot(as.im(Di)) plot(Di, add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/pool.rat.Rd0000644000176000001440000000540512237642733014762 0ustar ripleyusers\name{pool.rat} \alias{pool.rat} \title{ Pool Data from Several Ratio Objects } \description{ Pool the data from several ratio objects (objects of class \code{"rat"}) and compute a pooled estimate. } \usage{ \method{pool}{rat}(...) } \arguments{ \item{\dots}{ Objects of class \code{"rat"}. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"rat"} of ratio objects. It is used to combine several estimates of the same quantity when each estimate is a ratio. Each of the arguments \code{\dots} must be an object of class \code{"rat"} representing a ratio object (basically a numerator and a denominator; see \code{\link{rat}}). We assume that these ratios are all estimates of the same quantity. 
If the objects are called \eqn{R_1, \ldots, R_n}{R[1], \dots, R[n]} and if \eqn{R_i}{R[i]} has numerator \eqn{Y_i}{Y[i]} and denominator \eqn{X_i}{X[i]}, so that notionally \eqn{R_i = Y_i/X_i}{R[i] = Y[i]/X[i]}, then the pooled estimate is the ratio-of-sums estimator \deqn{ R = \frac{\sum_i Y_i}{\sum_i X_i}. }{ R = (Y[1]+\dots+Y[n])/(X[1]+\dots+X[n]). } The standard error of \eqn{R} is computed using the delta method as described in Baddeley \emph{et al.} (1993) or Cochran (1977, pp 154, 161). This calculation is implemented only for certain classes of objects where the arithmetic can be performed. This calculation is currently implemented only for objects which also belong to the class \code{"fv"} (function value tables). For example, if \code{\link{Kest}} is called with argument \code{ratio=TRUE}, the result is a suitable object (belonging to the classes \code{"rat"} and \code{"fv"}). Warnings or errors will be issued if the ratio objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of the same class as the input. } \seealso{ \code{\link{rat}}, \code{\link{pool}}, \code{\link{Kest}} } \examples{ K1 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K2 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K3 <- Kest(runifpoint(42), ratio=TRUE, correction="iso") K <- pool(K1, K2, K3) plot(K, pooliso ~ r, shade=c("hiiso", "loiso")) } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Cochran, W.G. (1977) \emph{Sampling techniques}, 3rd edition. New York: John Wiley and Sons. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/Saturated.Rd0000755000176000001440000000155612237642731015164 0ustar ripleyusers\name{Saturated} \alias{Saturated} \title{Saturated Pairwise Interaction model} \description{ Experimental. } \usage{ Saturated(pot, name) } \arguments{ \item{pot}{An S language function giving the user-supplied pairwise interaction potential.} \item{name}{Character string.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ This is experimental. It constructs a member of the ``saturated pairwise'' family \code{\link{pairsat.family}}. } \seealso{ \code{\link{ppm}}, \code{\link{pairsat.family}}, \code{\link{Geyer}}, \code{\link{SatPiece}}, \code{\link{ppm.object}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/cbind.hyperframe.Rd0000755000176000001440000000343712237642732016451 0ustar ripleyusers\name{cbind.hyperframe} \alias{cbind.hyperframe} \alias{rbind.hyperframe} \title{ Combine Hyperframes by Rows or by Columns } \description{ Methods for \code{cbind} and \code{rbind} for hyperframes. } \usage{ \method{cbind}{hyperframe}(...) \method{rbind}{hyperframe}(...) } \arguments{ \item{\dots}{ Any number of hyperframes (objects of class \code{\link{hyperframe}}). } } \details{ These are methods for \code{\link{cbind}} and \code{\link{rbind}} for hyperframes. Note that \emph{all} the arguments must be hyperframes (because of the peculiar dispatch rules of \code{\link{cbind}} and \code{\link{rbind}}). 
To combine a hyperframe with a data frame, one should either convert the data frame to a hyperframe using \code{\link{as.hyperframe}}, or explicitly invoke the function \code{cbind.hyperframe} or \code{rbind.hyperframe}. In other words: if \code{h} is a hyperframe and \code{d} is a data frame, the result of \code{cbind(h,d)} will be the same as \code{cbind(as.data.frame(h), d)}, so that all hypercolumns of \code{h} will be deleted (and a warning will be issued). To combine \code{h} with \code{d} so that all columns of \code{h} are retained, type either \code{cbind(h, as.hyperframe(d))} or \code{cbind.hyperframe(h,d)}. } \value{ Another hyperframe. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{hyperframe}}, \code{\link{as.hyperframe}} } \examples{ lambda <- runif(5, min=10, max=30) X <- lapply(as.list(lambda), function(x) { rpoispp(x) }) h <- hyperframe(lambda=lambda, X=X) g <- hyperframe(id=letters[1:5], Y=rev(X)) gh <- cbind(h, g) hh <- rbind(h, h) } \keyword{spatial} \keyword{manip} spatstat/man/intensity.lpp.Rd0000644000176000001440000000225212237642732016040 0ustar ripleyusers\name{intensity.lpp} %DontDeclareMethods \alias{intensity.lpp} \title{ Empirical Intensity of Point Pattern on Linear Network } \description{ Computes the average number of points per unit length in a point pattern on a linear network } \usage{ \method{intensity}{lpp}(X, ...) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{intensity}} It computes the empirical intensity of a point pattern on a linear network (object of class \code{"lpp"}), i.e. the average density of points per unit length. If the point pattern is multitype, the intensities of the different types are computed separately. 
} \value{ A numeric value (giving the intensity) or numeric vector (giving the intensity for each possible type). } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppp}} } \examples{ intensity(chicago) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/msr.Rd0000755000176000001440000001143512237642733014030 0ustar ripleyusers\name{msr} \alias{msr} \title{ Signed or Vector-Valued Measure } \description{ Defines an object representing a signed measure or vector-valued measure on a spatial domain. } \usage{ msr(qscheme, discrete, density, check=TRUE) } \arguments{ \item{qscheme}{ A quadrature scheme (object of class \code{"quad"} usually extracted from a fitted point process model). } \item{discrete}{ Vector or matrix containing the values (masses) of the discrete component of the measure, for each of the data points in \code{qscheme}. } \item{density}{ Vector or matrix containing values of the density of the diffuse component of the measure, for each of the quadrature points in \code{qscheme}. } \item{check}{ Logical. Whether to check validity of the arguments. } } \details{ This function creates an object that represents a signed or vector valued \emph{measure} on the two-dimensional plane. It is not normally called directly by the user. A signed measure is a classical mathematical object (Diestel and Uhl, 1977) which can be visualised as a collection of electric charges, positive and/or negative, spread over the plane. Electric charges may be concentrated at specific points (atoms), or spread diffusely over a region. An object of class \code{"msr"} represents a signed (i.e. real-valued) or vector-valued measure in the \pkg{spatstat} package. Spatial residuals for point process models (Baddeley et al, 2005, 2008) take the form of a real-valued or vector-valued measure. 
The function \code{\link{residuals.ppm}} returns an object of class \code{"msr"} representing the residual measure. The function \code{msr} would not normally be called directly by the user. It is the low-level creator function that makes an object of class \code{"msr"} from raw data. The first argument \code{qscheme} is a quadrature scheme (object of class \code{"quad"}). It is typically created by \code{\link{quadscheme}} or extracted from a fitted point process model using \code{\link{quad.ppm}}. A quadrature scheme contains both data points and dummy points. The data points of \code{qscheme} are used as the locations of the atoms of the measure. All quadrature points (i.e. both data points and dummy points) of \code{qscheme} are used as sampling points for the density of the continuous component of the measure. The argument \code{discrete} gives the values of the atomic component of the measure for each \emph{data point} in \code{qscheme}. It should be either a numeric vector with one entry for each data point, or a numeric matrix with one row for each data point. The argument \code{density} gives the values of the \emph{density} of the diffuse component of the measure, at each \emph{quadrature point} in \code{qscheme}. It should be either a numeric vector with one entry for each quadrature point, or a numeric matrix with one row for each quadrature point. If both \code{discrete} and \code{density} are vectors (or one-column matrices) then the result is a signed (real-valued) measure. Otherwise, the result is a vector-valued measure, with the dimension of the vector space being determined by the number of columns in the matrices \code{discrete} and/or \code{density}. (If one of these is a \eqn{k}-column matrix and the other is a 1-column matrix, then the latter is replicated to \eqn{k} columns). The class \code{"msr"} has methods for \code{print}, \code{plot} and \code{[}. There is also a function \code{\link{Smooth.msr}} for smoothing a measure. 
} \value{ An object of class \code{"msr"} that can be plotted by \code{\link{plot.msr}}. } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. Diestel, J. and Uhl, J.J. Jr (1977) \emph{Vector measures}. Providence, RI, USA: American Mathematical Society. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{plot.msr}}, \code{\link{Smooth.msr}}, \code{\link{[.msr}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) rp <- residuals(fit, type="pearson") rp rs <- residuals(fit, type="score") rs colnames(rs) # An equivalent way to construct the Pearson residual measure by hand Q <- quad.ppm(fit) lambda <- fitted(fit) slam <- sqrt(lambda) Z <- is.data(Q) m <- msr(Q, discrete=1/slam[Z], density = -slam) m } \keyword{spatial} \keyword{models} spatstat/man/clickpoly.Rd0000755000176000001440000000401712237642732015215 0ustar ripleyusers\name{clickpoly} \alias{clickpoly} \title{Interactively Define a Polygon} \description{ Allows the user to create a polygon by point-and-click in the display. } \usage{ clickpoly(add=FALSE, nv=NULL, np=1, ...) } \arguments{ \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{nv}{ Number of vertices of the polygon (if this is predetermined). } \item{np}{ Number of polygons to create. } \item{\dots}{ Arguments passed to \code{\link{locator}} to control the interactive plot. } } \value{ A window (object of class \code{"owin"}) representing the polygon. 
} \details{ This function allows the user to create a polygonal window by interactively clicking on the screen display. The user is prompted to point the mouse at any desired locations for the polygon vertices, and click the left mouse button to add each point. Interactive input stops after \code{nv} clicks (if \code{nv} was given) or when the middle mouse button is pressed. The return value is a window (object of class \code{"owin"}) representing the polygon. This function uses the \R command \code{\link{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be passed to \code{\link{locator}} through \code{\dots} include \code{pch} (plotting character), \code{cex} (character expansion factor) and \code{col} (colour). See \code{\link{locator}} and \code{\link{par}}. Multiple polygons can also be drawn, by specifying \code{np > 1}. The polygons must be disjoint. The result is a single window object consisting of all the polygons. } \seealso{ \code{\link{identify.ppp}}, \code{\link{clickppp}}, \code{\link{locator}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{iplot} spatstat/man/runiflpp.Rd0000755000176000001440000000210112237642734015055 0ustar ripleyusers\name{runiflpp} \alias{runiflpp} \title{ Uniform Random Points on a Linear Network } \description{ Generates \eqn{n} random points, independently and uniformly distributed, on a linear network. } \usage{ runiflpp(n, L) } \arguments{ \item{n}{ Number of random points to generate. A nonnegative integer, or a vector of integers specifying the number of points of each type. } \item{L}{ A linear network (object of class \code{"linnet"}, see \code{\link{linnet}}). } } \details{ This function uses \code{\link{runifpointOnLines}} to generate the random points. 
} \value{ A point pattern on the linear network, i.e.\ an object of class \code{"lpp"}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{rpoislpp}}, \code{\link{lpp}}, \code{\link{linnet}} } \examples{ data(simplenet) X <- runiflpp(10, simplenet) plot(X) # marked Z <- runiflpp(c(a=10, b=3), simplenet) } \keyword{spatial} \keyword{datagen} spatstat/man/clickjoin.Rd0000755000176000001440000000445212237642732015174 0ustar ripleyusers\name{clickjoin} \alias{clickjoin} \title{ Interactively join vertices on a plot } \description{ Given a point pattern representing a set of vertices, this command gives a point-and-click interface allowing the user to join pairs of selected vertices by edges. } \usage{ clickjoin(X, ..., add = TRUE, m = NULL, join = TRUE) } \arguments{ \item{X}{ Point pattern of vertices. An object of class \code{"ppp"}. } \item{\dots}{ Arguments passed to \code{\link{segments}} to control the plotting of the new edges. } \item{add}{ Logical. Whether the point pattern \code{X} should be added to the existing plot (\code{add=TRUE}) or a new plot should be created (\code{add=FALSE}). } \item{m}{ Optional. Logical matrix specifying an initial set of edges. There is an edge between vertices \code{i} and \code{j} if \code{m[i,j] = TRUE}. } \item{join}{ Optional. If \code{TRUE}, then each user click will join a pair of vertices. If \code{FALSE}, then each user click will delete an existing edge. This is only relevant if \code{m} is supplied. } } \details{ This function makes it easier for the user to create a linear network or a planar graph, given a set of vertices. The function first displays the point pattern \code{X}, then repeatedly prompts the user to click on a pair of points in \code{X}. Each selected pair of points will be joined by an edge. 
The function returns a logical matrix which has entries equal to \code{TRUE} for each pair of vertices joined by an edge. The selection of points is performed using \code{\link{identify.ppp}} which typically expects the user to click the left mouse button. This point-and-click interaction continues until the user terminates it, by pressing the middle mouse button, or pressing the right mouse button and selecting \code{stop}. The return value can be used in \code{\link{linnet}} to create a linear network. } \value{ Logical matrix \code{m} with value \code{m[i,j] = TRUE} for every pair of vertices \code{X[i]} and \code{X[j]} that should be joined by an edge. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{linnet}}, \code{\link{clickppp}} } \keyword{spatial} \keyword{datagen} spatstat/man/idw.Rd0000755000176000001440000001005212237642732014003 0ustar ripleyusers\name{idw} \alias{idw} \title{Inverse-distance weighted smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations using inverse-distance weighting. } \usage{ idw(X, power=2, at="pixels", ...) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{power}{Numeric. Power of distance used in the weighting.} \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the pixel resolution of the result.} } \details{ This function performs spatial smoothing of numeric values observed at a set of irregular locations. Smoothing is performed by inverse distance weighting. 
If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is \deqn{ g(u) = \frac{\sum_i w_i v_i}{\sum_i w_i} }{ g(u) = (sum of w[i] * v[i])/(sum of w[i]) } where the weights are the inverse \eqn{p}-th powers of distance, \deqn{ w_i = \frac 1 {d(u,x_i)^p} }{ w[i] = 1/d(u,x[i])^p } where \eqn{d(u,x_i) = ||u - x_i||}{d(u,x[i])} is the Euclidean distance from \eqn{u} to \eqn{x_i}{x[i]}. The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame. Then the smoothing procedure is applied to each column of marks. If \code{at="pixels"} (the default), the smoothed mark value is calculated at a grid of pixels, and the result is a pixel image. The arguments \code{\dots} control the pixel resolution. See \code{\link{as.mask}}. If \code{at="points"}, the smoothed mark values are calculated at the data points only, using a leave-one-out rule (the mark value at a data point is excluded when calculating the smoothed value for that point). An alternative to inverse-distance weighting is kernel smoothing, which is performed by \code{\link{Smooth.ppp}}. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). Pixel values are values of the interpolated function. \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. 
} \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{listof}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } } \seealso{ \code{\link{density.ppp}}, \code{\link{ppp.object}}, \code{\link{im.object}}. See \code{\link{Smooth.ppp}} for kernel smoothing and \code{\link{nnmark}} for nearest-neighbour interpolation. To perform other kinds of interpolation, see also the \code{akima} package. } \examples{ # data frame of marks: trees marked by diameter and height data(finpines) plot(idw(finpines)) idw(finpines, at="points")[1:5,] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/rlabel.Rd0000755000176000001440000000442612237642734014473 0ustar ripleyusers\name{rlabel} \alias{rlabel} \title{Random Re-Labelling of Point Pattern} \description{ Randomly allocates marks to a point pattern, or permutes the existing marks, or resamples from the existing marks. } \usage{ rlabel(X, labels=marks(X), permute=TRUE) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}, \code{"lpp"}, \code{"pp3"} or \code{"ppx"}). } \item{labels}{ Vector of values from which the new marks will be drawn at random. Defaults to the vector of existing marks. } \item{permute}{ Logical value indicating whether to generate new marks by randomly permuting \code{labels} or by drawing a random sample with replacement. } } \value{ A marked point pattern (of the same class as \code{X}). 
} \details{ This very simple function allocates random marks to an existing point pattern \code{X}. It is useful for hypothesis testing purposes. In the simplest case, the command \code{rlabel(X)} yields a point pattern obtained from \code{X} by randomly permuting the marks of the points. If \code{permute=TRUE}, then \code{labels} should be a vector of length equal to the number of points in \code{X}. The result of \code{rlabel} will be a point pattern with locations given by \code{X} and marks given by a random permutation of \code{labels} (i.e. a random sample without replacement). If \code{permute=FALSE}, then \code{labels} may be a vector of any length. The result of \code{rlabel} will be a point pattern with locations given by \code{X} and marks given by a random sample from \code{labels} (with replacement). } \seealso{ \code{\link{marks<-}} to assign arbitrary marks. } \examples{ data(amacrine) # Randomly permute the marks "on" and "off" # Result always has 142 "off" and 152 "on" Y <- rlabel(amacrine) # randomly allocate marks "on" and "off" # with probabilities p(off) = 0.48, p(on) = 0.52 Y <- rlabel(amacrine, permute=FALSE) # randomly allocate marks "A" and "B" with equal probability data(cells) Y <- rlabel(cells, labels=factor(c("A", "B")), permute=FALSE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/copper.Rd0000755000176000001440000000750012237642732014514 0ustar ripleyusers\name{copper} \alias{copper} \docType{data} \title{ Berman-Huntington points and lines data } \description{ These data come from an intensive geological survey of a 70 x 158 km region in central Queensland, Australia. They consist of 67 points representing copper ore deposits, and 146 line segments representing geological `lineaments'. 
Lineaments are linear features, visible on a satellite image, that are believed to consist largely of geological faults (Berman, 1986, p. 55). It would be of great interest to predict the occurrence of copper deposits from the lineament pattern, since the latter can easily be observed on satellite images. These data were introduced and analysed by Berman (1986). They have also been studied by Berman and Diggle (1989), Berman and Turner (1992), Baddeley and Turner (2000, 2005), Foxall and Baddeley (2002) and Baddeley et al (2005). Many analyses have been performed on the southern half of the data only. This subset is also provided. } \format{ \code{copper} is a list with the following entries: \describe{ \item{Points}{a point pattern (object of class \code{"ppp"}) representing the full point pattern of copper deposits. See \code{\link{ppp.object}} for details of the format. } \item{Lines}{a line segment pattern (object of class \code{"psp"}) representing the lineaments in the full dataset. See \code{\link{psp.object}} for details of the format. } \item{SouthWindow}{the window delineating the southern half of the study region. An object of class \code{"owin"}. } \item{SouthPoints}{the point pattern of copper deposits in the southern half of the study region. An object of class \code{"ppp"}. } \item{SouthLines}{the line segment pattern of the lineaments in the southern half of the study region. An object of class \code{"psp"}. } } } \usage{data(copper)} \examples{ data(copper) # Plot full dataset plot(copper$Points) plot(copper$Lines, add=TRUE) # Plot southern half of data plot(copper$SouthPoints) plot(copper$SouthLines, add=TRUE) \dontrun{ Z <- distmap(copper$SouthLines) plot(Z) X <- copper$SouthPoints ppm(X, ~D, covariates=list(D=Z)) } } \source{ Dr Jonathan Huntington, CSIRO Earth Science and Resource Engineering, Sydney, Australia. Coordinates kindly provided by Dr. Mark Berman and Dr. Andy Green, CSIRO, Sydney, Australia. } \references{ Baddeley, A. 
and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A. and Turner, R. (2005) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. Berman, M. (1986). Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. Berman, M. and Diggle, P.J. (1989) Estimating Weighted Integrals of the Second-order Intensity of a Spatial Point Process. \emph{Journal of the Royal Statistical Society, series B} \bold{51}, 81--92. Berman, M. and Turner, T.R. (1992) Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41}, 31--38. Foxall, R. and Baddeley, A. (2002) Nonparametric measures of association between a spatial point process and a random set, with geological applications. \emph{Applied Statistics} \bold{51}, 165--182. } \keyword{datasets} \keyword{spatial} spatstat/man/Kscaled.Rd0000755000176000001440000002002012237642731014561 0ustar ripleyusers\name{Kscaled} \alias{Kscaled} \alias{Lscaled} \title{Locally Scaled K-function} \description{ Estimates the template \eqn{K} function of a locally-scaled point process. } \usage{ Kscaled(X, lambda=NULL, \dots, r = NULL, breaks = NULL, correction=c("border", "isotropic", "translate"), sigma=NULL, varcov=NULL) Lscaled(\dots) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the locally scaled \eqn{K} function will be computed. 
An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()}. } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Arguments passed from \code{Lscaled} to \code{Kscaled} and from \code{Kscaled} to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the locally scaled \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ An alternative to the argument \code{r}. Not normally invoked by the user. See Details. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. 
The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\rm scaled}(r)}{Kscaled(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. } \details{ \code{Kscaled} computes an estimate of the \eqn{K} function for a locally scaled point process. \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. Locally scaled point processes are a class of models for inhomogeneous point patterns, introduced by Hahn et al (2003). They include inhomogeneous Poisson processes, and many other models. The template \eqn{K} function of a locally-scaled process is a counterpart of the ``ordinary'' Ripley \eqn{K} function, in which the distances between points of the process are measured on a spatially-varying scale (such that the locally rescaled process has unit intensity). The template \eqn{K} function is an indicator of interaction between the points. For an inhomogeneous Poisson process, the theoretical template \eqn{K} function is approximately equal to \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Values \eqn{K_{\rm scaled}(r) > \pi r^2}{Kscaled(r) > pi * r^2} are suggestive of clustering. \code{Kscaled} computes an estimate of the template \eqn{K} function and \code{Lscaled} computes the corresponding \eqn{L} function \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. The locally scaled interpoint distances are computed using an approximation proposed by Hahn (2007). The Euclidean distance between two points is multiplied by the average of the square roots of the intensity values at the two points. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. 
} \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. 
Edge corrections are used to correct bias in the estimation of \eqn{K_{\rm scaled}}{Kscaled}. First the interpoint distances are rescaled, and then edge corrections are applied as in \code{\link{Kest}}. See \code{\link{Kest}} for details of the edge corrections and the options for the argument \code{correction}. The pair correlation function can also be applied to the result of \code{Kscaled}; see \code{\link{pcf}} and \code{\link{pcf.fv}}. } \references{ Hahn, U. (2007) \emph{Global and Local Scaling in the Statistics of Spatial Point Processes}. Habilitationsschrift, Universitaet Augsburg. Hahn, U., Jensen, E.BV., van Lieshout, M.N.M. and Nielsen, L.S. (2003) Inhomogeneous spatial point processes by location-dependent scaling. \emph{Advances in Applied Probability} \bold{35}, 319--336. Prokesova, M., Hahn, U. and Vedel Jensen, E.B. (2006) Statistics for locally scaled point patterns. In A. Baddeley, P. Gregori, J. Mateu, R. Stoica and D. Stoyan (eds.) \emph{Case Studies in Spatial Point Pattern Modelling}. Lecture Notes in Statistics 185. New York: Springer Verlag. Pages 99--123. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(bronzefilter) X <- unmark(bronzefilter) K <- Kscaled(X) fit <- ppm(X, ~x) lam <- predict(fit) K <- Kscaled(X, lam) } \author{Ute Hahn, Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/append.psp.Rd0000755000176000001440000000231712237642732015275 0ustar ripleyusers\name{append.psp} \alias{append.psp} \title{Combine Two Line Segment Patterns} \description{ Combine two line segment patterns into a single pattern. } \usage{ append.psp(A, B) } \arguments{ \item{A,B}{ Line segment patterns (objects of class \code{"psp"}). } } \value{ Another line segment pattern (object of class \code{"psp"}). 
} \details{ This function is used to superimpose two line segment patterns \code{A} and \code{B}. The two patterns must have \bold{identical} windows. If one pattern has marks, then the other must also have marks of the same type. If the marks are data frames then the number of columns of these data frames, and the names of the columns must be identical. (To combine two point patterns, see \code{superimpose}). } \seealso{ \code{\link{psp}}, \code{\link{as.psp}}, \code{\link{superimpose}}, } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- psp(runif(5), runif(5), runif(5), runif(5), window=owin()) append.psp(X,Y) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/ppm.object.Rd0000755000176000001440000001375612237642733015275 0ustar ripleyusers\name{ppm.object} \alias{ppm.object} %DoNotExport \alias{methods.ppm} %DoNotExport \title{Class of Fitted Point Process Models} \description{ A class \code{ppm} to represent a fitted stochastic model for a point process. The output of \code{\link{ppm}}. } \details{ An object of class \code{ppm} represents a stochastic point process model that has been fitted to a point pattern dataset. Typically it is the output of the model fitter, \code{\link{ppm}}. 
The class \code{ppm} has methods for the following standard generic functions: \tabular{lll}{ generic \tab method \tab description \cr \code{print} \tab \code{\link{print.ppm}} \tab print details \cr \code{plot} \tab \code{\link{plot.ppm}} \tab plot fitted model \cr \code{predict} \tab \code{\link{predict.ppm}} \tab fitted intensity and conditional intensity \cr \code{fitted} \tab \code{\link{fitted.ppm}} \tab fitted intensity \cr \code{coef} \tab \code{\link{coef.ppm}} \tab fitted coefficients of model \cr \code{anova} \tab \code{\link{anova.ppm}} \tab Analysis of Deviance \cr \code{formula} \tab \code{\link{formula.ppm}} \tab Extract model formula \cr \code{terms} \tab \code{\link{terms.ppm}} \tab Terms in the model formula \cr \code{labels} \tab \code{labels.ppm} \tab Names of estimable terms in the model formula \cr \code{residuals} \tab \code{\link{residuals.ppm}} \tab Point process residuals \cr \code{simulate} \tab \code{\link{simulate.ppm}} \tab Simulate the fitted model \cr \code{update} \tab \code{\link{update.ppm}} \tab Change or refit the model \cr \code{vcov} \tab \code{\link{vcov.ppm}} \tab Variance/covariance matrix of parameter estimates \cr \code{model.frame} \tab \code{\link{model.frame.ppm}} \tab Model frame \cr \code{model.matrix} \tab \code{\link{model.matrix.ppm}} \tab Design matrix \cr \code{logLik} \tab \code{\link{logLik.ppm}} \tab log \emph{pseudo} likelihood \cr \code{extractAIC} \tab \code{\link{extractAIC.ppm}} \tab pseudolikelihood counterpart of AIC \cr \code{nobs} \tab \code{\link{nobs.ppm}} \tab number of observations } Objects of class \code{ppm} can also be handled by the following standard functions, without requiring a special method: \tabular{ll}{ name \tab description \cr \code{\link{confint}} \tab Confidence intervals for parameters \cr \code{\link{step}} \tab Stepwise model selection \cr \code{\link{drop1}} \tab One-step model improvement \cr \code{\link{add1}} \tab One-step model improvement } The class \code{ppm} also has 
methods for the following generic functions defined in the \pkg{spatstat} package: \tabular{lll}{ generic \tab method \tab description \cr \code{\link{as.interact}} \tab \code{\link{as.interact.ppm}} \tab Interpoint interaction structure \cr \code{\link{as.owin}} \tab \code{\link{as.owin.ppm}} \tab Observation window of data \cr \code{\link{bermantest}} \tab \code{\link{bermantest.ppm}} \tab Berman's test \cr \code{\link{envelope}} \tab \code{\link{envelope.ppm}} \tab Simulation envelopes \cr \code{\link{fitin}} \tab \code{\link{fitin.ppm}} \tab Fitted interaction \cr \code{\link{is.marked}} \tab \code{\link{is.marked.ppm}} \tab Determine whether the model is marked \cr \code{\link{is.multitype}} \tab \code{\link{is.multitype.ppm}} \tab Determine whether the model is multitype \cr \code{\link{is.poisson}} \tab \code{\link{is.poisson.ppm}} \tab Determine whether the model is Poisson \cr \code{\link{is.stationary}} \tab \code{\link{is.stationary.ppm}} \tab Determine whether the model is stationary \cr \code{\link{kstest}} \tab \code{\link{kstest.ppm}} \tab Kolmogorov-Smirnov test \cr \code{\link{quadrat.test}} \tab \code{\link{quadrat.test.ppm}} \tab Quadrat counting test \cr \code{\link{reach}} \tab \code{\link{reach.ppm}} \tab Interaction range of model \cr \code{\link{rmhmodel}} \tab \code{\link{rmhmodel.ppm}} \tab Model in a form that can be simulated \cr \code{\link{rmh}} \tab \code{\link{rmh.ppm}} \tab Perform simulation \cr \code{\link{unitname}} \tab \code{\link{unitname.ppm}} \tab Name of unit of length } Information about the data (to which the model was fitted) can be extracted using \code{\link{data.ppm}}, \code{\link{dummy.ppm}} and \code{\link{quad.ppm}}. 
} \section{Internal format}{ If you really need to get at the internals, a \code{ppm} object contains at least the following entries: \tabular{ll}{ \code{coef} \tab the fitted regular parameters (as returned by \code{glm}) \cr \code{trend} \tab the trend formula or \code{NULL} \cr \code{interaction} \tab the point process interaction family (an object of class \code{"interact"}) or \code{NULL} \cr \code{Q} \tab the quadrature scheme used \cr \code{maxlogpl} \tab the maximised value of log pseudolikelihood \cr \code{correction} \tab name of edge correction method used \cr } See \code{\link{ppm}} for explanation of these concepts. The irregular parameters (e.g. the interaction radius of the Strauss process) are encoded in the \code{interaction} entry. However see the Warnings. } \seealso{ \code{\link{ppm}}, \code{\link{coef.ppm}}, \code{\link{fitted.ppm}}, \code{\link{print.ppm}}, \code{\link{predict.ppm}}, \code{\link{plot.ppm}}. } \section{Warnings}{ The internal representation of \code{ppm} objects may change slightly between releases of the \pkg{spatstat} package. } \examples{ data(cells) fit <- ppm(cells, ~ x, Strauss(0.1), correction="periodic") fit coef(fit) \dontrun{ pred <- predict(fit) } pred <- predict(fit, ngrid=20, type="trend") \dontrun{ plot(fit) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{attribute} spatstat/man/rcell.Rd0000755000176000001440000000613012237642733014324 0ustar ripleyusers\name{rcell} \alias{rcell} \title{Simulate Baddeley-Silverman Cell Process} \description{ Generates a random point pattern, a simulated realisation of the Baddeley-Silverman cell process model. } \usage{ rcell(win=square(1), nx=NULL, ny=nx, \dots, dx=NULL, dy=dx, N=10) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. 
} \item{nx}{ Number of columns of cells in the window. Incompatible with \code{dx}. } \item{ny}{ Number of rows of cells in the window. Incompatible with \code{dy}. } \item{\dots}{Ignored.} \item{dx}{ Width of the cells. Incompatible with \code{nx}. } \item{dy}{ Height of the cells. Incompatible with \code{ny}. } \item{N}{ Integer. Distributional parameter: the maximum number of random points in each cell. Passed to \code{\link{rcellnumber}}. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ This function generates a simulated realisation of the \dQuote{cell process} (Baddeley and Silverman, 1984), a random point process with the same second-order properties as the uniform Poisson process. In particular, the \eqn{K} function of this process is identical to the \eqn{K} function of the uniform Poisson process (aka Complete Spatial Randomness). The same holds for the pair correlation function and all other second-order properties. The cell process is a counterexample to the claim that the \eqn{K} function completely characterises a point pattern. A cell process is generated by dividing space into equal rectangular tiles. In each tile, a random number of random points is placed. By default, there are either \eqn{0}, \eqn{1} or \eqn{10} points, with probabilities \eqn{1/10}, \eqn{8/9} and \eqn{1/90} respectively. The points within a tile are independent and uniformly distributed in that tile, and the numbers of points in different tiles are independent random integers. The tile width is determined either by the number of columns \code{nx} or by the horizontal spacing \code{dx}. The tile height is determined either by the number of rows \code{ny} or by the vertical spacing \code{dy}. The cell process is then generated in these tiles. The random numbers of points are generated by \code{\link{rcellnumber}}. Some of the resulting random points may lie outside the window \code{win}: if they do, they are deleted. 
The result is a point pattern inside the window \code{win}. } \seealso{ \code{\link{rcellnumber}}, \code{\link{rstrat}}, \code{\link{rsyst}}, \code{\link{runifpoint}}, \code{\link{Kest}} } \examples{ X <- rcell(nx=15) plot(X) plot(Kest(X)) } \references{ Baddeley, A.J. and Silverman, B.W. (1984) A cautionary example on the use of second-order methods for analyzing point patterns. \emph{Biometrics} \bold{40}, 1089-1094. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/interp.colourmap.Rd0000644000176000001440000000300712237642732016520 0ustar ripleyusers\name{interp.colourmap} \alias{interp.colourmap} \title{ Interpolate smoothly between specified colours } \description{ Given a colourmap object which maps numbers to colours, this function interpolates smoothly between the colours, yielding a new colour map. } \usage{ interp.colourmap(m, n = 512) } \arguments{ \item{m}{ A colour map (object of class \code{"colourmap"}). } \item{n}{ Number of colour steps to be created in the new colour map. } } \details{ Given a colourmap object \code{m}, which maps numerical values to colours, this function interpolates the mapping, yielding a new colour map. This makes it easy to build a colour map that has smooth gradation between different colours or shades. First specify a small vector of numbers \code{x} which should be mapped to specific colours \code{y}. Use \code{m <- colourmap(y, inputs=x)} to create a colourmap that represents this simple mapping. Then apply \code{interp.colourmap(m)} to obtain a smooth transition between these points. } \value{ Another colour map (object of class \code{"colourmap"}). } \seealso{ \code{\link{colourmap}}, \code{\link{tweak.colourmap}}, \code{\link[spatstat:colourtools]{colourtools}}. 
} \examples{ co <- colourmap(inputs=c(0, 0.5, 1), c("black", "red", "white")) plot(interp.colourmap(co)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{color} spatstat/man/overlap.owin.Rd0000644000176000001440000000177112237642733015651 0ustar ripleyusers\name{overlap.owin} \alias{overlap.owin} \title{ Compute Area of Overlap } \description{ Computes the area of the overlap (intersection) of two windows. } \usage{ overlap.owin(A, B) } \arguments{ \item{A,B}{ Windows (objects of class \code{"owin"}). } } \details{ This function computes the area of the overlap between the two windows \code{A} and \code{B}. If one of the windows is a binary mask, then both windows are converted to masks on the same grid, and the area is computed by counting pixels. Otherwise, the area is computed analytically (using the discrete Stokes theorem). } \value{ A single numeric value. } \seealso{ \code{\link{intersect.owin}}, \code{\link{area.owin}}, \code{\link{setcov}}. } \examples{ A <- square(1) B <- shift(A, c(0.3, 0.2)) overlap.owin(A, B) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math}spatstat/man/cauchy.estK.Rd0000644000176000001440000001313512251535221015371 0ustar ripleyusers\name{cauchy.estK} \alias{cauchy.estK} \title{Fit the Neyman-Scott cluster process with Cauchy kernel} \description{ Fits the Neyman-Scott Cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast. } \usage{ cauchy.estK(X, startpar=c(kappa=1,eta2=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. 
} 
  \item{startpar}{
    Vector of starting values for the parameters of the model.
  }
  \item{lambda}{
    Optional. An estimate of the intensity of the point process.
  }
  \item{q,p}{
    Optional. Exponents for the contrast criterion.
  }
  \item{rmin, rmax}{
    Optional. The interval of \eqn{r} values for the contrast criterion.
  }
  \item{\dots}{
    Optional arguments passed to \code{\link[stats]{optim}}
    to control the optimisation algorithm. See Details.
  }
}
\details{
  This algorithm fits the Neyman-Scott cluster point process model
  with Cauchy kernel to a point pattern dataset
  by the Method of Minimum Contrast, using the \eqn{K} function.

  The argument \code{X} can be either
  \describe{
    \item{a point pattern:}{An object of class \code{"ppp"}
      representing a point pattern dataset.
      The \eqn{K} function of the point pattern will be computed
      using \code{\link{Kest}}, and the method of minimum contrast
      will be applied to this.
    }
    \item{a summary statistic:}{An object of class \code{"fv"} containing
      the values of a summary statistic, computed for a point pattern
      dataset. The summary statistic should be the \eqn{K} function,
      and this object should have been obtained by a call to
      \code{\link{Kest}} or one of its relatives.
    }
  }

  The algorithm fits the Neyman-Scott cluster point process
  with Cauchy kernel to \code{X},
  by finding the parameters of the Neyman-Scott Cauchy model
  which give the closest match between the
  theoretical \eqn{K} function of the Neyman-Scott Cauchy process
  and the observed \eqn{K} function.
  For a more detailed explanation of the Method of Minimum Contrast,
  see \code{\link{mincontrast}}.

  The model is described in Jalilian et al (2013).
It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. For computational reasons, the optimisation procedure uses the parameter \code{eta2}, which is equivalent to \code{4 * omega^2} where \code{omega} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. 
There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2012) Cauchy cluster process. \emph{Metrika}, to appear. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{vargamma.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rCauchy}} to simulate the model. } \examples{ u <- cauchy.estK(redwood) u plot(u) } \keyword{spatial} \keyword{models} spatstat/man/hyytiala.Rd0000755000176000001440000000231112237642732015043 0ustar ripleyusers\name{hyytiala} \alias{hyytiala} \docType{data} \title{ Scots pines and other trees at Hyytiala } \description{ This dataset is a spatial point pattern of trees recorded at \ifelse{latex}{\out{Hyyti\"{a}l\"{a}}}{Hyytiala}, Finland. The majority of the trees are Scots pines. See Kokkila et al (2002). The dataset \code{hyytiala} is a point pattern (object of class \code{"ppp"}) containing the spatial coordinates of each tree, marked by species (a factor with levels \code{aspen}, \code{birch}, \code{pine} and \code{rowan}). The survey region is a 20 by 20 metre square. 
Coordinates are given in metres. } \usage{data(hyytiala)} \examples{ data(hyytiala) plot(hyytiala, cols=2:5) } \source{ Nicolas Picard } \references{ Kokkila, T., \ifelse{latex}{\out{M{\"a}kel{\"a}}}{Makela}, A. and Nikinmaa E. (2002) A method for generating stand structures using Gibbs marked point process. \emph{Silva Fennica} \bold{36} 265--277. Picard, N, Bar-Hen, A., Mortier, F. and Chadoeuf, J. (2009) The multi-scale marked area-interaction point process: a model for the spatial pattern of trees. \emph{Scandinavian Journal of Statistics} \bold{36} 23--41 } \keyword{datasets} \keyword{spatial} spatstat/man/linearpcfdot.Rd0000644000176000001440000000555612237642732015704 0ustar ripleyusers\name{linearpcfdot} \alias{linearpcfdot} \title{ Multitype Pair Correlation Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype pair correlation function from points of type \eqn{i} to points of any type. } \usage{ linearpcfdot(X, i, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. 
} \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link[spatstat]{pcfdot}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross}}, \code{\link[spatstat]{linearpcf}}. \code{\link[spatstat]{pcfcross}}. } \examples{ data(chicago) g <- linearpcfdot(chicago, "assault") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/pairdist.Rd0000755000176000001440000000272312237642733015046 0ustar ripleyusers\name{pairdist} \alias{pairdist} \title{Pairwise distances} \description{ Computes the matrix of distances between all pairs of `things' in a dataset } \usage{ pairdist(X, \dots) } \arguments{ \item{X}{ Object specifying the locations of a set of `things' (such as a set of points or a set of line segments). 
}
  \item{\dots}{
    Further arguments depending on the method.
  }
}
\value{
  A square matrix whose \code{[i,j]} entry is the distance
  between the `things' numbered \code{i} and \code{j}.
}
\details{
  Given a dataset \code{X}
  (representing either a point pattern or
  a line segment pattern)
  \code{pairdist} computes the distance between each pair of
  `things' in the dataset, and returns a matrix containing these
  distances.

  The function \code{pairdist} is generic, with
  methods for point patterns (objects of class \code{"ppp"}),
  line segment patterns (objects of class \code{"psp"})
  and a default method. See the documentation
  for \code{\link{pairdist.ppp}}, \code{\link{pairdist.psp}}
  or \code{\link{pairdist.default}} for details.
}
\seealso{
  \code{\link{pairdist.ppp}},
  \code{\link{pairdist.psp}},
  \code{\link{pairdist.default}},
  \code{\link{crossdist}},
  \code{\link{nndist}},
  \code{\link{Kest}}
}
\author{Pavel Grabarnik
  \email{pavel.grabar@issp.serpukhov.su}
  and
  Adrian Baddeley
  \email{Adrian.Baddeley@uwa.edu.au}
  \url{http://www.maths.uwa.edu.au/~adrian/}
}
\keyword{spatial}
\keyword{math}
spatstat/man/residuals.mppm.Rd0000644000176000001440000000473312241443112016153 0ustar ripleyusers\name{residuals.mppm}
\alias{residuals.mppm}
\title{Residuals for Point Process Model Fitted to Multiple Point Patterns}
\description{
  Given a point process model fitted to multiple point patterns,
  compute residuals for each pattern.
}
\usage{
\method{residuals}{mppm}(object, type = "raw", ...,
                        fittedvalues = fitted.mppm(object))
}
\arguments{
  \item{object}{Fitted point process model (object of class \code{"mppm"}).}
  \item{\dots}{Ignored.}
  \item{type}{Type of residuals: either \code{"raw"}, \code{"pearson"}
    or \code{"inverse"}. Partially matched.}
  \item{fittedvalues}{Advanced use only.
    Fitted values of the model to be used in the calculation.
  }
}
\details{
  Baddeley et al (2005) defined residuals for the fit of
  a point process model to spatial point pattern data.
For an explanation of these residuals, see the help file for \code{\link[spatstat]{residuals.ppm}}. This function computes the residuals for a point process model fitted to \emph{multiple} point patterns. The \code{object} should be an object of class \code{"mppm"} obtained from \code{\link{mppm}}. The return value is a list. The number of entries in the list equals the number of point patterns in the original data. Each entry in the list has the same format as the output of \code{\link[spatstat]{residuals.ppm}}. That is, each entry in the list is a signed measure (object of class \code{"msr"}) giving the residual measure for the corresponding point pattern. } \value{ A list of signed measures (objects of class \code{"msr"}) giving the residual measure for each of the original point patterns. See Details. } \examples{ data(waterstriders) fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) r <- residuals(fit) # compute total residual for each point pattern rtot <- sapply(r, integral.msr) # standardise the total residuals areas <- sapply(windows.mppm(fit), area.owin) rtot/sqrt(areas) } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. 
} \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \seealso{ \code{\link{mppm}}, \code{\link{residuals.mppm}}, \code{\link{mppm}} } \keyword{spatial} \keyword{models} spatstat/man/as.mask.Rd0000755000176000001440000000623312237642732014563 0ustar ripleyusers\name{as.mask} \alias{as.mask} \title{Pixel Image Approximation of a Window} \description{ Obtain a discrete (pixel image) approximation of a given window } \usage{ as.mask(w, eps=NULL, dimyx=NULL, xy=NULL) } \arguments{ \item{w}{A window (object of class \code{"owin"}) or data acceptable to \code{\link{as.owin}}.} \item{eps}{(optional) width and height of pixels.} \item{dimyx}{(optional) pixel array dimensions} \item{xy}{(optional) pixel coordinates} } \value{ A window (object of class \code{"owin"}) of type \code{"mask"} representing a binary pixel image. } \details{ This function generates a rectangular grid of locations in the plane, tests whether each of these locations lies inside the window \code{w}, and stores the results as a binary pixel image or `mask' (an object of class \code{"owin"}, see \code{\link{owin.object}}). The most common use of this function is to approximate the shape of another window \code{w} by a binary pixel image. In this case, we will usually want to have a very fine grid of pixels. This function can also be used to generate a coarsely-spaced grid of locations inside a window, for purposes such as subsampling and prediction. The grid spacing and location are controlled by the arguments \code{eps}, \code{dimyx} and \code{xy}, which are mutually incompatible. If \code{eps} is given, then it determines the grid spacing. If \code{eps} is a single number, then the grid spacing will be approximately \code{eps} in both the \eqn{x} and \eqn{y} directions. 
If \code{eps} is a vector of length 2, then the grid spacing will be approximately \code{eps[1]} in the \eqn{x} direction and \code{eps[2]} in the \eqn{y} direction. If \code{dimyx} is given, then the pixel grid will be an \eqn{m \times n}{m x n} rectangular grid where \eqn{m, n} are given by \code{dimyx[2]}, \code{dimyx[1]} respectively. \bold{Warning:} \code{dimyx[1]} is the number of pixels in the \eqn{y} direction, and \code{dimyx[2]} is the number in the \eqn{x} direction. If \code{xy} is given, then this should be a structure containing two elements \code{x} and \code{y} which are the vectors of \eqn{x} and \code{y} coordinates of the margins of the grid. The pixel coordinates will be generated from these two vectors. In this case \code{w} may be omitted. If neither \code{eps} nor \code{dimyx} nor \code{xy} is given, the pixel raster dimensions are obtained from \code{\link{spatstat.options}("npixel")}. There is no inverse of this function. However, the function \code{\link{as.polygonal}} will compute a polygonal approximation of a binary mask. } \seealso{ \code{\link{owin.object}}, \code{\link{as.rectangle}}, \code{\link{as.polygonal}}, \code{\link{spatstat.options}} } \examples{ w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7))) \dontrun{plot(w)} m <- as.mask(w) \dontrun{plot(m)} x <- 1:9 y <- seq(0.25, 9.75, by=0.5) m <- as.mask(w, xy=list(x=x, y=y)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/as.data.frame.hyperframe.Rd0000755000176000001440000000314512237642732017772 0ustar ripleyusers\name{as.data.frame.hyperframe} \alias{as.data.frame.hyperframe} \title{Coerce Hyperframe to Data Frame} \description{ Converts a hyperframe to a data frame. 
}
\usage{
\method{as.data.frame}{hyperframe}(x, row.names = NULL,
                                  optional = FALSE, ...,
                                  discard=TRUE, warn=TRUE)
}
\arguments{
  \item{x}{Hyperframe (object of class \code{"hyperframe"}).}
  \item{row.names}{Optional character vector of row names.}
  \item{optional}{Argument passed to \code{\link{as.data.frame}}
    controlling what happens to row names.}
  \item{\dots}{Ignored.}
  \item{discard}{Logical. Whether to discard columns of the hyperframe
    that do not contain atomic data. See Details.
  }
  \item{warn}{Logical. Whether to issue a warning when columns are discarded.}
}
\details{
  This is a method for the generic function \code{\link{as.data.frame}}
  for the class of hyperframes (see \code{\link{hyperframe}}).

  If \code{discard=TRUE}, any columns of the hyperframe that
  do not contain atomic data will be removed
  (and a warning will be issued if \code{warn=TRUE}).
  If \code{discard=FALSE}, then such columns are converted to
  strings indicating what class of data
  they originally contained.
}
\value{
  A data frame.
}
\examples{
  h <- hyperframe(X=1:3, Y=letters[1:3], f=list(sin, cos, tan))
  as.data.frame(h, discard=TRUE, warn=FALSE)
  as.data.frame(h, discard=FALSE)
}
\author{Adrian Baddeley
  \email{Adrian.Baddeley@uwa.edu.au}
  \url{http://www.maths.uwa.edu.au/~adrian/}
  and Rolf Turner
  \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{manip}
spatstat/man/affine.linnet.Rd0000644000176000001440000000407412237642732015744 0ustar ripleyusers\name{affine.linnet}
%DontDeclareMethods
\alias{affine.linnet}
\alias{shift.linnet}
\alias{rotate.linnet}
\alias{rescale.linnet}
\alias{scalardilate.linnet}
\title{Apply Geometrical Transformations to a Linear Network}
\description{
  Apply geometrical transformations to a linear network.
} \usage{ \method{affine}{linnet}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) \method{shift}{linnet}(X, \dots) \method{rotate}{linnet}(X, angle=pi/2, \dots) \method{scalardilate}{linnet}(X, f, \dots) \method{rescale}{linnet}(X, s) } \arguments{ \item{X}{Linear network (object of class \code{"linnet"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{angle}{Rotation angle in radians.} \item{f}{Scalar dilation factor.} \item{s}{ Unit conversion factor: the new units are \code{s} times the old units. } \item{\dots}{ Arguments passed to other methods. } } \value{ Another linear network (of class \code{"linnet"}) representing the result of applying the geometrical transformation. } \details{ These functions are methods for the generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{rescale}} and \code{\link{scalardilate}} applicable to objects of class \code{"linnet"}. All of these functions perform geometrical transformations on the object \code{X}, except for \code{rescale}, which simply rescales the units of length. } \seealso{ \code{\link{linnet}} and \code{\link{as.linnet}}. Generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{rescale}}. 
} \examples{ U <- rotate(simplenet, pi) stretch <- diag(c(2,3)) Y <- affine(simplenet, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(simplenet, mat=shear, vec=c(0, 1)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/layered.Rd0000755000176000001440000000516412237642732014655 0ustar ripleyusers\name{layered} \alias{layered} \title{ Create List of Plotting Layers } \description{ Given several objects which are capable of being plotted, create a list containing these objects as if they were successive layers of a plot. The list can then be plotted in different ways. } \usage{ layered(..., plotargs = NULL, LayerList=NULL) } \arguments{ \item{\dots}{ Objects which can be plotted by \code{plot}. } \item{plotargs}{ Default values of the plotting arguments for each of the objects. A list of lists of arguments of the form \code{name=value}. } \item{LayerList}{ A list of objects. Incompatible with \code{\dots}. } } \details{ Layering is a simple mechanism for controlling a high-level plot that is composed of several successive plots, for example, a background and a foreground plot. The layering mechanism makes it easier to issue the plot command, to switch on or off the plotting of each individual layer, and to control the plotting arguments that are passed to each layer. Each individual layer in the plot should be saved as an object that can be plotted using \code{plot}. It will typically belong to some class, which has a method for the generic function \code{plot}. The command \code{layered} simply saves the objects \code{\dots} as a list of class \code{"layered"}. This list can then be plotted by the method \code{\link{plot.layered}}. Thus, you only need to type a single \code{plot} command to produce the multi-layered plot. 
Individual layers of the plot can be switched on or off, or manipulated, using arguments to \code{\link{plot.layered}}. The argument \code{plotargs} contains default values of the plotting arguments for each layer. It should be a list, with one entry for each object in \code{\dots}. Each entry of \code{plotargs} should be a list of arguments in the form \code{name=value}, which are recognised by the \code{plot} method for the relevant layer. } \value{ A list, belonging to the class \code{"layered"}. There are methods for \code{plot}, \code{"["}, \code{"shift"}, \code{"affine"}, \code{"rotate"} and \code{"rescale"}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{plot.layered}}, \code{\link{methods.layered}}, \code{\link{[.layered}}, \code{\link{layerplotargs}}. } \examples{ data(cells) D <- distmap(cells) L <- layered(D, cells) L L <- layered(D, cells, plotargs=list(list(ribbon=FALSE), list(pch=16))) plot(L) } \keyword{spatial} \keyword{hplot} spatstat/man/progressreport.Rd0000755000176000001440000000324212237642733016324 0ustar ripleyusers\name{progressreport} \alias{progressreport} \title{Print Progress Reports} \description{ Prints Progress Reports during a loop or iterative calculation. } \usage{ progressreport(i, n, every = min(100,max(1, ceiling(n/100))), nperline = min(charsperline, every * ceiling(charsperline/(every + 3))), charsperline = 60, style=spatstat.options("progress")) } \arguments{ \item{i}{ Integer. The current iteration number (from 1 to \code{n}). } \item{n}{ Integer. The (maximum) number of iterations to be computed. } \item{every}{ Optional integer. The number of iterations between successive reports. } \item{nperline}{ Optional integer. The maximum number of reports to be printed per line of output. } \item{charsperline}{ Optional integer. The number of characters in a line of output. 
} \item{style}{ Character string determining the style of display. See Details. } } \details{ This is a convenient function for reporting progress during an iterative sequence of calculations or a suite of simulations. If \code{style="txtbar"} then \code{\link[utils]{txtProgressBar}} is used to represent progress as a bar made of text characters in the \R interpreter window. If \code{style="tty"}, then progress reports are printed using \code{cat}. This only seems to work well under Linux. } \value{ Null. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ for(i in 1:40) progressreport(i, 40) } \keyword{print} spatstat/man/summary.im.Rd0000755000176000001440000000366112237642734015333 0ustar ripleyusers\name{summary.im} \alias{summary.im} \alias{print.summary.im} \title{Summarizing a Pixel Image} \description{ \code{summary} method for class \code{"im"}. } \usage{ \method{summary}{im}(object, \dots) \method{print}{summary.im}(x, \dots) } \arguments{ \item{object}{A pixel image.} \item{\dots}{Ignored.} \item{x}{Object of class \code{"summary.im"} as returned by \code{summary.im}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"im"}. An object of class \code{"im"} describes a pixel image. See \code{\link{im.object}}) for details of this class. \code{summary.im} extracts information about the pixel image, and \code{print.summary.im} prints this information in a comprehensible format. In normal usage, \code{print.summary.im} is invoked implicitly when the user calls \code{summary.im} without assigning its value to anything. See the examples. 
  The information extracted by \code{summary.im}
  includes
  \describe{
    \item{range}{The range of the image values.}
    \item{mean}{The mean of the image values.}
    \item{integral}{The ``integral'' of the image values,
      calculated as the sum of the image values
      multiplied by the area of one pixel.}
    \item{dim}{The dimensions of the pixel array:
      \code{dim[1]} is the number of rows in the array,
      corresponding to the \bold{y} coordinate.}
  }
}
\value{
  \code{summary.im} returns an object of class \code{"summary.im"},
  while \code{print.summary.im} returns \code{NULL}.
}
\examples{
  # make an image
  X <- as.im(function(x,y) {x^2}, unit.square())
  # summarize it
  summary(X)
  # save the summary
  s <- summary(X)
  # print it
  print(s)
  s
  # extract stuff
  s$dim
  s$range
  s$integral
}
\author{Adrian Baddeley
  \email{Adrian.Baddeley@uwa.edu.au}
  \url{http://www.maths.uwa.edu.au/~adrian/}
  and Rolf Turner
  \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{methods}
spatstat/man/Kest.Rd0000755000176000001440000003117212237642731014133 0ustar ripleyusers\name{Kest}
\alias{Kest}
\title{K-function}
\description{
  Estimates Ripley's reduced second moment function \eqn{K(r)}
  from a point pattern in a window of arbitrary shape.
}
\usage{
  Kest(X, \dots, r=NULL, breaks=NULL,
    correction=c("border", "isotropic", "Ripley", "translate"),
    nlarge=3000, domain=NULL, var.approx=FALSE, ratio=FALSE)
}
\arguments{
  \item{X}{The observed point pattern,
    from which an estimate of \eqn{K(r)} will be computed.
    An object of class \code{"ppp"}, or data
    in any format acceptable to \code{\link{as.ppp}()}.
  }
  \item{\dots}{Ignored.}
  \item{r}{
    Optional. Vector of values for the argument \eqn{r} at which \eqn{K(r)}
    should be evaluated. Users are advised \emph{not} to specify this
    argument; there is a sensible default.
  }
  \item{breaks}{
    Optional. An alternative to the argument \code{r}.
    Not normally invoked by the user. See the \bold{Details} section.
  }
  \item{correction}{
    Optional.
    A character vector containing any selection of the options
    \code{"none"}, \code{"border"}, \code{"bord.modif"},
    \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
    \code{"translation"}, \code{"good"} or \code{"best"}.
    It specifies the edge correction(s) to be applied.
  }
  \item{nlarge}{
    Optional. Efficiency threshold.
    If the number of points exceeds \code{nlarge}, then only the
    border correction will be computed (by default), using a fast algorithm.
  }
  \item{domain}{
    Optional. Calculations will be restricted to this subset
    of the window. See Details.
  }
  \item{var.approx}{Logical. If \code{TRUE}, the approximate
    variance of \eqn{\hat K(r)}{Kest(r)} under CSR
    will also be computed.
  }
  \item{ratio}{
    Logical.
    If \code{TRUE}, the numerator and denominator of each
    edge-corrected estimate will also be saved,
    for use in analysing replicated point patterns.
  }
}
\value{
  An object of class \code{"fv"}, see \code{\link{fv.object}},
  which can be plotted directly using \code{\link{plot.fv}}.

  Essentially a data frame containing columns
  \item{r}{the vector of values of the argument \eqn{r}
    at which the function \eqn{K} has been estimated
  }
  \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}
    for a stationary Poisson process
  }
  together with columns named
  \code{"border"}, \code{"bord.modif"},
  \code{"iso"} and/or \code{"trans"},
  according to the selected edge corrections. These columns contain
  estimates of the function \eqn{K(r)}
  obtained by the edge corrections named.

  If \code{var.approx=TRUE} then the return value
  also has columns \code{rip} and \code{ls} containing approximations
  to the variance of \eqn{\hat K(r)}{Kest(r)} under CSR.

  If \code{ratio=TRUE} then the return value also has two
  attributes called \code{"numerator"} and \code{"denominator"}
  which are \code{"fv"} objects
  containing the numerators and denominators of each
  estimate of \eqn{K(r)}.
} \details{ The \eqn{K} function (variously called ``Ripley's K-function'' and the ``reduced second moment function'') of a stationary point process \eqn{X} is defined so that \eqn{\lambda K(r)}{lambda K(r)} equals the expected number of additional random points within a distance \eqn{r} of a typical random point of \eqn{X}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The \eqn{K} function is determined by the second order moment properties of \eqn{X}. An estimate of \eqn{K} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1977, 1988). In exploratory analyses, the estimate of \eqn{K} is a useful statistic summarising aspects of inter-point ``dependence'' and ``clustering''. For inferential purposes, the estimate of \eqn{K} is usually compared to the true value of \eqn{K} for a completely random (Poisson) point process, which is \eqn{K(r) = \pi r^2}{K(r) = pi * r^2}. Deviations between the empirical and theoretical \eqn{K} curves may suggest spatial clustering or spatial regularity. This routine \code{Kest} estimates the \eqn{K} function of a stationary point process, given observation of the process inside a known, bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The estimation of \eqn{K} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. 
It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented for rectangular and polygonal windows (not for binary masks). } \item{translate/translation}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } \item{none}{ Uncorrected estimate. An estimate of the K function \emph{without} edge correction. (i.e. setting \eqn{e_{ij} = 1}{e[i,j] = 1} in the equation below. This estimate is \bold{biased} and should not be used for data analysis, \emph{unless} you have an extremely large point pattern (more than 100,000 points). } \item{best}{ Selects the best edge correction that is available for the geometry of the window. Currently this is Ripley's isotropic correction for a rectangular or polygonal window, and the translation correction for masks. } \item{good}{ Selects the best edge correction that can be computed in a reasonable time. This is the same as \code{"best"} for datasets with fewer than 3000 points; otherwise the selected edge correction is \code{"border"}, unless there are more than 100,000 points, when it is \code{"none"}. } } The estimates of \eqn{K(r)} are of the form \deqn{ \hat K(r) = \frac a {n (n-1) } \sum_i \sum_j I(d_{ij}\le r) e_{ij} }{ Kest(r) = (a/(n * (n-1))) * sum[i,j] I(d[i,j] <= r) e[i,j]) } where \eqn{a} is the area of the window, \eqn{n} is the number of data points, and the sum is taken over all ordered pairs of points \eqn{i} and \eqn{j} in \code{X}. Here \eqn{d_{ij}}{d[i,j]} is the distance between the two points, and \eqn{I(d_{ij} \le r)}{I(d[i,j] <= r)} is the indicator that equals 1 if the distance is less than or equal to \eqn{r}. The term \eqn{e_{ij}}{e[i,j]} is the edge correction weight (which depends on the choice of edge correction listed above). Note that this estimator assumes the process is stationary (spatially homogeneous). 
For inhomogeneous point patterns, see \code{\link{Kinhom}}. If the point pattern \code{X} contains more than about 3000 points, the isotropic and translation edge corrections can be computationally prohibitive. The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. If \code{X} contains more than about 100,000 points, even the border correction is time-consuming. You may want to consider setting \code{correction="none"} in this case. There is an even faster algorithm for the uncorrected estimate. Approximations to the variance of \eqn{\hat K(r)}{Kest(r)} are available, for the case of the isotropic edge correction estimator, \bold{assuming complete spatial randomness} (Ripley, 1988; Lotwick and Silverman, 1982; Diggle, 2003, pp 51-53). If \code{var.approx=TRUE}, then the result of \code{Kest} also has a column named \code{rip} values of Ripley's (1988) approximation to \eqn{\mbox{var}(\hat K(r))}{var(Kest(r))}, and (if the window is a rectangle) a column named \code{ls} giving values of Lotwick and Silverman's (1982) approximation. If the argument \code{domain} is given, the calculations will be restricted to a subset of the data. In the formula for \eqn{K(r)} above, the \emph{first} point \eqn{i} will be restricted to lie inside \code{domain}. The result is an approximately unbiased estimate of \eqn{K(r)} based on pairs of points in which the first point lies inside \code{domain} and the second point is unrestricted. This is useful in bootstrap techniques. 
The argument \code{domain} should be a window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. It must be a subset of the window of the point pattern \code{X}. The estimator \code{Kest} ignores marks. Its counterparts for multitype point patterns are \code{\link{Kcross}}, \code{\link{Kdot}}, and for general marked point patterns see \code{\link{Kmulti}}. Some writers, particularly Stoyan (1994, 1995) advocate the use of the ``pair correlation function'' \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}. See \code{\link{pcf}} on how to estimate this function. } \section{Envelopes, significance bands and confidence intervals}{ To compute simulation envelopes for the \eqn{K}-function under CSR, use \code{\link{envelope}}. To compute a confidence interval for the true \eqn{K}-function, use \code{\link{varblock}} or \code{\link{lohboot}}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. 
(1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \section{Warnings}{ The estimator of \eqn{K(r)} is approximately unbiased for each fixed \eqn{r}. Bias increases with \eqn{r} and depends on the window geometry. For a rectangular window it is prudent to restrict the \eqn{r} values to a maximum of \eqn{1/4} of the smaller side length of the rectangle. Bias may become appreciable for point patterns consisting of fewer than 15 points. While \eqn{K(r)} is always a non-decreasing function, the estimator of \eqn{K} is not guaranteed to be non-decreasing. This is rarely a problem in practice. } \seealso{ \code{\link{localK}} to extract individual summands in the \eqn{K} function. \code{\link{pcf}} for the pair correlation. \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} for alternative summary functions. \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kinhom}}, \code{\link{Kmulti}} for counterparts of the \eqn{K} function for multitype point patterns. \code{\link{reduced.sample}} for the calculation of reduced sample estimators. } \examples{ pp <- runifpoint(50) K <- Kest(pp) data(cells) K <- Kest(cells, correction="isotropic") plot(K) plot(K, main="K function for cells") # plot the L function plot(K, sqrt(iso/pi) ~ r) plot(K, sqrt(./pi) ~ r, ylab="L(r)", main="L function for cells") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/simplenet.Rd0000755000176000001440000000053212237642734015224 0ustar ripleyusers\name{simplenet} \alias{simplenet} \docType{data} \title{ Simple Example of Linear Network } \description{ A simple, artificially created, example of a linear network. } \format{ \code{simplenet} is an object of class \code{"linnet"}. } \usage{data(simplenet)} \source{ Created by Adrian Baddeley. 
} \keyword{datasets} \keyword{spatial} spatstat/man/influence.ppm.Rd0000755000176000001440000000646712237642732016002 0ustar ripleyusers\name{influence.ppm} \alias{influence.ppm} \title{ Influence Measure for Spatial Point Process Model } \description{ Computes the influence measure for a fitted spatial point process model. } \usage{ \method{influence}{ppm}(model, ..., drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=list()) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ Given a fitted spatial point process model \code{model}, this function computes the influence measure described in Baddeley, Chang and Song (2013). The function \code{\link[stats]{influence}} is generic, and \code{influence.ppm} is the method for objects of class \code{"ppm"} representing point process models. The influence of a point process model is a value attached to each data point (i.e. each point of the point pattern to which the \code{model} was fitted). The influence value \eqn{s(x_i)}{s(x[i])} at a data point \eqn{x_i}{x[i]} represents the change in the maximised log (pseudo)likelihood that occurs when the point \eqn{x_i}{x[i]} is deleted. A relatively large value of \eqn{s(x_i)}{s(x[i])} indicates a data point with a large influence on the fitted model. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the influence calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. 
The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{influence.ppm} is an object of class \code{"influence.ppm"}. It can be plotted (by \code{\link{plot.influence.ppm}}), or converted to a marked point pattern by \code{as.ppp} (see \code{\link{as.ppp.influence.ppm}}). } \value{ An object of class \code{"influence.ppm"} that can be plotted by \code{\link{plot.influence.ppm}}. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{leverage.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{plot.influence.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) plot(influence(fit)) } \keyword{spatial} \keyword{models} spatstat/man/rmh.Rd0000755000176000001440000000604412237642734014016 0ustar ripleyusers\name{rmh} \alias{rmh} \title{Simulate point patterns using the Metropolis-Hastings algorithm.} \description{ Generic function for running the Metropolis-Hastings algorithm to produce simulated realisations of a point process model. } \usage{rmh(model, \dots)} \arguments{ \item{model}{The point process model to be simulated. } \item{\dots}{Further arguments controlling the simulation. 
} } \details{ The Metropolis-Hastings algorithm can be used to generate simulated realisations from a wide range of spatial point processes. For caveats, see below. The function \code{rmh} is generic; it has methods \code{\link{rmh.ppm}} (for objects of class \code{"ppm"}) and \code{\link{rmh.default}} (the default). The actual implementation of the Metropolis-Hastings algorithm is contained in \code{\link{rmh.default}}. For details of its use, see \code{\link{rmh.ppm}} or \code{\link{rmh.default}}. [If the model is a Poisson process, then Metropolis-Hastings is not used; the Poisson model is generated directly using \code{\link{rpoispp}} or \code{\link{rmpoispp}}.] In brief, the Metropolis-Hastings algorithm is a Markov Chain, whose states are spatial point patterns, and whose limiting distribution is the desired point process. After running the algorithm for a very large number of iterations, we may regard the state of the algorithm as a realisation from the desired point process. However, there are difficulties in deciding whether the algorithm has run for ``long enough''. The convergence of the algorithm may indeed be extremely slow. No guarantees of convergence are given! While it is fashionable to decry the Metropolis-Hastings algorithm for its poor convergence and other properties, it has the advantage of being easy to implement for a wide range of models. } \section{Warning}{ As of version 1.22-1 of \code{spatstat} a subtle change was made to \code{rmh.default()}. We had noticed that the results produced were sometimes not ``scalable'' in that two models, differing in effect only by the units in which distances are measured and starting from the same seed, gave different results. This was traced to an idiosyncracy of floating point arithmetic. The code of \code{rmh.default()} has been changed so that the results produced by \code{rmh} are now scalable. 
The downside of this is that code which users previously ran may now give results which are different from what they formerly were. In order to recover former behaviour (so that previous results can be reproduced) set \code{spatstat.options(scalable=FALSE)}. See the last example in the help for \code{\link{rmh.default}}. } \value{ A point pattern, in the form of an object of class \code{"ppp"}. See \code{\link{rmh.default}} for details. } \seealso{ \code{\link{rmh.default}} } \examples{ # See examples in rmh.default and rmh.ppm } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/delaunay.distance.Rd0000644000176000001440000000267712237642732016626 0ustar ripleyusers\name{delaunay.distance} \alias{delaunay.distance} \title{Distance on Delaunay Triangulation} \description{ Computes the graph distance in the Delaunay triangulation of a point pattern. } \usage{ delaunay.distance(X) } \arguments{ \item{X}{Spatial point pattern (object of class \code{"ppp"}).} } \details{ The Delaunay triangulation of a spatial point pattern \code{X} is defined as follows. First the Dirichlet/Voronoi tessellation of \code{X} computed; see \code{\link{dirichlet}}. Then two points of \code{X} are defined to be Delaunay neighbours if their Dirichlet/Voronoi tiles share a common boundary. Every pair of Delaunay neighbours is joined by a straight line. The \emph{graph distance} in the Delaunay triangulation between two points \code{X[i]} and \code{X[j]} is the minimum number of edges of the Delaunay triangulation that must be traversed to go from \code{X[i]} to \code{X[j]}. This command returns a matrix \code{D} such that \code{D[i,j]} is the graph distance between \code{X[i]} and \code{X[j]}. } \value{ A symmetric square matrix with integer entries. 
} \seealso{ \code{\link{delaunay}} } \examples{ X <- runifpoint(20) M <- delaunay.distance(X) plot(delaunay(X), lty=3) text(X, labels=M[1, ], cex=2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/quadscheme.Rd0000755000176000001440000001277412237642733015355 0ustar ripleyusers\name{quadscheme} \alias{quadscheme} \title{Generate a Quadrature Scheme from a Point Pattern} \description{ Generates a quadrature scheme (an object of class \code{"quad"}) from point patterns of data and dummy points. } \usage{ quadscheme(data, dummy, method="grid", \dots) } \arguments{ \item{data}{ The observed data point pattern. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{dummy}{ The pattern of dummy points for the quadrature. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} Defaults to \code{default.dummy(data, ...)} } \item{method}{ The name of the method for calculating quadrature weights: either \code{"grid"} or \code{"dirichlet"}. } \item{\dots}{ Parameters of the weighting method (see below) and parameters for constructing the dummy points if necessary. } } \value{ An object of class \code{"quad"} describing the quadrature scheme (data points, dummy points, and quadrature weights) suitable as the argument \code{Q} of the function \code{\link{ppm}()} for fitting a point process model. The quadrature scheme can be inspected using the \code{print} and \code{plot} methods for objects of class \code{"quad"}. } \details{ This is the primary method for producing a quadrature schemes for use by \code{\link{ppm}}. The function \code{\link{ppm}} fits a point process model to an observed point pattern using the Berman-Turner quadrature approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the pseudolikelihood of the model. 
It requires a quadrature scheme consisting of the original data point pattern, an additional pattern of dummy points, and a vector of quadrature weights for all these points. Such quadrature schemes are represented by objects of class \code{"quad"}. See \code{\link{quad.object}} for a description of this class. Quadrature schemes are created by the function \code{quadscheme}. The arguments \code{data} and \code{dummy} specify the data and dummy points, respectively. There is a sensible default for the dummy points (provided by \code{\link{default.dummy}}). Alternatively the dummy points may be specified arbitrarily and given in any format recognised by \code{\link{as.ppp}}. There are also functions for creating dummy patterns including \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}} and \code{\link{spokes}}. The quadrature region is the region over which we are integrating, and approximating integrals by finite sums. If \code{dummy} is a point pattern object (class \code{"ppp"}) then the quadrature region is taken to be \code{dummy$window}. If \code{dummy} is just a list of \eqn{x, y} coordinates then the quadrature region defaults to the observation window of the data pattern, \code{data$window}. If \code{dummy} is missing, then a pattern of dummy points will be generated using \code{\link{default.dummy}}, taking account of the optional arguments \code{...}. Recognised arguments include \code{nd} (the number of grid points in the horizontal and vertical directions) and \code{eps} (the spacing between dummy points). See \code{\link{default.dummy}} for details. If \code{method = "grid"} then the optional arguments (for \code{\dots}) are \code{(nd, ntile, eps)}. The quadrature region (see below) is divided into an \code{ntile[1]} by \code{ntile[2]} grid of rectangular tiles. The weight for each quadrature point is the area of a tile divided by the number of quadrature points in that tile. 
If \code{method="dirichlet"} then the optional arguments are \code{(exact=TRUE, nd, eps)}. The quadrature points (both data and dummy) are used to construct the Dirichlet tessellation. The quadrature weight of each point is the area of its Dirichlet tile inside the quadrature region. If \code{exact == TRUE} then this area is computed exactly using the package \code{deldir}; otherwise it is computed approximately by discretisation. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. } \seealso{ \code{\link{ppm}}, \code{\link{as.ppp}}, \code{\link{quad.object}}, \code{\link{gridweights}}, \code{\link{dirichlet.weights}}, \code{\link{corners}}, \code{\link{gridcentres}}, \code{\link{stratrand}}, \code{\link{spokes}} } \examples{ data(simdat) # grid weights Q <- quadscheme(simdat) Q <- quadscheme(simdat, method="grid") Q <- quadscheme(simdat, eps=0.5) # dummy point spacing 0.5 units Q <- quadscheme(simdat, nd=50) # 1 dummy point per tile Q <- quadscheme(simdat, ntile=25, nd=50) # 4 dummy points per tile # Dirichlet weights Q <- quadscheme(simdat, method="dirichlet", exact=FALSE) # random dummy pattern \dontrun{ D <- runifpoint(250, simdat$window) Q <- quadscheme(simdat, D, method="dirichlet", exact=FALSE) } # polygonal window data(demopat) X <- unmark(demopat) Q <- quadscheme(X) # mask window X$window <- as.mask(X$window) Q <- quadscheme(X) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/markstat.Rd0000755000176000001440000000710512237642733015054 0ustar ripleyusers\name{markstat} \alias{markstat} \title{Summarise Marks in Every Neighbourhood in a Point 
Pattern} \description{ Visit each point in a point pattern, find the neighbouring points, and summarise their marks } \usage{ markstat(X, fun, N=NULL, R=NULL, \dots) } \arguments{ \item{X}{ A marked point pattern. An object of class \code{"ppp"}. } \item{fun}{ Function to be applied to the vector of marks. } \item{N}{ Integer. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of the \code{N} points of \code{X} which are closest to it. } \item{R}{ Nonnegative numeric value. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of all points of \code{X} which lie within a distance \code{R} of it. } \item{\dots}{ extra arguments passed to the function \code{fun}. They must be given in the form \code{name=value}. } } \value{ Similar to the result of \code{\link{apply}}. if each call to \code{fun} returns a single numeric value, the result is a vector of dimension \code{X$n}, the number of points in \code{X}. If each call to \code{fun} returns a vector of the same length \code{m}, then the result is a matrix of dimensions \code{c(m,n)}; note the transposition of the indices, as usual for the family of \code{apply} functions. If the calls to \code{fun} return vectors of different lengths, the result is a list of length \code{X$n}. } \details{ This algorithm visits each point in the point pattern \code{X}, determines which points of \code{X} are ``neighbours'' of the current point, extracts the marks of these neighbouring points, applies the function \code{fun} to the marks, and collects the value or values returned by \code{fun}. The definition of ``neighbours'' depends on the arguments \code{N} and \code{R}, exactly one of which must be given. If \code{N} is given, then the neighbours of the current point are the \code{N} points of \code{X} which are closest to the current point (including the current point itself). 
If \code{R} is given, then the neighbourhood of the current point consists of all points of \code{X} which lie closer than a distance \code{R} from the current point. Each point of \code{X} is visited; the neighbourhood of the current point is determined; the marks of these points are extracted as a vector \code{v}; then the function \code{fun} is called as: \code{fun(v, \dots)} where \code{\dots} are the arguments passed from the call to \code{markstat}. The results of each call to \code{fun} are collected and returned according to the usual rules for \code{\link{apply}} and its relatives. See \bold{Value} above. This function is just a convenient wrapper for a common use of the function \code{\link{applynbd}}. For more complex tasks, use \code{\link{applynbd}}. To simply tabulate the marks in every \code{R}-neighbourhood, use \code{\link{marktable}}. } \seealso{ \code{\link{applynbd}}, \code{\link{marktable}}, \code{\link{ppp.object}}, \code{\link{apply}} } \examples{ data(longleaf) \testonly{ longleaf <- longleaf[seq(1, longleaf$n, by=6)] } # average diameter of 5 closest neighbours of each tree md <- markstat(longleaf, mean, N=5) # range of diameters of trees within 10 metre radius rd <- markstat(longleaf, range, R=10) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{programming} spatstat/man/methods.rho2hat.Rd0000644000176000001440000000261512237642733016235 0ustar ripleyusers\name{methods.rho2hat} \alias{methods.rho2hat} %DoNotExport \alias{print.rho2hat} \alias{plot.rho2hat} \title{ Methods for Intensity Functions of Two Spatial Covariates } \description{ These are methods for the class \code{"rho2hat"}. } \usage{ \method{plot}{rho2hat}(x, \dots, do.points=FALSE) \method{print}{rho2hat}(x, \dots) } \arguments{ \item{x}{ An object of class \code{"rho2hat"}. } \item{\dots}{ Arguments passed to other methods. 
} \item{do.points}{ Logical value indicating whether to plot the observed values of the covariates at the data points. } } \details{ These functions are methods for the generic commands \code{\link{print}} and \code{\link{plot}} for the class \code{"rho2hat"}. An object of class \code{"rho2hat"} is an estimate of the intensity of a point process, as a function of two given spatial covariates. See \code{\link{rho2hat}}. The method \code{plot.rho2hat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. } \value{ \code{NULL}. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{rho2hat}} } \examples{ data(bei) attach(bei.extra) r2 <- rho2hat(bei, elev, grad) r2 plot(r2) } \keyword{spatial} \keyword{methods} spatstat/man/vargamma.estK.Rd0000644000176000001440000001472512251541120015711 0ustar ripleyusers\name{vargamma.estK} \alias{vargamma.estK} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast. } \usage{ vargamma.estK(X, startpar=c(kappa=1,eta=1), nu.ker = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, nu.pcf=NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu.ker}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. 
} \item{nu.pcf}{ Alternative specifier of the shape parameter. See Details. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical \eqn{K} function of the model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu.ker}. 
This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). Instead of specifying \code{nu.ker} the user can specify \code{nu.pcf} which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13), page 127 of Jalilian et al (2013). These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. Exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rVarGamma}}. The parameter \code{eta} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. 
It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estpcf}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{cauchy.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{Kmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ \testonly{ u <- vargamma.estK(redwood, startpar=c(kappa=15, eta=0.075)) } if(interactive()) { u <- vargamma.estK(redwood) u plot(u) } } \keyword{spatial} \keyword{models} spatstat/man/scaletointerval.Rd0000755000176000001440000000260012237642734016421 0ustar ripleyusers\name{scaletointerval} %DontDeclareMethods \alias{scaletointerval} \alias{scaletointerval.default} \alias{scaletointerval.im} \title{Rescale Data to Lie Between Specified Limits} \description{ Rescales a dataset so that the values range exactly between the specified limits. } \usage{ scaletointerval(x, from=0, to=1) \method{scaletointerval}{default}(x, from=0, to=1) \method{scaletointerval}{im}(x, from=0, to=1) } \arguments{ \item{x}{Data to be rescaled.} \item{from,to}{Lower and upper endpoints of the interval to which the values of \code{x} should be rescaled. 
} } \details{ These functions rescale a dataset \code{x} so that its values range exactly between the limits \code{from} and \code{to}. The method for pixel images (objects of class \code{"im"}) applies this scaling to the pixel values of \code{x}. Rescaling cannot be performed if the values in \code{x} are not interpretable as numeric, or if the values in \code{x} are all equal. } \value{ An object of the same type as \code{x}. } \seealso{ \code{\link{scale}} } \examples{ X <- as.im(function(x,y) {x+y+3}, unit.square()) summary(X) Y <- scaletointerval(X) summary(Y) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/intensity.ppp.Rd0000644000176000001440000000213712237642732016046 0ustar ripleyusers\name{intensity.ppp} %DontDeclareMethods \alias{intensity.ppp} \title{ Empirical Intensity of Point Pattern } \description{ Computes the average number of points per unit area in a point pattern dataset. } \usage{ \method{intensity}{ppp}(X, ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{intensity}}. It computes the empirical intensity of a point pattern (object of class \code{"ppp"}), i.e. the average density of points per unit area. If the point pattern is multitype, the intensities of the different types are computed separately. } \value{ A numeric value (giving the intensity) or numeric vector (giving the intensity for each possible type). 
} \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppm}} } \examples{ intensity(cells) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/rgbim.Rd0000755000176000001440000000405712237642734014332 0ustar ripleyusers\name{rgbim} \alias{rgbim} \alias{hsvim} \title{Create Colour-Valued Pixel Image} \description{ Creates an object of class \code{"im"} representing a two-dimensional pixel image whose pixel values are colours. } \usage{ rgbim(R, G, B, maxColorValue=255) hsvim(H, S, V) } \arguments{ \item{R,G,B}{ Pixel images (objects of class \code{"im"}) or constants giving the red, green, and blue components of a colour, respectively. } \item{maxColorValue}{ Maximum colour value for \code{R,G,B}. } \item{H,S,V}{ Pixel images (objects of class \code{"im"}) or constants giving the hue, saturation, and value components of a colour, respectively. } } \details{ These functions take three pixel images, with real or integer pixel values, and create a single pixel image whose pixel values are colours recognisable to \R. Some of the arguments may be constant numeric values, but at least one of the arguments must be a pixel image. The image arguments should be compatible (in array dimension and in spatial position). \code{rgbim} calls \code{\link{rgb}} to compute the colours, while \code{hsvim} calls \code{\link{hsv}}. See the help for the relevant function for more information about the meaning of the colour channels. } \seealso{ \code{\link{im.object}}, \code{\link{rgb}}, \code{\link{hsv}}. See \code{\link[spatstat:colourtools]{colourtools}} for additional colour tools. 
} \examples{ \testonly{ op <- spatstat.options(npixel=32) } # create three images with values in [0,1] X <- setcov(owin()) X <- eval.im(pmin(1,X)) M <- as.owin(X) Y <- as.im(function(x,y){(x+1)/2}, W=M) Z <- as.im(function(x,y){(y+1)/2}, W=M) RGB <- rgbim(X, Y, Z, 1) HSV <- hsvim(X, Y, Z) plot(RGB, valuesAreColours=TRUE) plot(HSV, valuesAreColours=TRUE) \testonly{ spatstat.options(op) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{datagen} spatstat/man/nndist.psp.Rd0000755000176000001440000000561412237642733015331 0ustar ripleyusers\name{nndist.psp} \alias{nndist.psp} \title{Nearest neighbour distances between line segments} \description{ Computes the distance from each line segment to its nearest neighbour in a line segment pattern. Alternatively finds the distance to the second nearest, third nearest etc. } \usage{ \method{nndist}{psp}(X, \dots, k=1, method="Fortran") } \arguments{ \item{X}{ A line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{method}{ String specifying which method of calculation to use. Values are \code{"Fortran"}, \code{"C"} and \code{"interpreted"}. Usually not specified. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each line segment. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th segment. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th segment. 
If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th segment. } \details{ This is a method for the generic function \code{\link{nndist}} for the class \code{"psp"}. If \code{k=1}, this function computes the distance from each line segment to the nearest other line segment in \code{X}. In general it computes the distance from each line segment to the \code{k}th nearest other line segment. The argument \code{k} can also be a vector, and this computation will be performed for each value of \code{k}. Distances are calculated using the Hausdorff metric. The Hausdorff distance between two line segments is the maximum distance from any point on one of the segments to the nearest point on the other segment. If there are fewer than \code{max(k)+1} line segments in the pattern, some of the nearest neighbour distances will be infinite (\code{Inf}). The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="Fortran"} (the default) then Fortran code is used. The Fortran code is somewhat faster. } \seealso{ \code{\link{nndist}}, \code{\link{nndist.ppp}} } \examples{ L <- psp(runif(10), runif(10), runif(10), runif(10), owin()) D <- nndist(L) D <- nndist(L, k=1:3) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/edge.Ripley.Rd0000644000176000001440000000513112237642732015366 0ustar ripleyusers\name{edge.Ripley} \alias{edge.Ripley} \title{ Ripley's Isotropic Edge Correction } \description{ Computes Ripley's isotropic edge correction weights for a point pattern. 
} \usage{ edge.Ripley(X, r, W = X$window, method = "C", maxweight = 100) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. } \item{r}{ Vector or matrix of interpoint distances for which the edge correction should be computed. } \item{method}{ Choice of algorithm. Either \code{"interpreted"} or \code{"C"}. This is needed only for debugging purposes. } \item{maxweight}{ Maximum permitted value of the edge correction weight. } } \details{ This function computes Ripley's (1977) isotropic edge correction weight, which is used in estimating the \eqn{K} function and in many other contexts. For a single point \eqn{u} in a window \eqn{W}, and a distance \eqn{r > 0}, the isotropic edge correction weight is \deqn{ e(u, r) = \frac{2\pi r}{\mbox{length}(c(u,r) \cap W)} }{ e(u, r) = 2 * pi * r/length(intersection(c(u,r), W)) } where \eqn{c(u,r)} is the circle of radius \eqn{r} centred at the point \eqn{u}. The denominator is the length of the overlap between this circle and the window \eqn{W}. The function \code{edge.Ripley} computes this edge correction weight for each point in the point pattern \code{X} and for each corresponding distance value in the vector or matrix \code{r}. If \code{r} is a vector, with one entry for each point in \code{X}, then the result is a vector containing the edge correction weights \code{e(X[i], r[i])} for each \code{i}. If \code{r} is a matrix, with one row for each point in \code{X}, then the result is a matrix whose \code{i,j} entry gives the edge correction weight \code{e(X[i], r[i,j])}. For example \code{edge.Ripley(X, pairdist(X))} computes all the edge corrections required for the \eqn{K}-function. If any value of the edge correction weight exceeds \code{maxweight}, it is set to \code{maxweight}. } \value{ A numeric vector or matrix. } \references{ Ripley, B.D. (1977) Modelling spatial patterns (with discussion). 
\emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \seealso{ \code{\link{edge.Trans}}, \code{\link{Kest}} } \examples{ v <- edge.Ripley(cells, pairdist(cells)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/quadrat.test.splitppp.Rd0000755000176000001440000000401012237642733017507 0ustar ripleyusers\name{quadrat.test.splitppp} \alias{quadrat.test.splitppp} \title{Dispersion Test of CSR for Split Point Pattern Based on Quadrat Counts} \description{ Performs a test of Complete Spatial Randomness for each of the component patterns in a split point pattern, based on quadrat counts. By default performs chi-squared tests; can also perform Monte Carlo based tests. } \usage{ \method{quadrat.test}{splitppp}(X, ..., df=NULL, df.est=NULL, Xname=NULL) } \arguments{ \item{X}{ A split point pattern (object of class \code{"splitppp"}), each component of which will be subjected to the goodness-of-fit test. } \item{\dots}{Arguments passed to \code{\link{quadrat.test.ppp}}.} \item{df,df.est,Xname}{Arguments passed to \code{\link{pool.quadrattest}}.} } \details{ The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), split point patterns (class \code{"splitppp"}) and point process models (class \code{"ppm"}). If \code{X} is a split point pattern, then for each of the component point patterns (taken separately) we test the null hypotheses of Complete Spatial Randomness, then combine the result into a single test. The method \code{quadrat.test.ppp} is applied to each component point pattern. Then the results are pooled using \code{\link{pool.quadrattest}} to obtain a single test. } \seealso{ \code{\link{quadrat.test}}, \code{\link{quadratcount}}, \code{\link{quadrats}}, \code{\link{quadratresample}}, \code{\link{chisq.test}}, \code{\link{kstest}}. 
To test a Poisson point process model against a specific Poisson alternative, use \code{\link{anova.ppm}}. } \value{ An object of class \code{"quadrattest"} which can be printed and plotted. } \examples{ data(humberside) qH <- quadrat.test(split(humberside), 2, 3) plot(qH) qH } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{htest} spatstat/man/lengths.psp.Rd0000755000176000001440000000146212237642732015472 0ustar ripleyusers\name{lengths.psp} \alias{lengths.psp} \title{Lengths of Line Segments} \description{ Computes the length of each line segment in a line segment pattern. } \usage{ lengths.psp(x) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } } \value{ Numeric vector. } \details{ The length of each line segment is computed and the lengths are returned as a numeric vector. } \seealso{ \code{\link{summary.psp}}, \code{\link{midpoints.psp}}, \code{\link{angles.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- lengths.psp(a) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/ppm.Rd0000755000176000001440000007073412237642733014032 0ustar ripleyusers\name{ppm} \alias{ppm} \title{ Fit Point Process Model to Data } \description{ Fits a point process model to an observed point pattern } \usage{ ppm(Q, trend=~1, interaction=Poisson(), \dots, covariates=NULL, covfunargs = list(), correction="border", rbord=reach(interaction), use.gam=FALSE, method="mpl", forcefit=FALSE, project=FALSE, nd = NULL, eps = NULL, gcontrol=list(), nsim=100, nrmh=1e5, start=NULL, control=list(nrep=nrmh), verb=TRUE, callstring=NULL) } \arguments{ \item{Q}{ A data point pattern (of class \code{"ppp"}) to which the model will be fitted, or a 
quadrature scheme (of class \code{"quad"}) containing this pattern. } \item{trend}{ An \R formula object specifying the spatial trend to be fitted. The default formula, \code{~1}, indicates the model is stationary and no trend is to be fitted. } \item{interaction}{ An object of class \code{"interact"} describing the point process interaction structure, or \code{NULL} indicating that a Poisson process (stationary or nonstationary) should be fitted. } \item{\dots}{Ignored.} \item{covariates}{ The values of any spatial covariates (other than the Cartesian coordinates) required by the model. Either a data frame, or a list whose entries are images, functions, windows or single numbers. See Details. } \item{covfunargs}{ A named list containing the values of any additional arguments required by covariate functions. } \item{correction}{ The name of the edge correction to be used. The default is \code{"border"} indicating the border correction. Other possibilities may include \code{"Ripley"}, \code{"isotropic"}, \code{"periodic"}, \code{"translate"} and \code{"none"}, depending on the \code{interaction}. } \item{rbord}{ If \code{correction = "border"} this argument specifies the distance by which the window should be eroded for the border correction. } \item{use.gam}{ Logical flag; if \code{TRUE} then computations are performed using \code{gam} instead of \code{\link{glm}}. } \item{method}{ The method used to fit the model. Options are \code{"mpl"} for the method of Maximum PseudoLikelihood, \code{"logi"} for the Logistic Likelihood method, and \code{"ho"} for the Huang-Ogata approximate maximum likelihood method. } \item{forcefit}{ Logical flag for internal use. If \code{forcefit=FALSE}, some trivial models will be fitted by a shortcut. If \code{forcefit=TRUE}, the generic fitting method will always be used. } \item{project}{ Logical. Setting \code{project=TRUE} will ensure that the fitted model is always a valid point process by applying \code{\link{project.ppm}}. 
} \item{nd}{ Optional. Integer or pair of integers. The dimension of the grid of dummy points (\code{nd * nd} or \code{nd[1] * nd[2]}) used to evaluate the integral in the pseudolikelihood. Incompatible with \code{eps}. } \item{eps}{ Optional. A positive number, or a vector of two positive numbers, giving the horizontal and vertical spacing, respectively, of the grid of dummy points. Incompatible with \code{nd}. } \item{gcontrol}{ Optional. List of parameters passed to \code{\link{glm.control}} (or passed to \code{\link{gam.control}} if \code{use.gam=TRUE}) controlling the model-fitting algorithm. } \item{nsim}{ Number of simulated realisations to generate (for \code{method="ho"}) } \item{nrmh}{ Number of Metropolis-Hastings iterations for each simulated realisation (for \code{method="ho"}) } \item{start,control}{ Arguments passed to \code{\link{rmh}} controlling the behaviour of the Metropolis-Hastings algorithm (for \code{method="ho"}) } \item{verb}{ Logical flag indicating whether to print progress reports (for \code{method="ho"}) } \item{callstring}{ Internal use only. } } \value{ An object of class \code{"ppm"} describing a fitted point process model. See \code{\link{ppm.object}} for details of the format of this object and methods available for manipulating it. } \details{ This function fits a point process model to an observed point pattern. The model may include spatial trend, interpoint interaction, and dependence on covariates. \describe{ \item{basic use:}{ In basic use, \code{Q} is a point pattern dataset (an object of class \code{"ppp"}) to which we wish to fit a model. The syntax of \code{ppm()} is closely analogous to the \R functions \code{\link{glm}} and \code{gam}. 
The analogy is: \tabular{ll}{ \bold{glm} \tab \bold{ppm} \cr \code{formula} \tab \code{trend} \cr \code{family} \tab \code{interaction} } The point process model to be fitted is specified by the arguments \code{trend} and \code{interaction} which are respectively analogous to the \code{formula} and \code{family} arguments of glm(). Systematic effects (spatial trend and/or dependence on spatial covariates) are specified by the argument \code{trend}. This is an \R formula object, which may be expressed in terms of the Cartesian coordinates \code{x}, \code{y}, the marks \code{marks}, or the variables in \code{covariates} (if supplied), or both. It specifies the \bold{logarithm} of the first order potential of the process. The formula should not use any names beginning with \code{.mpl} as these are reserved for internal use. If \code{trend} is absent or equal to the default, \code{~1}, then the model to be fitted is stationary (or at least, its first order potential is constant). Stochastic interactions between random points of the point process are defined by the argument \code{interaction}. This is an object of class \code{"interact"} which is initialised in a very similar way to the usage of family objects in \code{\link{glm}} and \code{gam}. The models currently available are: \code{\link{Poisson}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{Concom}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Saturated}}, \code{\link{SatPiece}}, \code{\link{Softcore}}, \code{\link{Strauss}} and \code{\link{StraussHard}}. See the examples below. It is also possible to combine several interactions using \code{\link{Hybrid}}. 
If \code{interaction} is missing or \code{NULL}, then the model to be fitted has no interpoint interactions, that is, it is a Poisson process (stationary or nonstationary according to \code{trend}). In this case the methods of maximum pseudolikelihood and maximum logistic likelihood coincide with maximum likelihood. The fitted point process model returned by this function can be printed (by the print method \code{\link{print.ppm}}) to inspect the fitted parameter values. If a nonparametric spatial trend was fitted, this can be extracted using the predict method \code{\link{predict.ppm}}. } \item{Models with covariates:}{ To fit a model involving spatial covariates other than the Cartesian coordinates \eqn{x} and \eqn{y}, the values of the covariates should be supplied in the argument \code{covariates}. Note that it is not sufficient to have observed the covariate only at the points of the data point pattern; the covariate must also have been observed at other locations in the window. Typically the argument \code{covariates} is a list, with names corresponding to variables in the \code{trend} formula. Each entry in the list is either a pixel image (giving the values of a spatial covariate at a fine grid of locations), or a function (which can be evaluated at any location \code{(x,y)} to obtain the value of the spatial covariate), or a window (interpreted as a logical variable which is \code{TRUE} inside the window and \code{FALSE} outside it) or a single number (indicating a covariate that is constant in this dataset). Each entry in the list must be an image (object of class \code{"im"}, see \code{\link{im.object}}), or a \code{function(x, y, ...)}, or a single number. The software will look up the pixel values of each image at the required locations (quadrature points). In the case of a \code{function(x, y, ...)}, the arguments \code{x} and \code{y} are implicit, and any additional arguments \code{\dots} should be given in \code{covfunargs}. 
Note that, for covariate functions, only the \emph{name} of the function appears in the trend formula. A covariate function is treated as if it were a single variable. The function arguments do not appear in the trend formula. See the Examples. If \code{covariates} is a list, the list entries should have names corresponding to the names of covariates in the model formula \code{trend}. The variable names \code{x}, \code{y} and \code{marks} are reserved for the Cartesian coordinates and the mark values, and these should not be used for variables in \code{covariates}. If \code{covariates} is a data frame, \code{Q} must be a quadrature scheme (see under Quadrature Schemes below). Then \code{covariates} must have as many rows as there are points in \code{Q}. The \eqn{i}th row of \code{covariates} should contain the values of spatial variables which have been observed at the \eqn{i}th point of \code{Q}. } \item{Quadrature schemes:}{ In advanced use, \code{Q} may be a `quadrature scheme'. This was originally just a technicality but it has turned out to have practical uses, as we explain below. Quadrature schemes are required for our implementation of the method of maximum pseudolikelihood. The definition of the pseudolikelihood involves an integral over the spatial window containing the data. In practice this integral must be approximated by a finite sum over a set of quadrature points. We use the technique of Baddeley and Turner (2000), a generalisation of the Berman-Turner (1992) device. In this technique the quadrature points for the numerical approximation include all the data points (points of the observed point pattern) as well as additional `dummy' points. Quadrature schemes are also required for the method of maximum logistic likelihood, which combines the data points with additional `dummy' points. 
A quadrature scheme is an object of class \code{"quad"} (see \code{\link{quad.object}}) which specifies both the data point pattern and the dummy points for the quadrature scheme, as well as the quadrature weights associated with these points. If \code{Q} is simply a point pattern (of class \code{"ppp"}, see \code{\link{ppp.object}}) then it is interpreted as specifying the data points only; a set of dummy points specified by \code{\link{default.dummy}()} is added, and the default weighting rule is invoked to compute the quadrature weights. Finer quadrature schemes (i.e. those with more dummy points) generally yield a better approximation, at the expense of higher computational load. An easy way to fit models using a finer quadrature scheme is to let \code{Q} be the original point pattern data, and use the argument \code{nd} to determine the number of dummy points in the quadrature scheme. Complete control over the quadrature scheme is possible. See \code{\link{quadscheme}} for an overview. Use \code{quadscheme(X, D, method="dirichlet")} to compute quadrature weights based on the Dirichlet tessellation, or \code{quadscheme(X, D, method="grid")} to compute quadrature weights by counting points in grid squares, where \code{X} and \code{D} are the patterns of data points and dummy points respectively. Alternatively use \code{\link{pixelquad}} to make a quadrature scheme with a dummy point at every pixel in a pixel image. A practical advantage of quadrature schemes arises when we want to fit a model involving covariates (e.g. soil pH). Suppose we have only been able to observe the covariates at a small number of locations. 
Suppose \code{cov.dat} is a data frame containing the values of the covariates at the data points (i.e.\ \code{cov.dat[i,]} contains the observations for the \code{i}th data point) and \code{cov.dum} is another data frame (with the same columns as \code{cov.dat}) containing the covariate values at another set of points whose locations are given by the point pattern \code{Y}. Then setting \code{Q = quadscheme(X,Y)} combines the data points and dummy points into a quadrature scheme, and \code{covariates = rbind(cov.dat, cov.dum)} combines the covariate data frames. We can then fit the model by calling \code{ppm(Q, ..., covariates)}. } \item{Model-fitting technique:}{ There are several choices for the technique used to fit the model. \describe{ \item{method="mpl"}{ (the default): the model will be fitted by maximising the pseudolikelihood (Besag, 1975) using the Berman-Turner computational approximation (Berman and Turner, 1992; Baddeley and Turner, 2000). Maximum pseudolikelihood is equivalent to maximum likelihood if the model is a Poisson process. Maximum pseudolikelihood is biased if the interpoint interaction is very strong, unless there is a large number of dummy points. The default settings for \code{method='mpl'} specify a moderately large number of dummy points, striking a compromise between speed and accuracy. } \item{method="logi":}{ the model will be fitted by maximising the logistic likelihood (Baddeley et al, 2013). This technique is roughly equivalent in speed to maximum pseudolikelihood, but is believed to be less biased. Because it is less biased, the default settings for \code{method='logi'} specify a relatively small number of dummy points, so that this method is the fastest, in practice. } \item{method="ho":}{ the model will be fitted by applying the approximate maximum likelihood method of Huang and Ogata (1999). See below. The Huang-Ogata method is slower than the other options, but has better statistical properties. 
} } Note that \code{method='logi'} and \code{method='ho'} involve randomisation, so that the results are subject to random variation. } \item{Huang-Ogata method:}{ If \code{method="ho"} then the model will be fitted using the Huang-Ogata (1999) approximate maximum likelihood method. First the model is fitted by maximum pseudolikelihood as described above, yielding an initial estimate of the parameter vector \eqn{\theta_0}{theta0}. From this initial model, \code{nsim} simulated realisations are generated. The score and Fisher information of the model at \eqn{\theta=\theta_0}{theta=theta0} are estimated from the simulated realisations. Then one step of the Fisher scoring algorithm is taken, yielding an updated estimate \eqn{\theta_1}{theta1}. The corresponding model is returned. Simulated realisations are generated using \code{\link{rmh}}. The iterative behaviour of the Metropolis-Hastings algorithm is controlled by the arguments \code{start} and \code{control} which are passed to \code{\link{rmh}}. As a shortcut, the argument \code{nrmh} determines the number of Metropolis-Hastings iterations run to produce one simulated realisation (if \code{control} is absent). Also if \code{start} is absent or equal to \code{NULL}, it defaults to \code{list(n.start=N)} where \code{N} is the number of points in the data point pattern. } \item{Edge correction}{ Edge correction should be applied to the sufficient statistics of the model, to reduce bias. The argument \code{correction} is the name of an edge correction method. The default \code{correction="border"} specifies the border correction, in which the quadrature window (the domain of integration of the pseudolikelihood) is obtained by trimming off a margin of width \code{rbord} from the observation window of the data pattern. Not all edge corrections are implemented (or implementable) for arbitrary windows. 
Other options depend on the argument \code{interaction}, but these generally include \code{correction="periodic"} (the periodic or toroidal edge correction in which opposite edges of a rectangular window are identified) and \code{correction="translate"} (the translation correction, see Baddeley 1998 and Baddeley and Turner 2000). For pairwise interaction models there is also Ripley's isotropic correction, identified by \code{correction="isotropic"} or \code{"Ripley"}. } } } \section{Interaction parameters}{ Apart from the Poisson model, every point process model fitted by \code{ppm} has parameters that determine the strength and range of \sQuote{interaction} or dependence between points. These parameters are of two types: \describe{ \item{regular parameters:}{ A parameter \eqn{\phi}{phi} is called \emph{regular} if the log likelihood is a linear function of \eqn{\theta}{theta} where \eqn{\theta = \theta(\phi)}{theta = theta(phi)} is some transformation of \eqn{\phi}{phi}. [Then \eqn{\theta}{theta} is called the canonical parameter.] } \item{irregular parameters:}{ Other parameters are called \emph{irregular}. } } Typically, regular parameters determine the \sQuote{strength} of the interaction, while irregular parameters determine the \sQuote{range} of the interaction. For example, the Strauss process has a regular parameter \eqn{\gamma}{gamma} controlling the strength of interpoint inhibition, and an irregular parameter \eqn{r} determining the range of interaction. The \code{ppm} command is only designed to estimate regular parameters of the interaction. It requires the values of any irregular parameters of the interaction to be fixed. For example, to fit a Strauss process model to the \code{cells} dataset, you could type \code{ppm(cells, ~1, Strauss(r=0.07))}. Note that the value of the irregular parameter \code{r} must be given. The result of this command will be a fitted model in which the regular parameter \eqn{\gamma}{gamma} has been estimated. 
To determine the irregular parameters, there are several practical techniques, but no general statistical theory available. One useful technique is maximum profile pseudolikelihood, which is implemented in the command \code{\link{profilepl}}. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2013) \emph{A logistic regression estimating function for spatial Gibbs point processes.} Research Report, Centre for Stochastic Geometry and Bioimaging, Denmark. \url{www.csgb.dk} Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Berman, M. and Turner, T.R. Approximating point process likelihoods with GLIM. \emph{Applied Statistics} \bold{41} (1992) 31--38. Besag, J. Statistical analysis of non-lattice data. \emph{The Statistician} \bold{24} (1975) 179-195. Diggle, P.J., Fiksel, T., Grabarnik, P., Ogata, Y., Stoyan, D. and Tanemura, M. On parameter estimation for pairwise interaction processes. \emph{International Statistical Review} \bold{62} (1994) 99-117. Huang, F. and Ogata, Y. Improvements of the maximum pseudo-likelihood estimators in various spatial statistical models. \emph{Journal of Computational and Graphical Statistics} \bold{8} (1999) 510-530. Jensen, J.L. and Moeller, M. Pseudolikelihood for exponential family models of spatial point processes. \emph{Annals of Applied Probability} \bold{1} (1991) 445--461. Jensen, J.L. and Kuensch, H.R. On asymptotic normality of pseudo likelihood estimates for pairwise interaction processes, \emph{Annals of the Institute of Statistical Mathematics} \bold{46} (1994) 475-486. } \seealso{ \code{\link{ppm.object}} for details of how to print, plot and manipulate a fitted model. \code{\link{ppp}} and \code{\link{quadscheme}} for constructing data. 
Interactions: \code{\link{Poisson}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Geyer}}, \code{\link{Fiksel}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Saturated}}, \code{\link{SatPiece}}, \code{\link{Softcore}}, \code{\link{Strauss}} and \code{\link{StraussHard}}. See \code{\link{profilepl}} for advice on fitting nuisance parameters in the interaction, and \code{\link{ippm}} for irregular parameters in the trend. See \code{\link{valid.ppm}} and \code{\link{project.ppm}} for ensuring the fitted model is a valid point process. } \section{Warnings}{ The implementation of the Huang-Ogata method is experimental; several bugs were fixed in \pkg{spatstat} 1.19-0. See the comments above about the possible inefficiency and bias of the maximum pseudolikelihood estimator. The accuracy of the Berman-Turner approximation to the pseudolikelihood depends on the number of dummy points used in the quadrature scheme. The number of dummy points should at least equal the number of data points. The parameter values of the fitted model do not necessarily determine a valid point process. Some of the point process models are only defined when the parameter values lie in a certain subset. For example the Strauss process only exists when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to \eqn{1}, corresponding to a value of \code{ppm()$theta[2]} less than or equal to \code{0}. By default (if \code{project=FALSE}) the algorithm maximises the pseudolikelihood without constraining the parameters, and does not apply any checks for sanity after fitting the model. This is because the fitted parameter value could be useful information for data analysis. 
To constrain the parameters to ensure that the model is a valid point process, set \code{project=TRUE}. See also the functions \code{\link{valid.ppm}} and \code{\link{project.ppm}}. The \code{trend} formula should not use any variable names beginning with the prefixes \code{.mpl} or \code{Interaction} as these names are reserved for internal use. The data frame \code{covariates} should have as many rows as there are points in \code{Q}. It should not contain variables called \code{x}, \code{y} or \code{marks} as these names are reserved for the Cartesian coordinates and the marks. If the model formula involves one of the functions \code{poly()}, \code{bs()} or \code{ns()} (e.g. applied to spatial coordinates \code{x} and \code{y}), the fitted coefficients can be misleading. The resulting fit is not to the raw spatial variates (\code{x}, \code{x^2}, \code{x*y}, etc.) but to a transformation of these variates. The transformation is implemented by \code{poly()} in order to achieve better numerical stability. However the resulting coefficients are appropriate for use with the transformed variates, not with the raw variates. This affects the interpretation of the constant term in the fitted model, \code{logbeta}. Conventionally, \eqn{\beta}{beta} is the background intensity, i.e. the value taken by the conditional intensity function when all predictors (including spatial or ``trend'' predictors) are set equal to \eqn{0}. However the coefficient actually produced is the value that the log conditional intensity takes when all the predictors, including the \emph{transformed} spatial predictors, are set equal to \code{0}, which is not the same thing. Worse still, the result of \code{\link{predict.ppm}} can be completely wrong if the trend formula contains one of the functions \code{poly()}, \code{bs()} or \code{ns()}. This is a weakness of the underlying function \code{\link{predict.glm}}. 
If you wish to fit a polynomial trend, we offer an alternative to \code{\link{poly}()}, namely \code{polynom()}, which avoids the difficulty induced by transformations. It is completely analogous to \code{poly} except that it does not orthonormalise. The resulting coefficient estimates then have their natural interpretation and can be predicted correctly. Numerical stability may be compromised. Values of the maximised pseudolikelihood are not comparable if they have been obtained with different values of \code{rbord}. } \examples{ ppm(nztrees) # fit the stationary Poisson process # to point pattern 'nztrees' \dontrun{ Q <- quadscheme(nztrees) ppm(Q) # equivalent. } \dontrun{ ppm(nztrees, nd=128) } \testonly{ ppm(nztrees, nd=16) } fit1 <- ppm(nztrees, ~ x) # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx) # where x,y are the Cartesian coordinates # and a,b are parameters to be estimated fit1 coef(fit1) coef(summary(fit1)) \dontrun{ ppm(nztrees, ~ polynom(x,2)) } \testonly{ ppm(nztrees, ~ polynom(x,2), nd=16) } # fit the nonstationary Poisson process # with intensity function lambda(x,y) = exp(a + bx + cx^2) \dontrun{ library(splines) ppm(nztrees, ~ bs(x,df=3)) } # WARNING: do not use predict.ppm() on this result # Fits the nonstationary Poisson process # with intensity function lambda(x,y) = exp(B(x)) # where B is a B-spline with df = 3 \dontrun{ ppm(nztrees, ~1, Strauss(r=10), rbord=10) } \testonly{ ppm(nztrees, ~1, Strauss(r=10), rbord=10, nd=16) } # Fit the stationary Strauss process with interaction range r=10 # using the border method with margin rbord=10 \dontrun{ ppm(nztrees, ~ x, Strauss(13), correction="periodic") } \testonly{ ppm(nztrees, ~ x, Strauss(13), correction="periodic", nd=16) } # Fit the nonstationary Strauss process with interaction range r=13 # and exp(first order potential) = activity = beta(x,y) = exp(a+bx) # using the periodic correction. 
# Huang-Ogata fit: \dontrun{ppm(nztrees, ~1, Strauss(r=10), method="ho")} \testonly{ppm(nztrees, ~1, Strauss(r=10), method="ho", nd=16, nsim=10)} # COVARIATES # X <- rpoispp(42) weirdfunction <- function(x,y){ 10 * x^2 + 5 * sin(10 * y) } # # (a) covariate values as function ppm(X, ~ y + Z, covariates=list(Z=weirdfunction)) # # (b) covariate values in pixel image Zimage <- as.im(weirdfunction, unit.square()) ppm(X, ~ y + Z, covariates=list(Z=Zimage)) # # (c) covariate values in data frame Q <- quadscheme(X) xQ <- x.quad(Q) yQ <- y.quad(Q) Zvalues <- weirdfunction(xQ,yQ) ppm(Q, ~ y + Z, covariates=data.frame(Z=Zvalues)) # Note Q not X # COVARIATE FUNCTION WITH EXTRA ARGUMENTS # f <- function(x,y,a){ y - a } ppm(X, ~x + f, covariates=list(f=f), covfunargs=list(a=1/2)) ## MULTITYPE POINT PROCESSES ### # fit stationary marked Poisson process # with different intensity for each species \dontrun{ppm(lansing, ~ marks, Poisson())} \testonly{a <- ppm(amacrine, ~ marks, Poisson(), nd=16)} # fit nonstationary marked Poisson process # with different log-cubic trend for each species \dontrun{ppm(lansing, ~ marks * polynom(x,y,3), Poisson())} \testonly{ppm(amacrine, ~ marks * polynom(x,y,2), Poisson(), nd=16)} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/nnfun.lpp.Rd0000644000176000001440000000524312237642733015142 0ustar ripleyusers\name{nnfun.lpp} \Rdversion{1.1} \alias{nnfun.lpp} \title{ Nearest Neighbour Map on Linear Network } \description{ Compute the nearest neighbour function of a point pattern on a linear network. } \usage{ \method{nnfun}{lpp}(X, ...) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Extra arguments are ignored. 
} } \details{ The (geodesic) \emph{nearest neighbour function} of a point pattern \code{X} on a linear network \code{L} tells us which point of \code{X} is closest to any given location. If \code{X} is a point pattern on a linear network \code{L}, the \emph{nearest neighbour function} of \code{X} is the mathematical function \eqn{f} defined for any location \eqn{s} on the network by \code{f(s) = i}, where \code{X[i]} is the closest point of \code{X} to the location \code{s} measured by the shortest path. In other words the value of \code{f(s)} is the identifier or serial number of the closest point of \code{X}. The command \code{nnfun.lpp} is a method for the generic command \code{\link[spatstat]{nnfun}} for the class \code{"lpp"} of point patterns on a linear network. If \code{X} is a point pattern on a linear network, \code{f <- nnfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y, \dots}, that represents the nearest neighbour function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the values of the nearest neighbour function at these locations. More efficiently \code{f} can take the arguments \code{x, y, seg, tp} where \code{seg} and \code{tp} are the local coordinates on the network. The result of \code{f <- nnfun(X)} also belongs to the class \code{"linfun"}. It can be printed and plotted immediately as shown in the Examples. It can be converted to a pixel image using \code{\link{as.linim}}. } \value{ A \code{function} in the \R language, with arguments \code{x,y} and optional arguments \code{seg,tp}. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{linfun}}, \code{\link{methods.linfun}}. To compute the \emph{distance} to the nearest neighbour, see \code{\link{distfun.lpp}}. 
} \examples{ data(letterR) X <- runiflpp(3, simplenet) f <- nnfun(X) f plot(f) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/quad.ppm.Rd0000755000176000001440000000475412237642733014762 0ustar ripleyusers\name{quad.ppm} \alias{quad.ppm} \title{Extract Quadrature Scheme Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the quadrature scheme used to fit the model. } \usage{ quad.ppm(object, drop=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"} or \code{"kppm"}). } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } } \value{ A quadrature scheme (object of class \code{"quad"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{quad.ppm} extracts the quadrature scheme. A typical use of this function would be to inspect the quadrature scheme (points and weights) to gauge the accuracy of the approximation to the exact pseudolikelihood. It may happen that some quadrature points are not actually used in fitting the model (typically because the value of a covariate is \code{NA} at these points). 
The argument \code{drop} specifies whether these unused quadrature points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. See \code{\link{quad.object}} for a list of all operations that can be performed on objects of class \code{"quad"}. This function can also be applied to objects of class \code{"kppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{quad.object}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(r=0.1)) Q <- quad.ppm(fit) \dontrun{plot(Q)} Q$data$n Q$dummy$n } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/allstats.Rd0000755000176000001440000000622312237642732015054 0ustar ripleyusers\name{allstats} \alias{allstats} \title{Calculate four standard summary functions of a point pattern.} \description{ Calculates the \eqn{F}, \eqn{G}, \eqn{J}, and \eqn{K} summary functions for an unmarked point pattern. Returns them as a function array (of class \code{"fasp"}, see \code{\link{fasp.object}}). } \usage{ allstats(pp, \dots, dataname=NULL, verb=FALSE) } \arguments{ \item{pp}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"}. It must not be marked. } \item{\dots}{ Optional arguments passed to the summary functions \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}}. } \item{dataname}{A character string giving an optional (alternative) name for the point pattern. } \item{verb}{A logical value meaning ``verbose''. If \code{TRUE}, progress reports are printed during calculation. 
} } \details{ This computes four standard summary statistics for a point pattern: the empty space function \eqn{F(r)}, nearest neighbour distance distribution function \eqn{G(r)}, van Lieshout-Baddeley function \eqn{J(r)} and Ripley's function \eqn{K(r)}. The real work is done by \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} and \code{\link{Kest}} respectively. Consult the help files for these functions for further information about the statistical interpretation of \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K}. If \code{verb} is \code{TRUE}, then ``progress reports'' (just indications of completion) are printed out when the calculations are finished for each of the four function types. The overall title of the array of four functions (for plotting by \code{\link{plot.fasp}}) will be formed from the argument \code{dataname}. If this is not given, it defaults to the expression for \code{pp} given in the call to \code{allstats}. } \value{ A list of length 4 containing the \eqn{F}, \eqn{G}, \eqn{J} and \eqn{K} functions respectively. The list can be plotted directly using \code{plot} (which dispatches to \code{\link{plot.listof}}). Each list entry retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}} or \code{\link{Kest}}. Thus each entry in the list is a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J, and \code{cbind(trans,theo) ~ r} for K. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{plot.listof}}, \code{\link{plot.fv}}, \code{\link{fv.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} } \examples{ data(swedishpines) a <- allstats(swedishpines,dataname="Swedish Pines") \dontrun{ plot(a) plot(a, subset=list("r<=15","r<=15","r<=15","r<=50")) } } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.linim.Rd0000755000176000001440000000347012237642733015314 0ustar ripleyusers\name{plot.linim} \alias{plot.linim} \title{ Plot Pixel Image on Linear Network } \description{ Given a pixel image on a linear network, the pixel values are displayed either as colours or as line widths. } \usage{ \method{plot}{linim}(x, ..., style = c("colour", "width"), scale, adjust = 1) } \arguments{ \item{x}{ The pixel image to be plotted. An object of class \code{"linim"}. } \item{\dots}{ Extra graphical parameters, passed to \code{\link[spatstat]{plot.im}} if \code{style="colour"}, or to \code{\link{polygon}} if \code{style="width"}. } \item{style}{ Character string specifying the type of plot. See Details. } \item{scale}{ Physical scale factor for representing the pixel values as line widths. } \item{adjust}{ Adjustment factor for the default scale. } } \details{ This is the \code{plot} method for objects of class \code{"linim"}. Such an object represents a pixel image defined on a linear network. If \code{style="colour"} (the default) then the pixel values of \code{x} are plotted as colours, using \code{\link[spatstat]{plot.im}}. If \code{style="width"} then the pixel values of \code{x} are used to determine the widths of thick lines centred on the line segments of the linear network. } \value{ Null. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{linim}}, \code{\link[spatstat]{plot.im}}, \code{\link{polygon}} } \references{ Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. } \examples{ example(linim) plot(X) plot(X, style="width") } \keyword{spatial} spatstat/man/qqplot.ppm.Rd0000755000176000001440000003517312237642733015347 0ustar ripleyusers\name{qqplot.ppm} \alias{qqplot.ppm} \title{ Q-Q Plot of Residuals from Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, produce a Q-Q plot based on residuals from the model. } \usage{ qqplot.ppm(fit, nsim=100, expr=NULL, \dots, type="raw", style="mean", fast=TRUE, verbose=TRUE, plot.it=TRUE, dimyx=NULL, nrep=if(fast) 5e4 else 1e5, control=update(default.rmhcontrol(fit), nrep=nrep), saveall=FALSE, monochrome=FALSE, limcol=if(monochrome) "black" else "red", maxerr=max(100, ceiling(nsim/10)), check=TRUE, repair=TRUE) } \arguments{ \item{fit}{ The fitted point process model, which is to be assessed using the Q-Q plot. An object of class \code{"ppm"}. Smoothed residuals obtained from this fitted model will provide the ``data'' quantiles for the Q-Q plot. } \item{nsim}{ The number of simulations from the ``reference'' point process model. } \item{expr}{ Determines the simulation mechanism which provides the ``theoretical'' quantiles for the Q-Q plot. See Details. } \item{\dots}{ Arguments passed to \code{\link{diagnose.ppm}} influencing the computation of residuals. } \item{type}{ String indicating the type of residuals or weights to be used. 
Current options are \code{"eem"} for the Stoyan-Grabarnik exponential energy weights, \code{"raw"} for the raw residuals, \code{"inverse"} for the inverse-lambda residuals, and \code{"pearson"} for the Pearson residuals. A partial match is adequate. } \item{style}{ Character string controlling the type of Q-Q plot. Options are \code{"classical"} and \code{"mean"}. See Details. } \item{fast}{ Logical flag controlling the speed and accuracy of computation. Use \code{fast=TRUE} for interactive use and \code{fast=FALSE} for publication standard plots. See Details. } \item{verbose}{ Logical flag controlling whether the algorithm prints progress reports during long computations. } \item{plot.it}{ Logical flag controlling whether the function produces a plot or simply returns a value (silently). } \item{dimyx}{ Dimensions of the pixel grid on which the smoothed residual field will be calculated. A vector of two integers. } \item{nrep}{ If \code{control} is absent, then \code{nrep} gives the number of iterations of the Metropolis-Hastings algorithm that should be used to generate one simulation of the fitted point process. } \item{control}{ List of parameters controlling the Metropolis-Hastings algorithm \code{\link{rmh}} which generates each simulated realisation from the model (unless the model is Poisson). This list becomes the argument \code{control} of \code{\link{rmh.default}}. It overrides \code{nrep}. } \item{saveall}{ Logical flag indicating whether to save all the intermediate calculations. } \item{monochrome}{ Logical flag indicating whether the plot should be in black and white (\code{monochrome=TRUE}), or in colour (\code{monochrome=FALSE}). } \item{limcol}{ String. The colour to be used when plotting the 95\% limit curves. } \item{maxerr}{ Maximum number of failures tolerated while generating simulated realisations. See Details. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. 
If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{repair}{ Logical value indicating whether to repair the internal format of \code{fit}, if it is found to be damaged. } } \value{ An object of class \code{"qqppm"} containing the information needed to reproduce the Q-Q plot. Entries \code{x} and \code{y} are numeric vectors containing quantiles of the simulations and of the data, respectively. } \details{ This function generates a Q-Q plot of the residuals from a fitted point process model. It is an addendum to the suite of diagnostic plots produced by the function \code{\link{diagnose.ppm}}, kept separate because it is computationally intensive. The quantiles of the theoretical distribution are estimated by simulation. In classical statistics, a Q-Q plot of residuals is a useful diagnostic for checking the distributional assumptions. Analogously, in spatial statistics, a Q-Q plot of the (smoothed) residuals from a fitted point process model is a useful way to check the interpoint interaction part of the model (Baddeley et al, 2005). The systematic part of the model (spatial trend, covariate effects, etc) is assessed using other plots made by \code{\link{diagnose.ppm}}. The argument \code{fit} represents the fitted point process model. It must be an object of class \code{"ppm"} (typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). Residuals will be computed for this fitted model using \code{\link{residuals.ppm}}, and the residuals will be kernel-smoothed to produce a ``residual field''. The values of this residual field will provide the ``data'' quantiles for the Q-Q plot. The argument \code{expr} is not usually specified. It provides a way to modify the ``theoretical'' or ``reference'' quantiles for the Q-Q plot. In normal usage we set \code{expr=NULL}. 
The default is to generate \code{nsim} simulated realisations of the fitted model \code{fit}, re-fit this model to each of the simulated patterns, evaluate the residuals from these fitted models, and use the kernel-smoothed residual field from these fitted models as a sample from the reference distribution for the Q-Q plot. In advanced use, \code{expr} may be an \code{expression}. It will be re-evaluated \code{nsim} times, and should include random computations so that the results are not identical each time. The result of evaluating \code{expr} should be either a point pattern (object of class \code{"ppp"}) or a fitted point process model (object of class \code{"ppm"}). If the value is a point pattern, then the original fitted model \code{fit} will be fitted to this new point pattern using \code{\link{update.ppm}}, to yield another fitted model. Smoothed residuals obtained from these \code{nsim} fitted models will yield the ``theoretical'' quantiles for the Q-Q plot. Simulation is performed (if \code{expr=NULL}) using the Metropolis-Hastings algorithm \code{\link{rmh}}. Each simulated realisation is the result of running the Metropolis-Hastings algorithm from an independent random starting state each time. The iterative and termination behaviour of the Metropolis-Hastings algorithm are governed by the argument \code{control}. See \code{\link{rmhcontrol}} for information about this argument. As a shortcut, the argument \code{nrep} determines the number of Metropolis-Hastings iterations used to generate each simulated realisation, if \code{control} is absent. By default, simulations are generated in an expanded window. Use the argument \code{control} to change this, as explained in the section on \emph{Warning messages}. The argument \code{type} selects the type of residual or weight that will be computed. For options, see \code{\link{diagnose.ppm}}. The argument \code{style} determines the type of Q-Q plot. 
It is highly recommended to use the default, \code{style="mean"}. \describe{ \item{\code{style="classical"}}{ The quantiles of the residual field for the data (on the \eqn{y} axis) are plotted against the quantiles of the \bold{pooled} simulations (on the \eqn{x} axis). This plot is biased, and therefore difficult to interpret, because of strong autocorrelations in the residual field and the large differences in sample size. } \item{\code{style="mean"}}{ The order statistics of the residual field for the data are plotted against the sample means, over the \code{nsim} simulations, of the corresponding order statistics of the residual field for the simulated datasets. Dotted lines show the 2.5 and 97.5 percentiles, over the \code{nsim} simulations, of each order statistic. } } The argument \code{fast} is a simple way to control the accuracy and speed of computation. If \code{fast=FALSE}, the residual field is computed on a fine grid of pixels (by default 100 by 100 pixels, see below) and the Q-Q plot is based on the complete set of order statistics (usually 10,000 quantiles). If \code{fast=TRUE}, the residual field is computed on a coarse grid (at most 40 by 40 pixels) and the Q-Q plot is based on the \emph{percentiles} only. This is about 7 times faster. It is recommended to use \code{fast=TRUE} for interactive data analysis and \code{fast=FALSE} for definitive plots for publication. The argument \code{dimyx} gives full control over the resolution of the pixel grid used to calculate the smoothed residuals. Its interpretation is the same as the argument \code{dimyx} to the function \code{\link{as.mask}}. Note that \code{dimyx[1]} is the number of pixels in the \eqn{y} direction, and \code{dimyx[2]} is the number in the \eqn{x} direction. If \code{dimyx} is not present, then the default pixel grid dimensions are controlled by \code{spatstat.options("npixel")}. 
Since the computation is so time-consuming, \code{qqplot.ppm} returns a list containing all the data necessary to re-display the Q-Q plot. It is advisable to assign the result of \code{qqplot.ppm} to something (or use \code{.Last.value} if you forgot to.) The return value is an object of class \code{"qqppm"}. There are methods for \code{\link{plot.qqppm}} and \code{\link{print.qqppm}}. See the Examples. The argument \code{saveall} is usually set to \code{FALSE}. If \code{saveall=TRUE}, then the intermediate results of calculation for each simulated realisation are saved and returned. The return value includes a 3-dimensional array \code{sim} containing the smoothed residual field images for each of the \code{nsim} realisations. When \code{saveall=TRUE}, the return value is an object of very large size, and should not be saved on disk. Errors may occur during the simulation process, because random data are generated. For example: \itemize{ \item one of the simulated patterns may be empty. \item one of the simulated patterns may cause an error in the code that fits the point process model. \item the user-supplied argument \code{expr} may have a bug. } Empty point patterns do not cause a problem for the code, but they are reported. Other problems that would lead to a crash are trapped; the offending simulated data are discarded, and the simulation is retried. The argument \code{maxerr} determines the maximum number of times that such errors will be tolerated (mainly as a safeguard against an infinite loop). } \section{Side Effects}{ Produces a Q-Q plot if \code{plot.it} is TRUE. } \section{Warning messages}{ A warning message will be issued if any of the simulations trapped an error (a potential crash). A warning message will be issued if all, or many, of the simulated point patterns are empty. This usually indicates a problem with the simulation procedure. 
The default behaviour of \code{qqplot.ppm} is to simulate patterns on an expanded window (specified through the argument \code{control}) in order to avoid edge effects. The model's trend is extrapolated over this expanded window. If the trend is strongly inhomogeneous, the extrapolated trend may have very large (or even infinite) values. This can cause the simulation algorithm to produce empty patterns. The only way to suppress this problem entirely is to prohibit the expansion of the window, by setting the \code{control} argument to something like \code{control=list(nrep=1e6, expand=1)}. Here \code{expand=1} means there will be no expansion. See \code{\link{rmhcontrol}} for more information about the argument \code{control}. } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. 
} \seealso{ \code{\link{diagnose.ppm}}, \code{\link{lurking}}, \code{\link{residuals.ppm}}, \code{\link{eem}}, \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{rmh}}, \code{\link{rmhcontrol}} } \examples{ data(cells) fit <- ppm(cells, ~1, Poisson()) diagnose.ppm(fit) # no suggestion of departure from stationarity \dontrun{qqplot.ppm(fit, 80) # strong evidence of non-Poisson interaction} \testonly{qqplot.ppm(fit, 4)} \dontrun{ diagnose.ppm(fit, type="pearson") qqplot.ppm(fit, type="pearson") } \testonly{qqplot.ppm(fit, 4, type="pearson")} ########################################### ## oops, I need the plot coordinates mypreciousdata <- .Last.value \dontrun{mypreciousdata <- qqplot.ppm(fit, type="pearson")} \testonly{mypreciousdata <- qqplot.ppm(fit, 4, type="pearson")} plot(mypreciousdata) ###################################################### # Q-Q plots based on fixed n # The above QQ plots used simulations from the (fitted) Poisson process. # But I want to simulate conditional on n, instead of Poisson # Do this by setting rmhcontrol(p=1) fixit <- list(p=1) \dontrun{qqplot.ppm(fit, 100, control=fixit)} \testonly{qqplot.ppm(fit, 4, control=fixit)} ###################################################### # Inhomogeneous Poisson data X <- rpoispp(function(x,y){1000 * exp(-3*x)}, 1000) plot(X) # Inhomogeneous Poisson model fit <- ppm(X, ~x, Poisson()) \dontrun{qqplot.ppm(fit, 100)} \testonly{qqplot.ppm(fit, 4)} # conclusion: fitted inhomogeneous Poisson model looks OK ###################################################### # Advanced use of 'expr' argument # # set the initial conditions in Metropolis-Hastings algorithm # expr <- expression(rmh(fit, start=list(n.start=42), verbose=FALSE)) \dontrun{qqplot.ppm(fit, 100, expr)} \testonly{qqplot.ppm(fit, 4, expr)} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{hplot} 
spatstat/man/as.matrix.im.Rd0000755000176000001440000000251612237642732015540 0ustar ripleyusers\name{as.matrix.im} \alias{as.matrix.im} \alias{as.array.im} \title{Convert Pixel Image to Matrix or Array} \description{ Converts a pixel image to a matrix or an array. } \usage{ \method{as.matrix}{im}(x, ...) \method{as.array}{im}(x, ...) } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{See below.} } \details{ The function \code{as.matrix.im} converts the pixel image \code{x} into a matrix containing the pixel values. It is handy when you want to extract a summary of the pixel values. See the Examples. The function \code{as.array.im} converts the pixel image to an array. By default this is a three-dimensional array of dimension \eqn{n} by \eqn{m} by \eqn{1}. If the extra arguments \code{\dots} are given, they will be passed to \code{\link{array}}, and they may change the dimensions of the array. } \value{ A matrix or array. } \seealso{ \code{\link{as.matrix.owin}} } \examples{ # artificial image Z <- setcov(square(1)) M <- as.matrix(Z) median(M) \dontrun{ # plot the cumulative distribution function of pixel values plot(ecdf(as.matrix(Z))) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/distfun.Rd0000755000176000001440000000545312252057217014700 0ustar ripleyusers\name{distfun} %DontDeclareMethods \Rdversion{1.1} \alias{distfun} \alias{distfun.ppp} \alias{distfun.psp} \alias{distfun.owin} \title{ Distance Map as a Function } \description{ Compute the distance function of an object, and return it as a function. } \usage{ distfun(X, ...) \method{distfun}{ppp}(X, ..., k=1) \method{distfun}{psp}(X, ...) 
\method{distfun}{owin}(X, ..., invert=FALSE) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}) or a line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Extra arguments are ignored. } \item{k}{ An integer. The distance to the \code{k}th nearest point will be computed. } \item{invert}{ If \code{TRUE}, compute the distance transform of the complement of \code{X}. } } \details{ The \dQuote{distance function} of a set of points \eqn{A} is the mathematical function \eqn{f} such that, for any two-dimensional spatial location \eqn{(x,y)}, the function value \code{f(x,y)} is the shortest distance from \eqn{(x,y)} to \eqn{A}. The command \code{f <- distfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y}, that represents the distance function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the values of the distance function at these locations. This should be contrasted with the related command \code{\link{distmap}} which computes the distance function of \code{X} on a grid of locations, and returns the distance values in the form of a pixel image. The result of \code{f <- distfun(X)} also belongs to the class \code{"funxy"} and to the special class \code{"distfun"}. It can be printed and plotted immediately as shown in the Examples. A \code{distfun} object can be converted to a pixel image using \code{\link{as.im}}. } \value{ A \code{function} with arguments \code{x,y}. The function also belongs to the class \code{"distfun"} which has a method for \code{print}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. 
} \seealso{ \code{\link{distmap}}, \code{\link{plot.funxy}} } \examples{ data(letterR) f <- distfun(letterR) f plot(f) f(0.2, 0.3) plot(distfun(letterR, invert=TRUE), eps=0.1) d <- distfun(cells) d2 <- distfun(cells, k=2) d(0.5, 0.5) d2(0.5, 0.5) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/Ldot.inhom.Rd0000755000176000001440000000675112237642731015245 0ustar ripleyusers\name{Ldot.inhom} \alias{Ldot.inhom} \title{ Inhomogeneous Multitype L Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{L} function. } \usage{ Ldot.inhom(X, i, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{L} function \eqn{L_{i\bullet}(r)}{Li.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{\dots}{ Other arguments passed to \code{\link{Kdot.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}(r)}{Li.(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{i\bullet}(r)}{Li.(r)} for a marked Poisson process, identical to \eqn{r}. } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}(r)}{Li.(r)} obtained by the edge corrections named. 
} \details{ This a generalisation of the function \code{\link{Ldot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kdot.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{i\bullet}(r)}{Ki.(r)} for the point pattern. The resulting values are then transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \references{ Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Ldot}}, \code{\link{Linhom}}, \code{\link{Kdot.inhom}}, \code{\link{Lcross.inhom}}. } \examples{ # Lansing Woods data data(lansing) lansing <- lansing[seq(1,lansing$n, by=10)] ma <- split(lansing)$maple lg <- unmark(lansing) # Estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdadot <- density.ppp(lg, sigma=0.15, at="points") L <- Ldot.inhom(lansing, "maple", lambdaI=lambdaM, lambdadot=lambdadot) # synthetic example: type A points have intensity 50, # type B points have intensity 50 + 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Ldot.inhom(X, "B", lambdaI=lamB, lambdadot=lamdot) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/npoints.Rd0000755000176000001440000000201412237642733014712 0ustar ripleyusers\name{npoints} \alias{npoints} \alias{npoints.ppp} 
\alias{npoints.pp3} \alias{npoints.ppx} \title{Number of Points in a Point Pattern} \description{ Returns the number of points in a point pattern of any kind. } \usage{ npoints(x) \method{npoints}{ppp}(x) \method{npoints}{pp3}(x) \method{npoints}{ppx}(x) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}, \code{"pp3"}, \code{"ppx"} or some other suitable class). } } \value{ Integer. } \details{ This function returns the number of points in a point pattern. The function \code{npoints} is generic with methods for the classes \code{"ppp"}, \code{"pp3"}, \code{"ppx"} and possibly other classes. } \seealso{ \code{\link{ppp.object}}, \code{\link{print.pp3}}, \code{\link{print.ppx}}. } \examples{ data(cells) npoints(cells) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/kstest.mppm.Rd0000644000176000001440000001404212241443111015466 0ustar ripleyusers\name{kstest.mppm} \alias{kstest.mppm} \title{Kolmogorov-Smirnov Test for Multiple Point Process Model} \description{ Performs a Kolmogorov-Smirnov test of goodness-of-fit of a Poisson point process model fitted to multiple spatial point patterns. The test compares the observed and predicted distributions of the values of a spatial covariate. } \usage{ \method{kstest}{mppm}(model, covariate, ..., verbose=TRUE, interpolate=FALSE, fast=TRUE, jitter=TRUE) } \arguments{ \item{model}{ An object of class \code{"mppm"} representing a point process model fitted to multiple spatial point patterns. } \item{covariate}{ The spatial covariate on which the test will be based. A function, a pixel image, a list of functions, a list of pixel images, a hyperframe, or a character string containing the name of one of the covariates in \code{model}. } \item{\dots}{ Arguments passed to \code{\link{ks.test}} to control the test. 
} \item{verbose}{Logical flag indicating whether to print progress reports. } \item{interpolate}{ Logical flag indicating whether to interpolate between pixel values when code{covariate} is a pixel image. See \emph{Details}. } \item{fast}{ Logical flag. If \code{TRUE}, values of the covariate are only sampled at the original quadrature points used to fit the model. If \code{FALSE}, values of the covariate are sampled at all pixels, which can be slower by three orders of magnitude. } \item{jitter}{ Logical flag. If \code{TRUE}, observed values of the covariate are perturbed by adding small random values, to avoid tied observations. } } \details{ This function is a method for the generic function \code{\link[spatstat]{kstest}} for the class \code{mppm}. This function performs a goodness-of-fit test of a point process model that has been fitted to multiple point patterns. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using the Kolmogorov-Smirnov test. The argument \code{model} should be a fitted point process model fitted to multiple point patterns (object of class \code{"mppm"}). It should be a Poisson point process. The argument \code{covariate} contains the values of a spatial function. It can be \itemize{ \item a \code{function(x,y)} \item a pixel image (object of class \code{"im"} \item a list of \code{function(x,y)}, one for each point pattern \item a list of pixel images, one for each point pattern \item a hyperframe (see \code{\link[spatstat]{hyperframe}}) of which the first column will be taken as containing the covariate \item a character string giving the name of one of the covariates in \code{model}. } If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. 
If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. The predicted distribution of the values of the \code{covariate} under the fitted \code{model} is computed as follows. The values of the \code{covariate} at all locations in the observation window are evaluated, weighted according to the point process intensity of the fitted model, and compiled into a cumulative distribution function \eqn{F} using \code{\link[spatstat]{ewcdf}}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. The Kolmogorov-Smirnov test of uniformity is then applied using the \R core function \code{\link[stats]{ks.test}}. This test was apparently first described (in the context of spatial data) by Berman (1986). See also Baddeley et al (2005). The argument \code{interpolate} determines how pixel values will be handled when code{covariate} is a pixel image. The value of the covariate at a data point is obtained by looking up the value of the nearest pixel if \code{interpolate=FALSE}, or by linearly interpolating between the values of the four nearest pixels if \code{interpolate=TRUE}. Linear interpolation is slower, but is sometimes necessary to avoid tied values of the covariate arising when the pixel grid is coarse. } \value{ An object of class \code{"htest"} containing the results of the test. See \code{\link{ks.test}} for details. 
} \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \seealso{ \code{\link{ks.test}}, \code{\link[spatstat]{quadrat.test}}, \code{\link{mppm}} } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \examples{ # three i.i.d. realisations of nonuniform Poisson process lambda <- as.im(function(x,y) { 300 * exp(x) }, square(1)) dat <- hyperframe(X=list(rpoispp(lambda), rpoispp(lambda), rpoispp(lambda))) # fit uniform Poisson process fit0 <- mppm(X~1, dat) # fit correct nonuniform Poisson process fit1 <- mppm(X~x, dat) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model kstest.mppm(fit0, xcoord) # test right model kstest.mppm(fit1, xcoord) } \keyword{htest} \keyword{spatial} spatstat/man/residualspaper.Rd0000755000176000001440000000532312237642734016252 0ustar ripleyusers\name{residualspaper} \alias{residualspaper} \docType{data} \title{ Data and Code From JRSS Discussion Paper on Residuals } \description{ This dataset contains the point patterns used as examples in the paper of Baddeley et al (2005). [Figure 2 is already available in \pkg{spatstat} as the \code{\link{copper}} dataset.] R code is also provided to reproduce all the Figures displayed in Baddeley et al (2005). The component \code{plotfig} is a function, which can be called with a numeric or character argument specifying the Figure or Figures that should be plotted. See the Examples. 
} \format{ \code{residualspaper} is a list with the following components: \describe{ \item{Fig1}{ The locations of Japanese pine seedlings and saplings from Figure 1 of the paper. A point pattern (object of class \code{"ppp"}). } \item{Fig3}{ The Chorley-Ribble data from Figure 3 of the paper. A list with three components, \code{lung}, \code{larynx} and \code{incin}. Each is a matrix with 2 columns giving the coordinates of the lung cancer cases, larynx cancer cases, and the incinerator, respectively. Coordinates are Eastings and Northings in km. } \item{Fig4a}{ The synthetic dataset in Figure 4 (a) of the paper. } \item{Fig4b}{ The synthetic dataset in Figure 4 (b) of the paper. } \item{Fig4c}{ The synthetic dataset in Figure 4 (c) of the paper. } \item{Fig11}{ The covariate displayed in Figure 11. A pixel image (object of class \code{"im"}) whose pixel values are distances to the nearest line segment in the \code{copper} data. } \item{plotfig}{A function which will compute and plot any of the Figures from the paper. The argument of \code{plotfig} is either a numeric vector or a character vector, specifying the Figure or Figures to be plotted. See the Examples. } } } \usage{data(residualspaper)} \examples{ \dontrun{ data(residualspaper) X <- residualspaper$Fig4a summary(X) plot(X) # reproduce all Figures residualspaper$plotfig() # reproduce Figures 1 to 10 residualspaper$plotfig(1:10) # reproduce Figure 7 (a) residualspaper$plotfig("7a") } } \source{ Figure 1: Prof M. Numata. Data kindly supplied by Professor Y. Ogata with kind permission of Prof M. Tanemura. Figure 3: Professor P.J. Diggle (rescaled by Adrian Baddeley) Figure 4 (a,b,c): Adrian Baddeley } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. 
} \keyword{datasets} \keyword{spatial} \keyword{models} spatstat/man/default.rmhcontrol.Rd0000755000176000001440000000277112237642732017043 0ustar ripleyusers\name{default.rmhcontrol} \alias{default.rmhcontrol} \title{Set Default Control Parameters for Metropolis-Hastings Algorithm.} \description{ Given a fitted point process model, this command sets appropriate default values of the parameters controlling the iterative behaviour of the Metropolis-Hastings algorithm. } \usage{ default.rmhcontrol(model) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}) } } \value{ An object of class \code{"rmhcontrol"}. See \code{\link{rmhcontrol}}. } \details{ This function sets the values of the parameters controlling the iterative behaviour of the Metropolis-Hastings simulation algorithm. It uses default values that would be appropriate for the fitted point process model \code{model}. The expansion parameter \code{expand} is set to \code{\link{default.expand}(model)}. All other parameters revert to their defaults given in \code{\link{rmhcontrol.default}}. See \code{\link{rmhcontrol}} for the full list of control parameters. To override default parameters, use \code{\link{update.rmhcontrol}}. } \seealso{ \code{\link{rmhcontrol}}, \code{\link{update.rmhcontrol}}, \code{\link{ppm}}, \code{\link{default.expand}} } \examples{ fit <- ppm(cells, ~1, Strauss(0.1)) default.rmhcontrol(fit) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/update.rmhcontrol.Rd0000644000176000001440000000227512237642734016677 0ustar ripleyusers\name{update.rmhcontrol} \alias{update.rmhcontrol} \title{Update Control Parameters of Metropolis-Hastings Algorithm} \description{ \code{update} method for class \code{"rmhcontrol"}. 
} \usage{ \method{update}{rmhcontrol}(object, \dots) } \arguments{ \item{object}{ Object of class \code{"rmhcontrol"} containing control parameters for a Metropolis-Hastings algorithm. } \item{\dots}{ Arguments to be updated in the new call to \code{\link{rmhcontrol}}. } } \details{ This is a method for the generic function \code{\link{update}} for the class \code{"rmhcontrol"}. An object of class \code{"rmhcontrol"} describes a set of control parameters for the Metropolis-Hastings simulation algorithm. See \code{\link{rmhcontrol}}). \code{update.rmhcontrol} will modify the parameters specified by \code{object} according to the new arguments given. } \value{ Another object of class \code{"rmhcontrol"}. } \examples{ a <- rmhcontrol(expand=1) update(a, expand=2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/blur.Rd0000755000176000001440000000642312237642732014173 0ustar ripleyusers\name{blur} \alias{blur} \title{Apply Gaussian Blur to a Pixel Image} \description{ Applies a Gaussian blur to a pixel image. } \usage{ blur(x, sigma = NULL, ..., normalise=FALSE, bleed = TRUE, varcov=NULL) } \arguments{ \item{x}{The pixel image. An object of class \code{"im"}.} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Ignored. } \item{normalise}{ Logical flag indicating whether the output values should be divided by the corresponding blurred image of the window itself. See Details. } \item{bleed}{ Logical flag indicating whether to allow blur to extend outside the original domain of the image. See Details. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } } \details{ This command applies a Gaussian blur to the pixel image \code{x}. 
The blurring kernel is the isotropic Gaussian kernel with standard deviation \code{sigma}, or the anisotropic Gaussian kernel with variance-covariance matrix \code{varcov}. The arguments \code{sigma} and \code{varcov} are incompatible. Also \code{sigma} may be a vector of length 2 giving the standard deviations of two independent Gaussian coordinates, thus equivalent to \code{varcov = diag(sigma^2)}. If the pixel values of \code{x} include some \code{NA} values (meaning that the image domain does not completely fill the rectangular frame) then these \code{NA} values are first reset to zero. The algorithm then computes the convolution \eqn{x \ast G}{x * G} of the (zero-padded) pixel image \eqn{x} with the specified Gaussian kernel \eqn{G}. If \code{normalise=FALSE}, then this convolution \eqn{x\ast G}{x * G} is returned. If \code{normalise=TRUE}, then the convolution \eqn{x \ast G}{x * G} is normalised by dividing it by the convolution \eqn{w \ast G}{w * G} of the image domain \code{w} with the same Gaussian kernel. Normalisation ensures that the result can be interpreted as a weighted average of input pixel values, without edge effects due to the shape of the domain. If \code{bleed=FALSE}, then pixel values outside the original image domain are set to \code{NA}. Thus the output is a pixel image with the same domain as the input. If \code{bleed=TRUE}, then no such alteration is performed, and the result is a pixel image defined everywhere in the rectangular frame containing the input image. Computation is performed using the Fast Fourier Transform. } \value{ A pixel image with the same pixel array as the input image \code{x}. } \seealso{ \code{\link{interp.im}} for interpolating a pixel image to a finer resolution, \code{\link{density.ppp}} for blurring a point pattern, \code{\link{Smooth.ppp}} for interpolating marks attached to points. 
} \examples{ data(letterR) Z <- as.im(function(x,y) { 4 * x^2 + 3 * y }, letterR) par(mfrow=c(1,3)) plot(Z) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=TRUE)) plot(letterR, add=TRUE) plot(blur(Z, 0.3, bleed=FALSE)) plot(letterR, add=TRUE) par(mfrow=c(1,1)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/rjitter.Rd0000755000176000001440000000362612237642734014716 0ustar ripleyusers\name{rjitter} \alias{rjitter} \title{Random Perturbation of a Point Pattern} \description{ Applies independent random displacements to each point in a point pattern. } \usage{ rjitter(X, radius, retry=TRUE, giveup = 10000) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{radius}{ Scale of perturbations. A positive numerical value. The displacement vectors will be uniformly distributed in a circle of this radius. } \item{retry}{ What to do when a perturbed point lies outside the window of the original point pattern. If \code{retry=FALSE}, the point will be lost; if \code{retry=TRUE}, the algorithm will try again. } \item{giveup}{ Maximum number of unsuccessful attempts. } } \details{ Each of the points in the point pattern \code{X} is subjected to an independent random displacement. The displacement vectors are uniformly distributed in a circle of radius \code{radius}. If a displaced point lies outside the window, then if \code{retry=FALSE} the point will be lost. However if \code{retry=TRUE}, the algorithm will try again: each time a perturbed point lies outside the window, the algorithm will reject it and generate another proposed perturbation of the original point, until one lies inside the window, or until \code{giveup} unsuccessful attempts have been made. In the latter case, any unresolved points will be included without any perturbation. 
The return value will always be a point pattern with the same number of points as \code{X}. } \value{ A point pattern (object of class \code{"ppp"}) in the same window as \code{X}. } \examples{ X <- rsyst(owin(), 10, 10) Y <- rjitter(X, 0.02) plot(Y) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/pairs.im.Rd0000755000176000001440000000474512237642733014757 0ustar ripleyusers\name{pairs.im} \alias{pairs.im} \title{ Scatterplot Matrix for Pixel Images } \description{ Produces a scatterplot matrix of the pixel values in two or more pixel images. } \usage{ \method{pairs}{im}(..., plot=TRUE) } \arguments{ \item{\dots}{ Any number of arguments, each of which is either a pixel image (object of class \code{"im"}) or a named argument to be passed to \code{\link{pairs.default}}. } \item{plot}{ Logical. If \code{TRUE}, the scatterplot matrix is plotted. } } \details{ This is a method for the generic function \code{\link{pairs}} for the class of pixel images. It produces a square array of plot panels, in which each panel shows a scatterplot of the pixel values of one image against the corresponding pixel values of another image. At least two of the arguments \code{\dots} should be pixel images (objects of class \code{"im"}). Their spatial domains must overlap, but need not have the same pixel dimensions. First the pixel image domains are intersected, and converted to a common pixel resolution. Then the corresponding pixel values of each image are extracted. Then \code{\link{pairs.default}} is called to plot the scatterplot matrix. Any arguments in \code{\dots} which are not pixel images will be passed to \code{\link{pairs.default}} to control the plot. } \note{ To control the appearance of the individual scatterplot panels, see \code{\link{pairs.default}}, \code{\link{points}} or \code{\link{par}}. 
To control the plotting symbol for the points in the scatterplot, use the arguments \code{pch}, \code{col}, \code{bg} as described under \code{\link{points}} (because the default panel plotter is the function \code{\link{points}}). To suppress the tick marks on the plot axes, type \code{par(xaxt="n", yaxt="n")} before calling \code{pairs}. } \value{ Invisible. A \code{data.frame} containing the corresponding pixel values for each image. The return value also belongs to the class \code{plotpairsim} which has a plot method, so that it can be re-plotted. } \seealso{ \code{\link{pairs}}, \code{\link{pairs.default}}, \code{\link{plot.im}}, \code{\link{im}}, \code{\link{par}} } \examples{ X <- density(rpoispp(30)) Y <- density(rpoispp(40)) Z <- density(rpoispp(30)) pairs(X,Y,Z) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/as.data.frame.psp.Rd0000755000176000001440000000272512237642732016435 0ustar ripleyusers\name{as.data.frame.psp} \alias{as.data.frame.psp} \title{Coerce Line Segment Pattern to a Data Frame} \description{ Extracts the coordinates of the endpoints in a line segment pattern, and their marks if any, and returns them in a data frame. } \usage{ \method{as.data.frame}{psp}(x, row.names = NULL, ...) } \arguments{ \item{x}{Line segment pattern (object of class \code{"psp"}).} \item{row.names}{Optional character vector of row names.} \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class \code{"psp"} of line segment patterns. It extracts the coordinates of the endpoints of the line segments, and returns them as columns named \code{x0}, \code{y0}, \code{x1} and \code{y1} in a data frame. If the line segments were marked, the marks are appended as an extra column or columns to the data frame which is returned. 
If the marks are a vector then a single column named \code{marks} is appended. in the data frame, with the same type as in the line segment pattern dataset. If the marks are a data frame, then the columns of this data frame are appended (retaining their names). } \value{ A data frame with 4 or 5 columns. } \examples{ data(copper) df <- as.data.frame(copper$Lines) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/contour.im.Rd0000755000176000001440000000433412237642732015323 0ustar ripleyusers\name{contour.im} \alias{contour.im} \title{Contour plot of pixel image} \description{ Generates a contour plot of a pixel image. } \usage{ \method{contour}{im}(x, \dots, main, axes=TRUE, add=FALSE) } \arguments{ \item{x}{ Pixel image to be plotted. An object of class \code{"im"}. } \item{main}{ Character string to be displayed as the main title. } \item{axes}{ Logical. If \code{TRUE}, coordinate axes are plotted (with tick marks) around a region slightly larger than the image window. If \code{FALSE}, no axes are plotted, and a box is drawn tightly around the image window. Ignored if \code{add=TRUE}. } \item{add}{ Logical. If \code{FALSE}, a new plot is created. If \code{TRUE}, the contours are drawn over the existing plot. } \item{\dots}{ Other arguments passed to \code{\link{contour.default}} controlling the contour plot; see Details. } } \details{ This is a method for the generic \code{contour} function, for objects of the class \code{"im"}. An object of class \code{"im"} represents a pixel image; see \code{\link{im.object}}. This function displays the values of the pixel image \code{x} as a contour plot on the current plot device, using equal scales on the \eqn{x} and \eqn{y} axes. The appearance of the plot can be modified using any of the arguments listed in the help for \code{\link{contour.default}}. 
Useful ones include: \describe{ \item{nlevels}{ Number of contour levels to plot. } \item{drawlabels}{ Whether to label the contour lines with text. } \item{col,lty,lwd}{ Colour, type, and width of contour lines. } } See \code{\link{contour.default}} for a full list of these arguments. The defaults for any of the abovementioned arguments can be reset using \code{\link{spatstat.options}}. } \value{ none. } \examples{ # an image Z <- setcov(owin()) contour(Z) contour(Z, axes=FALSE) } \seealso{ \code{\link{im.object}}, \code{\link{plot.im}}, \code{\link{persp.im}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/nndist.ppx.Rd0000755000176000001440000000624112237642733015333 0ustar ripleyusers\name{nndist.ppx} \alias{nndist.ppx} \title{Nearest Neighbour Distances in Any Dimensions} \description{ Computes the distance from each point to its nearest neighbour in a multi-dimensional point pattern. Alternatively computes the distance to the second nearest neighbour, or third nearest, etc. } \usage{ \method{nndist}{ppx}(X, \dots, k=1) } \arguments{ \item{X}{ Multi-dimensional point pattern (object of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each point. If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th data point. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th data point. 
If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th data point. } \details{ This function computes the Euclidean distance from each point in a multi-dimensional point pattern to its nearest neighbour (the nearest other point of the pattern). If \code{k} is specified, it computes the distance to the \code{k}th nearest neighbour. The function \code{nndist} is generic; this function \code{nndist.ppx} is the method for the class \code{"ppx"}. The argument \code{k} may be a single integer, or an integer vector. If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. If there is only one point (if \code{x} has length 1), then a nearest neighbour distance of \code{Inf} is returned. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. To identify \emph{which} point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. To find the nearest neighbour distances from one point pattern to another point pattern, use \code{\link{nncross}}. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. } \section{Warnings}{ An infinite or \code{NA} value is returned if the distance is not defined (e.g. if there is only one point in the point pattern). 
} \seealso{ \code{\link{nndist}}, \code{\link{pairdist}}, \code{\link{nnwhich}} } \examples{ df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5)) X <- ppx(data=df) # nearest neighbours d <- nndist(X) # second nearest neighbours d2 <- nndist(X, k=2) # first, second and third nearest d1to3 <- nndist(X, k=1:3) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/affine.owin.Rd0000755000176000001440000000351712237642732015433 0ustar ripleyusers\name{affine.owin} %DontDeclareMethods \alias{affine.owin} \title{Apply Affine Transformation To Window} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a window. } \usage{ \method{affine}{owin}(X, mat=diag(c(1,1)), vec=c(0,0), \dots, rescue=TRUE) } \arguments{ \item{X}{Window (object of class \code{"owin"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{rescue}{ Logical. If \code{TRUE}, the transformed window will be processed by \code{\link{rescue.rectangle}}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} controlling the pixel resolution of the transformed window, if \code{X} is a binary pixel mask. } } \value{ Another window (of class \code{"owin"}) representing the result of applying the affine transformation. } \details{ The window is subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and then the result is translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{affine.ppp}}, \code{\link{affine.psp}}, \code{\link{affine.im}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ # shear transformation shear <- matrix(c(1,0,0.6,1),ncol=2) X <- affine(owin(), shear) \dontrun{ plot(X) } data(letterR) affine(letterR, shear, c(0, 0.5)) affine(as.mask(letterR), shear, c(0, 0.5)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/as.ppm.Rd0000644000176000001440000000351112237642732014415 0ustar ripleyusers\name{as.ppm} \alias{as.ppm} \alias{as.ppm.ppm} \alias{as.ppm.profilepl} \alias{as.ppm.kppm} \title{Extract Fitted Point Process Model} \description{ Extracts the fitted point process model from some kind of fitted model. } \usage{ as.ppm(object) \method{as.ppm}{ppm}(object) \method{as.ppm}{profilepl}(object) \method{as.ppm}{kppm}(object) } \arguments{ \item{object}{An object that includes a fitted Poisson or Gibbs point process model. An object of class \code{"ppm"}, \code{"profilepl"} or \code{"kppm"} or possibly other classes. } } \details{ The function \code{as.ppm} extracts the fitted point process model (of class \code{"ppm"}) from a suitable object. The function \code{as.ppm} is generic, with methods for the classes \code{"ppm"}, \code{"profilepl"} and \code{"kppm"}, and possibly for other classes. For the class \code{"profilepl"} of models fitted by maximum profile pseudolikelihood, the method \code{as.ppm.profilepl} extracts the fitted point process model (with the optimal values of the irregular parameters). For the class \code{"kppm"} of models fitted by minimum contrast using Waagepetersen's two-step estimation procedure (see \code{\link{kppm}}), the method \code{as.ppm.kppm} extracts the Poisson point process model that is fitted in the first stage of the procedure. } \value{ An object of class \code{"ppm"}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{ppm}}, \code{\link{profilepl}}. } \examples{ # fit a model by profile maximum pseudolikelihood rvals <- data.frame(r=(1:10)/100) pfit <- profilepl(rvals, Strauss, cells, ~1) # extract the fitted model fit <- as.ppm(pfit) } \keyword{spatial} \keyword{models} spatstat/man/bdist.tiles.Rd0000755000176000001440000000225412237642732015451 0ustar ripleyusers\name{bdist.tiles} \alias{bdist.tiles} \title{Distance to Boundary of Window} \description{ Computes the shortest distances from each tile in a tessellation to the boundary of the window. } \usage{ bdist.tiles(X) } \arguments{ \item{X}{A tessellation (object of class \code{"tess"}).} } \value{ A numeric vector, giving the shortest distance from each tile in the tessellation to the boundary of the window. Entries of the vector correspond to the entries of \code{tiles(X)}. } \details{ This function computes, for each tile \eqn{s_i}{s[[i]]} in the tessellation \code{X}, the shortest distance from \eqn{s_i}{s[[i]]} to the boundary of the window \eqn{W} containing the tessellation. 
} \seealso{ \code{\link{tess}}, \code{\link{bdist.points}}, \code{\link{bdist.pixels}} } \examples{ P <- runifpoint(15) X <- dirichlet(P) plot(X, col="red") B <- bdist.tiles(X) # identify tiles that do not touch the boundary plot(X[B > 0], add=TRUE, col="green", lwd=3) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/dclf.test.Rd0000644000176000001440000001477512237642732015123 0ustar ripleyusers\name{dclf.test} \alias{dclf.test} \alias{mad.test} \title{ Diggle-Cressie-Loosmore-Ford and Maximum Absolute Deviation Tests } \description{ Perform the Diggle (1986) / Cressie (1991) / Loosmore and Ford (2006) test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.test(X, ..., rinterval = NULL, use.theo=FALSE) mad.test(X, ..., rinterval = NULL, use.theo=FALSE) } \arguments{ \item{X}{ Data for the test. Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class), a simulation envelope (object of class \code{"envelope"}) or a previous result of \code{dclf.test} or \code{mad.test}. } \item{\dots}{ Arguments passed to \code{\link{envelope}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, \code{verbose=FALSE} to turn off the messages, and \code{savefuns} or \code{savepatterns} to save the simulation results. } \item{rinterval}{ Interval of values of the summary function argument \code{r} over which the maximum absolute deviation, or the integral, will be computed for the test. A numeric vector of length 2. 
} \item{use.theo}{ Logical value determining whether to compare the summary function for the data to its theoretical value for CSR (\code{use.theo=TRUE}) or to the sample mean of simulations from CSR (\code{use.theo=FALSE}). } } \details{ These functions perform hypothesis tests for goodness-of-fit of a point pattern dataset to a point process model, based on Monte Carlo simulation from the model. \code{dclf.test} performs the test advocated by Loosmore and Ford (2006) which is also described in Diggle (1986), Cressie (1991, page 667, equation (8.5.42)) and Diggle (2003, page 14). See Baddeley et al (2014). \code{mad.test} performs the \sQuote{global} or \sQuote{Maximum Absolute Deviation} test described by Ripley (1977, 1981). See Baddeley et al (2014). The type of test depends on the type of argument \code{X}. \itemize{ \item If \code{X} is some kind of point pattern, then a test of Complete Spatial Randomness (CSR) will be performed. That is, the null hypothesis is that the point pattern is completely random. \item If \code{X} is a fitted point process model, then a test of goodness-of-fit for the fitted model will be performed. The model object contains the data point pattern to which it was originally fitted. The null hypothesis is that the data point pattern is a realisation of the model. \item If \code{X} is an envelope object generated by \code{\link{envelope}}, then it should have been generated with \code{savefuns=TRUE} or \code{savepatterns=TRUE} so that it contains simulation results. These simulations will be treated as realisations from the null hypothesis. \item Alternatively \code{X} could be a previously-performed test of the same kind (i.e. the result of calling \code{dclf.test} or \code{mad.test}). The simulations used to perform the original test will be re-used to perform the new test (provided these simulations were saved in the original test, by setting \code{savefuns=TRUE} or \code{savepatterns=TRUE}). 
} In all cases, the algorithm will first call \code{\link{envelope}} to generate or extract the simulated summary functions. The number of simulations that will be generated or extracted, is determined by the argument \code{nsim}, and defaults to 99. The summary function that will be computed is determined by the argument \code{fun} (or the first unnamed argument in the list \code{\dots}) and defaults to \code{\link{Kest}} (except when \code{X} is an envelope object generated with \code{savefuns=TRUE}, when these functions will be taken). The choice of summary function \code{fun} affects the power of the test. It is normally recommended to apply a variance-stabilising transformation (Ripley, 1981). If you are using the \eqn{K} function, the normal practice is to replace this by the \eqn{L} function (Besag, 1977) computed by \code{\link{Lest}}. If you are using the \eqn{F} or \eqn{G} functions, the recommended practice is to apply Fisher's variance-stabilising transformation \eqn{\sin^{-1}\sqrt x}{asin(sqrt(x))} using the argument \code{transform}. See the Examples. } \section{Handling Ties}{ If the observed value of the test statistic is equal to one or more of the simulated values (called a \emph{tied value}), then the tied values will be assigned a random ordering, and a message will be printed. } \value{ An object of class \code{"htest"}. Printing this object gives a report on the result of the test. The \eqn{p}-value is contained in the component \code{p.value}. } \references{ Baddeley, A., Diggle, P.J., Hardegen, A., Lawrence, T., Milne, R.K. and Nair, G. (2014) On tests of spatial pattern based on simulation envelopes. \emph{Ecological Monographs}, to appear. Besag, J. (1977) Discussion of Dr Ripley's paper. \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 193--195. Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P. J. (1986). 
Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neuroscience Methods} \bold{18}, 115--125. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Loosmore, N.B. and Ford, E.D. (2006) Statistical inference using the \emph{G} or \emph{K} point pattern spatial statistics. \emph{Ecology} \bold{87}, 1925--1931. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Andrew Hardegen. } \seealso{ \code{\link{envelope}}, \code{\link{dclf.progress}} } \examples{ dclf.test(cells, Lest, nsim=39) m <- mad.test(cells, Lest, verbose=FALSE, rinterval=c(0, 0.1), nsim=19) m # extract the p-value m$p.value # variance stabilised G function dclf.test(cells, Gest, transform=expression(asin(sqrt(.))), verbose=FALSE, nsim=19) } \keyword{spatial} \keyword{htest} spatstat/man/pairdist.ppp.Rd0000755000176000001440000000477512237642733015655 0ustar ripleyusers\name{pairdist.ppp} \alias{pairdist.ppp} \title{Pairwise distances} \description{ Computes the matrix of distances between all pairs of points in a point pattern. } \usage{ \method{pairdist}{ppp}(X, \dots, periodic=FALSE, method="C", squared=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). 
} } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ This is a method for the generic function \code{pairdist}. Given a point pattern \code{X} (an object of class \code{"ppp"}), this function computes the Euclidean distances between all pairs of points in \code{X}, and returns the matrix of distances. Alternatively if \code{periodic=TRUE} and the window containing \code{X} is a rectangle, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite edges of the rectangle are regarded as equivalent. This is meaningless if the window is not a rectangle. If \code{squared=TRUE} then the \emph{squared} Euclidean distances \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}. The squared distances are faster to calculate, and are sufficient for many purposes (such as finding the nearest neighbour of a point). The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is somewhat faster. 
} \seealso{ \code{\link{pairdist}}, \code{\link{pairdist.default}}, \code{\link{pairdist.psp}}, \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{Kest}} } \examples{ data(cells) d <- pairdist(cells) d <- pairdist(cells, periodic=TRUE) d <- pairdist(cells, squared=TRUE) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/envelope.pp3.Rd0000755000176000001440000001545112237642732015546 0ustar ripleyusers\name{envelope.pp3} \alias{envelope.pp3} \title{Simulation Envelopes of Summary Function for 3D Point Pattern} \description{ Computes simulation envelopes of a summary function for a three-dimensional point pattern. } \usage{ \method{envelope}{pp3}(Y, fun=K3est, nsim=99, nrank=1, \dots, simulate=NULL, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ A three-dimensional point pattern (object of class \code{"pp3"}). } \item{fun}{ Function that computes the desired summary statistic for a 3D point pattern. } \item{nsim}{ Number of simulated point patterns to be generated when computing the envelopes. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. 
If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). } \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{maxnerr}{ Maximum number of rejected patterns. If \code{fun} yields an error when applied to a simulated point pattern (for example, because the pattern is empty and \code{fun} requires at least one point), the pattern will be rejected and a new random point pattern will be generated. 
If this happens more than \code{maxnerr} times, the algorithm will give up. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \value{ A function value table (object of class \code{"fv"}) which can be plotted directly. See \code{\link{envelope}} for further details. } \details{ The \code{envelope} command performs simulations and computes envelopes of a summary statistic based on the simulations. The result is an object that can be plotted to display the envelopes. The envelopes can be used to assess the goodness-of-fit of a point process model to point pattern data. The \code{envelope} function is generic, with methods for the classes \code{"ppp"}, \code{"ppm"} and \code{"kppm"} described in the help file for \code{\link{envelope}}. This function \code{envelope.pp3} is the method for three-dimensional point patterns (objects of class \code{"pp3"}). For the most basic use, if you have a 3D point pattern \code{X} and you want to test Complete Spatial Randomness (CSR), type \code{plot(envelope(X, K3est,nsim=39))} to see the three-dimensional \eqn{K} function for \code{X} plotted together with the envelopes of the three-dimensional \eqn{K} function for 39 simulations of CSR. To create simulation envelopes, the command \code{envelope(Y, ...)} first generates \code{nsim} random point patterns in one of the following ways. \itemize{ \item If \code{simulate=NULL}, then we generate \code{nsim} simulations of Complete Spatial Randomness (i.e. 
\code{nsim} simulated point patterns each being a realisation of the uniform Poisson point process) with the same intensity as the pattern \code{Y}. \item If \code{simulate} is supplied, then it determines how the simulated point patterns are generated. See \code{\link{envelope}} for details. } The summary statistic \code{fun} is applied to each of these simulated patterns. Typically \code{fun} is one of the functions \code{K3est}, \code{G3est}, \code{F3est} or \code{pcf3est}. It may also be a character string containing the name of one of these functions. For further information, see the documentation for \code{\link{envelope}}. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. } \seealso{ \code{\link{pp3}}, \code{\link{rpoispp3}}, \code{\link{K3est}}, \code{\link{G3est}}, \code{\link{F3est}}, \code{\link{pcf3est}}. } \examples{ X <- rpoispp3(20, box3()) \dontrun{ plot(envelope(X, nsim=39)) } \testonly{ plot(envelope(X, nsim=4)) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/localKinhom.Rd0000755000176000001440000001167712237642732015476 0ustar ripleyusers\name{localKinhom} \alias{localKinhom} \alias{localLinhom} \title{Inhomogeneous Neighbourhood Density Function} \description{ Computes spatially-weighted versions of the the local \eqn{K}-function or \eqn{L}-function. } \usage{ localKinhom(X, lambda, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL) localLinhom(X, lambda, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL, sigma = NULL, varcov = NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{lambda}{ Optional. Values of the estimated intensity function. 
Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{correction}{ String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"Ripley"}, \code{"translation"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } \item{sigma, varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the kernel smoothing procedure for estimating \code{lambda}, if \code{lambda} is missing. } } \details{ The functions \code{localKinhom} and \code{localLinhom} are inhomogeneous or weighted versions of the neighbourhood density function implemented in \code{\link{localK}} and \code{\link{localL}}. Given a spatial point pattern \code{X}, the inhomogeneous neighbourhood density function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ L_i(r) = \sqrt{\frac 1 \pi \sum_j \frac{e_{ij}}{\lambda_j}} }{ L[i](r) = sqrt( (1/pi) * sum[j] e[i,j]/lambda[j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{\lambda_j}{\lambda[j]} is the estimated intensity of the point pattern at the point \eqn{j}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). 
The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one of the summands that contributes to the global estimate of the inhomogeneous L function (see \code{\link{Linhom}}). By default, the function \eqn{L_i(r)}{L[i](r)} or \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \seealso{ \code{\link{Kinhom}}, \code{\link{Linhom}}, \code{\link{localK}}, \code{\link{localL}}. 
} \examples{ data(ponderosa) X <- ponderosa # compute all the local L functions L <- localLinhom(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 12 metres L12 <- localL(X, rvalue=12) } \author{ Mike Kuhn, Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/split.ppp.Rd0000755000176000001440000001307512237642734015163 0ustar ripleyusers\name{split.ppp} \alias{split.ppp} \alias{split<-.ppp} \title{Divide Point Pattern into Sub-patterns} \description{ Divides a point pattern into several sub-patterns, according to their marks, or according to any user-specified grouping. } \usage{ \method{split}{ppp}(x, f = marks(x), drop=FALSE, un=NULL, \dots) \method{split}{ppp}(x, f = marks(x), drop=FALSE, un=missing(f), \dots) <- value } \arguments{ \item{x}{ A two-dimensional point pattern. An object of class \code{"ppp"}. } \item{f}{ Data determining the grouping. Either a factor, a pixel image with factor values, a tessellation, or the name of one of the columns of marks. } \item{drop}{ Logical. Determines whether empty groups will be deleted. } \item{un}{ Logical. Determines whether the resulting subpatterns will be unmarked (i.e. whether marks will be removed from the points in each subpattern). } \item{\dots}{ Other arguments are ignored. } \item{value}{ List of point patterns. } } \value{ The value of \code{split.ppp} is a list of point patterns. The components of the list are named by the levels of \code{f}. The list also has the class \code{"splitppp"}. The assignment form \code{split<-.ppp} returns the updated point pattern \code{x}. 
} \details{ The function \code{split.ppp} divides up the points of the point pattern \code{x} into several sub-patterns according to the values of \code{f}. The result is a list of point patterns. The argument \code{f} may be \itemize{ \item a factor, of length equal to the number of points in \code{x}. The levels of \code{f} determine the destination of each point in \code{x}. The \code{i}th point of \code{x} will be placed in the sub-pattern \code{split.ppp(x)$l} where \code{l = f[i]}. \item a pixel image (object of class \code{"im"}) with factor values. The pixel value of \code{f} at each point of \code{x} will be used as the classifying variable. \item a tessellation (object of class \code{"tess"}). Each point of \code{x} will be classified according to the tile of the tessellation into which it falls. \item a character string, matching the name of one of the columns of marks, if \code{marks(x)} is a data frame. This column should be a factor. } If \code{f} is missing, then it will be determined by the marks of the point pattern. The pattern \code{x} can be either \itemize{ \item a multitype point pattern (a marked point pattern whose marks vector is a factor). Then \code{f} is taken to be the marks vector. The effect is that the points of each type are separated into different point patterns. \item a marked point pattern with a data frame of marks, containing at least one column that is a factor. The first such column will be used to determine the splitting factor \code{f}. } Some of the sub-patterns created by the split may be empty. If \code{drop=TRUE}, then empty sub-patterns will be deleted from the list. If \code{drop=FALSE} then they are retained. The argument \code{un} determines how to handle marks in the case where \code{x} is a marked point pattern. If \code{un=TRUE} then the marks of the points will be discarded when they are split into groups, while if \code{un=FALSE} then the marks will be retained. 
If \code{f} and \code{un} are both missing, then the default is \code{un=TRUE} for multitype point patterns and \code{un=FALSE} for marked point patterns with a data frame of marks. The result of \code{split.ppp} has class \code{"splitppp"} and can be plotted using \code{\link{plot.splitppp}}. The assignment function \code{split<-.ppp} updates the point pattern \code{x} so that it satisfies \code{split(x, f, drop, un) = value}. The argument \code{value} is expected to be a list of point patterns, one for each level of \code{f}. These point patterns are expected to be compatible with the type of data in the original pattern \code{x}. Splitting can also be undone by the function \code{\link{superimpose}}. } \seealso{ \code{\link{cut.ppp}}, \code{\link{plot.splitppp}}, \code{\link{superimpose}}, \code{\link{im}}, \code{\link{tess}}, \code{\link{ppp.object}} } \examples{ # (1) Splitting by marks # Multitype point pattern: separate into types data(amacrine) u <- split(amacrine) # plot them plot(split(amacrine)) # the following are equivalent: amon <- split(amacrine)$on amon <- unmark(amacrine[amacrine$marks == "on"]) # the following are equivalent: amon <- split(amacrine, un=FALSE)$on amon <- amacrine[amacrine$marks == "on"] # Scramble the locations of the 'on' cells u <- split(amacrine) u$on <- runifpoint(amon$n, amon$window) split(amacrine) <- u # Point pattern with continuous marks data(longleaf) \testonly{ # smaller dataset longleaf <- longleaf[seq(1, longleaf$n, by=80)] } # cut the range of tree diameters into three intervals # using cut.ppp long3 <- cut(longleaf, breaks=3) # now split them long3split <- split(long3) # (2) Splitting by a factor # Unmarked point pattern data(swedishpines) # cut & split according to nearest neighbour distance f <- cut(nndist(swedishpines), 3) u <- split(swedishpines, f) # (3) Splitting over a tessellation tes <- tess(xgrid=seq(0,96,length=5),ygrid=seq(0,100,length=5)) v <- split(swedishpines, tes) } \author{Adrian Baddeley 
\email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/default.expand.Rd0000755000176000001440000000763612237642732016140 0ustar ripleyusers\name{default.expand} \alias{default.expand} \title{Default Expansion Rule for Simulation of Model} \description{ Defines the default expansion window or expansion rule for simulation of a fitted point process model. } \usage{ default.expand(object, m=2, epsilon=1e-6) } \arguments{ \item{object}{ A point process model (object of class \code{"ppm"} or \code{"rmhmodel"}). } \item{m}{ A single numeric value. The window will be expanded by a distance \code{m * reach(object)} along each side. } \item{epsilon}{ Threshold argument passed to \code{\link{reach}} to determine \code{reach(object)}. } } \value{ A window expansion rule (object of class \code{"rmhexpand"}). } \details{ This function computes a default value for the expansion rule (the argument \code{expand} in \code{\link{rmhcontrol}}) given a fitted point process model \code{object}. This default is used by \code{\link{envelope}}, \code{\link{qqplot.ppm}}, \code{\link{simulate.ppm}} and other functions. Suppose we wish to generate simulated realisations of a fitted point process model inside a window \code{w}. It is advisable to first simulate the pattern on a larger window, and then clip it to the original window \code{w}. This avoids edge effects in the simulation. It is called \emph{expansion} of the simulation window. Accordingly, for the Metropolis-Hastings simulation algorithm \code{\link{rmh}}, the algorithm control parameters specified by \code{\link{rmhcontrol}} include an argument \code{expand} that determines the expansion of the simulation window. The function \code{default.expand} determines the default expansion rule for a fitted point process model \code{object}. If the model is Poisson, then no expansion is necessary. 
No expansion is performed by default, and \code{default.expand} returns a rule representing no expansion. The simulation window is the original window \code{w = as.owin(object)}. If the model depends on external covariates (i.e.\ covariates other than the Cartesian covariates \code{x} and \code{y} and the \code{marks}) then no expansion is feasible, in general, because the spatial domain of the covariates is not guaranteed to be large enough. \code{default.expand} returns a rule representing no expansion. The simulation window is the original window \code{w = as.owin(object)}. If the model depends on the Cartesian covariates \code{x} and \code{y}, it would be feasible to expand the simulation window, and this was the default for \pkg{spatstat} version 1.24-1 and earlier. However this sometimes produces artefacts (such as an empty point pattern) or memory overflow, because the fitted trend, extrapolated outside the original window of the data, may become very large. In \pkg{spatstat} version 1.24-2 and later, the default rule is \emph{not} to expand if the model depends on \code{x} or \code{y}. Again \code{default.expand} returns a rule representing no expansion. Otherwise, expansion will occur. The original window \code{w = as.owin(object)} is expanded by a distance \code{m * rr}, where \code{rr} is the interaction range of the model, computed by \code{\link{reach}}. If \code{w} is a rectangle then each edge of \code{w} is displaced outward by distance \code{m * rr}. If \code{w} is not a rectangle then \code{w} is dilated by distance \code{m * rr} using \code{\link{dilation}}. 
} \seealso{ \code{\link{rmhexpand}}, \code{\link{rmhcontrol}}, \code{\link{rmh}}, \code{\link{envelope}}, \code{\link{qqplot.ppm}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(0.07)) default.expand(fit) mod <- rmhmodel(cif="strauss", par=list(beta=100, gamma=0.5, r=0.07)) default.expand(mod) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/rpoispp3.Rd0000755000176000001440000000244612237642734015005 0ustar ripleyusers\name{rpoispp3} \alias{rpoispp3} \title{ Generate Poisson Point Pattern in Three Dimensions } \description{ Generate a random three-dimensional point pattern using the homogeneous Poisson process. } \usage{ rpoispp3(lambda, domain = box3()) } \arguments{ \item{lambda}{ Intensity of the Poisson process. A single positive number. } \item{domain}{ Three-dimensional box in which the process should be generated. An object of class \code{"box3"}. } } \details{ This function generates a realisation of the homogeneous Poisson process in three dimensions, with intensity \code{lambda} (points per unit volume). The realisation is generated inside the three-dimensional region \code{domain} which currently must be a rectangular box (object of class \code{"box3"}). } \value{ The simulated three-dimensional point pattern (an object of class \code{"pp3"}). } \note{ The intensity \code{lambda} is the expected number of points \emph{per unit volume}.
} \seealso{ \code{\link{runifpoint3}}, \code{\link{pp3}}, \code{\link{box3}} } \examples{ X <- rpoispp3(50) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/Extract.ppp.Rd0000755000176000001440000001222212237642732015431 0ustar ripleyusers\name{Extract.ppp} \alias{[.ppp} \alias{[<-.ppp} \title{Extract or Replace Subset of Point Pattern} \description{ Extract or replace a subset of a point pattern. Extraction of a subset has the effect of thinning the points and/or trimming the window. } \usage{ \method{[}{ppp}(x, i, j, drop, ...) \method{[}{ppp}(x, i, j) <- value } \arguments{ \item{x}{ A two-dimensional point pattern. An object of class \code{"ppp"}. } \item{i}{ Subset index. Either a valid subset index in the usual \R sense, indicating which points should be retained, or a window (an object of class \code{"owin"}) delineating a subset of the original observation window. } \item{value}{ Replacement value for the subset. A point pattern. } \item{j}{ Redundant. Included for backward compatibility. } \item{drop, \dots}{ Ignored. These arguments are required for compatibility with the generic function. } } \value{ A point pattern (of class \code{"ppp"}). } \details{ These functions extract a designated subset of a point pattern, or replace the designated subset with another point pattern. The function \code{[.ppp} is a method for \code{\link{[}} for the class \code{"ppp"}. It extracts a designated subset of a point pattern, either by ``\emph{thinning}'' (retaining/deleting some points of a point pattern) or ``\emph{trimming}'' (reducing the window of observation to a smaller subregion and retaining only those points which lie in the subregion) or both. 
The pattern will be ``thinned'' if \code{i} is a subset index in the usual \R sense: either a numeric vector of positive indices (identifying the points to be retained), a numeric vector of negative indices (identifying the points to be deleted) or a logical vector of length equal to the number of points in the point pattern \code{x}. In the latter case, the points \code{(x$x[i], x$y[i])} for which \code{subset[i]=TRUE} will be retained, and the others will be deleted. The pattern will be ``trimmed'' if \code{i} is an object of class \code{"owin"} specifying a window of observation. The points of \code{x} lying inside the new \code{window} will be retained. Alternatively \code{i} may be a pixel image (object of class \code{"im"}) with logical values; the pixels with the value \code{TRUE} will be interpreted as a window. The function \code{[<-.ppp} is a method for \code{\link{[<-}} for the class \code{"ppp"}. It replaces the designated subset with the point pattern \code{value}. The subset of \code{x} to be replaced is designated by the argument \code{i} as above. The replacement point pattern \code{value} must lie inside the window of the original pattern \code{x}. The ordering of points in \code{x} will be preserved if the replacement pattern \code{value} has the same number of points as the subset to be replaced. Otherwise the ordering is unpredictable. If the original pattern \code{x} has marks, then the replacement pattern \code{value} must also have marks, of the same type. Use the function \code{\link{unmark}} to remove marks from a marked point pattern. Use the function \code{\link{split.ppp}} to select those points in a marked point pattern which have a specified mark. } \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{unmark}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}} } \section{Warnings}{ The function does not check whether \code{window} is a subset of \code{x$window}. 
Nor does it check whether \code{value} lies inside \code{x$window}. } \examples{ data(longleaf) # Longleaf pines data \dontrun{ plot(longleaf) } \testonly{ longleaf <- longleaf[seq(1,longleaf$n,by=10)] } # adult trees defined to have diameter at least 30 cm adult <- (longleaf$marks >= 30) longadult <- longleaf[adult] \dontrun{ plot(longadult) } # note that the marks are still retained. # Use unmark(longadult) to remove the marks # New Zealand trees data data(nztrees) \dontrun{ plot(nztrees) # plot shows a line of trees at the far right abline(v=148, lty=2) # cut along this line } nzw <- owin(c(0,148),c(0,95)) # the subwindow # trim dataset to this subwindow nzsub <- nztrees[nzw] \dontrun{ plot(nzsub) } # Redwood data data(redwood) \dontrun{ plot(redwood) } # Random thinning: delete 60\% of data retain <- (runif(redwood$n) < 0.4) thinred <- redwood[retain] \dontrun{ plot(thinred) } # Scramble 60\% of data modif <- (runif(redwood$n) < 0.6) scramble <- function(x) { runifpoint(x$n, x$window) } redwood[modif] <- scramble(redwood[modif]) # Lansing woods data - multitype points data(lansing) \testonly{ lansing <- lansing[seq(1, lansing$n, length=100)] } # Hickory trees hicks <- split(lansing)$hickory # Trees in subwindow win <- owin(c(0.3, 0.6),c(0.2, 0.5)) lsub <- lansing[win] # Scramble the locations of trees in subwindow, retaining their marks lansing[win] <- scramble(lsub) \%mark\% (lsub$marks) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/spokes.Rd0000755000176000001440000000557212237642734014541 0ustar ripleyusers\name{spokes} \alias{spokes} \title{Spokes pattern of dummy points} \description{ Generates a pattern of dummy points in a window, given a data point pattern. The dummy points lie on the radii of circles emanating from each data point. 
} \usage{ spokes(x, y, nrad = 3, nper = 3, fctr = 1.5, Mdefault = 1) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates of data points, or a list with components \code{x} and \code{y}, or a point pattern (an object of class \code{ppp}). } \item{y}{ Vector of \eqn{y} coordinates of data points. Ignored unless \code{x} is a vector. } \item{nrad}{ Number of radii emanating from each data point. } \item{nper}{ Number of dummy points per radius. } \item{fctr}{ Scale factor. Length of largest spoke radius is \code{fctr * M} where \code{M} is the mean nearest neighbour distance for the data points. } \item{Mdefault}{ Value of \code{M} to be used if \code{x} has length 1. } } \value{ If argument \code{x} is a point pattern, a point pattern with window equal to that of \code{x}. Otherwise a list with two components \code{x} and \code{y}. In either case the components \code{x} and \code{y} of the value are numeric vectors giving the coordinates of the dummy points. } \details{ This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}). Given the data points, the function creates a collection of \code{nrad * nper * length(x)} dummy points. Around each data point \code{(x[i],y[i])} there are \code{nrad * nper} dummy points, lying on \code{nrad} radii emanating from \code{(x[i],y[i])}, with \code{nper} dummy points equally spaced along each radius. The (equal) spacing of dummy points along each radius is controlled by the factor \code{fctr}. The distance from a data point to the furthest of its associated dummy points is \code{fctr * M} where \code{M} is the mean nearest neighbour distance for the data points. If there is only one data point the nearest neighbour distance is infinite, so the value \code{Mdefault} will be used in place of \code{M}. If \code{x} is a point pattern, then the value returned is also a point pattern, which is clipped to the window of \code{x}. 
Hence there may be fewer than \code{nrad * nper * length(x)} dummy points in the pattern returned. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{inside.owin}}, \code{\link{gridcentres}}, \code{\link{stratrand}} } \examples{ dat <- runifrect(10) \dontrun{ plot(dat) } dum <- spokes(dat$x, dat$y) \dontrun{ points(dum$x, dum$y, pch=".") } Q <- quadscheme(dat, dum) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/predict.mppm.Rd0000644000176000001440000001123612241443112015606 0ustar ripleyusers\name{predict.mppm} \alias{predict.mppm} \title{Prediction for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model obtained by \code{\link{mppm}}, evaluate the spatial trend and/or the conditional intensity of the model. By default, predictions are evaluated over a grid of locations, yielding pixel images of the trend and conditional intensity. Alternatively predictions may be evaluated at specified locations with specified values of the covariates. } \usage{ \method{predict}{mppm}(object, ..., newdata = NULL, type = c("trend", "cif"), ngrid = 40, locations=NULL, verbose=FALSE) } \arguments{ \item{object}{The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{newdata}{ New values of the covariates, for which the predictions should be computed. If \code{newdata=NULL}, predictions are computed for the original values of the covariates, to which the model was fitted. Otherwise \code{newdata} should be a hyperframe (see \code{\link[spatstat]{hyperframe}}) containing columns of covariates as required by the model. If \code{type} includes \code{"cif"}, then \code{newdata} must also include a column of spatial point pattern responses, in order to compute the conditional intensity. 
} \item{type}{ Type of predicted values required. A character string or vector of character strings. Options are \code{"trend"} for the spatial trend (first-order term) and \code{"cif"} or \code{"lambda"} for the conditional intensity. } \item{ngrid}{ Dimensions of the grid of spatial locations at which prediction will be performed (if \code{locations=NULL}). An integer or a pair of integers. } \item{locations}{ Optional. The locations at which predictions should be performed. A list of point patterns, with one entry for each row of \code{newdata}. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ This function computes the spatial trend and the conditional intensity of a fitted multiple spatial point process model. See Baddeley and Turner (2000) and Baddeley et al (2007) for explanation and examples. Note that by ``spatial trend'' we mean the (exponentiated) first order potential and not the intensity of the process. [For example if we fit the stationary Strauss process with parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, then the spatial trend is constant and equal to \eqn{\beta}{beta}.] The conditional intensity \eqn{\lambda(u,X)}{lambda(u,X)} of the fitted model is evaluated at each required spatial location u, with respect to the response point pattern X. If \code{locations=NULL}, then predictions are performed at an \code{ngrid} by \code{ngrid} grid of locations in the window for each response point pattern. The result will be a hyperframe containing a column of images of the trend (if selected) and a column of images of the conditional intensity (if selected). The result can be plotted. If \code{locations} is given, then it should be a list of point patterns (objects of class \code{"ppp"}). Predictions are performed at these points. The result is a hyperframe containing a column of marked point patterns, constructed by attaching the predicted values (as marks) to the specified locations. } \value{ A hyperframe with columns named \code{trend} and \code{cif}.
If \code{locations=NULL}, the entries of the hyperframe are pixel images. If \code{locations} is not null, the entries are marked point patterns constructed by attaching the predicted values to the \code{locations} point patterns. } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. } \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \seealso{ \code{\link{mppm}}, \code{\link{fitted.mppm}}, \code{\link[spatstat]{hyperframe}} } \examples{ data(waterstriders) h <- hyperframe(Bugs=waterstriders) fit <- mppm(Bugs ~ x, data=h, interaction=Strauss(7)) # prediction on a grid p <- predict(fit) plot(p$trend) # prediction at specified locations loc <- with(h, runifpoint(20, Bugs$window)) p2 <- predict(fit, locations=loc) plot(p2$trend) } \keyword{spatial} \keyword{models} spatstat/man/update.kppm.Rd0000755000176000001440000000350312237642734015455 0ustar ripleyusers\name{update.kppm} \alias{update.kppm} \title{Update a Fitted Cluster Point Process Model} \description{ \code{update} method for class \code{"kppm"}. } \usage{ \method{update}{kppm}(object, trend = ~1, ..., clusters = NULL) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}, obtained from \code{\link{kppm}}. } \item{trend}{ A formula without a left hand side, determining the form of the intensity of the model. } \item{\dots}{ Other arguments passed to \code{\link{kppm}}. } \item{clusters}{ The type of cluster mechanism. A character string. See \code{\link{kppm}}. 
} } \details{ \code{object} should be a fitted cluster point process model, obtained from the model-fitting function \code{\link{kppm}}. The model will be updated according to the new arguments provided. The argument \code{trend} determines the formula for the intensity in the model. It should be an \R formula without a left hand side. It may include the symbols \code{.} and \code{+} or \code{-} to specify addition or deletion of terms in the current model formula, as shown in the Examples below. The model is refitted using \code{\link{kppm}}. } \value{ Another fitted cluster point process model (object of class \code{"kppm"}). } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link[spatstat:methods.kppm]{methods.kppm}}, \code{\link{vcov.kppm}} } \examples{ data(redwood) fit <- kppm(redwood, ~1, "Thomas") fitx <- update(fit, ~ . + x) fitM <- update(fit, clusters="MatClust") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/nndist.pp3.Rd0000755000176000001440000000613512237642733015220 0ustar ripleyusers\name{nndist.pp3} \alias{nndist.pp3} \title{Nearest neighbour distances in three dimensions} \description{ Computes the distance from each point to its nearest neighbour in a three-dimensional point pattern. Alternatively computes the distance to the second nearest neighbour, or third nearest, etc. } \usage{ \method{nndist}{pp3}(X, \dots, k=1) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each point.
If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th data point. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th data point. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th data point. } \details{ This function computes the Euclidean distance from each point in a three-dimensional point pattern to its nearest neighbour (the nearest other point of the pattern). If \code{k} is specified, it computes the distance to the \code{k}th nearest neighbour. The function \code{nndist} is generic; this function \code{nndist.pp3} is the method for the class \code{"pp3"}. The argument \code{k} may be a single integer, or an integer vector. If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. If there is only one point (if \code{x} has length 1), then a nearest neighbour distance of \code{Inf} is returned. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. To identify \emph{which} point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. To use the nearest neighbour distances for statistical inference, it is often advisable to use the edge-corrected empirical distribution, computed by \code{\link{G3est}}. To find the nearest neighbour distances from one point pattern to another point pattern, use \code{\link{nncross}}. } \section{Warnings}{ An infinite or \code{NA} value is returned if the distance is not defined (e.g. if there is only one point in the point pattern). 
} \seealso{ \code{\link{nndist}}, \code{\link{pairdist}}, \code{\link{G3est}}, \code{\link{nnwhich}} } \examples{ X <- runifpoint3(40) # nearest neighbours d <- nndist(X) # second nearest neighbours d2 <- nndist(X, k=2) # first, second and third nearest d1to3 <- nndist(X, k=1:3) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} based on code for two dimensions by Pavel Grabarnik } \keyword{spatial} \keyword{math} spatstat/man/pppmatching.Rd0000755000176000001440000000510312237642733015534 0ustar ripleyusers\name{pppmatching} \alias{pppmatching} \title{Create a Point Matching} \description{ Creates an object of class \code{"pppmatching"} representing a matching of two planar point patterns (objects of class \code{"ppp"}). } \usage{ pppmatching(X, Y, am, type = NULL, cutoff = NULL, q = NULL, mdist = NULL) } \arguments{ \item{X,Y}{Two point patterns (objects of class \code{"ppp"}).} \item{am}{ An \code{X$n} by \code{Y$n} matrix with entries \eqn{\geq 0}{>= 0} that specifies which points are matched and with what weight; alternatively, an object that can be coerced to this form by \code{as.matrix}. } \item{type}{ A character string giving the type of the matching. One of \code{"spa"}, \code{"ace"} or \code{"mat"}, or \code{NULL} for a generic or unknown matching. } \item{cutoff, q}{ Numerical values specifying the cutoff value \eqn{> 0} for interpoint distances and the order \eqn{q \in [1,\infty]}{q in [1,Inf]} of the average that is applied to them. \code{NULL} if not applicable or unknown. } \item{mdist}{ Numerical value for the distance to be associated with the matching. } } \details{ The argument \code{am} is interpreted as a "generalized adjacency matrix": if the \code{[i,j]}-th entry is positive, then the \code{i}-th point of \code{X} and the \code{j}-th point of \code{Y} are matched and the value of the entry gives the corresponding weight of the match.
For an unweighted matching all the weights should be set to \eqn{1}. The remaining arguments are optional and allow to save additional information about the matching. See the help files for \code{\link{pppdist}} and \code{\link{matchingdist}} for details on the meaning of these parameters. } \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{pppmatching.object}} \code{\link{matchingdist}} } \examples{ # a random unweighted complete matching X <- runifpoint(10) Y <- runifpoint(10) am <- r2dtable(1, rep(1,10), rep(1,10))[[1]] # generates a random permutation matrix m <- pppmatching(X, Y, am) summary(m) m$matrix \dontrun{ plot(m) } # a random weighted complete matching X <- runifpoint(7) Y <- runifpoint(7) am <- r2dtable(1, rep(10,7), rep(10,7))[[1]]/10 # generates a random doubly stochastic matrix m2 <- pppmatching(X, Y, am) summary(m2) m2$matrix \dontrun{ # Note: plotting does currently not distinguish # between different weights plot(m2) } } \keyword{spatial} \keyword{datagen} spatstat/man/flu.Rd0000755000176000001440000001022012237642732014003 0ustar ripleyusers\name{flu} \alias{flu} \docType{data} \title{ Influenza Virus Proteins } \description{ Replicated spatial point patterns giving the locations of two different virus proteins on the membranes of cells infected with influenza virus. } \usage{data(flu)} \format{ A \code{\link{hyperframe}} with 41 rows and four columns: \describe{ \item{pattern}{ List of spatial point patterns (objects of class \code{"ppp"}) with points of two types, identifying the locations of two different proteins on a membrane sheet. } \item{virustype}{ Factor identifying whether the infecting virus was the wild type (\code{wt}) or mutant (\code{mut1}). 
} \item{stain}{ Factor identifying whether the membrane sheet was stained for the proteins \emph{M2} and \emph{M1} (\code{stain="M2-M1"}) or stained for the proteins \emph{M2} and \emph{HA} (\code{stain="M2-HA"}). } \item{frameid}{ Integer. Serial number of the microscope frame in the original experiment. Frame identifier is not unique across different values of \code{virustype} and \code{stain}. } } The row names of the hyperframe can be used as succinct labels in plots. } \details{ The data consist of 41 spatial point patterns, each giving the locations of two different virus proteins on the membranes of cells infected with influenza virus. Chen et al (2008) conducted the experiment and used spatial analysis to establish evidence for an interaction between the influenza virus proteins M1 and M2 that is important for the study of viral replication. Canine kidney cells were infected with human influenza, Udorn strain, either the wild type or a mutant which encodes a defective M2 protein. At twelve hours post-infection, membrane sheets were prepared and stained for viral proteins, using two antibodies conjugated to gold particles of two sizes (6 nanometre and 12 nanometre diameter) enabling localisation of two different proteins on each sheet. The 6 nm particles were stained for M2 (ion channel protein), while the 12 nm particles were stained either for M1 (matrix protein) or for HA (hemagglutinin). Membrane sheets were visualised in electron microscopy. Experimental technique and spatial analysis of the membranes stained for M2 and M1 is reported in Chen et al (2008). Analysis of the membranes stained for M2 and HA is reported in Rossman et al (2010). The M2-HA data shows a stronger association between the two proteins which has also been observed biochemically and functionally (Rossman et al, 2010). The dataset \code{flu} is a \code{\link{hyperframe}} with one row for each membrane sheet. 
The column named \code{pattern} contains the spatial point patterns of gold particle locations, with two types of points (either \code{M1} and \code{M2} or \code{HA} and \code{M2}). The column named \code{virustype} is a factor identifying the virus: either wild type \code{wt} or mutant \code{mut1}. The column named \code{stain} is a factor identifying whether the membrane was stained for M1 and M2 (\code{stain="M2-M1"}) or stained for HA and M2 (\code{stain="M2-HA"}). The row names of the hyperframe are a succinct summary of the experimental conditions and can be used as labels in plots. See the Examples. } \source{ Data generously provided by Dr G.P. Leser and Dr R.A. Lamb. Please cite Chen et al (2008) in any use of these data. } \references{ Chen, B.J., Leser, G.P., Jackson, D. and Lamb, R.A. (2008) The influenza virus M2 protein cytoplasmic tail interacts with the M1 protein and influences virus assembly at the site of virus budding. \emph{Journal of Virology} \bold{82}, 10059--10070. Rossman, J.S., Jing, X.H., Leser, G.P. and Lamb, R.A. (2010) Influenza virus M2 protein mediates ESCRT-independent membrane scission \emph{Cell} \bold{142}, 902--913. } \examples{ data(flu) flu Y <- flu$pattern[10] Y <- flu[10, 1, drop=TRUE] wildM1 <- with(flu, virustype == "wt" & stain == "M2-M1") plot(flu[wildM1, 1, drop=TRUE], main=c("flu data", "wild type virus, M2-M1 stain"), pch=c(3,16), cex=0.4, cols=2:3) } \keyword{datasets} spatstat/man/coef.ppm.Rd0000755000176000001440000000407412237642732014736 0ustar ripleyusers\name{coef.ppm} \alias{coef.ppm} \title{ Coefficients of Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, extract the coefficients of the fitted model. A method for \code{coef}. } \usage{ \method{coef}{ppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) } \item{\dots}{ Ignored. } } \value{ A vector containing the fitted coefficients. 
} \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). This function extracts the vector of coefficients of the fitted model. This is the estimate of the parameter vector \eqn{\theta}{theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ lambda(u,x) = exp(theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic. For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. Use \code{\link{print.ppm}} to print a more useful description of the fitted model. } \seealso{ \code{\link{print.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ data(cells) poi <- ppm(cells, ~1, Poisson()) coef(poi) # This is the log of the fitted intensity of the Poisson process stra <- ppm(cells, ~1, Strauss(r=0.07)) coef(stra) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/plot.kppm.Rd0000755000176000001440000000331412237642733015150 0ustar ripleyusers\name{plot.kppm} \alias{plot.kppm} \title{Plot a fitted cluster point process} \description{ Plots a fitted cluster point process model, displaying the fitted intensity and the fitted \eqn{K}-function. } \usage{ \method{plot}{kppm}(x, ..., what=c("intensity", "statistic")) } \arguments{ \item{x}{ Fitted cluster point process model. An object of class \code{"kppm"}. 
} \item{\dots}{ Arguments passed to \code{\link{plot.ppm}} and \code{\link{plot.fv}} to control the plot. } \item{what}{ Character vector determining what will be plotted. } } \details{ This is a method for the generic function \code{\link{plot}} for the class \code{"kppm"} of fitted cluster point process models. The argument \code{x} should be a cluster point process model (object of class \code{"kppm"}) obtained using the function \code{\link{kppm}}. By default, this command will first plot the fitted intensity of the model, using \code{\link{plot.ppm}}, and then plot the empirical and fitted summary statistics, using \code{\link{plot.fv}}. The choice of plots (and the order in which they are displayed) is controlled by the argument \code{what}. The options (partially matched) are \code{"intensity"} and \code{"statistic"}. The option \code{what="intensity"} will be ignored if the model is stationary. } \value{ Null. } \examples{ data(redwood) fit <- kppm(redwood, ~1, "Thomas") plot(fit) } \seealso{ \code{\link{kppm}}, \code{\link{plot.ppm}}, \code{\link{plot.minconfit}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/pairdist.lpp.Rd0000755000176000001440000000250412237642733015635 0ustar ripleyusers\name{pairdist.lpp} \alias{pairdist.lpp} \title{ Pairwise shortest-path distances between points on a linear network } \description{ Given a pattern of points on a linear network, compute the matrix of distances between all pairs of points, measuring distance by the shortest path in the network. } \usage{ \method{pairdist}{lpp}(X, ..., method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{\dots}{ Ignored. 
} } \details{ Given a pattern of points on a linear network, this function computes the matrix of distances between all pairs of points, measuring distance by the shortest path in the network. If \code{method="C"} the distances are computed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. } \value{ A symmetric matrix. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{lpp}} } \examples{ example(lpp) pairdist(X) } \keyword{spatial} spatstat/man/convolve.im.Rd0000644000176000001440000000465612237642732015465 0ustar ripleyusers\name{convolve.im} \alias{convolve.im} \title{Convolution of Pixel Images} \description{ Computes the convolution of two pixel images. } \usage{ convolve.im(X, Y=X, \dots, reflectX=FALSE, reflectY=FALSE) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}). } \item{Y}{ Optional. Another pixel image. } \item{\dots}{Ignored.} \item{reflectX,reflectY}{ Logical values specifying whether the images \code{X} and \code{Y} (respectively) should be reflected in the origin before computing the convolution. } } \value{ A pixel image (an object of class \code{"im"}) representing the convolution of \code{X} and \code{Y}. } \details{ The \emph{convolution} of two pixel images \eqn{X} and \eqn{Y} in the plane is the function \eqn{C(v)} defined for each vector \eqn{v} as \deqn{ C(v) = \int X(u)Y(v-u)\, {\rm d}u }{ C(v) = integral of X(u) * Y(v-u) du } where the integral is over all spatial locations \eqn{u}, and where \eqn{X(u)} and \eqn{Y(u)} denote the pixel values of \eqn{X} and \eqn{Y} respectively at location \eqn{u}. This command computes a discretised approximation to the convolution, using the Fast Fourier Transform. 
The return value is another pixel image (object of class \code{"im"}) whose greyscale values are values of the convolution. If \code{reflectX = TRUE} then the pixel image \code{X} is reflected in the origin (see \code{\link{reflect}}) before the convolution is computed, so that \code{convolve.im(X,Y,reflectX=TRUE)} is mathematically equivalent to \code{convolve.im(reflect(X), Y)}. (These two commands are not exactly equivalent, because the reflection is performed in the Fourier domain in the first command, and reflection is performed in the spatial domain in the second command). Similarly if \code{reflectY = TRUE} then the pixel image \code{Y} is reflected in the origin before the convolution is computed, so that \code{convolve.im(X,Y,reflectY=TRUE)} is mathematically equivalent to \code{convolve.im(X, reflect(Y))}. } \seealso{ \code{\link{imcov}}, \code{\link{reflect}} } \examples{ X <- as.im(letterR) Y <- as.im(square(1)) plot(convolve.im(X, Y)) plot(convolve.im(X, Y, reflectX=TRUE)) plot(convolve.im(X)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/methods.pp3.Rd0000755000176000001440000000330412237642733015367 0ustar ripleyusers\name{methods.pp3} \Rdversion{1.1} \alias{methods.pp3} %DoNotExport \alias{print.pp3} \alias{summary.pp3} \alias{print.summary.pp3} \alias{unitname.pp3} \alias{unitname<-.pp3} \title{ Methods for three-dimensional point patterns } \description{ Methods for class \code{"pp3"}. } \usage{ \method{print}{pp3}(x, ...) \method{print}{summary.pp3}(x, ...) \method{summary}{pp3}(object, ...) \method{unitname}{pp3}(x) \method{unitname}{pp3}(x) <- value } \arguments{ \item{x,object}{ Object of class \code{"pp3"}. } \item{\dots}{ Ignored. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. 
} } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}} and \code{\link{unitname<-}} for the class \code{"pp3"} of three-dimensional point patterns. The \code{print} and \code{summary} methods print a description of the point pattern. The \code{unitname} method extracts the name of the unit of length in which the point coordinates are expressed. The \code{unitname<-} method assigns the name of the unit of length. } \value{ For \code{print.pp3} the value is \code{NULL}. For \code{unitname.pp3} an object of class \code{"units"}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{pp3}}, \code{\link{print}}, \code{\link{unitname}} \code{\link{unitname<-}} } \examples{ X <- pp3(runif(42),runif(42),runif(42), box3(c(0,1), unitname="mm")) X unitname(X) unitname(X) <- c("foot", "feet") summary(X) } \keyword{spatial} \keyword{methods} spatstat/man/compatible.fv.Rd0000755000176000001440000000237212237642732015757 0ustar ripleyusers\name{compatible.fv} %DontDeclareMethods \alias{compatible.fv} \title{Test Whether Function Objects Are Compatible} \description{ Tests whether two or more function objects (class \code{"fv"}) are compatible. } \usage{ \method{compatible}{fv}(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more function value objects (class \code{"fv"}).} } \details{ An object of class \code{"fv"} is essentially a data frame containing several different statistical estimates of the same function. Such objects are returned by \code{\link{Kest}} and its relatives. This command tests whether such objects are compatible (so that, for example, they could be added or subtracted). It is a method for the generic command \code{\link{compatible}}. 
The functions are compatible if they have been evaluated at the same sequence of values of the argument \code{r}, and if the statistical estimates have the same names. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{eval.fv}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/rescale.psp.Rd0000755000176000001440000000375512237642734015455 0ustar ripleyusers\name{rescale.psp} \alias{rescale.psp} \title{Convert Line Segment Pattern to Another Unit of Length} \description{ Converts a line segment pattern dataset to another unit of length. } \usage{ \method{rescale}{psp}(X, s) } \arguments{ \item{X}{Line segment pattern (object of class \code{"psp"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} } \value{ Another line segment pattern (of class \code{"psp"}), representing the same data, but expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates in the line segment pattern \code{X} (and its window) will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). The result is a line segment pattern representing the \emph{same} data but re-expressed in a different unit. Mark values are unchanged. If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. } \section{Note}{ The result of this operation is equivalent to the original segment pattern. 
If you want to actually change the coordinates by a linear transformation, producing a segment pattern that is not equivalent to the original one, use \code{\link{affine}}. } \seealso{ \code{\link{units}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(copper) X <- copper$Lines X # data are in km # convert to metres rescale(X, 1/1000) X # rename unit unitname(X) <- c("metre", "metres") X } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/Extract.psp.Rd0000755000176000001440000000556712237642732015452 0ustar ripleyusers\name{Extract.psp} \alias{[.psp} \title{Extract Subset of Line Segment Pattern} \description{ Extract a subset of a line segment pattern. } \usage{ \method{[}{psp}(x, i, j, drop, ...) } \arguments{ \item{x}{ A two-dimensional line segment pattern. An object of class \code{"psp"}. } \item{i}{ Subset index. Either a valid subset index in the usual \R sense, indicating which segments should be retained, or a window (an object of class \code{"owin"}) delineating a subset of the original observation window. } \item{j}{ Redundant - included for backward compatibility. } \item{drop}{ Ignored. Required for compatibility with generic function. } \item{\dots}{ Ignored. } } \value{ A line segment pattern (of class \code{"psp"}). } \details{ These functions extract a designated subset of a line segment pattern. The function \code{[.psp} is a method for \code{\link{[}} for the class \code{"psp"}. It extracts a designated subset of a line segment pattern, either by ``\emph{thinning}'' (retaining/deleting some line segments of a line segment pattern) or ``\emph{trimming}'' (reducing the window of observation to a smaller subregion and clipping the line segments to this boundary) or both. The pattern will be ``thinned'' if \code{subset} is specified. 
The line segments designated by \code{subset} will be retained. Here \code{subset} can be a numeric vector of positive indices (identifying the line segments to be retained), a numeric vector of negative indices (identifying the line segments to be deleted) or a logical vector of length equal to the number of line segments in the line segment pattern \code{x}. In the latter case, the line segments for which \code{subset[i]=TRUE} will be retained, and the others will be deleted. The pattern will be ``trimmed'' if \code{window} is specified. This should be an object of class \code{\link{owin}} specifying a window of observation to which the line segment pattern \code{x} will be trimmed. Line segments of \code{x} lying inside the new \code{window} will be retained unchanged. Line segments lying partially inside the new \code{window} and partially outside it will be clipped so that they lie entirely inside. Both ``thinning'' and ``trimming'' can be performed together. } \seealso{ \code{\link{psp.object}}, \code{\link{owin.object}} } \examples{ a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) plot(a) # thinning id <- sample(c(TRUE, FALSE), 20, replace=TRUE) b <- a[id] plot(b, add=TRUE, lwd=3) # trimming plot(a) w <- owin(c(0.1,0.7), c(0.2, 0.8)) b <- a[,w] plot(b, add=TRUE, col="red") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/Jcross.Rd0000755000176000001440000001625112237642731014471 0ustar ripleyusers\name{Jcross} \alias{Jcross} \title{ Multitype J Function (i-to-j) } \description{ For a multitype point pattern, estimate the multitype \eqn{J} function summarising the interpoint dependence between points of type \eqn{i} and of type \eqn{j}. 
} \usage{ Jcross(X, i, j, eps=NULL, r=NULL, breaks=NULL, \dots, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{J} function \eqn{J_{ij}(r)}{Jij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{eps}{A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{J}{the recommended estimator of \eqn{J_{ij}(r)}{Jij(r)}, currently the Kaplan-Meier estimator. 
} \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{ij}(r)}{Jij(r)} has been estimated } \item{km}{the Kaplan-Meier estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{ij}(r)}{Jij(r)} } \item{un}{the ``uncorrected'' estimator of \eqn{J_{ij}(r)}{Jij(r)} formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{ij}(r)}{1 - Gij(r)} and \eqn{1 - F_{j}(r)}{1 - Fj(r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{ij}(r)}{Jij(r)} for a marked Poisson process, namely 1. } The result also has two attributes \code{"G"} and \code{"F"} which are respectively the outputs of \code{\link{Gcross}} and \code{\link{Fest}} for the point pattern. } \details{ This function \code{Jcross} and its companions \code{\link{Jdot}} and \code{\link{Jmulti}} are generalisations of the function \code{\link{Jest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). The ``type \eqn{i} to type \eqn{j}'' multitype \eqn{J} function of a stationary multitype point process \eqn{X} was introduced by Van Lieshout and Baddeley (1999). 
It is defined by \deqn{J_{ij}(r) = \frac{1 - G_{ij}(r)}{1 - F_{j}(r)}}{Jij(r) = (1 - Gij(r))/(1-Fj(r))} where \eqn{G_{ij}(r)}{Gij(r)} is the distribution function of the distance from a type \eqn{i} point to the nearest point of type \eqn{j}, and \eqn{F_{j}(r)}{Fj(r)} is the distribution function of the distance from a fixed point in space to the nearest point of type \eqn{j} in the pattern. An estimate of \eqn{J_{ij}(r)}{Jij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the subprocess of type \eqn{i} points is independent of the subprocess of points of type \eqn{j}, then \eqn{J_{ij}(r) \equiv 1}{Jij(r) = 1}. Hence deviations of the empirical estimate of \eqn{J_{ij}}{Jij} from the value 1 may suggest dependence between types. This algorithm estimates \eqn{J_{ij}(r)}{Jij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}, using the Kaplan-Meier and border corrections. The main work is done by \code{\link{Gmulti}} and \code{\link{Fest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{ij}(r)}{Jij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. 
} \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Jdot}}, \code{\link{Jest}}, \code{\link{Jmulti}} } \examples{ # Lansing woods data: 6 types of trees data(lansing) \testonly{ lansing <- lansing[seq(1,lansing$n, by=30), ] } Jhm <- Jcross(lansing, "hickory", "maple") # diagnostic plot for independence between hickories and maples plot(Jhm) # synthetic example with two types "a" and "b" pp <- runifpoint(30) \%mark\% factor(sample(c("a","b"), 30, replace=TRUE)) J <- Jcross(pp) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/dirichlet.weights.Rd0000755000176000001440000000376212237642732016642 0ustar ripleyusers\name{dirichlet.weights} \alias{dirichlet.weights} \title{Compute Quadrature Weights Based on Dirichlet Tessellation} \description{ Computes quadrature weights for a given set of points, using the areas of tiles in the Dirichlet tessellation. } \usage{ dirichlet.weights(X, window=NULL, exact=TRUE, \dots) } \arguments{ \item{X}{Data defining a point pattern.} \item{window}{Default window for the point pattern} \item{exact}{Logical value. If \code{TRUE}, compute exact areas using the package \code{deldir}. If \code{FALSE}, compute approximate areas using a pixel raster. } \item{\dots}{ Ignored. } } \value{ Vector of nonnegative weights for each point in \code{X}. } \details{ This function computes a set of quadrature weights for a given pattern of points (typically comprising both ``data'' and ``dummy'' points). See \code{\link{quad.object}} for an explanation of quadrature weights and quadrature schemes. 
The weights are computed using the Dirichlet tessellation. First \code{X} and (optionally) \code{window} are converted into a point pattern object. Then the Dirichlet tessellation of the points of \code{X} is computed. The weight attached to a point of \code{X} is the area of its Dirichlet tile (inside the window \code{X$window}). If \code{exact=TRUE} the Dirichlet tessellation is computed exactly by the Lee-Schachter algorithm using the package \code{deldir}. Otherwise a pixel raster approximation is constructed and the areas are approximations to the true weights. In all cases the sum of the weights is equal to the area of the window. } \seealso{ \code{\link{quad.object}}, \code{\link{gridweights}} } \examples{ Q <- quadscheme(runifpoispp(10)) X <- as.ppp(Q) # data and dummy points together w <- dirichlet.weights(X, exact=FALSE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{utilities} spatstat/man/unitname.Rd0000755000176000001440000000636512237642734015056 0ustar ripleyusers\name{unitname} %DontDeclareMethods \alias{unitname} \alias{unitname.im} \alias{unitname.kppm} \alias{unitname.minconfit} \alias{unitname.owin} \alias{unitname.ppp} \alias{unitname.ppm} \alias{unitname.psp} \alias{unitname.quad} \alias{unitname.slrm} \alias{unitname<-} \alias{unitname<-.im} \alias{unitname<-.kppm} \alias{unitname<-.minconfit} \alias{unitname<-.owin} \alias{unitname<-.ppp} \alias{unitname<-.ppm} \alias{unitname<-.psp} \alias{unitname<-.quad} \alias{unitname<-.slrm} \title{Name for Unit of Length} \description{ Inspect or change the name of the unit of length in a spatial dataset. 
} \usage{ unitname(x) \method{unitname}{im}(x) \method{unitname}{kppm}(x) \method{unitname}{minconfit}(x) \method{unitname}{owin}(x) \method{unitname}{ppm}(x) \method{unitname}{ppp}(x) \method{unitname}{psp}(x) \method{unitname}{quad}(x) \method{unitname}{slrm}(x) unitname(x) <- value \method{unitname}{im}(x) <- value \method{unitname}{kppm}(x) <- value \method{unitname}{minconfit}(x) <- value \method{unitname}{owin}(x) <- value \method{unitname}{ppm}(x) <- value \method{unitname}{ppp}(x) <- value \method{unitname}{psp}(x) <- value \method{unitname}{quad}(x) <- value \method{unitname}{slrm}(x) <- value } \arguments{ \item{x}{A spatial dataset. Either a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), a window (object of class \code{"owin"}), a pixel image (object of class \code{"im"}), a quadrature scheme (object of class \code{"quad"}), or a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"slrm"} or \code{"minconfit"}). } \item{value}{ Name of the unit of length. See Details. } } \details{ Spatial datasets in the \pkg{spatstat} package may include the name of the unit of length. This name is used when printing or plotting the dataset, and in some other applications. \code{unitname(x)} extracts this name, and \code{unitname(x) <- value} sets the name to \code{value}. A valid name is either \itemize{ \item a single character string \item a vector of two character strings giving the singular and plural forms of the unit name \item a list of length 3, containing two character strings giving the singular and plural forms of the basic unit, and a number specifying the multiple of this unit. } Note that re-setting the name of the unit of length \emph{does not} affect the numerical values in \code{x}. It changes only the string containing the name of the unit of length. To rescale the numerical values, use \code{\link{rescale}}. 
} \value{ The return value of \code{unitname} is an object of class \code{"units"} containing the name of the unit of length in \code{x}. There are methods for \code{print} and \code{summary}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{rescale}}, \code{\link{owin}}, \code{\link{ppp}} } \examples{ X <- runifpoint(20) # if the unit of length is 1 metre: unitname(X) <- c("metre", "metres") # if the unit of length is 6 inches: unitname(X) <- list("inch", "inches", 6) } \keyword{spatial} \keyword{manip} spatstat/man/quadscheme.logi.Rd0000644000176000001440000001265712237642733016303 0ustar ripleyusers\name{quadscheme.logi} \alias{quadscheme.logi} \title{Generate a Logistic Regression Quadrature Scheme from a Point Pattern} \description{ Generates a logistic regression quadrature scheme (an object of class \code{"logiquad"} inheriting from \code{"quad"}) from point patterns of data and dummy points. } \usage{ quadscheme.logi(data, dummy, dummytype = "stratrand", nd = NULL, mark.repeat = FALSE, \dots) } \arguments{ \item{data}{ The observed data point pattern. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{dummy}{ The pattern of dummy points for the quadrature. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()}. If missing a sensible default is generated. } \item{dummytype}{ The name of the type of dummy points to use when \code{"dummy"} is missing. Currently available options are: \code{"stratrand"} (default), \code{"binomial"}, \code{"poisson"}, \code{"grid"} and \code{"transgrid"}. } \item{nd}{ Integer, or integer vector of length 2 controlling the intensity of dummy points when \code{"dummy"} is missing. } \item{mark.repeat}{ Repeating the dummy points for each level of a marked data pattern when \code{"dummy"} is missing. (See details.) 
} \item{\dots}{ Ignored. } } \value{ An object of class \code{"logiquad"} inheriting from \code{"quad"} describing the quadrature scheme (data points, dummy points, and quadrature weights) suitable as the argument \code{Q} of the function \code{\link{ppm}()} for fitting a point process model. The quadrature scheme can be inspected using the \code{print} and \code{plot} methods for objects of class \code{"quad"}. } \details{ This is the primary method for producing a quadrature schemes for use by \code{\link{ppm}} when the logistic regression approximation (Baddeley et al. 2013) to the pseudolikelihood of the model is applied (i.e. when \code{method="logi"} in \code{\link{ppm}}). The function \code{\link{ppm}} fits a point process model to an observed point pattern. When used with the option \code{method="logi"} it requires a quadrature scheme consisting of the original data point pattern and an additional pattern of dummy points. Such quadrature schemes are represented by objects of class \code{"logiquad"}. Quadrature schemes are created by the function \code{quadscheme.logi}. The arguments \code{data} and \code{dummy} specify the data and dummy points, respectively. There is a sensible default for the dummy points. Alternatively the dummy points may be specified arbitrarily and given in any format recognised by \code{\link{as.ppp}}. The quadrature region is the region over which we are integrating, and approximating integrals by finite sums. If \code{dummy} is a point pattern object (class \code{"ppp"}) then the quadrature region is taken to be \code{dummy$window}. If \code{dummy} is just a list of \eqn{x, y} coordinates then the quadrature region defaults to the observation window of the data pattern, \code{data$window}. If \code{dummy} is missing, then a pattern of dummy points will be generated, taking account of the optional arguments \code{dummytype}, \code{nd}, and \code{mark.repeat}. 
The currently accepted values for \code{dummytype} are: \itemize{ \item \code{"grid"} where the frame of the window is divided into a \code{nd * nd} or \code{nd[1] * nd[2]} regular grid of tiles and the centers constitutes the dummy points. \item \code{"transgrid"} where a regular grid as above is translated by a random vector. \item \code{"stratrand"} where each point of a regular grid as above is randomly translated within its tile. \item \code{"binomial"} where \code{nd * nd} or \code{nd[1] * nd[2]} points are generated uniformly in the frame of the window. \code{"poisson"} where a homogeneous Poisson point process with intensity \code{nd * nd} or \code{nd[1] * nd[2]} is generated within the frame of observation window. } Then if the window is not rectangular, any dummy points lying outside it are deleted. If \code{data} is a multitype point pattern the dummy points should also be marked (with the same levels of the marks as \code{data}). If \code{dummy} is missing and the dummy pattern is generated by \code{quadscheme.logi} the default behaviour is to attach a uniformly distributed mark (from the levels of the marks) to each dummy point. Alternatively, if \code{mark.repeat=TRUE} each dummy point is repeated as many times as there are levels of the marks with a distinct mark value attached to it. Finally, each point (data and dummy) is assigned the weight 1. The weights are never used and only appear to be compatible with the class \code{"quad"} from which the \code{"logiquad"} object inherits. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. (2013) \emph{A logistic regression estimating function for spatial Gibbs point processes.} Research Report, Centre for Stochastic Geometry and Bioimaging, Denmark. 
\url{www.csgb.dk} } \seealso{ \code{\link{ppm}}, \code{\link{as.ppp}} } \examples{ data(simdat) Q <- quadscheme.logi(simdat) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Rolf Turner \email{r.turner@auckland.ac.nz} and Ege Rubak \email{rubak@math.aau.dk}. } \keyword{spatial} \keyword{datagen} spatstat/man/anova.lppm.Rd0000755000176000001440000001004312237642732015273 0ustar ripleyusers\name{anova.lppm} \alias{anova.lppm} \title{ANOVA for Fitted Point Process Models on Linear Network} \description{ Performs analysis of deviance for two or more fitted point process models on a linear network. } \usage{ \method{anova}{lppm}(object, \dots, test=NULL, override=FALSE) } \arguments{ \item{object}{A fitted point process model on a linear network (object of class \code{"lppm"}). } \item{\dots}{ One or more fitted point process models on the same linear network. } \item{test}{ Character string, partially matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}. } \item{override}{ Logical flag indicating whether to proceed even when there is no statistical theory to support the calculation. } } \value{ An object of class \code{"anova"}, or \code{NULL}. } \details{ This is a method for \code{\link{anova}} for fitted point process models on a linear network (objects of class \code{"lppm"}, usually generated by the model-fitting function \code{\link{lppm}}). If the fitted models are all Poisson point processes, then this function performs an Analysis of Deviance of the fitted models. The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided p-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link{anova.glm}}. If some of the fitted models are \emph{not} Poisson point processes, then there is no statistical theory available to support a similar analysis. 
The function issues a warning, and (by default) returns a \code{NULL} value. However if \code{override=TRUE}, then a kind of analysis of deviance table will be printed. The `deviance' differences in this table are equal to 2 times the differences in the maximised values of the log pseudolikelihood (see \code{\link{ppm}}). At the time of writing, there is no statistical theory to support inferential interpretation of log pseudolikelihood ratios. The \code{override} option is provided for research purposes only! } \section{Errors and warnings}{ \describe{ \item{models not nested:}{ There may be an error message that the models are not \dQuote{nested}. For an Analysis of Deviance the models must be nested, i.e. one model must be a special case of the other. For example the point process model with formula \code{~x} is a special case of the model with formula \code{~x+y}, so these models are nested. However the two point process models with formulae \code{~x} and \code{~y} are not nested. If you get this error message and you believe that the models should be nested, the problem may be the inability of \R to recognise that the two formulae are nested. Try modifying the formulae to make their relationship more obvious. } \item{different sizes of dataset:}{ There may be an error message from \code{anova.glmlist} that \dQuote{models were not all fitted to the same size of dataset}. This generally occurs when the point process models are fitted on different linear networks. } } } \seealso{ \code{\link{lppm}} } \examples{ example(lpp) mod0 <- lppm(X, ~1) modx <- lppm(X, ~x) anova(mod0, modx, test="Chi") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. 
(2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/shift.im.Rd0000755000176000001440000000355012237642734014750 0ustar ripleyusers\name{shift.im} \alias{shift.im} \title{Apply Vector Translation To Pixel Image} \description{ Applies a vector shift to a pixel image } \usage{ \method{shift}{im}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Pixel image (object of class \code{"im"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \value{ Another pixel image (of class \code{"im"}) representing the result of applying the vector shift. } \details{ The spatial location of each pixel in the image is translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, then it should be one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}. The argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the origin. If \code{origin="centroid"} then the centroid of the image window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the image will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the image will be shifted to the origin. 
} \seealso{ \code{\link{shift}} } \examples{ # make up an image X <- setcov(unit.square()) plot(X) Y <- shift(X, c(10,10)) plot(Y) # no discernible difference except coordinates are different shift(X, origin="mid") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/pool.fasp.Rd0000755000176000001440000000370012237642733015124 0ustar ripleyusers\name{pool.fasp} \alias{pool.fasp} \title{ Pool Data from Several Function Arrays } \description{ Pool the simulation data from several function arrays (objects of class \code{"fasp"}) and compute a new function array. } \usage{ \method{pool}{fasp}(...) } \arguments{ \item{\dots}{ Objects of class \code{"fasp"}. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"fasp"} of function arrays. It is used to combine the simulation data from several arrays of simulation envelopes and to compute a new array of envelopes based on the combined data. Each of the arguments \code{\dots} must be a function array (object of class \code{"fasp"}) containing simulation envelopes. This is typically created by running the command \code{\link{alltypes}} with the arguments \code{envelope=TRUE} and \code{savefuns=TRUE}. This ensures that each object is an array of simulation envelopes, and that each envelope contains the simulated data (summary function values) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new array of envelopes is computed from the combined set of simulations. Warnings or errors will be issued if the objects \code{\dots} appear to be incompatible. However, the code is not smart enough to decide whether it is sensible to pool the data. } \value{ An object of class \code{"fasp"}. 
} \seealso{ \code{\link{fasp}}, \code{\link{alltypes}}, \code{\link{pool.envelope}}, \code{\link{pool}} } \examples{ data(amacrine) A1 <- alltypes(amacrine,"K",nsim=9,envelope=TRUE,savefuns=TRUE) A2 <- alltypes(amacrine,"K",nsim=10,envelope=TRUE,savefuns=TRUE) pool(A1, A2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/methods.fii.Rd0000644000176000001440000000517012237642733015434 0ustar ripleyusers\name{methods.fii} \alias{methods.fii} %DoNotExport \Rdversion{1.1} \alias{print.fii} \alias{plot.fii} \alias{coef.fii} \alias{summary.fii} \alias{print.summary.fii} \alias{coef.summary.fii} \title{ Methods for Fitted Interactions } \description{ These are methods specifically for the class \code{"fii"} of fitted interpoint interactions. } \usage{ \method{print}{fii}(x, \dots) \method{coef}{fii}(object, \dots) \method{plot}{fii}(x, \dots) \method{summary}{fii}(object,\dots) \method{print}{summary.fii}(x, ...) \method{coef}{summary.fii}(object, ...) } \arguments{ \item{x,object}{ An object of class \code{"fii"} representing a fitted interpoint interaction. } \item{\dots}{ Arguments passed to other methods. } } \details{ These are methods for the class \code{"fii"}. An object of class \code{"fii"} represents a fitted interpoint interaction. It is usually obtained by using the command \code{\link{fitin}} to extract the fitted interaction part of a fitted point process model. See \code{\link{fitin}} for further explanation of this class. The commands listed here are methods for the generic functions \code{\link{print}}, \code{\link{summary}}, \code{\link{plot}} and \code{\link{coef}} for objects of the class \code{"fii"}. Following the usual convention, \code{summary.fii} returns an object of class \code{summary.fii}, for which there is a print method. 
The effect is that, when the user types \code{summary(x)}, the summary is printed, but when the user types \code{y <- summary(x)}, the summary information is saved. The method \code{coef.fii} extracts the canonical coefficients of the fitted interaction, and returns them as a numeric vector. The method \code{coef.summary.fii} transforms these values into quantities that are more easily interpretable, in a format that depends on the particular model. There are also methods for the generic commands \code{\link{reach}} and \code{\link{as.interact}}, described elsewhere. } \value{ The \code{print} and \code{plot} methods return \code{NULL}. The \code{summary} method returns an object of class \code{summary.fii}. \code{coef.fii} returns a numeric vector. \code{coef.summary.fii} returns data whose structure depends on the model. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{fitin}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}} } \examples{ mod <- ppm(cells, ~1, Strauss(0.1)) f <- fitin(mod) f summary(f) plot(f) coef(f) coef(summary(f)) } \keyword{spatial} \keyword{methods} spatstat/man/clmfires.Rd0000755000176000001440000001022412237642732015025 0ustar ripleyusers\name{clmfires} \alias{clmfires} \alias{clmcov100} \alias{clmcov200} \docType{data} \title{ Castilla-La Mancha Forest Fires } \description{ This dataset is a record of forest fires in the Castilla-La Mancha region of Spain between 1998 and 2007. This region is approximately 400 by 400 kilometres. The coordinates are recorded in kilometres. The dataset \code{clmfires} is a point pattern (object of class \code{"ppp"}) containing the spatial coordinates of each fire, with marks containing information about each fire. 
There are 4 columns of marks: \tabular{ll}{ \code{cause} \tab cause of fire (see below) \cr \code{burnt.area} \tab total area burned, in hectares \cr \code{date} \tab the date of fire as an object of class \code{Date} \cr \code{julian.date} \tab number of days elapsed since 1 January 1998 \cr } The \code{cause} of the fire is a factor with the levels \code{lightning}, \code{accident} (for accidents or negligence), \code{intentional} (for intentionally started fires) and \code{other} (for other causes including unknown cause). The format of \code{date} is \dQuote{Year-month-day}, e.g. \dQuote{2005-07-14} (i.e. 14 July, 2005). Accompanying this point pattern, there are two datasets \code{clmcov100} and \code{clmcov200} containing covariate information for the entire Castilla-La Mancha region. Each of these two datasets is a list of four images (objects of class \code{"im"}) named \code{elevation}, \code{orientation}, \code{slope} and \code{landuse}. The \code{landuse} image is factor-valued with the factor having levels \code{urban}, \code{farm} (for farms or orchards), \code{meadow}, \code{denseforest} (for dense forest), \code{conifer} (for conifer forest or plantation), \code{mixedforest}, \code{grassland}, \code{bush}, \code{scrub} and \code{artifgreen} for artificial greens such as golf courses. These images (effectively) provide values for the four covariates at every location in the study area. The images in \code{clmcov100} are 100 by 100 pixels in size, while those in \code{clmcov200} are 200 by 200 pixels. For easy handling, \code{clmcov100} and \code{clmcov200} also belong to the class \code{"listof"} so that they can be plotted and printed immediately. } \format{ \code{clmfires} is a marked point pattern (object of class \code{"ppp"}). See \code{\link[spatstat]{ppp.object}}. \code{clmcov100} and \code{clmcov200} are lists of pixel images (objects of class \code{"im"}). 
} \section{Remark}{ The precision with which the coordinates of the locations of the fires were recorded changed between 2003 and 2004. From 1998 to 2003 many of the locations were recorded as the centroid of the corresponding \dQuote{district unit}; the rest were recorded as exact UTM coordinates of the centroids of the fires. In 2004 the system changed and the exact UTM coordinates of the centroids of the fires were used for \emph{all} fires. There is thus a strongly apparent \dQuote{gridlike} quality to the fire locations for the years 1998 to 2003. There is however no actual duplication of points in the 1998 to 2003 patterns due to \dQuote{jittering} having been applied in order to avoid such duplication. It is not clear just \emph{how} the fire locations were jittered. It seems unlikely that the jittering was done using the \code{jitter()} function from \code{R} or the \pkg{spatstat} function \code{\link{rjitter}}. Of course there are many sets of points which are \emph{virtually} identical, being separated by distances induced by the jittering. Typically these distances are of the order of 40 metres which is unlikely to be meaningful on the scale at which forest fires are observed. Caution should therefore be exercised in any analyses of the patterns for the years 1998 to 2003. } \usage{data(clmfires)} \examples{ plot(clmfires, which.marks="cause", cols=2:5, cex=0.25) plot(clmcov100) # Split the clmfires pattern by year and plot the first and last years: yr <- factor(format(marks(clmfires)$date,format="\%Y")) X <- split(clmfires,f=yr) fAl <- c("1998","2007") plot(X[fAl],use.marks=FALSE,main.panel=fAl,main="") } \source{ Jorge Mateu. 
} \keyword{datasets} \keyword{spatial} spatstat/man/lppm.Rd0000755000176000001440000000434312237642733014177 0ustar ripleyusers\name{lppm} \alias{lppm} \title{ Fit Point Process Model to Point Pattern on Linear Network } \description{ Fit a point process model to a point pattern dataset on a linear network } \usage{ lppm(X, ..., eps=NULL, nd=1000) } \arguments{ \item{X}{ Object of class \code{"lpp"} specifying a point pattern on a linear network. } \item{\dots}{ Arguments passed to \code{\link[spatstat]{ppm}}. } \item{eps}{ Optional. Spacing between dummy points along each segment of the network. } \item{nd}{ Optional. Number of dummy points equally spaced along each segment of the network. Ignored if \code{eps} is given. } } \details{ This function fits a point process model to data that specify a point pattern on a linear network. It is a counterpart of the model-fitting function \code{\link[spatstat]{ppm}} designed to work with objects of class \code{"lpp"} instead of \code{"ppp"}. The argument \code{X} should be an object of class \code{"lpp"} (created by the command \code{\link{lpp}}) specifying a point pattern on a linear network. The subsequent arguments \code{...} will be passed to \code{\link[spatstat]{ppm}}. They specify the form of the model. } \value{ An object of class \code{"lppm"} representing the fitted model. There are methods for \code{print}, \code{predict}, \code{coef} and similar functions. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{methods.lppm}}, \code{\link{predict.lppm}}, \code{\link{ppm}}, \code{\link{lpp}}. } \examples{ example(lpp) lppm(X, ~1) lppm(X, ~x) } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. 
(2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} \keyword{models} spatstat/man/Extract.fv.Rd0000755000176000001440000000323312237642732015247 0ustar ripleyusers\name{Extract.fv} \alias{[.fv} \title{Extract Subset of Function Values} \description{ Extract a subset of an object of class \code{"fv"}. } \usage{ \method{[}{fv}(x, i, j, \dots, drop=FALSE) } \arguments{ \item{x}{ a function value object, of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame. } \item{i}{ any appropriate subset index. Selects a subset of the rows of the data frame, i.e. a subset of the domain of the function(s) represented by \code{x}. } \item{j}{ any appropriate subset index for the columns of the data frame. Selects some of the functions present in \code{x}. } \item{\dots}{ Ignored. } \item{drop}{ Logical. If \code{TRUE}, the result is a data frame or vector containing the selected rows and columns of data. If \code{FALSE} (the default), the result is another object of class \code{"fv"}. } } \value{ If \code{drop=FALSE}, a function value object (of class \code{"fv"}). If \code{drop=TRUE}, a data frame or vector. } \details{ This is a method for \code{"["} for the class \code{"fv"}. It is very similar to \code{\link{[.data.frame}} except for a few extra checks on the sanity of the result. 
} \seealso{ \code{\link{fv.object}} } \examples{ K <- Kest(cells) # discard the estimates of K(r) for r > 0.1 Ksub <- K[K$r <= 0.1, ] # discard the border method estimator Ksub <- K[ , names(K) != "border"] # read some values K[5, ,drop=TRUE] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/Extract.listof.Rd0000755000176000001440000000233012237642732016131 0ustar ripleyusers\name{Extract.listof} \alias{[<-.listof} \title{Extract or Replace Subset of a List of Things} \description{ Replace a subset of a list of things. } \usage{ \method{[}{listof}(x, i) <- value } \arguments{ \item{x}{ An object of class \code{"listof"} representing a list of things which all belong to one class. } \item{i}{ Subset index. Any valid subset index in the usual \R sense. } \item{value}{ Replacement value for the subset. } } \value{ Another object of class \code{"listof"}. } \details{ This is a subset replacement method for the class \code{"listof"}. The argument \code{x} should be an object of class \code{"listof"} representing a list of things that all belong to one class. The method replaces a designated subset of \code{x}, and returns an object of class \code{"listof"}. } \seealso{ \code{\link{plot.listof}}, \code{\link{summary.listof}} } \examples{ x <- list(A=runif(10), B=runif(10), C=runif(10)) class(x) <- c("listof", class(x)) x[1] <- list(A=rnorm(10)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/inside.owin.Rd0000755000176000001440000000465312237642732015460 0ustar ripleyusers\name{inside.owin} \alias{inside.owin} \title{Test Whether Points Are Inside A Window} \description{ Test whether points lie inside or outside a given window. 
} \usage{ inside.owin(x, y, w) } \arguments{ \item{x}{ Vector of \eqn{x} coordinates of points to be tested. (Alternatively, a point pattern object providing both \eqn{x} and \eqn{y} coordinates.) } \item{y}{ Vector of \eqn{y} coordinates of points to be tested. } \item{w}{A window. This should be an object of class \code{\link{owin}}, or can be given in any format acceptable to \code{\link{as.owin}()}. } } \value{ Logical vector whose \code{i}th entry is \code{TRUE} if the corresponding point \code{(x[i],y[i])} is inside \code{w}. } \details{ This function tests whether each of the points \code{(x[i],y[i])} lies inside or outside the window \code{w} and returns \code{TRUE} if it is inside. The boundary of the window is treated as being inside. If \code{w} is of type \code{"rectangle"} or \code{"polygonal"}, the algorithm uses analytic geometry (the discrete Stokes theorem). Computation time is linear in the number of points and (for polygonal windows) in the number of vertices of the boundary polygon. Boundary cases are correct to single precision accuracy. If \code{w} is of type \code{"mask"} then the pixel closest to \code{(x[i],y[i])} is tested. The results may be incorrect for points lying within one pixel diameter of the window boundary. Normally \code{x} and \code{y} must be numeric vectors of equal length (length zero is allowed) containing the coordinates of points. Alternatively \code{x} can be a point pattern (object of class \code{"ppp"}) while \code{y} is missing; then the coordinates of the point pattern are extracted. 
} \seealso{ \code{\link{owin.object}}, \code{\link{as.owin}} } \examples{ # hexagonal window k <- 6 theta <- 2 * pi * (0:(k-1))/k co <- cos(theta) si <- sin(theta) mas <- owin(c(-1,1), c(-1,1), poly=list(x=co, y=si)) \dontrun{ plot(mas) } # random points in rectangle x <- runif(30,min=-1, max=1) y <- runif(30,min=-1, max=1) ok <- inside.owin(x, y, mas) \dontrun{ points(x[ok], y[ok]) points(x[!ok], y[!ok], pch="x") } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/LambertW.Rd0000644000176000001440000000255312237642731014740 0ustar ripleyusers\name{LambertW} \alias{LambertW} \title{ Lambert's W Function } \description{ Computes Lambert's W-function. } \usage{ LambertW(x) } \arguments{ \item{x}{ Vector of nonnegative numbers. } } \details{ Lambert's W-function is the inverse function of \eqn{f(y) = y e^y}{f(y) = y * exp(y)}. That is, \eqn{W} is the function such that \deqn{ W(x) e^{W(x)} = x }{ W(x) * exp(W(x)) = x } This command \code{LambertW} computes \eqn{W(x)} for each entry in the argument \code{x}. If the library \pkg{gsl} has been installed, then the function \code{lambert_W0} in that library is invoked. Otherwise, values of the W-function are computed by root-finding, using the function \code{\link[stats]{uniroot}}. Computation using \pkg{gsl} is about 100 times faster. } \value{ Numeric vector. } \references{ Corless, R, Gonnet, G, Hare, D, Jeffrey, D and Knuth, D (1996), On the Lambert W function. \emph{Computational Mathematics}, \bold{5}, 325--359. Roy, R and Olver, F (2010), Lambert W function. In Olver, F, Lozier, D and Boisvert, R (eds.), \emph{{NIST} Handbook of Mathematical Functions}, Cambridge University Press. 
\author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ LambertW(exp(1)) } \keyword{math} spatstat/man/bw.frac.Rd0000644000176000001440000000437412237642732014551 0ustar ripleyusers\name{bw.frac} \alias{bw.frac} \title{ Bandwidth Selection Based on Window Geometry } \description{ Select a smoothing bandwidth for smoothing a point pattern, based only on the geometry of the spatial window. The bandwidth is a specified quantile of the distance between two independent random points in the window. } \usage{ bw.frac(X, \dots, f=1/4) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or point pattern (object of class \code{"ppp"}) or other data which can be converted to a window using \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{\link{distcdf}}. } \item{f}{ Probability value (between 0 and 1) determining the quantile of the distribution. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{sigma} is computed as a quantile of the distance between two independent random points in the window. The default is the lower quartile of this distribution. If \eqn{F(r)} is the cumulative distribution function of the distance between two independent random points uniformly distributed in the window, then the value returned is the quantile with probability \eqn{f}. That is, the bandwidth is the value \eqn{r} such that \eqn{F(r) = f}. The cumulative distribution function \eqn{F(r)} is computed using \code{\link{distcdf}}. We then compute the smallest number \eqn{r} such that \eqn{F(r) \ge f}{F(r) >= f}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.frac"} which can be plotted to show the cumulative distribution function and the selected quantile. 
} \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.relrisk}}, \code{\link{bw.scott}}, \code{\link{bw.smoothppp}}, \code{\link{bw.stoyan}} } \examples{ h <- bw.frac(letterR) h plot(h, main="bw.frac(letterR)") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/kaplan.meier.Rd0000755000176000001440000000623612237642732015577 0ustar ripleyusers\name{kaplan.meier} \alias{kaplan.meier} \title{Kaplan-Meier Estimator using Histogram Data} \description{ Compute the Kaplan-Meier estimator of a survival time distribution function, from histogram data } \usage{ kaplan.meier(obs, nco, breaks, upperobs=0) } \arguments{ \item{obs}{vector of \eqn{n} integers giving the histogram of all observations (censored or uncensored survival times) } \item{nco}{vector of \eqn{n} integers giving the histogram of uncensored observations (those survival times that are less than or equal to the censoring time) } \item{breaks}{Vector of \eqn{n+1} breakpoints which were used to form both histograms. } \item{upperobs}{ Number of observations beyond the rightmost breakpoint, if any. } } \value{ A list with two elements: \item{km}{Kaplan-Meier estimate of the survival time c.d.f. \eqn{F(t)} } \item{lambda}{corresponding Nelson-Aalen estimate of the hazard rate \eqn{\lambda(t)}{lambda(t)} } These are numeric vectors of length \eqn{n}. } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the Kaplan-Meier estimator from a huge dataset. Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. 
Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. If the number of observations \eqn{M} is large, it is efficient to use histograms. Form the histogram \code{obs} of all observed times \eqn{\tilde T_i}{T*[i]}. That is, \code{obs[k]} counts the number of values \eqn{\tilde T_i}{T*[i]} in the interval \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1} and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}. Also form the histogram \code{nco} of all uncensored times, i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1}. These two histograms are the arguments passed to \code{kaplan.meier}. The vectors \code{km} and \code{lambda} returned by \code{kaplan.meier} are (histogram approximations to) the Kaplan-Meier estimator of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}. Specifically, \code{km[k]} is an estimate of \code{F(breaks[k+1])}, and \code{lambda[k]} is an estimate of the average of \eqn{\lambda(t)}{lambda(t)} over the interval \code{(breaks[k],breaks[k+1])}. The histogram breaks must include \eqn{0}. If the histogram breaks do not span the range of the observations, it is important to count how many survival times \eqn{\tilde T_i}{T*[i]} exceed the rightmost breakpoint, and give this as the value \code{upperobs}. 
} \seealso{ \code{\link{reduced.sample}}, \code{\link{km.rs}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/simulate.slrm.Rd0000644000176000001440000000475112237642734016027 0ustar ripleyusers\name{simulate.slrm} \alias{simulate.slrm} \title{Simulate a Fitted Spatial Logistic Regression Model} \description{ Generates simulated realisations from a fitted spatial logistic regresson model } \usage{ \method{simulate}{slrm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE) } \arguments{ \item{object}{ Fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Ignored.} \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"slrm"} of fitted spatial logistic regression models. Simulations are performed by \code{\link{rpoispp}} after the intensity has been computed by \code{\link{predict.slrm}}. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). 
It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. } \examples{ X <- copper$SouthPoints fit <- slrm(X ~ 1) simulate(fit, 2) fitxy <- slrm(X ~ x+y) simulate(fitxy, 2, window=square(2)) } \seealso{ \code{\link{slrm}}, \code{\link{rpoispp}}, \code{\link{simulate.ppm}}, \code{\link{simulate.kppm}}, \code{\link[stats]{simulate}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/as.polygonal.Rd0000755000176000001440000000314512237642732015633 0ustar ripleyusers\name{as.polygonal} \Rdversion{1.1} \alias{as.polygonal} \title{ Convert a Window to a Polygonal Window } \description{ Given a window \code{W} of any geometric type (rectangular, polygonal or binary mask), this function returns a polygonal window that represents the same spatial domain. } \usage{ as.polygonal(W) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). } } \details{ Given a window \code{W} of any geometric type (rectangular, polygonal or binary mask), this function returns a polygonal window that represents the same spatial domain. If \code{W} is already polygonal, it is returned without change. If \code{W} is a rectangle, it is converted to a polygon with 4 vertices. If \code{W} is a binary mask, then each pixel in the mask is replaced by a small square or rectangle, and the union of these squares or rectangles is computed. The result is a polygonal window that has only horizontal and vertical edges. (Use \code{\link{simplify.owin}} to remove the staircase appearance, if desired). 
} \value{ A polygonal window (object of class \code{"owin"} and of type \code{"polygonal"}). } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{as.mask}}, \code{\link{simplify.owin}} } \examples{ data(letterR) m <- as.mask(letterR, dimyx=32) p <- as.polygonal(m) if(interactive()) { plot(m) plot(p, add=TRUE, lwd=2) } } \keyword{spatial} \keyword{manip} spatstat/man/hist.im.Rd0000755000176000001440000000420012237642732014571 0ustar ripleyusers\name{hist.im} \alias{hist.im} \title{Histogram of Pixel Values in an Image} \description{ Computes and displays a histogram of the pixel values in a pixel image. The \code{hist} method for class \code{"im"}. } \usage{ \method{hist}{im}(x, \dots, probability=FALSE) } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{Arguments passed to \code{\link{hist.default}} or \code{\link{barplot}}.} \item{probability}{Logical. If \code{TRUE}, the histogram will be normalised to give probabilities or probability densities. } } \details{ This function computes and (by default) displays a histogram of the pixel values in the image \code{x}. An object of class \code{"im"} describes a pixel image. See \code{\link{im.object}}) for details of this class. The function \code{hist.im} is a method for the generic function \code{\link{hist}} for the class \code{"im"}. Any arguments in \code{...} are passed to \code{\link{hist.default}} (for numeric valued images) or \code{\link{barplot}} (for factor or logical images). For example, such arguments control the axes, and may be used to suppress the plotting. } \value{ For numeric-valued images, an object of class \code{"histogram"} as returned by \code{\link[graphics:hist]{hist.default}}. This object can be plotted. 
For factor-valued or logical images, an object of class \code{"barplotdata"}, which can be plotted. This is a list with components called \code{counts} (contingency table of counts of the numbers of pixels taking each possible value), \code{probs} (corresponding relative frequencies) and \code{mids} (graphical \eqn{x}-coordinates of the midpoints of the bars in the barplot). } \seealso{ \code{\link{hist}}, \code{\link{hist.default}}, \code{\link{barplot}}, \code{\link{im.object}}, \code{\link{summary.im}}. } \examples{ X <- as.im(function(x,y) {x^2}, unit.square()) hist(X) hist(cut(X,3)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/colourmap.Rd0000755000176000001440000001015512237642732015225 0ustar ripleyusers\name{colourmap} \alias{colourmap} \title{Colour Lookup Tables} \description{ Create a colour map (colour lookup table). } \usage{ colourmap(col, ..., range=NULL, breaks=NULL, inputs=NULL) } \arguments{ \item{col}{Vector of values specifying colours} \item{\dots}{Ignored.} \item{range}{ Interval to be mapped. A numeric vector of length 2, specifying the endpoints of the range of values to be mapped. Incompatible with \code{breaks} or \code{inputs}. } \item{inputs}{ Values to which the colours are associated. A factor or vector of the same length as \code{col}. Incompatible with \code{breaks} or \code{range}. } \item{breaks}{ Breakpoints for the colour map. A numeric vector of length equal to \code{length(col)+1}. Incompatible with \code{range} or \code{inputs}. } } \details{ A colour map is a mechanism for associating colours with data. It can be regarded as a function, mapping data to colours. The command \code{colourmap} creates an object representing a colour map, which can then be used to control the plot commands in the \pkg{spatstat} package. 
It can also be used to compute the colour assigned to any data value. The argument \code{col} specifies the colours to which data values will be mapped. It should be a vector whose entries can be interpreted as colours by the standard \R graphics system. The entries can be string names of colours like \code{"red"}, or integers that refer to colours in the standard palette, or strings containing six-letter hexadecimal codes like \code{"#F0A0FF"}. Exactly one of the arguments \code{range}, \code{inputs} or \code{breaks} must be specified by name. If \code{inputs} is given, then it should be a vector or factor, of the same length as \code{col}. The entries of \code{inputs} can be any atomic type (e.g. numeric, logical, character, complex) or factor values. The resulting colour map associates the value \code{inputs[i]} with the colour \code{col[i]}. If \code{range} is given, then it determines the interval of the real number line that will be mapped. It should be a numeric vector of length 2. If \code{breaks} is given, then it determines the precise intervals of the real number line which are mapped to each colour. It should be a numeric vector, of length at least 2, with entries that are in increasing order. Infinite values are allowed. Any number in the range between \code{breaks[i]} and \code{breaks[i+1]} will be mapped to the colour \code{col[i]}. The result is an object of class \code{"colourmap"}. There are \code{print} and \code{plot} methods for this class. Some plot commands in the \pkg{spatstat} package accept an object of this class as a specification of the colour map. The result is also a function \code{f} which can be used to compute the colour assigned to any data value. That is, \code{f(x)} returns the character value of the colour assigned to \code{x}. This also works for vectors of data values. } \value{ A function, which is also an object of class \code{"colourmap"}. } \seealso{ The plot method \code{\link{plot.colourmap}}. 
See the \R help file on \code{\link[grDevices:colors]{colours}} for information about the colours that \R recognises, and how to manipulate them. To make a smooth transition between colours, see \code{\link{interp.colourmap}}. To alter individual colour values, see \code{\link{tweak.colourmap}}. See \code{\link[spatstat:colourtools]{colourtools}} for more tools to manipulate colour values. See \code{\link{lut}} for lookup tables. } \examples{ # colour map for real numbers, using breakpoints cr <- colourmap(c("red", "blue", "green"), breaks=c(0,5,10,15)) cr cr(3.2) cr(c(3,5,7)) # a large colour map co <- colourmap(rainbow(100), range=c(-1,1)) co(0.2) # colour map for discrete set of values ct <- colourmap(c("red", "green"), inputs=c(FALSE, TRUE)) ct(TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{color} spatstat/man/simulate.ppm.Rd0000755000176000001440000000737012237642734015651 0ustar ripleyusers\name{simulate.ppm} \alias{simulate.ppm} \title{Simulate a Fitted Gibbs Point Process Model} \description{ Generates simulated realisations from a fitted Gibbs or Poisson point process model. } \usage{ \method{simulate}{ppm}(object, nsim=1, ..., singlerun = FALSE, start = NULL, control = default.rmhcontrol(object), project=TRUE, verbose=FALSE, progress=(nsim > 1)) } \arguments{ \item{object}{ Fitted point process model. An object of class \code{"ppm"}. } \item{nsim}{ Number of simulated realisations. } \item{singlerun}{ Logical. Whether to generate the simulated realisations from a single long run of the Metropolis-Hastings algorithm (\code{singlerun=TRUE}) or from separate, independent runs of the algorithm (\code{singlerun=FALSE}, the default). } \item{start}{Data determining the initial state of the Metropolis-Hastings algorithm. See \code{\link{rmhstart}} for description of these arguments. 
Defaults to \code{list(n.start=npoints(data.ppm(model)))} meaning that the initial state of the algorithm has the same number of points as the original dataset. } \item{control}{Data controlling the running of the Metropolis-Hastings algorithm. See \code{\link{rmhcontrol}} for description of these arguments. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}}, or to \code{\link{rmh.default}}, or to covariate functions in the model. } \item{project}{ Logical flag indicating what to do if the fitted model is invalid (in the sense that the values of the fitted coefficients do not specify a valid point process). If \code{project=TRUE} the closest valid model will be simulated; if \code{project=FALSE} an error will occur. } \item{verbose}{ Logical flag indicating whether to print progress reports from \code{\link{rmh.ppm}} during the simulation of each point pattern. } \item{progress}{ Logical flag indicating whether to print progress reports for the sequence of simulations. } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"ppm"} of fitted point process models. Simulations are performed by \code{\link{rmh.ppm}}. If \code{singlerun=FALSE} (the default), the simulated patterns are the results of independent runs of the Metropolis-Hastings algorithm. If \code{singlerun=TRUE}, a single long run of the algorithm is performed, and the state of the simulation is saved every \code{nsave} iterations to yield the simulated patterns. In the case of a single run, the behaviour is controlled by the parameters \code{nsave,nburn,nrep}. These are described in \code{\link{rmhcontrol}}. They may be passed in the \code{\dots} arguments or included in \code{control}. It is sufficient to specify two of the three parameters \code{nsave,nburn,nrep}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). 
It also belongs to the class \code{"listof"}, so that it can be plotted, and the class \code{"timed"}, so that the total computation time is recorded. } \examples{ \testonly{op <- spatstat.options(rmh.nrep=10)} fit <- ppm(japanesepines, ~1, Strauss(0.1)) simulate(fit, 2) simulate(fit, 2, singlerun=TRUE, nsave=1e4, nburn=1e4) \testonly{spatstat.options(op)} } \seealso{ \code{\link{ppm}}, \code{\link{simulate.kppm}}, \code{\link[stats]{simulate}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/ewcdf.Rd0000755000176000001440000000277312237642732014323 0ustar ripleyusers\name{ewcdf} \alias{ewcdf} \title{Weighted Empirical Cumulative Distribution Function} \description{ Compute a weighted version of the empirical cumulative distribution function. } \usage{ ewcdf(x, weights = rep(1/length(x), length(x))) } \arguments{ \item{x}{Numeric vector of observations.} \item{weights}{Numeric vector of non-negative weights for \code{x}.} } \details{ This is a modification of the standard function \code{\link{ecdf}} allowing the observations \code{x} to have weights. The weighted e.c.d.f. (empirical cumulative distribution function) \code{Fn} is defined so that, for any real number \code{y}, the value of \code{Fn(y)} is equal to the total weight of all entries of \code{x} that are less than or equal to \code{y}. That is \code{Fn(y) = sum(weights[x <= y])}. Thus \code{Fn} is a step function which jumps at the values of \code{x}. The height of the jump at a point \code{y} is the total weight of all entries in \code{x} number of tied observations at that value. Missing values are ignored. If \code{weights} is omitted, the default is equivalent to \code{ecdf(x)}. } \value{ A function, of class \code{"ecdf"}, inheriting from \code{"stepfun"}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{ecdf}} } \examples{ x <- rnorm(100) w <- runif(100) plot(ewcdf(x,w)) } \keyword{nonparametric} \keyword{univar} spatstat/man/with.fv.Rd0000755000176000001440000000764312237642735014624 0ustar ripleyusers\name{with.fv} \alias{with.fv} \title{Evaluate an Expression in a Function Table} \description{ Evaluate an R expression in a function value table (object of class \code{"fv"}). } \usage{ \method{with}{fv}(data, expr, ..., fun = NULL, enclos=NULL) } \arguments{ \item{data}{A function value table (object of class \code{"fv"}) in which the expression will be evaluated. } \item{expr}{The expression to be evaluated. An \R language expression, which may involve the names of columns in \code{data}, the special abbreviations \code{.}, \code{.x} and \code{.y}, and global constants or functions. } \item{\dots}{Ignored.} \item{fun}{Logical value, specifying whether the result should be interpreted as another function (\code{fun=TRUE}) or simply returned as a numeric vector or array (\code{fun=FALSE}). See Details. } \item{enclos}{ An environment in which to search for variables that are not found in \code{data}. Defaults to \code{\link{parent.frame}()}. } } \details{ This is a method for the generic command \code{\link{with}} for an object of class \code{"fv"} (function value table). An object of class \code{"fv"} is a convenient way of storing and plotting several different estimates of the same function. It is effectively a data frame with extra attributes. See \code{\link{fv.object}} for further explanation. This command makes it possible to perform computations that involve different estimates of the same function. For example we use it to compute the arithmetic difference between two different edge-corrected estimates of the \eqn{K} function of a point pattern. 
The argument \code{expr} should be an \R language expression. The expression may involve \itemize{ \item the name of any column in \code{data}, referring to one of the estimates of the function; \item the symbol \code{.} which stands for all the available estimates of the function; \item the symbol \code{.y} which stands for the recommended estimate of the function (in an \code{"fv"} object, one of the estimates is always identified as the recommended estimate); \item the symbol \code{.x} which stands for the argument of the function; \item global constants or functions. } See the Examples. The expression should be capable of handling vectors and matrices. The interpretation of the argument \code{fun} is as follows: \itemize{ \item If \code{fun=FALSE}, the result of evaluating the expression \code{expr} will be returned as a numeric vector, matrix or data frame. \item If \code{fun=TRUE}, then the result of evaluating \code{expr} will be interpreted as containing the values of a new function. The return value will be an object of class \code{"fv"}. (This can only happen if the result has the right dimensions.) \item The default is \code{fun=TRUE} if the result of evaluating \code{expr} has more than one column, and \code{fun=FALSE} otherwise. } To perform calculations involving \emph{several} objects of class \code{"fv"}, use \code{\link{eval.fv}}. } \value{ A function value table (object of class \code{"fv"}) or a numeric vector or data frame. 
} \seealso{ \code{\link{with}}, \code{\link{fv.object}}, \code{\link{eval.fv}}, \code{\link{Kest}} } \examples{ # compute 4 estimates of the K function X <- rpoispp(42) K <- Kest(X) plot(K) # derive 4 estimates of the L function L(r) = sqrt(K(r)/pi) L <- with(K, sqrt(./pi)) plot(L) # compute 4 estimates of V(r) = L(r)/r V <- with(L, ./.x) plot(V) # compute the maximum absolute difference between # the isotropic and translation correction estimates of K(r) D <- with(K, max(abs(iso - trans))) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/nbfires.Rd0000755000176000001440000002341612237642733014661 0ustar ripleyusers\name{nbfires} \alias{nbfires} \alias{nbfires.extra} \alias{nbw.rect} \docType{data} \title{Point Patterns of New Brunswick Forest Fires} \description{ Point patterns created from yearly records, provided by the New Brunswick Department of Natural Resources, of all fires falling under their jurisdiction for the years 1987 to 2003 inclusive (with the year 1988 omitted until further notice). } \usage{data(nbfires)} \format{ Executing \code{data(nbfires)} gives access to three objects: \code{nbfires}, \code{nbfires.extra} and \code{nbw.rect}. The object \code{nbfires} is a marked point pattern (an object of class \code{"ppp"}) consisting of all of the fires in the years 1987 to 2003 inclusive, with the omission of 1988. The marks consist of a data frame of auxiliary information about the fires; see \emph{Details.} Patterns for individual years can be extracted using the function \code{\link{split.ppp}()}. (See \bold{Examples}.) The object \code{nbw.rect} is a rectangular window which covers central New Brunswick. It is provided for use in illustrative and \sQuote{practice} calculations inasmuch as the use of a rectangular window simplifies some computations considerably. 
For conformity with other datasets, \code{nbfires.extra} is also provided. It is a list containing just the window \code{nbw.rect}. } \details{ The coordinates of the fire locations were provided in terms of latitude and longitude, to the nearest minute of arc. These were converted to New Brunswick stereographic projection coordinates (Thomson, Mephan and Steeves, 1977) which was the coordinate system in which the map of New Brunswick --- which constitutes the observation window for the pattern --- was obtained. The conversion was done using a \code{C} program kindly provided by Jonathan Beaudoin of the Department of Geodesy and Geomatics, University of New Brunswick. Finally the data and window were rescaled since the use of the New Brunswick stereographic projection coordinate system resulted in having to deal with coordinates which are expressed as very large integers with a bewildering number of digits. Amongst other things, these huge numbers tended to create very untidy axis labels on graphs. The width of the bounding box of the window was made equal to 1000 (nameless) units. In addition the lower left hand corner of this bounding box was shifted to the origin. The height of the bounding box was changed proportionately, resulting in a value of approximately 959. The window for the fire patterns comprises 6 polygonal components, consisting of mainland New Brunswick and the 5 largest islands. Some lakes which should form holes in the mainland component are currently missing; this problem may be remedied in future releases. The window was formed by \sQuote{simplifying} the map that was originally obtained. The simplification consisted in reducing (using an interactive visual technique) the number of polygon edges in each component. For instance the number of edges in the mainland component was reduced from over 138,000 to 500. For some purposes it is probably better to use a discretized (mask type) window. See \bold{Examples}. 
Because of the coarseness of the coordinates of the original data (1 minute of longitude is approximately 1 kilometer at the latitude of New Brunswick), data entry errors, and the simplification of the observation window, many of the original fire locations appeared to be outside of the window. This problem was addressed by shifting the location of the \sQuote{outsider} points slightly, or deleting them, as seemed appropriate. Note that the data contain duplicated points (two points at the same location). To determine which points are duplicates, use \code{\link{duplicated.ppp}}. To remove the duplication, use \code{\link{unique.ppp}}. The columns of the data frame comprising the marks of \code{nbfires} are: \describe{ \item{year}{ This a \emph{factor} with levels 1987, 1989, \ldots, 2002, 2003. Note that 1988 is not present in the levels. } \item{fire.type}{ A factor with levels \code{forest}, \code{grass}, \code{dump}, and \code{other}. } \item{dis.date}{ The discovery date of the fire, which is the nearest possible surrogate for the starting time of the fire. This is an object of class \code{POSIXct} and gives the starting discovery time of the fire to the nearest minute. } \item{dis.julian}{ The discovery date and time of the fire, expressed in \sQuote{Julian days}, i.e. as a decimal fraction representing the number of days since the beginning of the year (midnight 31 December). } \item{out.date}{ The date on which the fire was judged to be \sQuote{out}. This is an object of class \code{POSIXct} and gives the \sQuote{out} time of the fire to the nearest minute. } \item{out.julian}{ The date and time at which the fire was judged to be \sQuote{out}, expressed in Julian days. } \item{cause}{ General cause of the fire. 
This is a factor with levels \code{unknown}, \code{rrds} (railroads), \code{misc} (miscellaneous), \code{ltning} (lightning), \code{for.ind} (forest industry), \code{incend} (incendiary), \code{rec} (recreation), \code{resid} (resident), and \code{oth.ind} (other industry). Causes \code{unknown}, \code{ltning}, and \code{incend} are supposedly designated as \sQuote{final} by the New Brunswick Department of Natural Resources, meaning (it seems) \dQuote{that's all there is to it}. Other causes are apparently intended to be refined by being combined with \dQuote{source of ignition}. However cross-tabulating \code{cause} with \code{ign.src} --- see below --- reveals that very often these three \sQuote{causes} are associated with an \dQuote{ignition source} as well. } \item{ign.src}{ Source of ignition, a factor with levels \code{cigs} (cigarette/match/pipe/ashes), \code{burn.no.perm} (burning without a permit), \code{burn.w.perm} (burning with a permit), \code{presc.burn} (prescribed burn), \code{wood.spark} (wood spark), \code{mach.spark} (machine spark), \code{campfire}, \code{chainsaw}, \code{machinery}, \code{veh.acc} (vehicle accident), \code{rail.acc} (railroad accident), \code{wheelbox} (wheelbox on railcars), \code{hot.flakes} (hot flakes off railcar wheels), \code{dump.fire} (fire escaping from a dump), \code{ashes} (ashes, briquettes, burning garbage, etc.) } \item{fnl.size}{ The final size of the fire (area burned) in hectares, to the nearest 10th hectare. } } Note that due to data entry errors some of the \dQuote{out dates} and \dQuote{out times} in the original data sets were actually \emph{earlier} than the corresponding \dQuote{discovery dates} and \dQuote{discover times}. In such cases all corresponding entries of the marks data frame (i.e. \code{dis.date}, \code{dis.julian}, \code{out.date}, and \code{out.julian}) were set equal to \code{NA}. Also, some of the dates and times were missing (equal to \code{NA}) in the original data sets. 
The \sQuote{ignition source} data were given as integer codes in the original data sets. The code book that I obtained gave interpretations for codes 1, 2, \ldots, 15. However the actually also contained codes of 0, 16, 17, 18, and in one instance 44. These may simply be data entry errors. These uninterpretable values were assigned the level \code{unknown}. Many of the years had most, or sometimes all, of the ignition source codes equal to 0 (hence turning out as \code{unknown}, and many of the years had many missing values as well. These were also assigned the level \code{unknown}. Of the 7108 fires in \code{nbfires}, 4354 had an \code{unknown} ignition source. This variable is hence unlikely to be very useful. There are also anomalies between \code{cause} and \code{ign.src}, e.g. \code{cause} being \code{unknown} but \code{ign.src} being \code{cigs}, \code{burn.no.perm}, \code{mach.spark}, \code{hot.flakes}, \code{dump.fire} or \code{ashes}. Particularly worrisome is the fact that the cause \code{ltning} (!!!) is associate with sources of ignition \code{cigs}, \code{burn.w.perm}, \code{presc.burn}, and \code{wood.spark}. } \source{ The data were kindly provided by the New Brunswick Department of Natural Resources. Special thanks are due to Jefferey Betts for a great deal of assistance. } \references{ Turner, Rolf. Point patterns of forest fire locations. \emph{Environmental and Ecological Statistics} \bold{16} (2009) 197 -- 223, doi:10.1007/s10651-007-0085-1. Thomson, D. B., Mephan, M. P., and Steeves, R. R. (1977) The stereographic double projection. Technical Report 46, University of New Brunswick, Fredericton, N. B., Canada URL: \code{gge.unb.ca/Pubs/Pubs.html}. } \examples{ \dontrun{ data(nbfires) # Get the year 2000 data. X <- split(nbfires,"year") Y.00 <- X[["2000"]] # Plot all of the year 2000 data, marked by fire type. plot(Y.00,which.marks="fire.type") # Cut back to forest and grass fires. 
Y.00 <- Y.00[marks(Y.00)$fire.type \%in\% c("forest","grass")] # Plot the year 2000 forest and grass fires marked by fire duration time. stt <- marks(Y.00)$dis.julian fin <- marks(Y.00)$out.julian marks(Y.00) <- cbind(marks(Y.00),dur=fin-stt) plot(Y.00,which.marks="dur") # Look at just the rectangular subwindow (superimposed on the entire window). nbw.mask <- as.mask(nbfires$window, dimyx=500) plot(nbw.mask, col=c("green", "white")) plot(nbfires$window, border="red", add=TRUE) plot(Y.00[nbw.rect],use.marks=FALSE,add=TRUE) plot(nbw.rect,add=TRUE,border="blue") # Look at the K function for the year 2000 forest and grass fires. K.00 <- Kest(Y.00) plot(K.00) } } \keyword{datasets} \keyword{spatial} spatstat/man/as.matrix.owin.Rd0000644000176000001440000000237312237642732016105 0ustar ripleyusers\name{as.matrix.owin} \alias{as.matrix.owin} \title{Convert Pixel Image to Matrix} \description{ Converts a pixel image to a matrix. } \usage{ \method{as.matrix}{owin}(x, ...) } \arguments{ \item{x}{A window (object of class \code{"owin"}).} \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the pixel resolution.} } \details{ The function \code{as.matrix.owin} converts a window to a logical matrux. It first converts the window \code{x} into a binary pixel mask using \code{\link{as.mask}}. It then extracts the pixel entries as a logical matrix. The resulting matrix has entries that are \code{TRUE} if the corresponding pixel is inside the window, and \code{FALSE} if it is outside. The function \code{as.matrix} is generic. The function \code{as.matrix.owin} is the method for windows (objects of class \code{"owin"}). Use \code{\link{as.im}} to convert a window to a pixel image. } \value{ A logical matrix. 
} \examples{ m <- as.matrix(letterR) } \seealso{ \code{\link{as.matrix.im}}, \code{\link{as.im}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/logLik.slrm.Rd0000755000176000001440000000336712237642733015431 0ustar ripleyusers\name{logLik.slrm} \Rdversion{1.1} \alias{logLik.slrm} \title{ Loglikelihood of Spatial Logistic Regression } \description{ Computes the (maximised) loglikelihood of a fitted Spatial Logistic Regression model. } \usage{ \method{logLik}{slrm}(object, ..., adjust = TRUE) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } \item{adjust}{ Logical value indicating whether to adjust the loglikelihood of the model to make it comparable with a point process likelihood. See Details. } } \details{ This is a method for \code{\link[stats]{logLik}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It computes the log-likelihood of a fitted spatial logistic regression model. If \code{adjust=FALSE}, the loglikelihood is computed using the standard formula for the loglikelihood of a logistic regression model for a finite set of (pixel) observations. If \code{adjust=TRUE} then the loglikelihood is adjusted so that it is approximately comparable with the likelihood of a point process in continuous space, by subtracting the value \eqn{n \log(a)}{n * log(a)} where \eqn{n} is the number of points in the original point pattern dataset, and \eqn{a} is the area of one pixel. } \value{ A numerical value. 
} \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) logLik(fit) logLik(fit, adjust=FALSE) } \author{Adrian Baddeley \email{adrian@maths.uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/Gmulti.Rd0000755000176000001440000001744212237642731014472 0ustar ripleyusers\name{Gmulti} \alias{Gmulti} \title{ Marked Nearest Neighbour Distance Function } \description{ For a marked point pattern, estimate the distribution of the distance from a typical point in subset \code{I} to the nearest point of subset \eqn{J}. } \usage{ Gmulti(X, I, J, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{G_{IJ}(r)}{GIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. } \item{J}{Subset of points in \code{X} to which distances are measured. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. 
} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{IJ}(r)}{GIJ(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{IJ}(r)}{GIJ(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{IJ}(r)}{GIJ(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{IJ}(r)}{GIJ(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest point of type \eqn{j} } \item{theo}{the theoretical value of \eqn{G_{IJ}(r)}{GIJ(r)} for a marked Poisson process with the same estimated intensity } } \details{ The function \code{Gmulti} generalises \code{\link{Gest}} (for unmarked point patterns) and \code{\link{Gdot}} and \code{\link{Gcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. This function computes an estimate of the cumulative distribution function \eqn{G_{IJ}(r)}{GIJ(r)} of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. 
If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. This algorithm estimates the distribution function \eqn{G_{IJ}(r)}{GIJ(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{IJ}(r)}{GIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{IJ}(r)}{GIJ(r)}. This estimate should be used with caution as \eqn{G_{IJ}(r)}{GIJ(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{IJ}}{GIJ}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. 
Care should be taken not to use the uncorrected empirical \eqn{G_{IJ}}{GIJ} as if it were an unbiased estimator of \eqn{G_{IJ}}{GIJ}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{G_{IJ}}{GIJ} does not necessarily have a density. The reduced sample estimator of \eqn{G_{IJ}}{GIJ} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{IJ}}{GIJ} is always nondecreasing but its maximum value may be less than \eqn{1}. 
} \seealso{ \code{\link{Gcross}}, \code{\link{Gdot}}, \code{\link{Gest}} } \examples{ data(longleaf) # Longleaf Pine data: marks represent diameter \testonly{ longleaf <- longleaf[seq(1,longleaf$n, by=50), ] } Gm <- Gmulti(longleaf, longleaf$marks <= 15, longleaf$marks >= 25) plot(Gm) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/intersect.owin.Rd0000755000176000001440000000564012237642732016202 0ustar ripleyusers\name{intersect.owin} \alias{intersect.owin} \alias{union.owin} \alias{setminus.owin} \title{Intersection, Union or Set Subtraction of Two Windows} \description{ Yields the intersection, union or set subtraction of two windows. } \usage{ intersect.owin(A, B, \dots, fatal=TRUE) union.owin(A,B, \dots) setminus.owin(A,B, \dots) } \arguments{ \item{A}{A window object (see Details).} \item{B}{A window object.} \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to control the discretisation, if required. } \item{fatal}{Logical. Determines what happens if the intersection is empty. } } \value{ A window (object of class \code{"owin"}). } \details{ The function \code{intersect.owin} computes the intersection between the two windows \code{A} and \code{B}, while \code{union.owin} computes their union. The function \code{setminus.owin} computes the intersection of \code{A} with the complement of \code{B}. The arguments \code{A} and \code{B} must be window objects (either objects of class \code{"owin"}, or data that can be coerced to this class by \code{\link{as.owin}}). If the intersection is empty, then if \code{fatal=FALSE} the result is NULL, while if \code{fatal=TRUE} an error occurs. The intersection or union of more than two windows can also be computed. For \code{intersect.owin} and \code{union.owin} the arguments \code{\dots} can include additional window objects. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{is.subset.owin}}, \code{\link{overlap.owin}}, \code{\link{bounding.box}}, \code{\link{owin.object}} } \examples{ # rectangles u <- unit.square() v <- owin(c(0.5,3.5), c(0.4,2.5)) # polygon data(letterR) # mask m <- as.mask(letterR) # two rectangles intersect.owin(u, v) union.owin(u,v) setminus.owin(u,v) # polygon and rectangle intersect.owin(letterR, v) union.owin(letterR,v) setminus.owin(letterR,v) # mask and rectangle intersect.owin(m, v) union.owin(m,v) setminus.owin(m,v) # mask and polygon p <- rotate(v, 0.2) intersect.owin(m, p) union.owin(m,p) setminus.owin(m,p) # two polygons A <- letterR B <- rotate(letterR, 0.2) plot(bounding.box(A,B), main="intersection") w <- intersect.owin(A, B) plot(w, add=TRUE, col="lightblue") plot(A, add=TRUE) plot(B, add=TRUE) plot(bounding.box(A,B), main="union") w <- union.owin(A,B) plot(w, add=TRUE, col="lightblue") plot(A, add=TRUE) plot(B, add=TRUE) plot(bounding.box(A,B), main="set minus") w <- setminus.owin(A,B) plot(w, add=TRUE, col="lightblue") plot(A, add=TRUE) plot(B, add=TRUE) # intersection and union of three windows C <- shift(B, c(0.2, 0.3)) plot(union.owin(A,B,C)) plot(intersect.owin(A,B,C)) } \keyword{spatial} \keyword{math} spatstat/man/owin.Rd0000755000176000001440000001404612237642733014204 0ustar ripleyusers\name{owin} \alias{owin} \title{Create a Window} \description{ Creates an object of class \code{"owin"} representing an observation window in the two-dimensional plane } \usage{ owin(xrange=c(0,1), yrange=c(0,1), ..., poly=NULL, mask=NULL, unitname=NULL, xy=NULL) } \arguments{ \item{xrange}{\eqn{x} coordinate limits of enclosing box} \item{yrange}{\eqn{y} coordinate limits of enclosing box} \item{\dots}{Ignored.} \item{poly}{ Optional. Polygonal boundary of window. Incompatible with \code{mask}. } \item{mask}{ Optional. 
Logical matrix giving binary image of window. Incompatible with \code{poly}. } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } \item{xy}{ Optional. List with components \code{x} and \code{y} specifying the pixel coordinates for \code{mask}. } } \value{ An object of class \code{"owin"} describing a window in the two-dimensional plane. } \details{ In the \pkg{spatstat} library, a point pattern dataset must include information about the window of observation. This is represented by an object of class \code{"owin"}. See \code{\link{owin.object}} for an overview. To create a window in its own right, users would normally invoke \code{owin}, although sometimes \code{\link{as.owin}} may be convenient. A window may be rectangular, polygonal, or a mask (a binary image). \itemize{ \item \bold{rectangular windows:} If only \code{xrange} and \code{yrange} are given, then the window will be rectangular, with its \eqn{x} and \eqn{y} coordinate dimensions given by these two arguments (which must be vectors of length 2). If no arguments are given at all, the default is the unit square with dimensions \code{xrange=c(0,1)} and \code{yrange=c(0,1)}. \item \bold{polygonal windows:} If \code{poly} is given, then the window will be polygonal. \itemize{ \item \emph{single polygon:} If \code{poly} is a matrix or data frame with two columns, or a structure with two component vectors \code{x} and \code{y} of equal length, then these values are interpreted as the cartesian coordinates of the vertices of a polygon circumscribing the window. The vertices must be listed \emph{anticlockwise}. No vertex should be repeated (i.e. do not repeat the first vertex). 
\item \emph{multiple polygons or holes:} If \code{poly} is a list, each entry \code{poly[[i]]} of which is a matrix or data frame with two columns or a structure with two component vectors \code{x} and \code{y} of equal length, then the successive list members \code{poly[[i]]} are interpreted as separate polygons which together make up the boundary of the window. The vertices of each polygon must be listed \emph{anticlockwise} if the polygon is part of the external boundary, but \emph{clockwise} if the polygon is the boundary of a hole in the window. Again, do not repeat any vertex. } \item \bold{binary masks:} If \code{mask} is given, then the window will be a binary image. The argument \code{mask} should be a logical matrix such that \code{mask[i,j]} is \code{TRUE} if the point \code{(x[j],y[i])} belongs to the window, and \code{FALSE} if it does not. Note carefully that rows of \code{mask} correspond to the \eqn{y} coordinate, and columns to the \eqn{x} coordinate. Here \code{x} and \code{y} are vectors of \eqn{x} and \eqn{y} coordinates equally spaced over \code{xrange} and \code{yrange} respectively. The pixel coordinate vectors \code{x} and \code{y} may be specified explicitly using the argument \code{xy}, which should be a list containing components \code{x} and \code{y}. Alternatively there is a sensible default. } To create a window which is mathematically defined by inequalities in the Cartesian coordinates, use \code{\link{raster.x}()} and \code{\link{raster.y}()} as in the examples below. Functions \code{\link{square}} and \code{\link{disc}} will create square and circular windows, respectively. 
} \seealso{ \code{\link{square}}, \code{\link{disc}}, \code{\link{owin.object}}, \code{\link{as.owin}}, \code{\link{complement.owin}}, \code{\link{ppp.object}}, \code{\link{ppp}} } \examples{ w <- owin() w <- owin(c(0,1), c(0,1)) # the unit square w <- owin(c(10,20), c(10,30), unitname=c("foot","feet")) # a rectangle of dimensions 10 x 20 feet # with lower left corner at (10,10) # polygon (diamond shape) w <- owin(poly=list(x=c(0.5,1,0.5,0),y=c(0,1,2,1))) w <- owin(c(0,1), c(0,2), poly=list(x=c(0.5,1,0.5,0),y=c(0,1,2,1))) # polygon with hole ho <- owin(poly=list(list(x=c(0,1,1,0), y=c(0,0,1,1)), list(x=c(0.6,0.4,0.4,0.6), y=c(0.2,0.2,0.4,0.4)))) w <- owin(c(-1,1), c(-1,1), mask=matrix(TRUE, 100,100)) # 100 x 100 image, all TRUE X <- raster.x(w) Y <- raster.y(w) wm <- owin(w$xrange, w$yrange, mask=(X^2 + Y^2 <= 1)) # discrete approximation to the unit disc \dontrun{ plot(c(0,1),c(0,1),type="n") bdry <- locator() # click the vertices of a polygon (anticlockwise) } \testonly{ bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2), y=c(0.1,0.1,0.5,0.7,0.3)) } w <- owin(poly=bdry) \dontrun{plot(w)} \dontrun{ im <- as.logical(matrix(scan("myfile"), nrow=128, ncol=128)) # read in an arbitrary 128 x 128 digital image from text file rim <- im[, 128:1] # Assuming it was given in row-major order in the file # i.e. scanning left-to-right in rows from top-to-bottom, # the use of matrix() has effectively transposed rows & columns, # so to convert it to our format just reverse the column order. w <- owin(mask=rim) plot(w) # display it to check! 
} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/Gfox.Rd0000755000176000001440000000715212237642731014131 0ustar ripleyusers\name{Gfox} \alias{Gfox} \alias{Jfox} \title{ Foxall's Distance Functions } \description{ Given a point pattern \code{X} and a spatial object \code{Y}, compute estimates of Foxall's \eqn{G} and \eqn{J} functions. } \usage{ Gfox(X, Y, r = NULL, breaks = NULL, correction = c("km", "rs", "han"), ...) Jfox(X, Y, r = NULL, breaks = NULL, correction = c("km", "rs", "han"), ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) from which distances will be measured. } \item{Y}{ An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"} to which distances will be measured. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{Gfox(r)} or \eqn{Jfox(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{Gfox(r)} or \eqn{Jfox(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"} and \code{"best"}. } \item{\dots}{ Extra arguments affecting the discretisation of distances. These arguments are ignored by \code{Gfox}, but \code{Jfox} passes them to \code{\link{Hest}} to determine the discretisation of the spatial domain. } } \details{ Given a point pattern \code{X} and another spatial object \code{Y}, these functions compute two nonparametric measures of association between \code{X} and \code{Y}, introduced by Foxall (Foxall and Baddeley, 2002). 
Let the random variable \eqn{R} be the distance from a typical point of \code{X} to the object \code{Y}. Foxall's \eqn{G}-function is the cumulative distribution function of \eqn{R}: \deqn{G(r) = P(R \le r)}{P(R <= r)} Let the random variable \eqn{S} be the distance from a \emph{fixed} point in space to the object \code{Y}. The cumulative distribution function of \eqn{S} is the (unconditional) spherical contact distribution function \deqn{H(r) = P(S \le r)}{H(r) = P(S <= r)} which is computed by \code{\link{Hest}}. Foxall's \eqn{J}-function is the ratio \deqn{ J(r) = \frac{1-G(r)}{1-H(r)} }{ J(r) = (1-G(r))/(1-H(r)) } For further interpretation, see Foxall and Baddeley (2002). Accuracy of \code{Jfox} depends on the pixel resolution, which is controlled by the arguments \code{eps}, \code{dimyx} and \code{xy} passed to \code{\link{as.mask}}. For example, use \code{eps=0.1} to specify square pixels of side 0.1 units, and \code{dimyx=256} to specify a 256 by 256 grid of pixels. } \value{ A function value table (object of class \code{"fv"}) which can be printed, plotted, or converted to a data frame of values. } \references{ Foxall, R. and Baddeley, A. (2002) Nonparametric measures of association between a spatial point process and a random set, with geological applications. \emph{Applied Statistics} \bold{51}, 165--182. 
} \seealso{ \code{\link{Gest}}, \code{\link{Hest}}, \code{\link{Jest}}, \code{\link{Fest}} } \examples{ data(copper) X <- copper$SouthPoints Y <- copper$SouthLines G <- Gfox(X,Y) J <- Jfox(X,Y, correction="km") \testonly{ J <- Jfox(X,Y, correction="km", eps=1) } \dontrun{ J <- Jfox(X,Y, correction="km", eps=0.25) } } \author{Rob Foxall and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/distmap.ppp.Rd0000755000176000001440000000441512237642732015465 0ustar ripleyusers\name{distmap.ppp} %DontDeclareMethods \alias{distmap.ppp} \title{ Distance Map of Point Pattern } \description{ Computes the distance from each pixel to the nearest point in the given point pattern. } \usage{ \method{distmap}{ppp}(X, \dots) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control pixel resolution. } } \value{ A pixel image (object of class \code{"im"}) whose greyscale values are the values of the distance map. The return value has attributes \code{"index"} and \code{"bdry"} which are also pixel images. } \details{ The ``distance map'' of a point pattern \eqn{X} is the function \eqn{f} whose value \code{f(u)} is defined for any two-dimensional location \eqn{u} as the shortest distance from \eqn{u} to \eqn{X}. This function computes the distance map of the point pattern \code{X} and returns the distance map as a pixel image. The greyscale value at a pixel \eqn{u} equals the distance from \eqn{u} to the nearest point of the pattern \code{X}. Additionally, the return value has two attributes, \code{"index"} and \code{"bdry"}, which are also pixel images. The grey values in \code{"bdry"} give the distance from each pixel to the bounding rectangle of the image. The grey values in \code{"index"} are integers identifying which point of \code{X} is closest. 
This is a method for the generic function \code{\link{distmap}}. Note that this function gives the distance from the \emph{centre of each pixel} to the nearest data point. To compute the exact distance from a given spatial location to the nearest data point in \code{X}, use \code{\link{distfun}} or \code{\link{nncross}}. } \seealso{ Generic function \code{\link{distmap}} and other methods \code{\link{distmap.psp}}, \code{\link{distmap.owin}}. Generic function \code{\link{distfun}}. Nearest neighbour distance \code{\link{nncross}} } \examples{ data(cells) U <- distmap(cells) \dontrun{ plot(U) plot(attr(U, "bdry")) plot(attr(U, "index")) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/nndist.lpp.Rd0000755000176000001440000000251012237642733015312 0ustar ripleyusers\name{nndist.lpp} \alias{nndist.lpp} \title{ Nearest neighbour distances on a linear network } \description{ Given a pattern of points on a linear network, compute the nearest-neighbour distances, measured by the shortest path in the network. } \usage{ \method{nndist}{lpp}(X, ..., method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function computes the nearest neighbour distance for each point (i.e. the distance from each point to the nearest other point), measuring distance by the shortest path in the network. If \code{method="C"} the distances are computed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. 
} \value{ A numeric vector, of length equal to the number of points in \code{X}. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{lpp}} } \examples{ example(lpp) nndist(X) } \keyword{spatial} spatstat/man/integral.msr.Rd0000644000176000001440000000154012240173062015611 0ustar ripleyusers\name{integral.msr} \alias{integral.msr} \title{ Integral of a Measure } \description{ Computes the integral (total value) of a measure over its domain. } \usage{ integral.msr(x, ...) } \arguments{ \item{x}{ A signed measure or vector-valued measure (object of class \code{"msr"}). } \item{\dots}{ Ignored. } } \details{ The integral (total value of the measure over its domain) is calculated. } \value{ A numeric value (for a signed measure) or a vector of values (for a vector-valued measure). } \seealso{ \code{\link{msr}}, \code{\link{integral.im}} } \examples{ fit <- ppm(cells, ~x) rp <- residuals(fit) integral.msr(rp) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/coef.slrm.Rd0000755000176000001440000000212512237642732015112 0ustar ripleyusers\name{coef.slrm} \Rdversion{1.1} \alias{coef.slrm} \title{ Coefficients of Fitted Spatial Logistic Regression Model } \description{ Extracts the coefficients (parameters) from a fitted Spatial Logistic Regression model. } \usage{ \method{coef}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } } \details{ This is a method for \code{\link{coef}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). It extracts the fitted canonical parameters, i.e.\ the coefficients in the linear predictor of the spatial logistic regression. 
} \value{ Numeric vector of coefficients. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) coef(fit) } \author{Adrian Baddeley \email{adrian@maths.uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/anova.slrm.Rd0000755000176000001440000000321712237642732015305 0ustar ripleyusers\name{anova.slrm} \Rdversion{1.1} \alias{anova.slrm} \title{ Analysis of Deviance for Spatial Logistic Regression Models } \description{ Performs Analysis of Deviance for two or more fitted Spatial Logistic Regression models. } \usage{ \method{anova}{slrm}(object, ..., test = NULL) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ additional objects of the same type (optional). } \item{test}{ a character string, (partially) matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"}, indicating the reference distribution that should be used to compute \eqn{p}-values. } } \details{ This is a method for \code{\link[stats]{anova}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The output shows the deviance differences (i.e. 2 times log likelihood ratio), the difference in degrees of freedom, and (if \code{test="Chi"}) the two-sided \eqn{p}-values for the chi-squared tests. Their interpretation is very similar to that in \code{\link[stats]{anova.glm}}. } \value{ An object of class \code{"anova"}, inheriting from class \code{"data.frame"}, representing the analysis of deviance table. 
} \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit0 <- slrm(X ~ 1) fit1 <- slrm(X ~ x+y) anova(fit0, fit1, test="Chi") } \author{Adrian Baddeley \email{adrian@maths.uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/model.matrix.slrm.Rd0000644000176000001440000000343312237642733016602 0ustar ripleyusers\name{model.matrix.slrm} \alias{model.matrix.slrm} \title{Extract Design Matrix from Spatial Logistic Regression Model} \description{ This function extracts the design matrix of a spatial logistic regression model. } \usage{ \method{model.matrix}{slrm}(object, ..., keepNA=TRUE) } \arguments{ \item{object}{ A fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } \item{keepNA}{ Logical. Determines whether rows containing NA values will be deleted or retained. } } \details{ This command is a method for the generic function \code{\link{model.matrix}}. It extracts the design matrix of a spatial logistic regression. The \code{object} must be a fitted spatial logistic regression (object of class \code{"slrm"}). Such objects are produced by the model-fitting function \code{\link{slrm}}. Usually the result is a matrix with one column for every constructed covariate in the model, and one row for every pixel in the grid used to fit the model. If \code{object} was fitted using split pixels (by calling \code{\link{slrm}} using the argument \code{splitby}) then the matrix has one row for every pixel or half-pixel. } \value{ A matrix. Columns of the matrix are canonical covariates in the model. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{slrm}}. } \examples{ fit <- slrm(japanesepines ~x) head(model.matrix(fit)) # matrix with two columns: '(Intercept)' and 'x' } \keyword{spatial} \keyword{models} spatstat/man/sidelengths.owin.Rd0000644000176000001440000000325712237642734016514 0ustar ripleyusers\name{sidelengths.owin} %DontDeclareMethods \alias{sidelengths.owin} \alias{shortside.owin} \title{Side Lengths of Enclosing Rectangle of a Window} \description{ Computes the side lengths of the (enclosing rectangle of) a window. } \usage{ \method{sidelengths}{owin}(x) \method{shortside}{owin}(x) } \arguments{ \item{x}{ A window whose side lengths will be computed. Object of class \code{"owin"}. } } \value{ For \code{sidelengths.owin}, a numeric vector of length 2 giving the side-lengths (\eqn{x} then \eqn{y}) of the enclosing rectangle. For \code{shortside.owin}, a numeric value. } \details{ The functions \code{shortside} and \code{sidelengths} are generic. The functions documented here are the methods for the class \code{"owin"}. \code{sidelengths.owin} computes the side-lengths of the enclosing rectangle of the window \code{x}. For safety, both functions give a warning if the window is not a rectangle. To suppress the warning, first convert the window to a rectangle using \code{\link{as.rectangle}}. \code{shortside.owin} computes the minimum of the two side-lengths. } \seealso{ \code{\link{shortside}}, \code{\link{sidelengths}} for the generic functions. \code{\link{area.owin}}, \code{\link{diameter.owin}}, \code{\link{perimeter}} for other geometric calculations on \code{"owin"} objects. \code{\link{owin}}, \code{\link{as.owin}}. 
} \examples{ w <- owin(c(0,2),c(-1,3)) sidelengths(w) shortside(as.rectangle(letterR)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/update.ppm.Rd0000755000176000001440000001447212237642734015311 0ustar ripleyusers\name{update.ppm} \alias{update.ppm} \title{Update a Fitted Point Process Model} \description{ \code{update} method for class \code{"ppm"}. } \usage{ \method{update}{ppm}(object, \dots, fixdummy=TRUE, use.internal=NULL, envir=parent.frame()) } \arguments{ \item{object}{ An existing fitted point process model, typically produced by \code{\link{ppm}}. } \item{\dots}{ Arguments to be updated in the new call to \code{\link{ppm}}. } \item{fixdummy}{ Logical flag indicating whether the quadrature scheme for the call to \code{\link{ppm}} should use the same set of dummy points as that in the original call. } \item{use.internal}{ Optional. Logical flag indicating whether the model should be refitted using the internally saved data (\code{use.internal=TRUE}) or by re-evaluating these data in the current frame (\code{use.internal=FALSE}). } \item{envir}{ Environment in which to re-evaluate the call to \code{\link{ppm}}. } } \details{ This is a method for the generic function \code{\link{update}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}}) for details of this class. \code{update.ppm} will modify the point process model specified by \code{object} according to the new arguments given, then re-fit it. The actual re-fitting is performed by the model-fitting function \code{\link{ppm}}. If you are comparing several model fits to the same data, or fits of the same model to different data, it is strongly advisable to use \code{update.ppm} rather than trying to fit them by hand. 
This is because \code{update.ppm} re-fits the model in a way which is comparable to the original fit. The arguments \code{...} are matched to the formal arguments of \code{\link{ppm}} as follows. First, all the \emph{named} arguments in \code{...} are matched with the formal arguments of \code{\link{ppm}}. Use \code{name=NULL} to remove the argument \code{name} from the call. Second, any \emph{unnamed} arguments in \code{...} are matched with formal arguments of \code{\link{ppm}} if the matching is obvious from the class of the object. Thus \code{...} may contain \itemize{ \item exactly one argument of class \code{"ppp"} or \code{"quad"}, which will be interpreted as the named argument \code{Q}; \item exactly one argument of class \code{"formula"}, which will be interpreted as the named argument \code{trend} (or as specifying a change to the trend formula); \item exactly one argument of class \code{"interact"}, which will be interpreted as the named argument \code{interaction}; \item exactly one argument of class \code{"data.frame"}, which will be interpreted as the named argument \code{covariates}. } The \code{trend} argument can be a formula that specifies a \emph{change} to the current trend formula. For example, the formula \code{~ . + Z} specifies that the additional covariate \code{Z} will be added to the right hand side of the trend formula in the existing \code{object}. The argument \code{fixdummy=TRUE} ensures comparability of the objects before and after updating. When \code{fixdummy=FALSE}, calling \code{update.ppm} is exactly the same as calling \code{ppm} with the updated arguments. However, the original and updated models are not strictly comparable (for example, their pseudolikelihoods are not strictly comparable) unless they used the same set of dummy points for the quadrature scheme. Setting \code{fixdummy=TRUE} ensures that the re-fitting will be performed using the same set of dummy points. This is highly recommended. 
The value of \code{use.internal} determines where to find data to re-evaluate the model (data for the arguments mentioned in the original call to \code{ppm} that are not overwritten by arguments to \code{update.ppm}). If \code{use.internal=FALSE}, then arguments to \code{ppm} are \emph{re-evaluated} in the frame where you call \code{update.ppm}. This is like the behaviour of the other methods for \code{\link{update}}. This means that if you have changed any of the objects referred to in the call, these changes will be taken into account. Also if the original call to \code{ppm} included any calls to random number generators, these calls will be recomputed, so that you will get a different outcome of the random numbers. If \code{use.internal=TRUE}, then arguments to \code{ppm} are extracted from internal data stored inside the current fitted model \code{object}. This is useful if you don't want to re-evaluate anything. It is also necessary if if \code{object} has been restored from a dump file using \code{\link{load}} or \code{\link{source}}. In such cases, we have lost the environment in which \code{object} was fitted, and data cannot be re-evaluated. By default, if \code{use.internal} is missing, \code{update.ppm} will re-evaluate the arguments if this is possible, and use internal data if not. } \value{ Another fitted point process model (object of class \code{"ppm"}). } \examples{ data(nztrees) data(cells) # fit the stationary Poisson process fit <- ppm(nztrees, ~ 1) # fit a nonstationary Poisson process fitP <- update(fit, trend=~x) fitP <- update(fit, ~x) # change the trend formula: add another term to the trend fitPxy <- update(fitP, ~ . + y) # change the trend formula: remove the x variable fitPy <- update(fitPxy, ~ . 
- x) # fit a stationary Strauss process fitS <- update(fit, interaction=Strauss(13)) fitS <- update(fit, Strauss(13)) # refit using a different edge correction fitS <- update(fitS, correction="isotropic") # re-fit the model to a subset # of the original point pattern nzw <- owin(c(0,148),c(0,95)) nzsub <- nztrees[,nzw] fut <- update(fitS, Q=nzsub) fut <- update(fitS, nzsub) # WARNING: the point pattern argument is called 'Q' ranfit <- ppm(rpoispp(42), ~1, Poisson()) ranfit # different random data! update(ranfit) # the original data update(ranfit, use.internal=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/rotate.Rd0000755000176000001440000000166312237642734014530 0ustar ripleyusers\name{rotate} \alias{rotate} \title{Rotate} \description{ Applies a rotation to any two-dimensional object, such as a point pattern or a window. } \usage{ rotate(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} \item{\dots}{Data specifying the rotation.} } \value{ Another object of the same type, representing the result of rotating \code{X} through the specified angle. } \details{ This is generic. Methods are provided for point patterns (\code{\link{rotate.ppp}}) and windows (\code{\link{rotate.owin}}). 
} \seealso{ \code{\link{rotate.ppp}}, \code{\link{rotate.owin}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/rescue.rectangle.Rd0000755000176000001440000000271712237642734016464 0ustar ripleyusers\name{rescue.rectangle} \alias{rescue.rectangle} \title{Convert Window Back To Rectangle} \description{ Determines whether the given window is really a rectangle aligned with the coordinate axes, and if so, converts it to a rectangle object. } \usage{ rescue.rectangle(W) } \arguments{ \item{W}{A window (object of class \code{"owin"}).} } \value{ Another object of class \code{"owin"} representing the same window. } \details{ This function decides whether the window \code{W} is actually a rectangle aligned with the coordinate axes. This will be true if \code{W} is \itemize{ \item a rectangle (window object of type \code{"rectangle"}); \item a polygon (window object of type \code{"polygonal"} with a single polygonal boundary) that is a rectangle aligned with the coordinate axes; \item a binary mask (window object of type \code{"mask"}) in which all the pixel entries are \code{TRUE}. } If so, the function returns this rectangle, a window object of type \code{"rectangle"}. If not, the function returns \code{W}. 
} \seealso{ \code{\link{as.owin}}, \code{\link{owin.object}} } \examples{ w <- owin(poly=list(x=c(0,1,1,0),y=c(0,0,1,1))) rw <- rescue.rectangle(w) w <- as.mask(unit.square()) rw <- rescue.rectangle(w) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/eem.Rd0000755000176000001440000000541412237642732013774 0ustar ripleyusers\name{eem} \alias{eem} \title{ Exponential Energy Marks } \description{ Given a point process model fitted to a point pattern, compute the Stoyan-Grabarnik diagnostic ``exponential energy marks'' for the data points. } \usage{ eem(fit, check=TRUE) } \arguments{ \item{fit}{ The fitted point process model. An object of class \code{"ppm"}. } \item{check}{ Logical value indicating whether to check the internal format of \code{fit}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } } \value{ A vector containing the values of the exponential energy mark for each point in the pattern. } \details{ Stoyan and Grabarnik (1991) proposed a diagnostic tool for point process models fitted to spatial point pattern data. Each point \eqn{x[i]}{x_i} of the data pattern \eqn{X} is given a `mark' or `weight' \deqn{m[i] = 1/lambda-hat(x[i],X)}{m_i = \frac 1 {\hat\lambda(x_i,X)}} where \eqn{lambda-hat(x[i],X)}{\hat\lambda(x_i,X)} is the conditional intensity of the fitted model. If the fitted model is correct, then the sum of these marks for all points in a region \eqn{B} has expected value equal to the area of \eqn{B}. The argument \code{fit} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). 
This fitted model object contains complete information about the original data pattern and the model that was fitted to it. The value returned by \code{eem} is the vector of weights \eqn{m[i]}{m_i} associated with the points \eqn{x[i]}{x_i} of the original data pattern. The original data pattern (in corresponding order) can be extracted from \code{fit} using \code{\link{data.ppm}}. The function \code{\link{diagnose.ppm}} produces a set of sensible diagnostic plots based on these weights. } \references{ Stoyan, D. and Grabarnik, P. (1991) Second-order characteristics for stochastic structures connected with Gibbs point processes. \emph{Mathematische Nachrichten}, 151:95--100. } \seealso{ \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{data.ppm}}, \code{\link{residuals.ppm}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~x, Strauss(r=0.15)) ee <- eem(fit) sum(ee)/area.owin(cells$window) # should be about 1 if model is correct Y <- setmarks(cells, ee) plot(Y, main="Cells data\n Exponential energy marks") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/edge.Trans.Rd0000644000176000001440000000620412237642732015213 0ustar ripleyusers\name{edge.Trans} \alias{edge.Trans} \title{ Translation Edge Correction } \description{ Computes Ohser and Stoyan's translation edge correction weights for a point pattern. } \usage{ edge.Trans(X, Y = X, W = X$window, exact = FALSE, paired = FALSE, trim = spatstat.options("maxedgewt")) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{W}{ Window for which the edge correction is required. } \item{exact}{ Logical. If \code{TRUE}, a slow algorithm will be used to compute the exact value. If \code{FALSE}, a fast algorithm will be used to compute the approximate value. 
}
\item{paired}{
  Logical value indicating whether \code{X} and \code{Y} are paired.
  If \code{TRUE}, compute the edge correction
  for corresponding points \code{X[i], Y[i]} for all \code{i}.
  If \code{FALSE}, compute the edge correction
  for each possible pair of points \code{X[i], Y[j]}
  for all \code{i} and \code{j}.
}
\item{trim}{
  Maximum permitted value of the edge correction weight.
}
}
\details{
  This function computes Ohser and Stoyan's translation
  edge correction weight, which is used in estimating the \eqn{K} function
  and in many other contexts.

  For a pair of points \eqn{x} and \eqn{y} in a window \eqn{W},
  the translation edge correction weight is
  \deqn{
    e(x, y) = \frac{\mbox{area}(W)}{\mbox{area}(W \cap (W + y - x))}
  }{
    e(x, y) = area(W) / area(intersect(W, W + y - x))
  }
  where \eqn{W + y - x} is the result of shifting the window \eqn{W}
  by the vector \eqn{y - x}. The denominator is the area of the
  overlap between this shifted window and the original window.

  The function \code{edge.Trans} computes this edge correction weight.
  If \code{paired=TRUE}, then \code{X} and \code{Y} should contain the
  same number of points. The result is a vector containing the
  edge correction weights \code{e(X[i], Y[i])} for each \code{i}.

  If \code{paired=FALSE}, then the result is a matrix whose \code{i,j}
  entry gives the edge correction weight \code{e(X[i], Y[j])}.

  Computation is exact if the window is a rectangle.
  Otherwise,
  \itemize{
    \item
    if \code{exact=TRUE}, the edge
    correction weights are computed exactly using
    \code{\link{overlap.owin}}, which can be quite slow.
    \item
    if \code{exact=FALSE} (the default),
    the weights are computed rapidly by evaluating the
    set covariance function \code{\link{setcov}}
    using the Fast Fourier Transform.
  }
  If any value of the edge correction weight exceeds \code{trim},
  it is set to \code{trim}.
}
\value{
  Numeric vector or matrix.
}
\references{
  Ohser, J. (1983)
  On estimators for the reduced second moment measure of
  point processes.
\emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. } \seealso{ \code{\link{edge.Ripley}}, \code{\link{setcov}}, \code{\link{Kest}} } \examples{ v <- edge.Trans(cells) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/japanesepines.Rd0000755000176000001440000000221012237642732016042 0ustar ripleyusers\name{japanesepines} \alias{japanesepines} \docType{data} \title{ Japanese Pines Point Pattern } \description{ The data give the locations of Japanese black pine saplings in a square sampling region in a natural forest. The observations were originally collected by Numata (1961). These data are used as a standard example in the textbook of Diggle (2003); see pages 1, 14, 19, 22, 24, 56--57 and 61. } \format{ An object of class \code{"ppp"} representing the point pattern of tree locations in a 5.7 x 5.7 metre square, rescaled to the unit square and rounded to two decimal places. See \code{\link{ppp.object}} for details of the format of a point pattern object. } \usage{data(japanesepines)} \source{Diggle (2003), obtained from Numata (1961)} \references{ Diggle, P.J. (2003) \emph{Statistical Analysis of Spatial Point Patterns}. Arnold Publishers. Numata, M. (1961) Forest vegetation in the vicinity of Choshi. Coastal flora and vegetation at Choshi, Chiba Prefecture. IV. \emph{Bulletin of Choshi Marine Laboratory, Chiba University} \bold{3}, 28--48 (in Japanese). 
} \keyword{datasets} \keyword{spatial} spatstat/man/Kdot.inhom.Rd0000755000176000001440000002331012237642731015232 0ustar ripleyusers\name{Kdot.inhom} \alias{Kdot.inhom} \title{ Inhomogeneous Multitype K Dot Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the dot \eqn{K} function, which counts the expected number of points of any type within a given distance of a point of type \eqn{i}, adjusted for spatially varying intensity. } \usage{ Kdot.inhom(X, i, lambdaI=NULL, lambdadot=NULL, \dots, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIdot=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{K} function \eqn{K_{i\bullet}(r)}{Ki.(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. Values of the estimated intensity of the entire point process, Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X}, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Ignored. } \item{r}{ Optional. 
Numeric vector giving the values of the argument \eqn{r}
at which the dot K function \eqn{K_{i\bullet}(r)}{Ki.(r)}
should be evaluated.
There is a sensible default.
First-time users are strongly advised not to specify this argument.
See below for important conditions on \eqn{r}.
}
\item{breaks}{
  Optional. An alternative to the argument \code{r}.
  Not normally invoked by the user. See the \bold{Details} section.
}
\item{correction}{
  A character vector containing any selection of the
  options \code{"border"}, \code{"bord.modif"},
  \code{"isotropic"}, \code{"Ripley"}, \code{"translate"},
  \code{"translation"}, \code{"none"} or \code{"best"}.
  It specifies the edge correction(s) to be applied.
}
\item{sigma}{
  Standard deviation of isotropic Gaussian smoothing kernel,
  used in computing leave-one-out kernel estimates of
  \code{lambdaI}, \code{lambdadot} if they are omitted.
}
\item{varcov}{
  Variance-covariance matrix of anisotropic Gaussian kernel,
  used in computing leave-one-out kernel estimates of
  \code{lambdaI}, \code{lambdadot} if they are omitted.
  Incompatible with \code{sigma}.
}
\item{lambdaIdot}{
  Optional. A matrix containing estimates of the
  product of the intensities \code{lambdaI} and \code{lambdadot}
  for each pair of points, the first point of type \code{i} and
  the second of any type.
}
}
\value{
  An object of class \code{"fv"} (see \code{\link{fv.object}}).

  Essentially a data frame containing numeric columns
  \item{r}{the values of the argument \eqn{r}
    at which the function \eqn{K_{i\bullet}(r)}{Ki.(r)} has been estimated
  }
  \item{theo}{the theoretical value of \eqn{K_{i\bullet}(r)}{Ki.(r)}
    for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2}
  }
  together with a column or columns named
  \code{"border"}, \code{"bord.modif"},
  \code{"iso"} and/or \code{"trans"},
  according to the selected edge corrections. These columns contain
  estimates of the function \eqn{K_{i\bullet}(r)}{Ki.(r)}
  obtained by the edge corrections named.
} \details{ This is a generalisation of the function \code{\link{Kdot}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. Briefly, given a multitype point process, consider the points without their types, and suppose this unmarked point process has intensity function \eqn{\lambda(u)}{lambda(u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda(\zeta)}{1/lambda(z)} at each point \eqn{\zeta}{z} of the process. Then the expected total mass per unit area is 1. The inhomogeneous ``dot-type'' \eqn{K} function \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}, discounting this point itself. If the process of type \eqn{i} points were independent of the points of other types, then \eqn{K_{i\bullet}^{\mbox{inhom}}(r)}{K[i.]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{i\bullet}}{Ki.} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j} for \eqn{j\neq i}{j != i}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument \code{i} will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} is missing, it defaults to the first level of the marks factor, \code{i = levels(X$marks)[1]}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. 
It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly the argument \code{lambdadot} should contain estimated values of the intensity of the entire point process. It may be either a pixel image, a numeric vector of length equal to the number of points in \code{X}, a function, or omitted. For advanced use only, the optional argument \code{lambdaIdot} is a matrix containing estimated values of the products of these two intensities for each pair of points, the first point of type \code{i} and the second of any type. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. 
The argument \code{correction} chooses the edge correction as explained e.g. in \code{\link{Kest}}. The pair correlation function can also be applied to the result of \code{Kcross.inhom}; see \code{\link{pcf}}. } \references{ Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. } \seealso{ \code{\link{Kdot}}, \code{\link{Kinhom}}, \code{\link{Kcross.inhom}}, \code{\link{Kmulti.inhom}}, \code{\link{pcf}} } \examples{ # Lansing Woods data data(lansing) lansing <- lansing[seq(1,lansing$n, by=10)] ma <- split(lansing)$maple lg <- unmark(lansing) # Estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdadot <- density.ppp(lg, sigma=0.15, at="points") K <- Kdot.inhom(lansing, "maple", lambdaI=lambdaM, lambdadot=lambdadot) # Equivalent K <- Kdot.inhom(lansing, "maple", sigma=0.15) # synthetic example: type A points have intensity 50, # type B points have intensity 50 + 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) lamdot <- as.im(function(x,y) { 100 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) K <- Kdot.inhom(X, "B", lambdaI=lamB, lambdadot=lamdot) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/pairsat.family.Rd0000755000176000001440000000454312237642733016154 0ustar ripleyusers\name{pairsat.family} \alias{pairsat.family} \title{Saturated Pairwise Interaction Point Process Family} \description{ An object describing the Saturated Pairwise Interaction family of point process models } 
\details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the ``saturated pairwise interaction'' family of point process models. If you need to create a specific interaction model for use in spatial pattern analysis, use the function \code{\link{Saturated}()} or the two existing implementations of models in this family, \code{\link{Geyer}()} and \code{\link{SatPiece}()}. Geyer (1999) introduced the ``saturation process'', a modification of the Strauss process in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{c}. This model is implemented in the function \code{\link{Geyer}()}. The present class \code{pairsat.family} is the extension of this saturation idea to all pairwise interactions. Note that the resulting models are no longer pairwise interaction processes - they have interactions of infinite order. \code{pairsat.family} is an object of class \code{"isf"} containing a function \code{pairwise$eval} for evaluating the sufficient statistics of any saturated pairwise interaction point process model in which the original pair potentials take an exponential family form. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{Geyer}} to create the Geyer saturation process. \code{\link{SatPiece}} to create a saturated process with piecewise constant pair potential. \code{\link{Saturated}} to create a more general saturation model. Other families: \code{\link{inforder.family}}, \code{\link{ord.family}}, \code{\link{pairwise.family}}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/rescale.ppp.Rd0000755000176000001440000000376412237642734015452 0ustar ripleyusers\name{rescale.ppp} \alias{rescale.ppp} \title{Convert Point Pattern to Another Unit of Length} \description{ Converts a point pattern dataset to another unit of length. } \usage{ \method{rescale}{ppp}(X, s) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} } \value{ Another point pattern (of class \code{"ppp"}), representing the same data, but expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates in the point pattern \code{X} (and its window) will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). The result is a point pattern representing the \emph{same} data but re-expressed in a different unit. Mark values are unchanged. If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. } \section{Note}{ The result of this operation is equivalent to the original point pattern. If you want to actually change the coordinates by a linear transformation, producing a point pattern that is not equivalent to the original one, use \code{\link{affine}}. 
} \seealso{ \code{\link{unitname}}, \code{\link{rescale}}, \code{\link{rescale.owin}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ # Bramble Canes data: 1 unit = 9 metres data(bramblecanes) # convert to metres bram <- rescale(bramblecanes, 1/9) # or equivalently bram <- rescale(bramblecanes) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/Jest.Rd0000755000176000001440000002275112237642731014135 0ustar ripleyusers\name{Jest} \alias{Jest} \title{Estimate the J-function} \description{ Estimates the summary function \eqn{J(r)} for a point pattern in a window of arbitrary shape. } \usage{ Jest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{J(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{vector of values for the argument \eqn{r} at which \eqn{J(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. } \item{breaks}{ An alternative to the argument \code{r}. Not normally invoked by the user. See Details section. } \item{correction}{ Optional. Character string specifying the choice of edge correction(s) in \code{\link{Fest}} and \code{\link{Gest}}. See Details. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{J} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J(r)} computed from the border-corrected estimates of \eqn{F} and \eqn{G} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J(r)} computed from the Kaplan-Meier estimates of \eqn{F} and \eqn{G} } \item{han}{the Hanisch-style estimator of \eqn{J(r)} computed from the Hanisch estimate of \eqn{G} and the Chiu-Stoyan estimate of \eqn{F} } \item{un}{the uncorrected estimate of \eqn{J(r)} computed from the uncorrected estimates of \eqn{F} and \eqn{G} } \item{theo}{the theoretical value of \eqn{J(r)} for a stationary Poisson process: identically equal to \eqn{1} } The data frame also has \bold{attributes} \item{F}{ the output of \code{\link{Fest}} for this point pattern, containing three estimates of the empty space function \eqn{F(r)} and an estimate of its hazard function } \item{G}{ the output of \code{\link{Gest}} for this point pattern, containing three estimates of the nearest neighbour distance distribution function \eqn{G(r)} and an estimate of its hazard function } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{J} function (Van Lieshout and Baddeley, 1996) of a stationary point process is defined as \deqn{J(r) = \frac{1-G(r)}{1-F(r)} }{ % J(r) = (1-G(r))/(1-F(r))} where \eqn{G(r)} is the nearest neighbour distance distribution function of the point process (see \code{\link{Gest}}) and \eqn{F(r)} is its empty space function (see \code{\link{Fest}}). For a completely random (uniform Poisson) point process, the \eqn{J}-function is identically equal to \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} typically indicate spatial clustering or spatial regularity, respectively. The \eqn{J}-function is one of the few characteristics that can be computed explicitly for a wide range of point processes. 
See Van Lieshout and Baddeley (1996), Baddeley et al (2000), Thonnes and Van Lieshout (1999) for further information. An estimate of \eqn{J} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{J(r)} is compared against the constant function \eqn{1}. Deviations \eqn{J(r) < 1} or \eqn{J(r) > 1} may suggest spatial clustering or spatial regularity, respectively. This algorithm estimates the \eqn{J}-function from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The functions \code{\link{Fest}} and \code{\link{Gest}} are called to compute estimates of \eqn{F(r)} and \eqn{G(r)} respectively. These estimates are then combined by simply taking the ratio \eqn{J(r) = (1-G(r))/(1-F(r))}. In fact several different estimates are computed using different edge corrections (Baddeley, 1998). The Kaplan-Meier estimate (returned as \code{km}) is the ratio \code{J = (1-G)/(1-F)} of the Kaplan-Meier estimates of \eqn{1-F} and \eqn{1-G} computed by \code{\link{Fest}} and \code{\link{Gest}} respectively. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"km"}. The Hanisch-style estimate (returned as \code{han}) is the ratio \code{J = (1-G)/(1-F)} where \code{F} is the Chiu-Stoyan estimate of \eqn{F} and \code{G} is the Hanisch estimate of \eqn{G}. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"cs"} or \code{"han"}. 
The reduced-sample or border corrected estimate (returned as \code{rs}) is the same ratio \code{J = (1-G)/(1-F)} of the border corrected estimates. This is computed if \code{correction=NULL} or if \code{correction} includes \code{"rs"} or \code{"border"}. These edge-corrected estimators are slightly biased for \eqn{J}, since they are ratios of approximately unbiased estimators. The logarithm of the Kaplan-Meier estimate is exactly unbiased for \eqn{\log J}{log J}. The uncorrected estimate (returned as \code{un} and computed only if \code{correction} includes \code{"none"}) is the ratio \code{J = (1-G)/(1-F)} of the uncorrected (``raw'') estimates of the survival functions of \eqn{F} and \eqn{G}, which are the empirical distribution functions of the empty space distances \code{Fest(X,\dots)$raw} and of the nearest neighbour distances \code{Gest(X,\dots)$raw}. The uncorrected estimates of \eqn{F} and \eqn{G} are severely biased. However the uncorrected estimate of \eqn{J} is approximately unbiased (if the process is close to Poisson); it is insensitive to edge effects, and should be used when edge effects are severe (see Baddeley et al, 2000). The algorithm for \code{\link{Fest}} uses two discrete approximations which are controlled by the parameter \code{eps} and by the spacing of values of \code{r} respectively. See \code{\link{Fest}} for details. First-time users are strongly advised not to specify these arguments. Note that the value returned by \code{Jest} includes the output of \code{\link{Fest}} and \code{\link{Gest}} as attributes (see the last example below). If the user is intending to compute the \code{F,G} and \code{J} functions for the point pattern, it is only necessary to call \code{Jest}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37--78. Baddeley, A.J. and Gill, R.D. 
The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263--292. Baddeley, A., Kerscher, M., Schladitz, K. and Scott, B.T. Estimating the \emph{J} function without edge correction. \emph{Statistica Neerlandica} \bold{54} (2000) 315--328. Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344--371. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Thonnes, E. and Van Lieshout, M.N.M, A comparative study on the power of Van Lieshout and Baddeley's J-function. \emph{Biometrical Journal} \bold{41} (1999) 721--734. Van Lieshout, M.N.M. and Baddeley, A.J. A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50} (1996) 344--361. 
} \seealso{ \code{\link{Jinhom}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ data(cells) J <- Jest(cells, 0.01) plot(J, main="cells data") # values are far above J = 1, indicating regular pattern data(redwood) J <- Jest(redwood, 0.01, legendpos="center") plot(J, main="redwood data") # values are below J = 1, indicating clustered pattern } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/spatstat-deprecated.Rd0000755000176000001440000000225112237642731017162 0ustar ripleyusers\name{spatstat-deprecated} \alias{clf.test} \alias{conspire} \alias{eval.hyper} \alias{ksmooth.ppp} \alias{ks.test.ppm} \alias{mpl} \alias{rtoro} \alias{superimposePSP} \title{Deprecated spatstat functions} \description{ Deprecated spatstat functions. } \usage{ clf.test(\dots) conspire(\dots) ksmooth.ppp(x, sigma, \dots, edge=TRUE) ks.test.ppm(\dots) mpl(Q, trend, interaction, data, correction, rbord, use.gam) rtoro(X, which=NULL, radius=NULL, width=NULL, height=NULL) eval.hyper(e, h, simplify=TRUE, ee=NULL) superimposePSP(\dots, W=NULL, check=TRUE) } \details{ \code{clf.test} has been renamed \code{\link{dclf.test}}. \code{conspire} has been replaced by \code{\link{plot.fv}}. \code{ksmooth.ppp} has been replaced by \code{\link{density.ppp}}. \code{mpl} has been replaced by \code{\link{ppm}}. \code{ks.test.ppm} has been replaced by \code{\link{kstest.ppm}}. \code{rtoro} has been replaced by \code{\link{rshift}}. \code{eval.hyper} has been replaced by \code{\link{with.hyperframe}}. \code{superimposePSP} has been replaced by \code{\link{superimpose.psp}} which is a method for the generic function \code{\link{superimpose}}. 
} \keyword{internal} spatstat/man/markcorr.Rd0000644000176000001440000002542712237642733015052 0ustar ripleyusers\name{markcorr} \alias{markcorr} \title{ Mark Correlation Function } \description{ Estimate the marked correlation function of a marked point pattern. } \usage{ markcorr(X, f = function(m1, m2) { m1 * m2}, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, f1=NULL, normalise=TRUE, fargs=NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. } \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. 
} } \value{ A function value table (object of class \code{"fv"}) or a list of function value tables, one for each column of marks. An object of class \code{"fv"} (see \code{\link{fv.object}}) is essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{k_f(r)}{k[f](r)} when the marks attached to different points are independent, namely 1 } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark correlation function \eqn{k_f(r)}{k[f](r)} obtained by the edge corrections named. } \details{ By default, this command calculates an estimate of Stoyan's mark correlation \eqn{k_{mm}(r)}{k[mm](r)} for the point pattern. Alternatively if the argument \code{f} or \code{f1} is given, then it calculates Stoyan's generalised mark correlation \eqn{k_f(r)}{k[f](r)} with test function \eqn{f}. Theoretical definitions are as follows (see Stoyan and Stoyan (1994, p. 262)): \itemize{ \item For a point process \eqn{X} with numeric marks, Stoyan's mark correlation function \eqn{k_{mm}(r)}{k[mm](r)}, is \deqn{ k_{mm}(r) = \frac{E_{0u}[M(0) M(u)]}{E[M,M']} }{ k[mm](r) = E[0u](M(0) * M(u))/E(M * M') } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0),M(u)} denote the marks attached to these two points. On the denominator, \eqn{M,M'} are random marks drawn independently from the marginal distribution of marks, and \eqn{E} is the usual expectation. 
\item
For a multitype point process \eqn{X}, the mark correlation is
\deqn{
  k_{mm}(r) = \frac{P_{0u}[M(0) = M(u)]}{P[M = M']}
}{
  k[mm](r) = P[0u](M(0) = M(u))/P(M = M')
}
where \eqn{P} and \eqn{P_{0u}}{P[0u]} denote the probability and
conditional probability.

\item
The \emph{generalised} mark correlation function
\eqn{k_f(r)}{k[f](r)} of a marked point process \eqn{X},
with test function \eqn{f}, is
\deqn{
  k_f(r) = \frac{E_{0u}[f(M(0),M(u))]}{E[f(M,M')]}
}{
  k[f](r) = E[0u](f(M(0),M(u)))/E(f(M,M'))
}
}

The test function \eqn{f} is any function
\eqn{f(m_1,m_2)}{f(m1,m2)}
with two arguments which are possible marks of the pattern,
and which returns a nonnegative real value.
Common choices of \eqn{f} are:
for continuous nonnegative real-valued marks,
\deqn{f(m_1,m_2) = m_1 m_2}{f(m1,m2)= m1 * m2}
for discrete marks (multitype point patterns),
\deqn{f(m_1,m_2) = 1(m_1 = m_2)}{f(m1,m2)= (m1 == m2)}
and for marks taking values in \eqn{[0,2\pi)}{[0,2 * pi)},
\deqn{f(m_1,m_2) = \sin(m_1 - m_2)}{f(m1,m2) = sin(m1-m2)}.

Note that \eqn{k_f(r)}{k[f](r)} is not a ``correlation''
in the usual statistical sense. It can take any
nonnegative real value. The value 1
suggests ``lack of correlation'':
if the marks attached to the points of \code{X} are independent
and identically distributed, then
\eqn{k_f(r) \equiv 1}{k[f](r) = 1}.
The interpretation of values larger or smaller than 1
depends on the choice of function \eqn{f}.

The argument \code{X} must be a point pattern (object of class
\code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}.
It must be a marked point pattern.

The argument \code{f} determines the function to be applied to
pairs of marks. It has a sensible default, which depends on the
kind of marks in \code{X}. If the marks
are numeric values, then \code{f <- function(m1, m2) { m1 * m2}}
computes the product of two marks.
If the marks are a factor (i.e.
if \code{X} is a multitype point pattern) then \code{f <- function(m1, m2) { m1 == m2}} yields the value 1 when the two marks are equal, and 0 when they are unequal. These are the conventional definitions for numerical marks and multitype points respectively. The argument \code{f} may be specified by the user. It must be an \R function, accepting two arguments \code{m1} and \code{m2} which are vectors of equal length containing mark values (of the same type as the marks of \code{X}). (It may also take additional arguments, passed through \code{fargs}). It must return a vector of numeric values of the same length as \code{m1} and \code{m2}. The values must be non-negative, and \code{NA} values are not permitted. Alternatively the user may specify the argument \code{f1} instead of \code{f}. This indicates that the test function \eqn{f} should take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)} where \eqn{f_1(u)}{f1(u)} is given by the argument \code{f1}. The argument \code{f1} should be an \R function with at least one argument. (It may also take additional arguments, passed through \code{fargs}). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{k_f(r)}{k[f](r)} is estimated. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. 
} } Note that the estimator assumes the process is stationary (spatially homogeneous). The numerator and denominator of the mark correlation function (in the expression above) are estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } If \code{normalise=FALSE} then the algorithm will compute only the numerator \deqn{ c_f(r) = E_{0u} f(M(0),M(u)) }{ c[f](r) = E[0u] f(M(0),M(u)) } of the expression for the mark correlation function. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ Mark variogram \code{\link{markvario}} for numeric marks. Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. \code{\link{markcorrint}} to estimate the indefinite integral of the mark correlation function. 
} \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter data(spruces) # mark correlation function ms <- markcorr(spruces) plot(ms) # (2) simulated data with independent marks X <- rpoispp(100) X <- X \%mark\% runif(X$n) \dontrun{ Xc <- markcorr(X) plot(Xc) } # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' data(amacrine) # (3) Kernel density estimate with Epanechnikov kernel # (as proposed by Stoyan & Stoyan) M <- markcorr(amacrine, function(m1,m2) {m1==m2}, correction="translate", method="density", kernel="epanechnikov") plot(M) # Note: kernel="epanechnikov" comes from help(density) # (4) Same again with explicit control over bandwidth \dontrun{ M <- markcorr(amacrine, correction="translate", method="density", kernel="epanechnikov", bw=0.02) # see help(density) for correct interpretation of 'bw' } \testonly{ niets <- markcorr(amacrine, function(m1,m2){m1 == m2}, method="loess") niets <- markcorr(X, correction="isotropic", method="smrep", hmult=2) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.msr.Rd0000755000176000001440000000301012237642733014773 0ustar ripleyusers\name{plot.msr} \alias{plot.msr} \title{Plot a Signed or Vector-Valued Measure} \description{ Plot a signed measure or vector-valued measure. } \usage{ \method{plot}{msr}(x, \dots) } \arguments{ \item{x}{ The signed or vector measure to be plotted. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{\dots}{ Extra arguments passed to \code{\link{Smooth.ppp}} to control the interpolation of the continuous density component of \code{x}, or passed to \code{\link{plot.im}} or \code{\link{plot.ppp}} to control the appearance of the plot. } } \value{ none. } \details{ This is the \code{plot} method for the class \code{"msr"}. 
The continuous density component of \code{x} is interpolated from the existing data by \code{\link{Smooth.ppp}}, and then displayed as a colour image by \code{\link{plot.im}}. The discrete atomic component of \code{x} is then superimposed on this image by plotting the atoms as circles (for positive mass) or squares (for negative mass) by \code{\link{plot.ppp}}. To smooth both the discrete and continuous components, use \code{\link{Smooth.msr}}. } \seealso{ \code{\link{msr}}, \code{\link{Smooth.ppp}}, \code{\link{Smooth.msr}}, \code{\link{plot.im}}, \code{\link{plot.ppp}} } \examples{ example(msr) plot(rp) plot(rs) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/bind.fv.Rd0000755000176000001440000000641712237642732014560 0ustar ripleyusers\name{bind.fv} \alias{bind.fv} \alias{cbind.fv} \title{ Combine Function Value Tables } \description{ Advanced Use Only. Combine objects of class \code{"fv"}, or glue extra columns of data onto an existing \code{"fv"} object. } \usage{ \method{cbind}{fv}(...) bind.fv(x, y, labl = NULL, desc = NULL, preferred = NULL) } \arguments{ \item{\dots}{ Any number of arguments, which are objects of class \code{"fv"}. } \item{x}{ An object of class \code{"fv"}. } \item{y}{ Either a data frame or an object of class \code{"fv"}. } \item{labl}{ Plot labels (see \code{\link{fv}}) for columns of \code{y}. A character vector. } \item{desc}{ Descriptions (see \code{\link{fv}}) for columns of \code{y}. A character vector. } \item{preferred}{ Character string specifying the column which is to be the new recommended value of the function. } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. The function \code{cbind.fv} is a method for the generic \R function \code{\link{cbind}}. 
It combines any number of objects of class \code{"fv"} into a single object of class \code{"fv"}. The objects must be compatible, in the sense that they have identical values of the function argument. The function \code{bind.fv} is a lower level utility which glues additional columns onto an existing object \code{x} of class \code{"fv"}. It has two modes of use: \itemize{ \item If the additional dataset \code{y} is an object of class \code{"fv"}, then \code{x} and \code{y} must be compatible as described above. Then the columns of \code{y} that contain function values will be appended to the object \code{x}. \item Alternatively if \code{y} is a data frame, then \code{y} must have the same number of rows as \code{x}. All columns of \code{y} will be appended to \code{x}. } The arguments \code{labl} and \code{desc} provide plot labels and description strings (as described in \code{\link{fv}}) for the \emph{new} columns. If \code{y} is an object of class \code{"fv"} then \code{labl} and \code{desc} are optional, and default to the relevant entries in the object \code{y}. If \code{y} is a data frame then \code{labl} and \code{desc} must be provided. } \value{ An object of class \code{"fv"}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ data(cells) K1 <- Kest(cells, correction="border") K2 <- Kest(cells, correction="iso") # remove column 'theo' to avoid duplication K2 <- K2[, names(K2) != "theo"] cbind(K1, K2) bind.fv(K1, K2, preferred="iso") # constrain border estimate to be monotonically increasing bm <- cumsum(c(0, pmax(0, diff(K1$border)))) bind.fv(K1, data.frame(bmono=bm), "\%s[bmo](r)", "monotone border-corrected estimate of \%s", "bmono") } \seealso{ \code{\link{fv}}, \code{\link{with.fv}}. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}. 
} \keyword{spatial} \keyword{attribute} spatstat/man/perimeter.Rd0000755000176000001440000000240712237642733015222 0ustar ripleyusers\name{perimeter} \Rdversion{1.1} \alias{perimeter} \title{ Perimeter Length of Window } \description{ Computes the perimeter length of a window } \usage{ perimeter(w) } \arguments{ \item{w}{ A window (object of class \code{"owin"}) or data that can be converted to a window by \code{\link{as.owin}}. } } \details{ This function computes the perimeter (length of the boundary) of the window \code{w}. If \code{w} is a rectangle or a polygonal window, the perimeter is the sum of the lengths of the edges of \code{w}. If \code{w} is a mask, it is first converted to a polygonal window using \code{\link{as.polygonal}}, then staircase edges are removed using \code{\link{simplify.owin}}, and the perimeter of the resulting polygon is computed. } \value{ A numeric value giving the perimeter length of the window. } \seealso{ \code{\link{area.owin}} \code{\link{diameter.owin}}, \code{\link{owin.object}}, \code{\link{as.owin}} } \examples{ perimeter(square(3)) data(letterR) perimeter(letterR) if(interactive()) print(perimeter(as.mask(letterR))) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/fitin.Rd0000755000176000001440000000423212237642732014334 0ustar ripleyusers\name{fitin.ppm} \alias{fitin} \alias{fitin.ppm} \title{Extract the Interaction from a Fitted Point Process Model} \description{ Given a point process model that has been fitted to point pattern data, this function extracts the interpoint interaction part of the model as a separate object. } \usage{ fitin(object) \method{fitin}{ppm}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"}). } } \details{ An object of class \code{"ppm"} describes a fitted point process model. 
It contains information about the original data to which the model was fitted, the spatial trend that was fitted, the interpoint interaction that was fitted, and other data. See \code{\link{ppm.object}} for details of this class. The function \code{fitin} extracts from this model the information about the fitted interpoint interaction only. The information is organised as an object of class \code{"fii"} (fitted interpoint interaction). This object can be printed or plotted. Users may find this a convenient way to plot the fitted interpoint interaction term, as shown in the Examples. The fitted interaction coefficients can also be extracted from this object using \code{\link{coef}}. } \value{ An object of class \code{"fii"} representing the fitted interpoint interaction. This object can be printed and plotted. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ Methods for handling fitted interactions: \code{\link{methods.fii}}, \code{\link{reach.fii}}, \code{\link{as.interact.fii}}. Background: \code{\link{ppm}}, \code{\link{ppm.object}}. } \examples{ # unmarked model <- ppm(swedishpines, ~1, PairPiece(seq(3,19,by=4))) f <- fitin(model) f plot(f) # extract fitted interaction coefficients coef(f) # multitype # fit the stationary multitype Strauss process to `amacrine' r <- 0.02 * matrix(c(1,2,2,1), nrow=2,ncol=2) model <- ppm(amacrine, ~1, MultiStrauss(c("off","on"), r)) f <- fitin(model) f plot(f) } \keyword{spatial} \keyword{models} spatstat/man/Hardcore.Rd0000755000176000001440000000533512237642731014756 0ustar ripleyusers\name{Hardcore} \alias{Hardcore} \title{The Hard Core Point Process Model} \description{ Creates an instance of the hard core point process model which can then be fitted to point pattern data. 
} \usage{ Hardcore(hc=NA) } \arguments{ \item{hc}{The hard core distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the hard core process with hard core distance \code{hc}. } \details{ A hard core process with hard core distance \eqn{h} and abundance parameter \eqn{\beta}{beta} is a pairwise interaction point process in which distinct points are not allowed to come closer than a distance \eqn{h} apart. The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{\alpha}{alpha} is the normalising constant. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the hard core process pairwise interaction is yielded by the function \code{Hardcore()}. See the examples below. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiHard}}, \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. 
} \examples{ Hardcore(0.02) # prints a sensible description of itself data(cells) \dontrun{ ppm(cells, ~1, Hardcore(0.05)) # fit the stationary hard core process to `cells' } ppm(cells, ~1, Hardcore()) ppm(cells, ~ polynom(x,y,3), Hardcore(0.05)) # fit a nonstationary hard core process # with log-cubic polynomial trend } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/chorley.Rd0000755000176000001440000000722012237642732014670 0ustar ripleyusers\name{chorley} \alias{chorley} \alias{chorley.extra} \docType{data} \title{Chorley-Ribble Cancer Data} \description{ Spatial locations of cases of cancer of the larynx and cancer of the lung, and the location of a disused industrial incinerator. A marked point pattern. } \format{ The dataset \code{chorley} is an object of class \code{"ppp"} representing a marked point pattern. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of home address \cr \code{y} \tab Cartesian \eqn{y}-coordinate of home address \cr \code{marks} \tab factor with levels \code{larynx} and \code{lung} \cr \tab indicating whether this is a case of cancer of the larynx\cr \tab or cancer of the lung. } See \code{\link{ppp.object}} for details of the format. The dataset \code{chorley.extra} is a list with two components. The first component \code{plotit} is a function which will plot the data in a sensible fashion. The second component \code{incin} is a list with entries \code{x} and \code{y} giving the location of the industrial incinerator. Coordinates are given in kilometres, and the resolution is 100 metres (0.1 km) } \usage{data(chorley)} \examples{ chorley summary(chorley) chorley.extra$plotit() } \source{ Coordinates of cases were provided by the Chorley and South Ribble Health Authority, and were kindly supplied by Professor Peter Diggle. 
Region boundary was digitised by Adrian Baddeley, 2005, from a photograph of an Ordnance Survey map. } \section{Notes}{ The data give the precise domicile addresses of new cases of cancer of the larynx (58 cases) and cancer of the lung (978 cases), recorded in the Chorley and South Ribble Health Authority of Lancashire (England) between 1974 and 1983. The supplementary data give the location of a disused industrial incinerator. The data were first presented and analysed by Diggle (1990). They have subsequently been analysed by Diggle and Rowlingson (1994) and Baddeley et al. (2005). The aim is to assess evidence for an increase in the incidence of cancer of the larynx in the vicinity of the now-disused industrial incinerator. The lung cancer cases serve as a surrogate for the spatially-varying density of the susceptible population. The data are represented as a marked point pattern, with the points giving the spatial location of each individual's home address and the marks identifying whether each point is a case of laryngeal cancer or lung cancer. Coordinates are in kilometres, and the resolution is 100 metres (0.1 km). The dataset \code{chorley} has a polygonal window with 132 edges which closely approximates the boundary of the Chorley and South Ribble Health Authority. Note that, due to the rounding of spatial coordinates, the data contain duplicated points (two points at the same location). To determine which points are duplicates, use \code{\link{duplicated.ppp}}. To remove the duplication, use \code{\link{unique.ppp}}. } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Diggle, P. (1990) A point process modelling approach to raised incidence of a rare phenomenon in the vicinity of a prespecified point. \emph{Journal of the Royal Statistical Soc. Series A} \bold{153}, 349-362. Diggle, P. and Rowlingson, B. 
(1994) A conditional approach to point process modelling of elevated risk. \emph{Journal of the Royal Statistical Soc. Series A} \bold{157}, 433-440. } \keyword{datasets} \keyword{spatial} spatstat/man/pppmatching.object.Rd0000755000176000001440000000663212237642733017011 0ustar ripleyusers\name{pppmatching.object} \alias{pppmatching.object} %DoNotExport \title{Class of Point Matchings} \description{ A class \code{"pppmatching"} to represent a matching of two planar point patterns. Optionally includes information about the construction of the matching and its associated distance between the point patterns. } \details{ This class represents a (possibly weighted and incomplete) matching between two planar point patterns (objects of class \code{"ppp"}). A matching can be thought of as a bipartite weighted graph where the vertices are given by the two point patterns and edges of positive weights are drawn each time a point of the first point pattern is "matched" with a point of the second point pattern. If \code{m} is an object of type \code{pppmatching}, it contains the following elements \tabular{ll}{ \code{pp1, pp2} \tab the two point patterns to be matched (vertices) \cr \code{matrix} \tab a matrix specifying which points are matched \cr \tab and with what weights (edges) \cr \code{type} \tab (optional) a character string for the type of \cr \tab the matching (one of \code{"spa"}, \code{"ace"} or \code{"mat"}) \cr \code{cutoff} \tab (optional) cutoff value for interpoint distances \cr \code{q} \tab (optional) the order for taking averages of \cr \tab interpoint distances \cr \code{distance} \tab (optional) the distance associated with the matching } The element \code{matrix} is a "generalized adjacency matrix". The numbers of rows and columns match the cardinalities of the first and second point patterns, respectively. 
The \code{[i,j]}-th entry is positive if the \code{i}-th point of \code{X} and the \code{j}-th point of \code{Y} are matched (zero otherwise) and its value then gives the corresponding weight of the match. For an unweighted matching all the weights are set to \eqn{1}. The optional elements are for saving details about matchings in the context of optimal point matching techniques. \code{type} can be one of \code{"spa"} (for "subpattern assignment"), \code{"ace"} (for "assignment only if cardinalities differ") or \code{"mat"} (for "mass transfer"). \code{cutoff} is a positive numerical value that specifies the maximal interpoint distance and \code{q} is a value in \eqn{[1,\infty]}{[1,Inf]} that gives the order of the average applied to the interpoint distances. See the help files for \code{\link{pppdist}} and \code{\link{matchingdist}} for detailed information about these elements. Objects of class \code{"pppmatching"} may be created by the function \code{\link{pppmatching}}, and are most commonly obtained as output of the function \code{\link{pppdist}}. There are methods \code{plot}, \code{print} and \code{summary} for this class. 
} \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{matchingdist}} \code{\link{pppmatching}} } \examples{ # a random complete unweighted matching X <- runifpoint(10) Y <- runifpoint(10) am <- r2dtable(1, rep(1,10), rep(1,10))[[1]] # generates a random permutation matrix m <- pppmatching(X, Y, am) summary(m) m$matrix \dontrun{ plot(m) } # an optimal complete unweighted matching m2 <- pppdist(X,Y) summary(m2) m2$matrix \dontrun{ plot(m2) } } \keyword{spatial} \keyword{attribute} spatstat/man/rDiggleGratton.Rd0000644000176000001440000000704112237642733016136 0ustar ripleyusers\name{rDiggleGratton} \alias{rDiggleGratton} \title{Perfect Simulation of the Diggle-Gratton Process} \description{ Generate a random pattern of points, a simulated realisation of the Diggle-Gratton process, using a perfect simulation algorithm. } \usage{ rDiggleGratton(beta, delta, rho, kappa=1, W = owin()) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{delta}{ hard core distance (a non-negative number). } \item{rho}{ interaction range (a number greater than \code{delta}). } \item{kappa}{ interaction exponent (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } } \details{ This function generates a realisation of the Diggle-Gratton point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. Diggle and Gratton (1984, pages 208-210) introduced the pairwise interaction point process with pair potential \eqn{h(t)} of the form \deqn{ h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa \quad\quad \mbox{ if } \delta \le t \le \rho }{ h(t) = ((t - delta)/(rho - delta))^kappa, { } delta <= t <= rho } with \eqn{h(t) = 0} for \eqn{t < \delta}{t < delta} and \eqn{h(t) = 1} for \eqn{t > \rho}{t > rho}. 
Here \eqn{\delta}{delta}, \eqn{\rho}{rho} and \eqn{\kappa}{kappa} are parameters. Note that we use the symbol \eqn{\kappa}{kappa} where Diggle and Gratton (1984) use \eqn{\beta}{beta}, since in \pkg{spatstat} we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. The parameters must all be nonnegative, and must satisfy \eqn{\delta \le \rho}{delta <= rho}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ A point pattern (object of class \code{"ppp"}). } \references{ Berthelsen, K.K. and Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} based on original code for the Strauss process by Kasper Klitgaard Berthelsen. 
} \examples{ X <- rDiggleGratton(50, 0.02, 0.07) } \seealso{ \code{\link{rmh}}, \code{\link{DiggleGratton}}, \code{\link{rStrauss}}, \code{\link{rHardcore}}. } \keyword{spatial} \keyword{datagen} spatstat/man/logLik.mppm.Rd0000644000176000001440000000214012241443111015366 0ustar ripleyusers\name{logLik.mppm} \alias{logLik.mppm} \title{Log Likelihood for Poisson Point Process Model} \description{ Extracts the log likelihood of a Poisson point process model that has been fitted to multiple point patterns } \usage{ \method{logLik}{mppm}(object, ...) } \arguments{ \item{object}{Fitted point process model (fitted to multiple point patterns). An object of class \code{"mppm"}. } \item{\dots}{Ignored.} } \details{ The maximised value of the log likelihood for the fitted model (as approximated by quadrature using the Berman-Turner approximation) is extracted. If \code{object} is not a Poisson process, the maximised log \emph{pseudolikelihood} is returned, with a warning. } \value{ A numerical value. } \seealso{ \code{\link{mppm}} } \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \examples{ data(waterstriders) fit <- mppm(Bugs ~ x, hyperframe(Bugs=waterstriders)) logLik(fit) AIC(fit) } \keyword{spatial} \keyword{models} spatstat/man/quadratcount.Rd0000755000176000001440000001227412237642733015743 0ustar ripleyusers\name{quadratcount} \alias{quadratcount} \alias{quadratcount.ppp} \alias{quadratcount.splitppp} \title{Quadrat counting for a point pattern} \description{ Divides window into quadrats and counts the numbers of points in each quadrat. } \usage{ quadratcount(X, \dots) \method{quadratcount}{ppp}(X, nx=5, ny=nx, \dots, xbreaks=NULL, ybreaks=NULL, tess=NULL) \method{quadratcount}{splitppp}(X, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or a split point pattern (object of class \code{"splitppp"}). 
} \item{nx,ny}{ Numbers of rectangular quadrats in the \eqn{x} and \eqn{y} directions. Incompatible with \code{xbreaks} and \code{ybreaks}. } \item{\dots}{Additional arguments passed to \code{quadratcount.ppp}.} \item{xbreaks}{ Numeric vector giving the \eqn{x} coordinates of the boundaries of the rectangular quadrats. Incompatible with \code{nx}. } \item{ybreaks}{ Numeric vector giving the \eqn{y} coordinates of the boundaries of the rectangular quadrats. Incompatible with \code{ny}. } \item{tess}{ Tessellation (object of class \code{"tess"}) determining the quadrats. Incompatible with \code{nx,ny,xbreaks,ybreaks}. } } \value{ The value of \code{quadratcount.ppp} is a contingency table containing the number of points in each quadrat. The table is also an object of the special class \code{"quadratcount"} and there is a plot method for this class. The value of \code{quadratcount.splitppp} is a list of such contingency tables, each containing the quadrat counts for one of the component point patterns in \code{X}. This list also has the class \code{"listof"} which has print and plot methods. } \details{ Quadrat counting is an elementary technique for analysing spatial point patterns. See Diggle (2003). \bold{If \code{X} is a point pattern}, then by default, the window containing the point pattern \code{X} is divided into an \code{nx * ny} grid of rectangular tiles or `quadrats'. (If the window is not a rectangle, then these tiles are intersected with the window.) The number of points of \code{X} falling in each quadrat is counted. These numbers are returned as a contingency table. If \code{xbreaks} is given, it should be a numeric vector giving the \eqn{x} coordinates of the quadrat boundaries. If it is not given, it defaults to a sequence of \code{nx+1} values equally spaced over the range of \eqn{x} coordinates in the window \code{X$window}. Similarly if \code{ybreaks} is given, it should be a numeric vector giving the \eqn{y} coordinates of the quadrat boundaries. 
It defaults to a vector of \code{ny+1} values equally spaced over the range of \eqn{y} coordinates in the window. The lengths of \code{xbreaks} and \code{ybreaks} may be different. Alternatively, quadrats of any shape may be used. The argument \code{tess} can be a tessellation (object of class \code{"tess"}) whose tiles will serve as the quadrats. The algorithm counts the number of points of \code{X} falling in each quadrat, and returns these counts as a contingency table. The return value is a \code{table} which can be printed neatly. The return value is also a member of the special class \code{"quadratcount"}. Plotting the object will display the quadrats, annotated by their counts. See the examples. \bold{If \code{X} is a split point pattern} (object of class \code{"splitppp"} then quadrat counting will be performed on each of the components point patterns, and the resulting contingency tables will be returned in a list. This list can be printed or plotted. Marks attached to the points are ignored by \code{quadratcount.ppp}. To obtain a separate contingency table for each type of point in a multitype point pattern, first separate the different points using \code{\link{split.ppp}}, then apply \code{quadratcount.splitppp}. See the Examples. } \note{ To perform a chi-squared test based on the quadrat counts, use \code{\link{quadrat.test}}. } \seealso{ \code{\link{quadrat.test}}, \code{\link{quadrats}}, \code{\link{quadratresample}}, \code{\link{miplot}} } \references{ Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 2003. Stoyan, D. and Stoyan, H. (1994) \emph{Fractals, random shapes and point fields: methods of geometrical statistics.} John Wiley and Sons. 
} \examples{ X <- runifpoint(50) quadratcount(X) quadratcount(X, 4, 5) quadratcount(X, xbreaks=c(0, 0.3, 1), ybreaks=c(0, 0.4, 0.8, 1)) qX <- quadratcount(X, 4, 5) # plotting: plot(X, pch="+") plot(qX, add=TRUE, col="red", cex=1.5, lty=2) # irregular window data(humberside) plot(humberside) qH <- quadratcount(humberside, 2, 3) plot(qH, add=TRUE, col="blue", cex=1.5, lwd=2) # multitype - split plot(quadratcount(split(humberside), 2, 3)) # quadrats determined by tessellation: B <- dirichlet(runifpoint(6)) qX <- quadratcount(X, tess=B) plot(X, pch="+") plot(qX, add=TRUE, col="red", cex=1.5, lty=2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/plot.fasp.Rd0000755000176000001440000001250612237642733015135 0ustar ripleyusers\name{plot.fasp} \alias{plot.fasp} \title{Plot a Function Array} \description{ Plots an array of summary functions, usually associated with a point pattern, stored in an object of class \code{"fasp"}. A method for \code{plot}. } \usage{ \method{plot}{fasp}(x,formule=NULL, \dots, subset=NULL, title=NULL, banner=TRUE, samex=FALSE, samey=FALSE, mar.panel=NULL, outerlabels=TRUE, cex.outerlabels=1.25, legend=FALSE) } \arguments{ \item{x}{An object of class \code{"fasp"} representing a function array. } \item{formule}{ A formula or list of formulae indicating what variables are to be plotted against what variable. Each formula is either an R language formula object, or a string that can be parsed as a formula. If \code{formule} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. If the formula is left as \code{NULL}, then \code{plot.fasp} attempts to use the component \code{default.formula} of \code{x}. If that component is NULL as well, it gives up. 
} \item{\dots}{ Arguments passed to \code{\link{plot.fv}} to control the individual plot panels. } \item{subset}{ A logical vector, or a vector of indices, or an expression or a character string, or a \bold{list} of such, indicating a subset of the data to be included in each plot. If \code{subset} is a list, its \eqn{k^{th}}{k-th} component should be applicable to the \eqn{(i,j)^{th}}{(i,j)-th} plot where \code{x$which[i,j]=k}. } \item{title}{ Overall title for the plot. } \item{banner}{ Logical. If \code{TRUE}, the overall title is plotted. If \code{FALSE}, the overall title is not plotted and no space is allocated for it. } \item{samex,samey}{ Logical values indicating whether all individual plot panels should have the same x axis limits and the same y axis limits, respectively. This makes it easier to compare the plots. } \item{mar.panel}{ Vector of length 4 giving the value of the graphics parameter \code{mar} controlling the size of plot margins for each individual plot panel. See \code{\link{par}}. } \item{outerlabels}{Logical. If \code{TRUE}, the row and column names of the array of functions are plotted in the margins of the array of plot panels. If \code{FALSE}, each individual plot panel is labelled by its row and column name. } \item{cex.outerlabels}{ Character expansion factor for row and column labels of array. } \item{legend}{ Logical flag determining whether to plot a legend in each panel. } } \details{ An object of class \code{"fasp"} represents an array of summary functions, usually associated with a point pattern. See \code{\link{fasp.object}} for details. Such an object is created, for example, by \code{\link{alltypes}}. The function \code{plot.fasp} is a method for \code{plot}. It calls \code{\link{plot.fv}} to plot the individual panels. For information about the interpretation of the arguments \code{formule} and \code{subset}, see \code{\link{plot.fv}}. 
Arguments that are often passed through \code{...} include \code{col} to control the colours of the different lines in a panel, and \code{lty} and \code{lwd} to control the line type and line width of the different lines in a panel. The argument \code{shade} can also be used to display confidence intervals or significance bands as filled grey shading. See \code{\link{plot.fv}}. The argument \code{title}, if present, will determine the overall title of the plot. If it is absent, it defaults to \code{x$title}. Titles for the individual plot panels will be taken from \code{x$titles}. } \value{None.} \section{Warnings}{ (Each component of) the \code{subset} argument may be a logical vector (of the same length as the vectors of data which are extracted from \code{x}), or a vector of indices, or an \bold{expression} such as \code{expression(r<=0.2)}, or a text string, such as \code{"r<=0.2"}. Attempting a syntax such as \code{subset = r<=0.2} (without wrapping \code{r<=0.2} either in quote marks or in \code{expression()}) will cause this function to fall over. Variables referred to in any formula must exist in the data frames stored in \code{x}. What the names of these variables are will of course depend upon the nature of \code{x}. } \seealso{ \code{\link{alltypes}}, \code{\link{plot.fv}}, \code{\link{fasp.object}} } \examples{ \dontrun{ # Bramble Canes data. data(bramblecanes) X.G <- alltypes(bramblecanes,"G",dataname="Bramblecanes",verb=TRUE) plot(X.G) plot(X.G,subset="r<=0.2") plot(X.G,formule=asin(sqrt(cbind(km,theo))) ~ asin(sqrt(theo))) plot(X.G,fo=cbind(km,theo) - theo~r,subset="r<=0.2") # Simulated data. 
pp <- runifpoint(350, owin(c(0,1),c(0,1))) pp <- pp \%mark\% factor(c(rep(1,50),rep(2,100),rep(3,200))) X.K <- alltypes(pp,"K",verb=TRUE,dataname="Fake Data") plot(X.K,fo=cbind(border,theo)~theo,subset="theo<=0.75") } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/plot.layered.Rd0000755000176000001440000000363312237642733015632 0ustar ripleyusers\name{plot.layered} \alias{plot.layered} \title{ Layered Plot } \description{ Generates a layered plot. The plot method for objects of class \code{"layered"}. } \usage{ \method{plot}{layered}(x, ..., which = NULL, plotargs = NULL) } \arguments{ \item{x}{ An object of class \code{"layered"} created by the function \code{\link{layered}}. } \item{\dots}{ Arguments to be passed to the \code{plot} method for \emph{every} layer. } \item{which}{ Subset index specifying which layers should be plotted. } \item{plotargs}{ Arguments to be passed to the \code{plot} methods for individual layers. A list of lists of arguments of the form \code{name=value}. } } \details{ Layering is a simple mechanism for controlling a high-level plot that is composed of several successive plots, for example, a background and a foreground plot. The layering mechanism makes it easier to plot, to switch on or off the plotting of each individual layer, and to control the plotting arguments that are passed to each layer. The layers of data to be plotted should first be converted into a single object of class \code{"layered"} using the function \code{\link{layered}}. Then the layers can be plotted using the method \code{plot.layered}. The subset operator \code{\link{[.layered}} can be used to zoom in on a subregion. } \value{ Null. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{layered}}, \code{\link{layerplotargs}}, \code{\link{[.layered}}, \code{\link{plot}}. } \examples{ data(cells) D <- distmap(cells) L <- layered(D, cells) plot(L) plot(L, which = 2) plot(L, plotargs=list(list(ribbon=FALSE), list(pch=3, cols="white"))) # plot a subregion plot(L[, square(0.5)]) } \keyword{spatial} \keyword{hplot} spatstat/man/osteo.Rd0000755000176000001440000001446312237642733014364 0ustar ripleyusers\name{osteo} \alias{osteo} \docType{data} \title{ Osteocyte Lacunae Data: Replicated Three-Dimensional Point Patterns } \description{ These data give the three-dimensional locations of osteocyte lacunae observed in rectangular volumes of solid bone using a confocal microscope. There were four samples of bone, and ten regions were mapped in each bone, yielding 40 spatial point patterns. The data can be regarded as replicated observations of a three-dimensional point process, nested within bone samples. } \usage{data(osteo)} \format{ A \code{\link{hyperframe}} with the following columns: \tabular{ll}{ \code{id} \tab character string identifier of bone sample \cr \code{shortid} \tab last numeral in \code{id} \cr \code{brick} \tab serial number (1 to 10) of sampling volume within this bone sample \cr \code{pts} \tab three dimensional point pattern (class \code{pp3}) \cr \code{depth} \tab the depth of the brick in microns } } \details{ These data are three-dimensional point patterns representing the positions of \emph{osteocyte lacunae}, holes in bone which were occupied by osteocytes (bone-building cells) during life. Observations were made on four different skulls of Macaque monkeys iusing a three-dimensional microscope. From each skull, observations were collected in 10 separate sampling volumes. In all, there are 40 three-dimensional point patterns in the dataset. 
The data were collected in 1984 by A. Baddeley, A. Boyde, C.V. Howard and S. Reid (see references) using the tandem-scanning reflected light microscope (TSRLM) at University College London. This was one of the first optical confocal microscopes available. Each point pattern dataset gives the \eqn{(x,y,z)} coordinates (in microns) of all points visible in a three-dimensional rectangular box (``brick'') of dimensions \eqn{81 \times 100 \times d}{81 * 100 * d} microns, where \eqn{d} varies. The \eqn{z} coordinate is depth into the bone (depth of the focal plane of the confocal microscope); the \eqn{(x,y)} plane is parallel to the exterior surface of the bone; the relative orientation of the \eqn{x} and \eqn{y} axes is not important. The bone samples were three intact skulls and one skull cap, all originally identified as belonging to the macaque monkey \emph{Macaca fascicularis}, from the collection of the Department of Anatomy, University of London. Later analysis (Baddeley et al, 1993) suggested that the skull cap, given here as the first animal, was a different subspecies, and this was confirmed by anatomical inspection. } \section{Sampling Procedure}{ The following extract from Baddeley et al (1987) describes the sampling procedure. The parietal bones of three fully articulated adult Macaque monkey \emph{(Macaca fascicularis)} skulls from the collection of University College London were used. The right parietal bone was examined, in each case, approximately 1 cm lateral to the sagittal suture and 2 cm posterior to the coronal suture. The skulls were mounted on plasticine on a moving stage placed beneath the TSRLM. Immersion oil was applied and a \eqn{\times 60}{X 60}, NA 1.0 oil immersion objective lens (Lomo) was focussed at 10 microns below the cranial surface. The TV image was produced by a Panasonic WB 1850/B camera on a Sony PVM 90CE TV monitor. 
A graduated rectangular counting frame \eqn{90 \times 110}{90 * 110} mm (representing \eqn{82 \times 100}{82 * 100} microns in real units) was marked on a Perspex overlay and fixed to the screen. The area of tissue seen within the frame defined a subfield: a guard area of 10 mm width was visible on all sides of the frame. Ten subfields were examined, arranged approximately in a rectangular grid pattern, with at least one field width separating each pair of fields. The initial field position was determined randomly by applying a randomly-generated coordinate shift to the moving stage. Subsequent fields were attained using the coarse controls of the microscope stage, in accordance with the rectangular grid pattern. For each subfield, the focal plane was racked down from its initial 10 micron depth until all visible osteocyte lacunae had been examined. This depth \eqn{d} was recorded. The 3-dimensional sampling volume was therefore a rectangular box of dimensions \eqn{82 \times 100 \times d}{82 * 100 * d} microns, called a ``brick''. For each visible lacuna, the fine focus racking control was adjusted until maximum brightness was obtained. The depth of the focal plane was then recorded as the \eqn{z} coordinate of the ``centre point'' of the lacuna. Without moving the focal plane, the \eqn{x} and \eqn{y} coordinates of the centre of the lacunar image were read off the graduated counting frame. This required a subjective judgement of the position of the centre of the 2-dimensional image. Profiles were approximately elliptical and the centre was considered to be well-defined. Accuracy of the recording procedure was tested by independent repetition (by the same operator and by different operators) and found to be reproducible to plus or minus 2 mm on the screen. A lacuna was counted only if its \eqn{(x, y)} coordinates lay inside the \eqn{90 \times 110}{90 * 110} mm counting frame. } \source{ Adrian Baddeley. } \references{ Baddeley, A.J., Howard, C.V., Boyde, A. 
and Reid, S.A. (1987) Three dimensional analysis of the spatial distribution of particles using the tandem-scanning reflected light microscope. \emph{Acta Stereologica} \bold{6} (supplement II) 87--100. Baddeley, A.J., Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42} (1993) 641--668. Howard, C.V. and Reid, S. and Baddeley, A.J. and Boyde, A. (1985) Unbiased estimation of particle density in the tandem-scanning reflected light microscope. \emph{Journal of Microscopy} \bold{138} 203--212. } \examples{ data(osteo) osteo \dontrun{ plot(osteo$pts[[1]], main="animal 1, brick 1") ape1 <- osteo[osteo$shortid==4, ] plot(ape1, tick.marks=FALSE) with(osteo, summary(pts)$intensity) plot(with(ape1, K3est(pts))) } } \keyword{datasets} spatstat/man/lut.Rd0000755000176000001440000000642612237642733014037 0ustar ripleyusers\name{lut} \alias{lut} \title{Lookup Tables} \description{ Create a lookup table. } \usage{ lut(outputs, ..., range=NULL, breaks=NULL, inputs=NULL) } \arguments{ \item{outputs}{Vector of output values} \item{\dots}{Ignored.} \item{range}{ Interval of numbers to be mapped. A numeric vector of length 2, specifying the ends of the range of values to be mapped. Incompatible with \code{breaks} or \code{inputs}. } \item{inputs}{ Input values to which the output values are associated. A factor or vector of the same length as \code{outputs}. Incompatible with \code{breaks} or \code{range}. } \item{breaks}{ Breakpoints for the lookup table. A numeric vector of length equal to \code{length(outputs)+1}. Incompatible with \code{range} or \code{inputs}. } } \details{ A lookup table is a function, mapping input values to output values. The command \code{lut} creates an object representing a lookup table, which can then be used to control various behaviour in the \pkg{spatstat} package. It can also be used to compute the output value assigned to any input value. 
The argument \code{outputs} specifies the output values to which input data values will be mapped. It should be a vector of any atomic type (e.g. numeric, logical, character, complex) or factor values. Exactly one of the arguments \code{range}, \code{inputs} or \code{breaks} must be specified by name. If \code{inputs} is given, then it should be a vector or factor, of the same length as \code{outputs}. The entries of \code{inputs} can be any atomic type (e.g. numeric, logical, character, complex) or factor values. The resulting lookup table associates the value \code{inputs[i]} with the value \code{outputs[i]}. If \code{range} is given, then it determines the interval of the real number line that will be mapped. It should be a numeric vector of length 2. If \code{breaks} is given, then it determines intervals of the real number line which are mapped to each output value. It should be a numeric vector, of length at least 2, with entries that are in increasing order. Infinite values are allowed. Any number in the range between \code{breaks[i]} and \code{breaks[i+1]} will be mapped to the value \code{outputs[i]}. The result is an object of class \code{"lut"}. There is a \code{print} method for this class. Some plot commands in the \pkg{spatstat} package accept an object of this class as a specification of a lookup table. The result is also a function \code{f} which can be used to compute the output value assigned to any input data value. That is, \code{f(x)} returns the output value assigned to \code{x}. This also works for vectors of input data values. } \value{ A function, which is also an object of class \code{"lut"}. } \seealso{ \code{\link{colourmap}}. 
} \examples{ # lookup table for real numbers, using breakpoints cr <- lut(factor(c("low", "medium", "high")), breaks=c(0,5,10,15)) cr cr(3.2) cr(c(3,5,7)) # lookup table for discrete set of values ct <- lut(c(0,1), inputs=c(FALSE, TRUE)) ct(TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/density.ppp.Rd0000755000176000001440000002515112237642732015503 0ustar ripleyusers\name{density.ppp} \alias{density.ppp} \title{Kernel Smoothed Intensity of Point Pattern} \description{ Compute a kernel smoothed intensity function from a point pattern. } \usage{ \method{density}{ppp}(x, sigma, \dots, weights, edge=TRUE, varcov=NULL, at="pixels", leaveoneout=TRUE, adjust=1, diggle=FALSE) } \arguments{ \item{x}{ Point pattern (object of class \code{"ppp"}). } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. Either a numerical value, or a function that computes an appropriate value of \code{sigma}. } \item{weights}{ Optional vector or matrix of weights to be attached to the points. May include negative values. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{edge}{ Logical flag: if \code{TRUE}, apply edge correction. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel. Incompatible with \code{sigma}. } \item{at}{ String specifying whether to compute the intensity values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{x} (\code{at="points"}). } \item{leaveoneout}{ Logical value indicating whether to compute a leave-one-out estimator. Applicable only when \code{at="points"}. } \item{adjust}{ Optional. Adjustment factor for the smoothing parameter. } \item{diggle}{ Logical. 
If \code{TRUE}, use Diggle's edge correction, which is more accurate but slower to compute than the correction described under Details. } } \value{ By default, the result is a pixel image (object of class \code{"im"}). Pixel values are estimated intensity values, expressed in \dQuote{points per unit area}. If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{x}. Values are estimated intensity values at the points of \code{x}. In either case, the return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. If \code{weights} is a matrix with more than one column, then the result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). } \details{ This is a method for the generic function \code{density}. It computes a fixed-bandwidth kernel estimate (Diggle, 1985) of the intensity function of the point process that generated the point pattern \code{x}. By default it computes the convolution of the isotropic Gaussian kernel of standard deviation \code{sigma} with point masses at each of the data points in \code{x}. Anisotropic Gaussian kernels are also supported. Each point has unit weight, unless the argument \code{weights} is given (it should be a numeric vector or matrix; weights can be negative or zero). If \code{edge=TRUE}, the intensity estimate is corrected for edge effect bias in one of two ways: \itemize{ \item If \code{diggle=FALSE} (the default) the intensity estimate is corrected by dividing it by the convolution of the Gaussian kernel with the window of observation. Thus the intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = e(u) \sum_i k(x_i - u) w_i }{ lambda(u) = e(u) sum[i] k(x[i] - u) w[i] } where \eqn{k} is the Gaussian smoothing kernel, \eqn{e(u)} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. \item If \code{diggle=TRUE} then the method of Diggle (1985) is followed exactly. 
The intensity value at a point \eqn{u} is \deqn{ \hat\lambda(u) = \sum_i k(x_i - u) w_i e(x_i) }{ lambda(u) = sum[i] k(x[i] - u) w[i] e(x[i]) } where again \eqn{k} is the Gaussian smoothing kernel, \eqn{e(x_i)}{e(x[i])} is an edge correction factor, and \eqn{w_i}{w[i]} are the weights. This computation is slightly slower but more accurate. } In both cases, the edge correction term \eqn{e(u)} is the reciprocal of the kernel mass inside the window: \deqn{ \frac{1}{e(u)} = \int_W k(v-u) \, {\rm d}v }{ 1/e(u) = integral[v in W] k(v-u) dv } where \eqn{W} is the observation window. The smoothing kernel is determined by the arguments \code{sigma}, \code{varcov} and \code{adjust}. \itemize{ \item if \code{sigma} is a single numerical value, this is taken as the standard deviation of the isotropic Gaussian kernel. \item alternatively \code{sigma} may be a function that computes an appropriate bandwidth for the isotropic Gaussian kernel from the data point pattern by calling \code{sigma(x)}. To perform automatic bandwidth selection using cross-validation, it is recommended to use the functions \code{\link{bw.diggle}} or \code{\link{bw.ppl}}. \item The smoothing kernel may be chosen to be any Gaussian kernel, by giving the variance-covariance matrix \code{varcov}. The arguments \code{sigma} and \code{varcov} are incompatible. \item Alternatively \code{sigma} may be a vector of length 2 giving the standard deviations of two independent Gaussian coordinates, thus equivalent to \code{varcov = diag(rep(sigma^2, 2))}. \item if neither \code{sigma} nor \code{varcov} is specified, an isotropic Gaussian kernel will be used, with a default value of \code{sigma} calculated by a simple rule of thumb that depends only on the size of the window. \item The argument \code{adjust} makes it easy for the user to change the bandwidth specified by any of the rules above. The value of \code{sigma} will be multiplied by the factor \code{adjust}. 
The matrix \code{varcov} will be multiplied by \code{adjust^2}. To double the smoothing bandwidth, set \code{adjust=2}. } By default the intensity values are computed at every location \eqn{u} in a fine grid, and are returned as a pixel image. Computation is performed using the Fast Fourier Transform. Accuracy depends on the pixel resolution, controlled by the arguments \code{\dots} passed to \code{\link{as.mask}}. If \code{at="points"}, the intensity values are computed to high accuracy at the points of \code{x} only. Computation is performed by directly evaluating and summing the Gaussian kernel contributions without discretising the data. The result is a numeric vector giving the density values. The intensity value at a point \eqn{x_i}{x[i]} is (if \code{diggle=FALSE}) \deqn{ \hat\lambda(x_i) = e(x_i) \sum_j k(x_j - x_i) w_j }{ lambda(x[i]) = e(x[i]) sum[j] k(x[j] - x[i]) w[j] } or (if \code{diggle=TRUE}) \deqn{ \hat\lambda(x_i) = \sum_j k(x_j - x_i) w_j e(x_j) }{ lambda(x[i]) = sum[j] k(x[j] - x[i]) w[j] e(x[j]) } If \code{leaveoneout=TRUE} (the default), then the sum in the equation is taken over all \eqn{j} not equal to \eqn{i}, so that the intensity value at a data point is the sum of kernel contributions from all \emph{other} data points. If \code{leaveoneout=FALSE} then the sum is taken over all \eqn{j}, so that the intensity value at a data point includes a contribution from the same point. If \code{weights} is a matrix with more than one column, then the calculation is effectively repeated for each column of weights. The result is a list of images (if \code{at="pixels"}) or a matrix of numerical values (if \code{at="points"}). To select the bandwidth \code{sigma} automatically by cross-validation, use \code{\link{bw.diggle}} or \code{\link{bw.ppl}}. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. For adaptive nonparametric estimation, see \code{\link{adaptive.density}}. 
For data sharpening, see \code{\link{sharpen.ppp}}. To compute a relative risk surface or probability map for two (or more) types of points, use \code{\link{relrisk}}. } \seealso{ \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{Smooth.ppp}}, \code{\link{sharpen.ppp}}, \code{\link{adaptive.density}}, \code{\link{relrisk}}, \code{\link{ppp.object}}, \code{\link{im.object}} } \note{ This function is often misunderstood. The result of \code{density.ppp} is not a spatial smoothing of the marks or weights attached to the point pattern. To perform spatial interpolation of values that were observed at the points of a point pattern, use \code{\link{Smooth.ppp}}. The result of \code{density.ppp} is not a probability density. It is an estimate of the \emph{intensity function} of the point process that generated the point pattern data. Intensity is the expected number of random points per unit area. The units of intensity are \dQuote{points per unit area}. Intensity is usually a function of spatial location, and it is this function which is estimated by \code{density.ppp}. The integral of the intensity function over a spatial region gives the expected number of points falling in this region. Inspecting an estimate of the intensity function is usually the first step in exploring a spatial point pattern dataset. For more explanation, see the workshop notes (Baddeley, 2008) or Diggle (2003). If you have two (or more) types of points, and you want a probability map or relative risk surface (the spatially-varying probability of a given type), use \code{\link{relrisk}}. 
} \examples{ data(cells) if(interactive()) { opa <- par(mfrow=c(1,2)) plot(density(cells, 0.05)) plot(density(cells, 0.05, diggle=TRUE)) par(opa) v <- diag(c(0.05, 0.07)^2) plot(density(cells, varcov=v)) } \testonly{ Z <- density(cells, 0.05) Z <- density(cells, 0.05, diggle=TRUE) Z <- density(cells, varcov=diag(c(0.05^2, 0.07^2))) Z <- density(cells, 0.05, weights=data.frame(a=1:42,b=42:1)) } # automatic bandwidth selection plot(density(cells, sigma=bw.diggle(cells))) # equivalent: plot(density(cells, bw.diggle)) # evaluate intensity at points density(cells, 0.05, at="points") } \references{ Baddeley, A. (2010) Analysing spatial point patterns in R. Workshop notes. CSIRO online technical publication. URL: \code{www.uwa.edu.au/resources/pf16h.html} Diggle, P.J. (1985) A kernel method for smoothing point process data. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C) \bold{34} (1985) 138--147. Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/linearKdot.inhom.Rd0000644000176000001440000001011612237642732016423 0ustar ripleyusers\name{linearKdot.inhom} \alias{linearKdot.inhom} \title{ Inhomogeneous multitype K Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype \eqn{K} function which counts the expected number of points (of any type) within a given distance of a point of type \eqn{i}. } \usage{ linearKdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot type \eqn{K} function \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed. 
An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdadot}{ Intensity values for all points of \code{X}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{lambdaI} and \code{lambdadot} if they are functions. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link[spatstat]{Kdot.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. 
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link[spatstat]{linearK}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) } K <- linearKdot.inhom(chicago, "assault", lamI, lam.) \dontrun{ fit <- lppm(chicago, ~marks + x) linearKdot.inhom(chicago, "assault", fit, fit) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/adaptive.density.Rd0000755000176000001440000000573212237642732016504 0ustar ripleyusers\name{adaptive.density} \alias{adaptive.density} \title{Intensity Estimate of Point Pattern Using Tessellation} \description{ Computes an adaptive estimate of the intensity function of a point pattern. } \usage{ adaptive.density(X, f = 0.1, ..., nrep = 1) } \arguments{ \item{X}{Point pattern dataset (object of class \code{"ppp"}).} \item{f}{Fraction (between 0 and 1) of the data points that will be removed from the data and used to determine a tessellation for the intensity estimate. 
} \item{\dots}{Arguments passed to \code{\link{as.im}} determining the pixel resolution of the result. } \item{nrep}{Number of independent repetitions of the randomised procedure.} } \details{ This function is an alternative to \code{\link{density.ppp}}. It computes an estimate of the intensity function of a point pattern dataset. The dataset \code{X} is randomly split into two patterns \code{A} and \code{B} containing a fraction \code{f} and \code{1-f}, respectively, of the original data. The subpattern \code{A} is used to construct a Dirichlet tessellation (see \code{\link{dirichlet}}). The subpattern \code{B} is retained for counting. For each tile of the Dirichlet tessellation, we count the number of points of \code{B} falling in the tile, and divide by the area of the same tile, to obtain an estimate of the intensity of the pattern \code{B} in the tile. This estimate is divided by \code{1-f} to obtain an estimate of the intensity of \code{X} in the tile. The result is a pixel image of intensity estimates which are constant on each tile of the tessellation. If \code{nrep} is greater than 1, this randomised procedure is repeated \code{nrep} times, and the results are averaged. This technique has been used by Ogata et al. (2003), Ogata (2004) and Baddeley (2007). } \value{ A pixel image (object of class \code{"im"}) whose values are estimates of the intensity of \code{X}. } \seealso{ \code{\link{density.ppp}}, \code{\link{dirichlet}}, \code{\link{im.object}}. } \references{ Baddeley, A. (2007) Validation of statistical models for spatial point patterns. In J.G. Babu and E.D. Feigelson (eds.) \emph{SCMA IV: Statistical Challenges in Modern Astronomy IV}, volume 317 of Astronomical Society of the Pacific Conference Series, San Francisco, California USA, 2007. Pages 22--38. Ogata, Y. (2004) Space-time model for regional seismicity and detection of crustal stress changes. \emph{Journal of Geophysical Research}, \bold{109}, 2004. Ogata, Y., Katsura, K. 
and Tanemura, M. (2003). Modelling heterogeneous space-time occurrences of earthquake and its residual analysis. \emph{Applied Statistics} \bold{52} 499--509. } \examples{ \dontrun{ data(nztrees) plot(adaptive.density(nztrees)) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/Emark.Rd0000755000176000001440000001406412237642731014265 0ustar ripleyusers\name{Emark} \alias{Emark} \alias{Vmark} \title{ Diagnostics for random marking } \description{ Estimate the summary functions \eqn{E(r)} and \eqn{V(r)} for a marked point pattern, proposed by Schlather et al (2004) as diagnostics for dependence between the points and the marks. } \usage{ Emark(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) Vmark(X, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. The pattern should have numeric marks. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. 
} \item{normalise}{ If\code{TRUE}, normalise the estimate of \eqn{E(r)} or \eqn{V(r)} so that it would have value equal to 1 if the marks are independent of the points. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{E(r)} or \eqn{V(r)} has been estimated } \item{theo}{the theoretical, constant value of \eqn{E(r)} or \eqn{V(r)} when the marks attached to different points are independent } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{E(r)} or \eqn{V(r)} obtained by the edge corrections named. } \details{ For a marked point process, Schlather et al (2004) defined the functions \eqn{E(r)} and \eqn{V(r)} to be the conditional mean and conditional variance of the mark attached to a typical random point, given that there exists another random point at a distance \eqn{r} away from it. More formally, \deqn{ E(r) = E_{0u}[M(0)] }{ E(r) = E[0u] M(0) } and \deqn{ V(r) = E_{0u}[(M(0)-E(u))^2] }{ V(r) = E[0u]((M(0)-E(u))^2) } where \eqn{E_{0u}}{E[0u]} denotes the conditional expectation given that there are points of the process at the locations \eqn{0} and \eqn{u} separated by a distance \eqn{r}, and where \eqn{M(0)} denotes the mark attached to the point \eqn{0}. These functions may serve as diagnostics for dependence between the points and the marks. If the points and marks are independent, then \eqn{E(r)} and \eqn{V(r)} should be constant (not depending on \eqn{r}). See Schlather et al (2004). The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern with numeric marks. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{k_f(r)}{k[f](r)} is estimated. 
This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The numerator and denominator of the mark correlation function (in the expression above) are estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } } \references{ Schlather, M. and Ribeiro, P. and Diggle, P. (2004) Detecting dependence between marks and locations of marked point processes. \emph{Journal of the Royal Statistical Society, series B} \bold{66} (2004) 79-83. } \seealso{ Mark correlation \code{\link{markcorr}}, mark variogram \code{\link{markvario}} for numeric marks. 
Mark connection function \code{\link{markconnect}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}} for factor-valued marks. } \examples{ data(spruces) plot(Emark(spruces)) E <- Emark(spruces, method="density", kernel="epanechnikov") plot(Vmark(spruces)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/fasp.object.Rd0000755000176000001440000000640512237642732015425 0ustar ripleyusers\name{fasp.object} \alias{fasp.object} %DoNotExport \title{Function Arrays for Spatial Patterns} \description{ A class \code{"fasp"} to represent a \dQuote{matrix} of functions, amenable to plotting as a matrix of plot panels. } \details{ An object of this class is a convenient way of storing (and later plotting, editing, etc) a set of functions \eqn{f_{i,j}(r)}{f[i,j](r)} of a real argument \eqn{r}, defined for each possible pair \eqn{(i,j)} of indices \eqn{1 \le i,j \le n}{1 <= i,j <= n}. We may think of this as a matrix or array of functions \eqn{f_{i,j}}{f[i,j]}. Function arrays are particularly useful in the analysis of a multitype point pattern (a point pattern in which the points are identified as belonging to separate types). We may want to compute a summary function for the points of type \eqn{i} only, for each of the possible types \eqn{i}. This produces a \eqn{1 \times m}{1 * m} array of functions. Alternatively we may compute a summary function for each possible pair of types \eqn{(i,j)}. This produces an \eqn{m \times m}{m * m} array of functions. For multitype point patterns the command \code{\link{alltypes}} will compute arrays of summary functions for each possible type or for each possible pair of types. The function \code{\link{alltypes}} returns an object of class \code{"fasp"}. 
An object of class \code{"fasp"} is a list containing at least the following components: \describe{ \item{fns}{ A list of data frames, each representing one of the functions. } \item{which}{ A matrix representing the spatial arrangement of the functions. If \code{which[i,j] = k} then the function represented by \code{fns[[k]]} should be plotted in the panel at position \eqn{(i,j)}. If \code{which[i,j] = NA} then nothing is plotted in that position. } \item{titles}{ A list of character strings, providing suitable plotting titles for the functions. } \item{default.formulae}{ A list of default formulae for plotting each of the functions. } \item{title}{ A character string, giving a default title for the array when it is plotted. } } } \section{Functions available}{ There are methods for \code{plot}, \code{print} and \code{"["} for this class. The plot method displays the entire array of functions. The method \code{\link{[.fasp}} selects a sub-array using the natural indices \code{i,j}. The command \code{\link{eval.fasp}} can be used to apply a transformation to each function in the array, and to combine two arrays. 
} \seealso{ \code{\link{alltypes}}, \code{\link{plot.fasp}}, \code{\link{[.fasp}}, \code{\link{eval.fasp}} } \examples{ # multitype point pattern data(amacrine) GG <- alltypes(amacrine, "G") plot(GG) # select the row corresponding to cells of type "on" Gon <- GG["on", ] plot(Gon) # extract the G function for i = "on", j = "off" Gonoff <- GG["on", "off", drop=TRUE] # Fisher variance stabilising transformation GGfish <- eval.fasp(asin(sqrt(GG))) plot(GGfish) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{attribute} spatstat/man/rmhmodel.default.Rd0000755000176000001440000005126512237642734016467 0ustar ripleyusers\name{rmhmodel.default} \alias{rmhmodel.default} \title{Build Point Process Model for Metropolis-Hastings Simulation.} \description{ Builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{default}(..., cif=NULL, par=NULL, w=NULL, trend=NULL, types=NULL) } \arguments{ \item{\dots}{Ignored.} \item{cif}{Character string specifying the choice of model} \item{par}{Parameters of the model} \item{w}{Spatial window in which to simulate} \item{trend}{Specification of the trend in the model} \item{types}{A vector of factor levels defining the possible marks, for a multitype process. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.default} is the default method. 
It builds a description of the point process model from the simple arguments listed. The argument \code{cif} is a character string specifying the choice of interpoint interaction for the point process. The current options are \describe{ \item{\code{'areaint'}}{Area-interaction process.} \item{\code{'badgey'}}{Baddeley-Geyer (hybrid Geyer) process.} \item{\code{'dgs'}}{Diggle, Gates and Stibbard (1987) process} \item{\code{'diggra'}}{Diggle and Gratton (1984) process} \item{\code{'fiksel'}}{Fiksel double exponential process (Fiksel, 1984).} \item{\code{'geyer'}}{Saturation process (Geyer, 1999).} \item{\code{'hardcore'}}{Hard core process} \item{\code{'lennard'}}{Lennard-Jones process} \item{\code{'lookup'}}{General isotropic pairwise interaction process, with the interaction function specified via a ``lookup table''.} \item{\code{'multihard'}}{Multitype hardcore process} \item{\code{'strauss'}}{The Strauss process} \item{\code{'straush'}}{The Strauss process with hard core} \item{\code{'sftcr'}}{The Softcore process} \item{\code{'straussm'}}{ The multitype Strauss process} \item{\code{'straushm'}}{Multitype Strauss process with hard core} \item{\code{'triplets'}}{Triplets process (Geyer, 1999).} } It is also possible to specify a \emph{hybrid} of these interactions in the sense of Baddeley et al (2013). In this case, \code{cif} is a character vector containing names from the list above. For example, \code{cif=c('strauss', 'geyer')} would specify a hybrid of the Strauss and Geyer models. The argument \code{par} supplies parameter values appropriate to the conditional intensity function being invoked. For the interactions listed above, these parameters are: \describe{ \item{areaint:}{ (Area-interaction process.) A \bold{named} list with components \code{beta,eta,r} which are respectively the ``base'' intensity, the scaled interaction parameter and the interaction radius. } \item{badgey:}{ (Baddeley-Geyer process.) 
A \bold{named} list with components \code{beta} (the ``base'' intensity), \code{gamma} (a vector of non-negative interaction parameters), \code{r} (a vector of interaction radii, of the same length as \code{gamma}, in \emph{increasing} order), and \code{sat} (the saturation parameter(s); this may be a scalar, or a vector of the same length as \code{gamma} and \code{r}; all values should be at least 1). Note that because of the presence of ``saturation'' the \code{gamma} values are permitted to be larger than 1. } \item{dgs:}{ (Diggle, Gates, and Stibbard process. See Diggle, Gates, and Stibbard (1987)) A \bold{named} list with components \code{beta} and \code{rho}. This process has pairwise interaction function equal to \deqn{ e(t) = \sin^2\left(\frac{\pi t}{2\rho}\right) }{ e(t) = sin^2((pi * t)/(2 * rho)) } for \eqn{t < \rho}{t < rho}, and equal to 1 for \eqn{t \ge \rho}{t >= rho}. } \item{diggra:}{ (Diggle-Gratton process. See Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987).) A \bold{named} list with components \code{beta}, \code{kappa}, \code{delta} and \code{rho}. This process has pairwise interaction function \eqn{e(t)} equal to 0 for \eqn{t < \delta}{t < delta}, equal to \deqn{ \left(\frac{t-\delta}{\rho-\delta}\right)^\kappa }{ ((t-delta)/(rho-delta))^kappa } for \eqn{\delta \le t < \rho}{delta <= t < rho}, and equal to 1 for \eqn{t \ge \rho}{t >= rho}. Note that here we use the symbol \eqn{\kappa}{kappa} where Diggle, Gates, and Stibbard use \eqn{\beta}{beta} since we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. } \item{fiksel:}{ (Fiksel double exponential process, see Fiksel (1984)) A \bold{named} list with components \code{beta}, \code{r}, \code{hc}, \code{kappa} and \code{a}. This process has pairwise interaction function \eqn{e(t)} equal to 0 for \eqn{t < hc}, equal to \deqn{ \exp(a \exp(- \kappa t)) }{ exp(a * exp( - kappa * t)) } for \eqn{hc \le t < r}{hc <= t < r}, and equal to 1 for \eqn{t \ge r}{t >= r}. 
} \item{geyer:}{ (Geyer's saturation process. See Geyer (1999).) A \bold{named} list with components \code{beta}, \code{gamma}, \code{r}, and \code{sat}. The components \code{beta}, \code{gamma}, \code{r} are as for the Strauss model, and \code{sat} is the ``saturation'' parameter. The model is Geyer's ``saturation'' point process model, a modification of the Strauss process in which we effectively impose an upper limit (\code{sat}) on the number of neighbours which will be counted as close to a given point. Explicitly, a saturation point process with interaction radius \eqn{r}, saturation threshold \eqn{s}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{\beta \gamma^{\min(s, t(x_i,X))}}{beta gamma^min(s,t(x[i],X))} to the probability density of the point pattern, where \eqn{t(x_i,X)}{t(x[i],X)} denotes the number of ``\eqn{r}-close neighbours'' of \eqn{x_i}{x[i]} in the pattern \eqn{X}. If the saturation threshold \eqn{s} is infinite, the Geyer process reduces to a Strauss process with interaction parameter \eqn{\gamma^2}{gamma^2} rather than \eqn{\gamma}{gamma}. } \item{hardcore:}{ (Hard core process.) A \bold{named} list with components \code{beta} and \code{hc} where \code{beta} is the base intensity and \code{hc} is the hard core distance. This process has pairwise interaction function \eqn{e(t)} equal to 1 if \eqn{t > hc} and 0 if \eqn{t <= hc}. } \item{lennard:}{ (Lennard-Jones process.) A \bold{named} list with components \code{sigma} and \code{epsilon}, where \code{sigma} is the characteristic diameter and \code{epsilon} is the well depth. See \code{\link{LennardJones}} for explanation. } \item{multihard:}{ (Multitype hard core process.) 
A \bold{named} list with components \code{beta} and \code{hradii}, where \code{beta} is a vector of base intensities for each type of point, and \code{hradii} is a matrix of hard core radii between each pair of types. } \item{strauss:}{ (Strauss process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the pairwise interaction parameter and the interaction radius. Note that \code{gamma} must be less than or equal to 1. (Note that there is also an algorithm for perfect simulation of the Strauss process, \code{\link{rStrauss}}) } \item{straush:}{ (Strauss process with hardcore.) A \bold{named} list with entries \code{beta,gamma,r,hc} where \code{beta}, \code{gamma}, and \code{r} are as for the Strauss process, and \code{hc} is the hardcore radius. Of course \code{hc} must be less than \code{r}. } \item{sftcr:}{ (Softcore process.) A \bold{named} list with components \code{beta,sigma,kappa}. Again \code{beta} is a ``base'' intensity. The pairwise interaction between two points \eqn{u \neq v}{u != v} is \deqn{ \exp \left \{ - \left ( \frac{\sigma}{||u-v||} \right )^{2/\kappa} \right \} }{-(sigma/||u-v||)^(2/kappa)} Note that it is necessary that \eqn{0 < \kappa < 1}{0 < kappa <1}. } \item{straussm:}{ (Multitype Strauss process.) A \bold{named} list with components \itemize{ \item \code{beta}: A vector of ``base'' intensities, one for each possible type. \item \code{gamma}: A \bold{symmetric} matrix of interaction parameters, with \eqn{\gamma_{ij}}{gamma_ij} pertaining to the interaction between type \eqn{i} and type \eqn{j}. \item \code{radii}: A \bold{symmetric} matrix of interaction radii, with entries \eqn{r_{ij}}{r_ij} pertaining to the interaction between type \eqn{i} and type \eqn{j}. } } \item{straushm:}{ (Multitype Strauss process with hardcore.) 
A \bold{named} list with components \code{beta} and \code{gamma} as for \code{straussm} and \bold{two} ``radii'' components: \itemize{ \item \code{iradii}: the interaction radii \item \code{hradii}: the hardcore radii } which are both symmetric matrices of nonnegative numbers. The entries of \code{hradii} must be less than the corresponding entries of \code{iradii}. } \item{triplets:}{ (Triplets process.) A \bold{named} list with components \code{beta,gamma,r} which are respectively the ``base'' intensity, the triplet interaction parameter and the interaction radius. Note that \code{gamma} must be less than or equal to 1. } \item{lookup:}{ (Arbitrary pairwise interaction process with isotropic interaction.) A \bold{named} list with components \code{beta}, \code{r}, and \code{h}, or just with components \code{beta} and \code{h}. This model is the pairwise interaction process with an isotropic interaction given by any chosen function \eqn{H}. Each pair of points \eqn{x_i, x_j}{x[i], x[j]} in the point pattern contributes a factor \eqn{H(d(x_i, x_j))}{H(d(x[i],x[j]))} to the probability density, where \eqn{d} denotes distance and \eqn{H} is the pair interaction function. The component \code{beta} is a (positive) scalar which determines the ``base'' intensity of the process. In this implementation, \eqn{H} must be a step function. It is specified by the user in one of two ways. \itemize{ \item \bold{as a vector of values:} If \code{r} is present, then \code{r} is assumed to give the locations of jumps in the function \eqn{H}, while the vector \code{h} gives the corresponding values of the function. Specifically, the interaction function \eqn{H(t)} takes the value \code{h[1]} for distances \eqn{t} in the interval \code{[0, r[1])}; takes the value \code{h[i]} for distances \eqn{t} in the interval \code{[r[i-1], r[i])} where \eqn{i = 2,\ldots, n}{i = 2, ..., n}; and takes the value 1 for \eqn{t \ge r[n]}{t >= r[n]}. Here \eqn{n} denotes the length of \code{r}. 
The components \code{r} and \code{h} must be numeric vectors of equal length. The \code{r} values must be strictly positive, and sorted in increasing order. The entries of \code{h} must be non-negative. If any entry of \code{h} is greater than 1, then the entry \code{h[1]} must be 0 (otherwise the specified process is non-existent). Greatest efficiency is achieved if the values of \code{r} are equally spaced. [\bold{Note:} The usage of \code{r} and \code{h} has \emph{changed} from the previous usage in \pkg{spatstat} versions 1.4-7 to 1.5-1, in which ascending order was not required, and in which the first entry of \code{r} had to be 0.] \item \bold{as a stepfun object:} If \code{r} is absent, then \code{h} must be an object of class \code{"stepfun"} specifying a step function. Such objects are created by \code{\link{stepfun}}. The stepfun object \code{h} must be right-continuous (which is the default using \code{\link{stepfun}}.) The values of the step function must all be nonnegative. The values must all be less than 1 unless the function is identically zero on some initial interval \eqn{[0,r)}. The rightmost value (the value of \code{h(t)} for large \code{t}) must be equal to 1. Greatest efficiency is achieved if the jumps (the ``knots'' of the step function) are equally spaced. } } } For a hybrid model, the argument \code{par} should be a list, of the same length as \code{cif}, such that \code{par[[i]]} is a list of the parameters required for the interaction \code{cif[i]}. See the Examples. The optional argument \code{trend} determines the spatial trend in the model, if it has one. It should be a function or image (or a list of such, if the model is multitype) to provide the value of the trend at an arbitrary point. \describe{ \item{trend given as a function:}{A trend function may be a function of any number of arguments, but the first two must be the \eqn{x,y} coordinates of a point. 
Auxiliary arguments may be passed to the \code{trend} function at the time of simulation, via the \code{\dots} argument to \code{\link{rmh}}. The function \bold{must} be \bold{vectorized}. That is, it must be capable of accepting vector valued \code{x} and \code{y} arguments. Put another way, it must be capable of calculating the trend value at a number of points, simultaneously, and should return the \bold{vector} of corresponding trend values. } \item{trend given as an image:}{ An image (see \code{\link{im.object}}) provides the trend values at a grid of points in the observation window and determines the trend value at other points as the value at the nearest grid point. } } Note that the trend or trends must be \bold{non-negative}; no checking is done for this. The optional argument \code{w} specifies the window in which the pattern is to be generated. If specified, it must be in a form which can be coerced to an object of class \code{owin} by \code{\link{as.owin}}. The optional argument \code{types} specifies the possible types in a multitype point process. If the model being simulated is multitype, and \code{types} is not specified, then this vector defaults to \code{1:ntypes} where \code{ntypes} is the number of types. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. \url{http://www.jstatsoft.org/v55/i11/} Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Fiksel, T. 
(1984) Estimation of parameterized pair potentials of marked and non-marked Gibbsian point processes. \emph{Electronische Informationsverabeitung und Kybernetika} \bold{20}, 270--278. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \section{Warnings in Respect of ``lookup''}{ For the \code{lookup} cif, the entries of the \code{r} component of \code{par} must be \emph{strictly positive} and sorted into ascending order. Note that if you specify the \code{lookup} pairwise interaction function via \code{\link{stepfun}()} the arguments \code{x} and \code{y} which are passed to \code{stepfun()} are slightly different from \code{r} and \code{h}: \code{length(y)} is equal to \code{1+length(x)}; the final entry of \code{y} must be equal to 1 --- i.e. this value is explicitly supplied by the user rather than getting tacked on internally. The step function returned by \code{stepfun()} must be right continuous (this is the default behaviour of \code{stepfun()}) otherwise an error is given. 
} \seealso{ \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Triplets}} } \examples{ # Strauss process: mod01 <- rmhmodel(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) # The above could also be simulated using 'rStrauss' # Strauss with hardcore: mod04 <- rmhmodel(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=owin(c(0,10),c(0,5))) # Hard core: mod05 <- rmhmodel(cif="hardcore",par=list(beta=2,hc=0.3), w=square(5)) # Soft core: w <- square(10) mod07 <- rmhmodel(cif="sftcr", par=list(beta=0.8,sigma=0.1,kappa=0.5), w=w) # Area-interaction process: mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7), w=c(0,10,0,10)) # Baddeley-Geyer process: mod99 <- rmhmodel(cif="badgey",par=list(beta=0.3, gamma=c(0.2,1.8,2.4),r=c(0.035,0.07,0.14),sat=5), w=unit.square()) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250)) # specify types mod09 <- rmhmodel(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250), types=c("A", "B")) # Multitype Hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod08hard <- rmhmodel(cif="multihard", par=list(beta=beta,hradii=rhc), w=square(250), types=c("A", "B")) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) ri <- matrix(c(45,45,45,45),2,2) rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) 
} # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- rmhmodel(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=ri,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) # Triplets process: mod11 <- rmhmodel(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- rmhmodel(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) # hybrid model modhy <- rmhmodel(cif=c('strauss', 'geyer'), par=list(list(beta=100,gamma=0.5,r=0.05), list(beta=1, gamma=0.7,r=0.1, sat=2)), w=square(1)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/discretise.Rd0000755000176000001440000000557412237642732015373 0ustar ripleyusers\name{discretise} \alias{discretise} \title{ Safely Convert Point Pattern Window to Binary Mask } \description{ Given a point pattern, discretise its window by converting it to a binary pixel mask, adjusting the mask so that it still contains all the points. } \usage{ discretise(X, eps = NULL, dimyx = NULL, xy = NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}) to be converted.} \item{eps}{(optional) width and height of each pixel} \item{dimyx}{(optional) pixel array dimensions} \item{xy}{(optional) pixel coordinates} } \details{ This function modifies the point pattern \code{X} by converting its observation window \code{X$window} to a binary pixel image (a window of type \code{"mask"}). It ensures that no points of \code{X} are deleted by the discretisation. The window is first discretised using \code{\link{as.mask}}. It can happen that points of \code{X} that were inside the original window may fall outside the new mask. 
The \code{discretise} function corrects this by augmenting the mask (so that the mask includes any pixel that contains a point of the pattern). The arguments \code{eps}, \code{dimyx} and \code{xy} control the fineness of the pixel array. They are passed to \code{\link{as.mask}}. If \code{eps}, \code{dimyx} and \code{xy} are all absent or \code{NULL}, and if the window of \code{X} is of type \code{"mask"} to start with, then \code{discretise(X)} returns \code{X} unchanged. See \code{\link{as.mask}} for further details about the arguments \code{eps}, \code{dimyx}, and \code{xy}, and the process of converting a window to one of type \code{mask}. } \section{Error checking}{ Before doing anything, \code{discretise} checks that all the points of the pattern are actually inside the original window. This is guaranteed to be the case if the pattern was constructed using \code{\link{ppp}} or \code{\link{as.ppp}}. However anomalies are possible if the point pattern was created or manipulated inappropriately. These will cause an error. } \value{ A point pattern (object of class \code{"ppp"}), identical to \code{X}, except that its observation window has been converted to one of type \code{mask}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{as.mask}} } \examples{ data(demopat) X <- demopat plot(X, main="original pattern") Y <- discretise(X, dimyx=50) plot(Y, main="discretise(X)") stopifnot(X$n == Y$n) # what happens if we just convert the window to a mask? 
W <- X$window M <- as.mask(W, dimyx=50) plot(M, main="window of X converted to mask") plot(X, add=TRUE, pch=16) plot(X[M], add=TRUE, pch=1, cex=1.5) XM <- X[M] cat(paste(X$n - XM$n, "points of X lie outside M\n")) } \keyword{spatial} \keyword{manip} spatstat/man/crossdist.Rd0000755000176000001440000000265012237642732015242 0ustar ripleyusers\name{crossdist} \alias{crossdist} \title{Pairwise distances} \description{ Computes the distances between pairs of `things' taken from two different datasets. } \usage{ crossdist(X, Y, \dots) } \arguments{ \item{X,Y}{ Two objects of the same class. } \item{\dots}{ Additional arguments depending on the method. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th thing in the first dataset to the \code{j}-th thing in the second dataset. } \details{ Given two datasets \code{X} and \code{Y} (representing either two point patterns or two line segment patterns) \code{crossdist} computes the Euclidean distance from each thing in the first dataset to each thing in the second dataset, and returns a matrix containing these distances. The function \code{crossdist} is generic, with methods for point patterns (objects of class \code{"ppp"}), line segment patterns (objects of class \code{"psp"}), and a default method. See the documentation for \code{\link{crossdist.ppp}}, \code{\link{crossdist.psp}} or \code{\link{crossdist.default}} for further details. 
} \seealso{ \code{\link{crossdist.ppp}}, \code{\link{crossdist.psp}}, \code{\link{crossdist.default}}, \code{\link{pairdist}}, \code{\link{nndist}} } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/coords.Rd0000755000176000001440000000507312237642732014520 0ustar ripleyusers\name{coords} \Rdversion{1.1} \alias{coords} \alias{coords.ppp} \alias{coords.ppx} \alias{coords<-} \alias{coords<-.ppp} \alias{coords<-.ppx} \title{ Extract or Change Coordinates of a Spatial or Spatiotemporal Point Pattern } \description{ Given any kind of spatial or space-time point pattern, this function extracts the (space and/or time and/or local) coordinates of the points and returns them as a data frame. } \usage{ coords(x, ...) \method{coords}{ppp}(x, ...) \method{coords}{ppx}(x, ..., spatial = TRUE, temporal = TRUE, local=TRUE) coords(x, ...) <- value \method{coords}{ppp}(x, ...) <- value \method{coords}{ppx}(x, ..., spatial = TRUE, temporal = TRUE, local=TRUE) <- value } \arguments{ \item{x}{ A point pattern: either a two-dimensional point pattern (object of class \code{"ppp"}), a three-dimensional point pattern (object of class \code{"pp3"}), or a general multidimensional space-time point pattern (object of class \code{"ppx"}). } \item{\dots}{ Further arguments passed to methods. } \item{spatial,temporal,local}{ Logical values indicating whether to extract spatial, temporal and local coordinates, respectively. The default is to return all such coordinates. (Only relevant to \code{ppx} objects). } \item{value}{ New values of the coordinates. A numeric vector with one entry for each point in \code{x}, or a numeric matrix or data frame with one row for each point in \code{x}. } } \details{ The function \code{coords} extracts the coordinates from a point pattern. The function \code{coords<-} replaces the coordinates of the point pattern with new values. 
Both functions \code{coords} and \code{coords<-} are generic, with methods for the classes \code{"ppp"} and \code{"ppx"}. An object of class \code{"pp3"} also inherits from \code{"ppx"} and is handled by the method for \code{"ppx"}. } \value{ \code{coords} returns a \code{data.frame} with one row for each point, containing the coordinates. \code{coords<-} returns the altered point pattern. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{ppx}}, \code{\link{pp3}}, \code{\link{ppp}}, \code{as.hyperframe.ppx}, \code{as.data.frame.ppx}. } \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t")) coords(X) coords(X, temporal=FALSE) coords(X) <- matrix(runif(12), ncol=3) } \keyword{spatial} \keyword{manip} spatstat/man/square.Rd0000755000176000001440000000252312237642734014526 0ustar ripleyusers\name{square} \alias{square} \alias{unit.square} \title{Square Window} \description{ Creates a square window } \usage{ square(r=1) unit.square() } \arguments{ \item{r}{Numeric. The side length of the square, or a vector giving the minimum and maximum coordinate values. } } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying a window. } \details{ If \code{r} is a number, \code{square(r)} is a shortcut for creating a window object representing the square \eqn{[0,r] \times [0,r]}{[0,r] * [0,r]}. It is equivalent to the command \code{owin(c(0,r),c(0,r))}. If \code{r} is a vector of length 2, then \code{square(r)} creates the square with \code{x} and \code{y} coordinates ranging from \code{r[1]} to \code{r[2]}. \code{unit.square} creates the unit square \eqn{[0,1] \times [0,1]}{[0,1] * [0,1]}. It is equivalent to \code{square(1)} or \code{square()} or \code{owin(c(0,1),c(0,1))}. These commands are included mainly to improve the readability of some code. 
} \seealso{ \code{\link{owin.object}}, \code{\link{owin}} } \examples{ W <- square(10) W <- square(c(-1,1)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/summary.ppm.Rd0000755000176000001440000000454412237642734015523 0ustar ripleyusers\name{summary.ppm} \alias{summary.ppm} \alias{print.summary.ppm} \title{Summarizing a Fitted Point Process Model} \description{ \code{summary} method for class \code{"ppm"}. } \usage{ \method{summary}{ppm}(object, \dots, quick=FALSE) \method{print}{summary.ppm}(x, \dots) } \arguments{ \item{object}{A fitted point process model.} \item{\dots}{Ignored.} \item{quick}{Logical flag controlling the scope of the summary.} \item{x}{Object of class \code{"summary.ppm"} as returned by \code{summary.ppm}. } } \details{ This is a method for the generic \code{\link{summary}} for the class \code{"ppm"}. An object of class \code{"ppm"} describes a fitted point process model. See \code{\link{ppm.object}}) for details of this class. \code{summary.ppm} extracts information about the type of model that has been fitted, the data to which the model was fitted, and the values of the fitted coefficients. (If \code{quick=TRUE} then only the information about the type of model is extracted.) \code{print.summary.ppm} prints this information in a comprehensible format. In normal usage, \code{print.summary.ppm} is invoked implicitly when the user calls \code{summary.ppm} without assigning its value to anything. See the examples. You can also type \code{coef(summary(object))} to extract a table of the fitted coefficients of the point process model \code{object} together with standard errors and confidence limits. } \value{ \code{summary.ppm} returns an object of class \code{"summary.ppm"}, while \code{print.summary.ppm} returns \code{NULL}. 
} \examples{ # invent some data X <- rpoispp(42) # fit a model to it fit <- ppm(X, ~x, Strauss(r=0.1)) # summarize the fitted model summary(fit) # `quick' option summary(fit, quick=TRUE) # save the full summary s <- summary(fit) # print it print(s) s # extract stuff names(s) coef(s) s$args$correction s$name s$trend$value \dontrun{ # multitype pattern data(demopat) fit <- ppm(demopat, ~marks, Poisson()) summary(fit) } # model with external covariates fitX <- ppm(X, ~Z, covariates=list(Z=function(x,y){x+y})) summary(fitX) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/rmhexpand.Rd0000644000176000001440000001312312237642734015207 0ustar ripleyusers\name{rmhexpand} \alias{rmhexpand} \title{ Specify Simulation Window or Expansion Rule } \description{ Specify a spatial domain in which point process simulations will be performed. Alternatively, specify a rule which will be used to determine the simulation window. } \usage{ rmhexpand(x = NULL, ..., area = NULL, length = NULL, distance = NULL) } \arguments{ \item{x}{ Any kind of data determining the simulation window or the expansion rule. A window (object of class \code{"owin"}) specifying the simulation window, a numerical value specifying an expansion factor or expansion distance, a list containing one numerical value, an object of class \code{"rmhexpand"}, or \code{NULL}. } \item{\dots}{ Ignored. } \item{area}{ Area expansion factor. Incompatible with other arguments. } \item{length}{ Length expansion factor. Incompatible with other arguments. } \item{distance}{ Expansion distance (buffer width). Incompatible with other arguments. 
} } \details{ In the Metropolis-Hastings algorithm \code{\link{rmh}} for simulating spatial point processes, simulations are usually carried out on a spatial domain that is larger than the original window of the point process model, then subsequently clipped to the original window. The command \code{rmhexpand} can be used to specify the simulation window, or to specify a rule which will later be used to determine the simulation window from data. The arguments are all incompatible: at most one of them should be given. If the first argument \code{x} is given, it may be any of the following: \itemize{ \item a window (object of class \code{"owin"}) specifying the simulation window. \item an object of class \code{"rmhexpand"} specifying the expansion rule. \item a single numerical value, without attributes. This will be interpreted as the value of the argument \code{area}. \item either \code{c(area=v)} or \code{list(area=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{area}. \item either \code{c(length=v)} or \code{list(length=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{length}. \item either \code{c(distance=v)} or \code{list(distance=v)}, where \code{v} is a single numeric value. This will be interpreted as the value of the argument \code{distance}. \item \code{NULL}, meaning that the expansion rule is not yet determined. } If one of the arguments \code{area}, \code{length} or \code{distance} is given, then the simulation window is determined from the original data window as follows. \describe{ \item{area}{ The bounding box of the original data window will be extracted, and the simulation window will be a scalar dilation of this rectangle. The argument \code{area} should be a numerical value, greater than or equal to 1. It specifies the area expansion factor, i.e. 
the ratio of the area of the simulation window to the area of the original point process window's bounding box. } \item{length}{ The bounding box of the original data window will be extracted, and the simulation window will be a scalar dilation of this rectangle. The argument \code{length} should be a numerical value, greater than or equal to 1. It specifies the length expansion factor, i.e. the ratio of the width (height) of the simulation window to the width (height) of the original point process window's bounding box. } \item{distance}{ The argument \code{distance} should be a numerical value, greater than or equal to 0. It specifies the width of a buffer region around the original data window. If the original data window is a rectangle, then this window is extended by a margin of width equal to \code{distance} around all sides of the original rectangle. The result is a rectangle. If the original data window is not a rectangle, then morphological dilation is applied using \code{\link{dilation.owin}} so that a margin or buffer of width equal to \code{distance} is created around all sides of the original window. The result is a non-rectangular window, typically of a different shape. } } } \section{Undetermined expansion}{ If \code{expand=NULL}, this is interpreted to mean that the expansion rule is \dQuote{not yet decided}. Expansion will be decided later, by the simulation algorithm \code{\link{rmh}}. If the model cannot be expanded (for example if the covariate data in the model are not available on a larger domain) then expansion will not occur. If the model can be expanded, then if the point process model has a finite interaction range \code{r}, the default is \code{rmhexpand(distance=2*r)}, and otherwise \code{rmhexpand(area=2)}. } \value{ An object of class \code{"rmhexpand"} specifying the expansion rule. There is a \code{print} method for this class. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{expand.owin}} to apply the rule to a window. \code{\link{will.expand}} to test whether expansion will occur. \code{\link{rmh}}, \code{\link{rmhcontrol}} for background details. } \examples{ rmhexpand() rmhexpand(2) rmhexpand(1) rmhexpand(length=1.5) rmhexpand(distance=0.1) rmhexpand(letterR) } \keyword{spatial} \keyword{datagen} spatstat/man/is.multitype.ppp.Rd0000755000176000001440000000436712237642732016500 0ustar ripleyusers\name{is.multitype.ppp} \alias{is.multitype.ppp} \title{Test Whether A Point Pattern is Multitype} \description{ Tests whether a point pattern has ``marks'' attached to the points which classify the points into several types. } \usage{ \method{is.multitype}{ppp}(X, na.action="warn", \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}) } \item{na.action}{ String indicating what to do if \code{NA} values are encountered amongst the marks. Options are \code{"warn"}, \code{"fatal"} and \code{"ignore"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a multitype point pattern. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. This function tests whether the point pattern \code{X} contains or involves marked points, \bold{and} that the marks are a factor. It is a method for the generic function \code{\link{is.multitype}}. 
For example, the \code{\link{amacrine}} dataset is multitype (there are two types of cells, on and off), but the \code{\link{longleaf}} dataset is \emph{not} multitype (the marks are real numbers). The argument \code{na.action} determines what action will be taken if the point pattern has a vector of marks but some or all of the marks are \code{NA}. Options are \code{"fatal"} to cause a fatal error; \code{"warn"} to issue a warning and then return \code{TRUE}; and \code{"ignore"} to take no action except returning \code{TRUE}. } \seealso{ \code{\link{is.multitype}}, \code{\link{is.multitype.ppm}} } \examples{ data(cells) is.multitype(cells) #FALSE - no marks data(longleaf) is.multitype(longleaf) #FALSE - real valued marks data(amacrine) is.multitype(amacrine) #TRUE } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/scanpp.Rd0000755000176000001440000000554412237642734014510 0ustar ripleyusers\name{scanpp} \alias{scanpp} \title{Read Point Pattern From Data File} \description{ Reads a point pattern dataset from a text file. } \usage{ scanpp(filename, window, header=TRUE, dir="", multitype=FALSE) } \arguments{ \item{filename}{ String name of the file containing the coordinates of the points in the point pattern, and their marks if any. } \item{window}{ Window for the point pattern. An object of class \code{"owin"}. } \item{header}{ Logical flag indicating whether the first line of the file contains headings for the columns. Passed to \code{\link{read.table}}. } \item{dir}{ String containing the path name of the directory in which \code{filename} is to be found. Default is the current directory. } \item{multitype}{ Logical flag indicating whether marks are to be interpreted as a factor (\code{multitype = TRUE}) or as numerical values (\code{multitype = FALSE}). 
} } \value{ A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}). } \details{ This simple function reads a point pattern dataset from a file containing the cartesian coordinates of its points, and optionally the mark values for these points. The file identified by \code{filename} in directory \code{dir} should be a text file that can be read using \code{\link{read.table}}. Thus, each line of the file (except possibly the first line) contains data for one point in the point pattern. Data are arranged in columns. There should be either two columns (for an unmarked point pattern) or three columns (for a marked point pattern). If \code{header=FALSE} then the first two columns of data will be interpreted as the \eqn{x} and \eqn{y} coordinates of points. A third column, if present, will be interpreted as containing the marks for these points. The marks will be converted to a factor if \code{multitype = TRUE}. If \code{header=TRUE} then the first line of the file should contain string names for each of the columns of data. If there are columns named \code{x} and \code{y} then these will be taken as the cartesian coordinates, and any remaining column will be taken as the marks. If there are no columns named \code{x} and \code{y} then the first and second columns will be taken as the cartesian coordinates. Note that there is intentionally no default for \code{window}. The window of observation should be specified. If you really need to estimate the window, use the Ripley-Rasson estimator \code{\link{ripras}}. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{ppp}}, \code{\link{as.ppp}}, \code{\link{ripras}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{IO} spatstat/man/rmpoint.Rd0000755000176000001440000002465512237642734014730 0ustar ripleyusers\name{rmpoint} \alias{rmpoint} \title{Generate N Random Multitype Points} \description{ Generate a random multitype point pattern with a fixed number of points, or a fixed number of points of each type. } \usage{ rmpoint(n, f=1, fmax=NULL, win=unit.square(), types, ptypes, \dots, giveup=1000, verbose=FALSE) } \arguments{ \item{n}{ Number of marked points to generate. Either a single number specifying the total number of points, or a vector specifying the number of points of each type. } \item{f}{ The probability density of the multitype points, usually un-normalised. Either a constant, a vector, a function \code{f(x,y,m, ...)}, a pixel image, a list of functions \code{f(x,y,...)} or a list of pixel images. } \item{fmax}{ An upper bound on the values of \code{f}. If missing, this number will be estimated. } \item{win}{ Window in which to simulate the pattern. Ignored if \code{f} is a pixel image or list of pixel images. } \item{types}{ All the possible types for the multitype pattern. } \item{ptypes}{ Optional vector of probabilities for each type. } \item{\dots}{ Arguments passed to \code{f} if it is a function. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{verbose}{ Flag indicating whether to report details of performance of the simulation algorithm. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). } \details{ This function generates random multitype point patterns consisting of a fixed number of points. Three different models are available: \describe{ \item{I. 
Random location and type:}{ If \code{n} is a single number and the argument \code{ptypes} is missing, then \code{n} independent, identically distributed random multitype points are generated. Their locations \code{(x[i],y[i])} and types \code{m[i]} have joint probability density proportional to \eqn{f(x,y,m)}. } \item{II. Random type, and random location given type:}{ If \code{n} is a single number and \code{ptypes} is given, then \code{n} independent, identically distributed random multitype points are generated. Their types \code{m[i]} have probability distribution \code{ptypes}. Given the types, the locations \code{(x[i],y[i])} have conditional probability density proportional to \eqn{f(x,y,m)}. } \item{III. Fixed types, and random location given type:}{ If \code{n} is a vector, then we generate \code{n[i]} independent, identically distributed random points of type \code{types[i]}. For points of type \eqn{m} the conditional probability density of location \eqn{(x,y)} is proportional to \eqn{f(x,y,m)}. } } Note that the density \code{f} is normalised in different ways in Model I and Models II and III. In Model I the normalised joint density is \eqn{g(x,y,m)=f(x,y,m)/Z} where \deqn{ Z = \sum_m \int\int \lambda(x,y,m) {\rm d}x \, {\rm d}y }{ Z = sum_[m] integral lambda(x,y,m) dx dy } while in Models II and III the normalised conditional density is \eqn{g(x,y\mid m) = f(x,y,m)/Z_m}{g(x,y|m) = f(x,y,m)/Z[m]} where \deqn{ Z_m = \int\int \lambda(x,y,m) {\rm d}x \, {\rm d}y. }{ Z[m] = integral lambda(x,y,m) dx dy. } In Model I, the marginal distribution of types is \eqn{p_m = Z_m/Z}{p[m] = Z[m]/Z}. The unnormalised density \code{f} may be specified in any of the following ways. \describe{ \item{single number:}{ If \code{f} is a single number, the conditional density of location given type is uniform. That is, the points of each type are uniformly distributed. In Model I, the marginal distribution of types is also uniform (all possible types have equal probability). 
} \item{vector:}{ If \code{f} is a numeric vector, the conditional density of location given type is uniform. That is, the points of each type are uniformly distributed. In Model I, the marginal distribution of types is proportional to the vector \code{f}. In Model II, the marginal distribution of types is \code{ptypes}, that is, the values in \code{f} are ignored. } \item{function:}{ If \code{f} is a function, it will be called in the form \code{f(x,y,m,\dots)} at spatial location \code{(x,y)} for points of type \code{m}. In Model I, the joint probability density of location and type is proportional to \code{f(x,y,m,\dots)}. In Models II and III, the conditional probability density of location \code{(x,y)} given type \code{m} is proportional to \code{f(x,y,m,\dots)}. The function \code{f} must work correctly with vectors \code{x}, \code{y} and \code{m}, returning a vector of function values. (Note that \code{m} will be a factor with levels \code{types}.) The value \code{fmax} must be given and must be an upper bound on the values of \code{f(x,y,m,\dots)} for all locations \code{(x, y)} inside the window \code{win} and all types \code{m}. The argument \code{types} must be given. } \item{list of functions:}{ If \code{f} is a list of functions, then the functions will be called in the form \code{f[[i]](x,y,\dots)} at spatial location \code{(x,y)} for points of type \code{types[i]}. In Model I, the joint probability density of location and type is proportional to \code{f[[m]](x,y,\dots)}. In Models II and III, the conditional probability density of location \code{(x,y)} given type \code{m} is proportional to \code{f[[m]](x,y,\dots)}. The function \code{f[[i]]} must work correctly with vectors \code{x} and \code{y}, returning a vector of function values. The value \code{fmax} must be given and must be an upper bound on the values of \code{f[[i]](x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. 
The argument \code{types} defaults to \code{seq(f)}. } \item{pixel image:}{ If \code{f} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), the unnormalised density at a location \code{(x,y)} for points of any type is equal to the pixel value of \code{f} for the pixel nearest to \code{(x,y)}. In Model I, the marginal distribution of types is uniform. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} must be given. } \item{list of pixel images:}{ If \code{f} is a list of pixel images, then the image \code{f[[i]]} determines the density values of points of type \code{types[i]}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} defaults to \code{factor(seq(f))}. } } The implementation uses the rejection method. For Model I, \code{\link{rmpoispp}} is called repeatedly until \code{n} points have been generated. It gives up after \code{giveup} calls if there are still fewer than \code{n} points. For Model II, the types are first generated according to \code{ptypes}, then the locations of the points of each type are generated using \code{\link{rpoint}}. For Model III, the locations of the points of each type are generated using \code{\link{rpoint}}. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}} } \examples{ abc <- c("a","b","c") ##### Model I rmpoint(25, types=abc) rmpoint(25, 1, types=abc) # 25 points, equal probability for each type, uniformly distributed locations rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc) # same as above rmpoint(25, list(function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}), types=abc) # same as above rmpoint(25, function(x,y,m) { x }, types=abc) # 25 points, equal probability for each type, # locations nonuniform with density proportional to x rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc) rmpoint(25, list(function(x,y) { rep(1, length(x)) }, function(x,y) { x }, function(x,y) { x }), types=abc) # 25 points, UNEQUAL probabilities for each type, # type "a" points uniformly distributed, # type "b" and "c" points nonuniformly distributed. ##### Model II rmpoint(25, 1, types=abc, ptypes=rep(1,3)/3) rmpoint(25, 1, types=abc, ptypes=rep(1,3)) # 25 points, equal probability for each type, # uniformly distributed locations rmpoint(25, function(x,y,m) {rep(1, length(x))}, types=abc, ptypes=rep(1,3)) # same as above rmpoint(25, list(function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}, function(x,y){rep(1, length(x))}), types=abc, ptypes=rep(1,3)) # same as above rmpoint(25, function(x,y,m) { x }, types=abc, ptypes=rep(1,3)) # 25 points, equal probability for each type, # locations nonuniform with density proportional to x rmpoint(25, function(x,y,m) { ifelse(m == "a", 1, x) }, types=abc, ptypes=rep(1,3)) # 25 points, EQUAL probabilities for each type, # type "a" points uniformly distributed, # type "b" and "c" points nonuniformly distributed. 
###### Model III rmpoint(c(12, 8, 4), 1, types=abc) # 12 points of type "a", # 8 points of type "b", # 4 points of type "c", # each uniformly distributed rmpoint(c(12, 8, 4), function(x,y,m) { ifelse(m=="a", 1, x)}, types=abc) rmpoint(c(12, 8, 4), list(function(x,y) { rep(1, length(x)) }, function(x,y) { x }, function(x,y) { x }), types=abc) # 12 points of type "a", uniformly distributed # 8 points of type "b", nonuniform # 4 points of type "c", nonuniform ######### ## Randomising an existing point pattern: data(demopat) X <- demopat # same numbers of points of each type, uniform random locations (Model III) rmpoint(table(X$marks), 1, types=levels(X$marks), win=X$window) # same total number of points, distribution of types estimated from X, # uniform random locations (Model II) rmpoint(X$n, 1, types=levels(X$marks), win=X$window, ptypes=table(X$marks)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/intensity.ppm.Rd0000644000176000001440000000356412237642732016050 0ustar ripleyusers\name{intensity.ppm} %DontDeclareMethods \alias{intensity.ppm} \title{ Intensity of Fitted Point Process Model } \description{ Computes the intensity of a fitted point process model. } \usage{ \method{intensity}{ppm}(X, ...) } \arguments{ \item{X}{ A fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Arguments passed to \code{\link{predict.ppm}} in some cases. See Details. } } \details{ This is a method for the generic function \code{\link{intensity}} for fitted point process models (class \code{"ppm"}). The intensity of a point process model is the expected number of random points per unit area. If \code{X} is a Poisson point process model, the intensity of the process is computed exactly. 
The result is a numerical value if \code{X} is a stationary Poisson point process, and a pixel image if \code{X} is non-stationary. (In the latter case, the resolution of the pixel image is controlled by the arguments \code{\dots} which are passed to \code{\link{predict.ppm}}.) If \code{X} is another Gibbs point process model, the intensity is computed approximately using the Poisson-saddlepoint approximation (Baddeley and Nair, 2012). Currently this is implemented only for pairwise interactions. } \value{ A numeric value (if the model is stationary) or a pixel image. } \references{ Baddeley, A. and Nair, G. (2012) Fast approximation of the intensity of Gibbs point processes. \emph{Electronic Journal of Statistics} \bold{6} 1155--1169. } \seealso{ \code{\link{intensity}}, \code{\link{intensity.ppp}} } \examples{ fitP <- ppm(swedishpines, ~1, Poisson()) intensity(fitP) fitS <- ppm(swedishpines, ~1, Strauss(9)) intensity(fitS) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Gopal Nair. } \keyword{spatial} \keyword{models} spatstat/man/rThomas.Rd0000755000176000001440000001212412237642734014641 0ustar ripleyusers\name{rThomas} \alias{rThomas} \title{Simulate Thomas Process} \description{ Generate a random point pattern, a realisation of the Thomas cluster process. } \usage{ rThomas(kappa, sigma, mu, win = owin(c(0,1),c(0,1))) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{sigma}{ Standard deviation of random displacement (along each coordinate axis) of a point from its cluster centre. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. 
} } \value{ The simulated point pattern (an object of class \code{"ppp"}). Additionally, some intermediate results of the simulation are returned as attributes of this point pattern. See \code{\link{rNeymanScott}}. } \details{ This algorithm generates a realisation of the (`modified') Thomas process, a special case of the Neyman-Scott process, inside the window \code{win}. In the simplest case, where \code{kappa} and \code{mu} are single numbers, the algorithm generates a uniform Poisson point process of \dQuote{parent} points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of \dQuote{offspring} points, the number of points per cluster being Poisson (\code{mu}) distributed, and their positions being isotropic Gaussian displacements from the cluster parent location. The resulting point pattern is a realisation of the classical \dQuote{stationary Thomas process} generated inside the window \code{win}. This point process has intensity \code{kappa * mu}. The algorithm can also generate spatially inhomogeneous versions of the Thomas process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu * f}, where \code{f} is the Gaussian probability density centred at the parent point. 
Equivalently we first generate, for each parent point, a Poisson (\eqn{M}) random number of offspring (where \eqn{M} is the maximum value of \code{mu}) with independent Gaussian displacements from the parent location, and then randomly thin the offspring points, with retention probability \code{mu/M}. \item Both the parent points and the offspring points can be spatially inhomogeneous, as described above. } Note that if \code{kappa} is a pixel image, its domain must be larger than the window \code{win}. This is because an offspring point inside \code{win} could have its parent point lying outside \code{win}. In order to allow this, the simulation algorithm first expands the original window \code{win} by a distance \code{4 * sigma} and generates the Poisson process of parent points on this larger window. If \code{kappa} is a pixel image, its domain must contain this larger window. The intensity of the Thomas process is \code{kappa * mu} if either \code{kappa} or \code{mu} is a single number. In the general case the intensity is an integral involving \code{kappa}, \code{mu} and \code{f}. The Thomas process with homogeneous parents (i.e. where \code{kappa} is a single number) can be fitted to data using \code{\link{kppm}} or related functions. Currently it is not possible to fit the Thomas model with inhomogeneous parents. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rGaussPoisson}}, \code{\link{rNeymanScott}}, \code{\link{thomas.estK}}, \code{\link{thomas.estpcf}}, \code{\link{kppm}} } \references{ Diggle, P. J., Besag, J. and Gleaves, J. T. (1976) Statistical analysis of spatial point patterns by means of distance methods. \emph{Biometrics} \bold{32} 659--667. Thomas, M. (1949) A generalisation of Poisson's binomial limit for use in ecology. \emph{Biometrika} \bold{36}, 18--25. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. 
} \examples{ #homogeneous X <- rThomas(10, 0.2, 5) #inhomogeneous Z <- as.im(function(x,y){ 5 * exp(2 * x - 1) }, owin()) Y <- rThomas(10, 0.2, Z) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/shapley.Rd0000755000176000001440000000630712237642734014677 0ustar ripleyusers\name{shapley} \alias{shapley} \alias{shapley.extra} \docType{data} \title{Galaxies in the Shapley Supercluster} \description{ A point pattern recording the sky positions of 4215 galaxies in the Shapley Supercluster. } \format{ \code{shapley} is an object of class \code{"ppp"} representing the point pattern of galaxy locations (see \code{\link{ppp.object}}). \code{shapley.extra} is a list containing additional data described under Notes. } \usage{data(shapley)} \examples{ data(shapley) shapley.extra$plotit(main="Shapley Supercluster") } \source{M.J. Drinkwater, Department of Physics, University of Queensland} \section{Notes}{ This dataset comes from a survey by Drinkwater et al (2004) of the Shapley Supercluster, one of the most massive concentrations of galaxies in the local universe. The data give the sky positions of 4215 galaxies observed using the FLAIR-II spectrograph on the UK Schmidt Telescope (UKST). They were kindly provided by Dr Michael Drinkwater through the Centre for Astrostatistics at Penn State University. Sky positions are given using the coordinates Right Ascension (degrees from 0 to 360) and Declination (degrees from -90 to 90). The point pattern has three mark variables: \describe{ \item{Mag}{ Galaxy magnitude (a negative logarithmic measure of visible brightness). } \item{V}{ Recession velocity (km/sec) inferred from redshift, with corrections applied. } \item{SigV}{ Estimated standard error for \code{V}. 
} } The region covered by the survey was approximately the UKST's standard quadrilateral survey fields 382 to 384 and 443 to 446. However, a few of the galaxy positions lie outside these fields. The point pattern dataset \code{shapley} consists of all 4215 galaxy locations. The observation window for this pattern is a dilated copy of the convex hull of the galaxy positions, constructed so that all galaxies lie within the window. Note that the data contain duplicated points (two points at the same location). To determine which points are duplicates, use \code{\link{duplicated.ppp}}. To remove the duplication, use \code{\link{unique.ppp}}. The auxiliary dataset \code{shapley.extra} contains the following components: \describe{ \item{\code{UKSTfields}}{ a list of seven windows (objects of class \code{"owin"}) giving the UKST standard survey fields. } \item{\code{UKSTdomain}}{ the union of these seven fields, an object of class \code{"owin"}. } \item{\code{plotit}}{ a function (called without arguments) that will plot the data and the survey fields in the conventional astronomical presentation, in which Right Ascension is converted to hours and minutes (1 hour equals 15 degrees) and Right Ascension decreases as we move to the right of the plot. } } } \references{ Drinkwater, M.J., Parker, Q.A., Proust, D., Slezak, E. and Quintana, H. (2004) The large scale distribution of galaxies in the Shapley Supercluster. \emph{Publications of the Astronomical Society of Australia} \bold{21}, 89-96. \code{DOI 10.1071/AS03057} } \keyword{datasets} \keyword{spatial} spatstat/man/as.hyperframe.ppx.Rd0000755000176000001440000000471612237642732016604 0ustar ripleyusers\name{as.hyperframe.ppx} \Rdversion{1.1} \alias{as.hyperframe.ppx} \alias{as.data.frame.ppx} \alias{as.matrix.ppx} \title{ Extract coordinates and marks of multidimensional point pattern } \description{ Given any kind of spatial or space-time point pattern, extract the coordinates and marks of the points. 
} \usage{ \method{as.hyperframe}{ppx}(x, ...) \method{as.data.frame}{ppx}(x, ...) \method{as.matrix}{ppx}(x, ...) } \arguments{ \item{x}{ A general multidimensional space-time point pattern (object of class \code{"ppx"}). } \item{\dots}{ Ignored. } } \details{ An object of class \code{"ppx"} (see \code{\link{ppx}}) represents a marked point pattern in multidimensional space and/or time. There may be any number of spatial coordinates, any number of temporal coordinates, and any number of mark variables. The individual marks may be atomic (numeric values, factor values, etc) or objects of any kind. The function \code{as.hyperframe.ppx} extracts the coordinates and the marks as a \code{"hyperframe"} (see \code{\link{hyperframe}}) with one row of data for each point in the pattern. This is a method for the generic function \code{\link{as.hyperframe}}. The function \code{as.data.frame.ppx} discards those mark variables which are not atomic values, and extracts the coordinates and the remaining marks as a \code{data.frame} with one row of data for each point in the pattern. This is a method for the generic function \code{\link{as.data.frame}}. Finally \code{as.matrix(x)} is equivalent to \code{as.matrix(as.data.frame(x))} for an object of class \code{"ppx"}. Be warned that, if there are any columns of non-numeric data (i.e. if there are mark variables that are factors), the result will be a matrix of character values. } \value{ A \code{hyperframe}, \code{data.frame} or \code{matrix} as appropriate. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{ppx}}, \code{\link{hyperframe}}, \code{\link{as.hyperframe}}. 
} \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t")) as.data.frame(X) val <- runif(4) E <- lapply(val, function(s) { rpoispp(s) }) hf <- hyperframe(t=val, e=as.listof(E)) Z <- ppx(data=hf, domain=c(0,1)) as.hyperframe(Z) as.data.frame(Z) } \keyword{spatial} \keyword{manip} spatstat/man/is.ppm.Rd0000755000176000001440000000164612237642732014437 0ustar ripleyusers\name{is.ppm} \alias{is.ppm} \alias{is.lppm} \alias{is.kppm} \alias{is.slrm} \title{Test Whether An Object Is A Fitted Point Process Model} \description{ Checks whether its argument is a fitted point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}). } \usage{ is.ppm(x) is.kppm(x) is.lppm(x) is.slrm(x) } \arguments{ \item{x}{Any object.} } \details{ These functions test whether the object \code{x} is a fitted point process model object of the specified class. The result of \code{is.ppm(x)} is \code{TRUE} if \code{x} has \code{"ppm"} amongst its classes, and otherwise \code{FALSE}. Similarly for the other functions. } \value{ A single logical value. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/Extract.splitppp.Rd0000755000176000001440000000271612237642732016514 0ustar ripleyusers\name{Extract.splitppp} \alias{[.splitppp} \alias{[<-.splitppp} \title{Extract or Replace Sub-Patterns} \description{ Extract or replace some of the sub-patterns in a split point pattern. } \usage{ \method{[}{splitppp}(x, ...) \method{[}{splitppp}(x, ...) <- value } \arguments{ \item{x}{ An object of class \code{"splitppp"}, representing a point pattern separated into a list of sub-patterns. } \item{\dots}{ Subset index. Any valid subset index in the usual \R sense. } \item{value}{ Replacement value for the subset. A list of point patterns. 
} } \value{ Another object of class \code{"splitppp"}. } \details{ These are subset methods for the class \code{"splitppp"}. The argument \code{x} should be an object of class \code{"splitppp"}, representing a point pattern that has been separated into a list of sub-patterns. It is created by \code{\link{split.ppp}}. The methods extract or replace a designated subset of the list \code{x}, and return an object of class \code{"splitppp"}. } \seealso{ \code{\link{split.ppp}}, \code{\link{plot.splitppp}}, \code{\link{summary.splitppp}} } \examples{ data(amacrine) # multitype point pattern y <- split(amacrine) y[1] y["off"] y[1] <- list(runifpoint(42, amacrine$window)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/imcov.Rd0000644000176000001440000000353712237642732014344 0ustar ripleyusers\name{imcov} \alias{imcov} \title{Spatial Covariance of a Pixel Image} \description{ Computes the unnormalised spatial covariance function of a pixel image. } \usage{ imcov(X, Y=X) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}. } \item{Y}{ Optional. Another pixel image. } } \value{ A pixel image (an object of class \code{"im"}) representing the spatial covariance function of \code{X}, or the cross-covariance of \code{X} and \code{Y}. } \details{ The (uncentred, unnormalised) \emph{spatial covariance function} of a pixel image \eqn{X} in the plane is the function \eqn{C(v)} defined for each vector \eqn{v} as \deqn{ C(v) = \int X(u)X(u-v)\, {\rm d}u }{ C(v) = integral of X(u) * X(u-v) du } where the integral is over all spatial locations \eqn{u}, and where \eqn{X(u)} denotes the pixel value at location \eqn{u}. This command computes a discretised approximation to the spatial covariance function, using the Fast Fourier Transform. 
The return value is another pixel image (object of class \code{"im"}) whose greyscale values are values of the spatial covariance function. If the argument \code{Y} is present, then \code{imcov(X,Y)} computes the set \emph{cross-covariance} function \eqn{C(u)} defined as \deqn{ C(v) = \int X(u)Y(u-v)\, {\rm d}u. }{ C(v) = integral of X(u) * Y(u-v) du. } Note that \code{imcov(X,Y)} is equivalent to \code{convolve.im(X,Y,reflectY=TRUE)}. } \seealso{ \code{\link{setcov}}, \code{\link{convolve.im}}, \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{erosion}} } \examples{ X <- as.im(square(1)) v <- imcov(X) plot(v) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/linearmarkconnect.Rd0000644000176000001440000000575512237642732016732 0ustar ripleyusers\name{linearmarkconnect} \alias{linearmarkconnect} \title{ Mark Connection Function for Multitype Point Pattern on Linear Network } \description{ For a multitype point pattern on a linear network, estimate the mark connection function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearmarkconnect(X, i, j, r=NULL, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. 
The values of the argument \eqn{r} at which the function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{\dots}{ Arguments passed to \code{\link{linearpcfcross}} and \code{\link{linearpcf}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link[spatstat]{markconnect}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}, \code{\link{linearmarkequal}}, \code{\link[spatstat]{markconnect}}. 
} \examples{ pab <- linearmarkconnect(chicago, "assault", "burglary") \dontrun{ plot(alltypes(chicago, linearmarkconnect)) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/triplet.family.Rd0000644000176000001440000000250012237642734016161 0ustar ripleyusers\name{triplet.family} \alias{triplet.family} \title{Triplet Interaction Family} \description{ An object describing the family of all Gibbs point processes with interaction order equal to 3. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the interaction structure of Gibbs point processes which have interaction order equal to 3, such as the triplet interaction process \cite{\link{Triplets}}. The object \code{triplet.family} is an object of class \code{"isf"} containing a function \code{triplet.family$eval} for evaluating the sufficient statistics of a Gibbs point process model taking an exponential family form. } \seealso{ \code{\link{Triplets}} to create the triplet interaction process structure. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{inforder.family}}, \code{\link{ord.family}}. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/centroid.owin.Rd0000755000176000001440000000322112237642732016002 0ustar ripleyusers\name{centroid.owin} \alias{centroid.owin} \title{Centroid of a window} \description{ Computes the centroid (centre of mass) of a window } \usage{ centroid.owin(w) } \arguments{ \item{w}{A window} } \value{ A list with components \code{x, y} giving the coordinates of the centroid of the window \code{w}. } \details{ The centroid of the window \code{w} is computed. The centroid (``centre of mass'') is the point whose \eqn{x} and \eqn{y} coordinates are the mean values of the \eqn{x} and \eqn{y} coordinates of all points in the window. The argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. The calculation uses an exact analytic formula for the case of polygonal windows. Note that the centroid of a window is not necessarily inside the window. If the window is convex then it does contain its centroid. 
} \seealso{ \code{\link{owin}}, \code{\link{as.owin}} } \examples{ w <- owin(c(0,1),c(0,1)) centroid.owin(w) # returns 0.5, 0.5 data(demopat) w <- demopat$window # an irregular window \dontrun{ plot(w) # plot the window points(centroid.owin(w)) # mark its centroid } wapprox <- as.mask(w) # pixel approximation of window \dontrun{ points(centroid.owin(wapprox)) # should be indistinguishable } \testonly{ centroid.owin(w) centroid.owin(wapprox) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/vcov.kppm.Rd0000755000176000001440000000373012237642734015152 0ustar ripleyusers\name{vcov.kppm} \alias{vcov.kppm} \title{Variance-Covariance Matrix for a Fitted Cluster Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted cluster point process model. } \usage{ \method{vcov}{kppm}(object, ..., what=c("vcov", "corr", "fisher", "internals")) } \arguments{ \item{object}{ A fitted cluster point process model (an object of class \code{"kppm"}.) } \item{\dots}{ Ignored. } \item{what}{ Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} for the Fisher information matrix. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical (regression) parameters in the cluster point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. The result is an \code{n * n} matrix where \code{n = length(coef(model))}. To calculate a confidence interval for a regression parameter, use \code{\link[stats]{confint}} as shown in the examples. } \value{ A square matrix. } \references{ Waagepetersen, R. 
(2007) Estimating functions for inhomogeneous spatial point processes with incomplete covariate data. \emph{Biometrika} \bold{95}, 351--363. } \author{ Abdollah Jalilian and Rasmus Waagepetersen. Ported to \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} } \seealso{ \code{\link{kppm}}, \code{\link{vcov}}, \code{\link{vcov.ppm}} } \examples{ data(redwood) fit <- kppm(redwood, ~ x + y) vcov(fit) vcov(fit, what="corr") # confidence interval confint(fit) # cross-check the confidence interval by hand: sd <- sqrt(diag(vcov(fit))) t(coef(fit) + 1.96 * outer(sd, c(lower=-1, upper=1))) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/Softcore.Rd0000755000176000001440000001071112237642731015005 0ustar ripleyusers\name{Softcore} \alias{Softcore} \title{The Soft Core Point Process Model} \description{ Creates an instance of the Soft Core point process model which can then be fitted to point pattern data. } \usage{ Softcore(kappa, sigma0=NA) } \arguments{ \item{kappa}{The exponent \eqn{\kappa}{kappa} of the Soft Core interaction} \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. A positive number. } } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Soft Core process with exponent \eqn{\kappa}{kappa}. } \details{ The (stationary) Soft Core point process with parameters \eqn{\beta}{beta} and \eqn{\sigma}{sigma} and exponent \eqn{\kappa}{kappa} is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points contributes a factor \deqn{ \exp \left\{ - \left( \frac{\sigma}{d} \right)^{2/\kappa} \right\} }{ exp( - (sigma/d)^(2/kappa) ) } to the density, where \eqn{d} is the distance between the two points. 
Thus the process has probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \exp \left\{ - \sum_{i < j} \left( \frac{\sigma}{||x_i-x_j||} \right)^{2/\kappa} \right\} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) exp( - sum (sigma/||x[i]-x[j]||)^(2/kappa)) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{\alpha}{alpha} is the normalising constant, and the sum on the right hand side is over all unordered pairs of points of the pattern. This model describes an ``ordered'' or ``inhibitive'' process, with the interpoint interaction decreasing smoothly with distance. The strength of interaction is controlled by the parameter \eqn{\sigma}{sigma}, a positive real number, with larger values corresponding to stronger interaction; and by the exponent \eqn{\kappa}{kappa} in the range \eqn{(0,1)}, with larger values corresponding to weaker interaction. If \eqn{\sigma = 0}{sigma = 0} the model reduces to the Poisson point process. If \eqn{\sigma > 0}{sigma > 0}, the process is well-defined only for \eqn{\kappa}{kappa} in \eqn{(0,1)}. The limit of the model as \eqn{\kappa \to 0}{kappa -> 0} is the hard core process with hard core distance \eqn{h=\sigma}{h=sigma}. The nonstationary Soft Core process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Soft Core process pairwise interaction is yielded by the function \code{Softcore()}. See the examples below. The main argument is the exponent \code{kappa}. 
When \code{kappa} is fixed, the model becomes an exponential family with canonical parameters \eqn{\log \beta}{log(beta)} and \deqn{ \log \gamma = \frac{2}{\kappa} \log\sigma }{ log(gamma) = (2/kappa) log(sigma) } The canonical parameters are estimated by \code{\link{ppm}()}, not fixed in \code{Softcore()}. The optional argument \code{sigma0} can be used to improve numerical stability. If \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Ogata, Y, and Tanemura, M. (1981). Estimation of interaction potentials of spatial point patterns through the maximum likelihood procedure. \emph{Annals of the Institute of Statistical Mathematics}, B \bold{33}, 315--338. Ogata, Y, and Tanemura, M. (1984). Likelihood analysis of spatial point patterns. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 496--518. } \examples{ data(cells) ppm(cells, ~1, Softcore(kappa=0.5), correction="isotropic") # fit the stationary Soft Core process to `cells' } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/K3est.Rd0000755000176000001440000000630312237642731014214 0ustar ripleyusers\name{K3est} \Rdversion{1.1} \alias{K3est} \title{ K-function of a Three-Dimensional Point Pattern } \description{ Estimates the \eqn{K}-function from a three-dimensional point pattern. } \usage{ K3est(X, ..., rmax = NULL, nrval = 128, correction = c("translation", "isotropic")) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. } \item{nrval}{ Optional. 
Number of values of \eqn{r} for which \eqn{K_3(r)}{K3(r)} will be estimated. A large value of \code{nrval} is required to avoid discretisation effects. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the three-dimensional \eqn{K} function is \deqn{ K_3(r) = \frac 1 \lambda E(N(\Phi, x, r) \mid x \in \Phi) }{ K3(r) = (1/lambda) E(N(Phi,x,r) | x in Phi) } where \eqn{\lambda}{lambda} is the intensity of the process (the expected number of points per unit volume) and \eqn{N(\Phi,x,r)}{N(Phi,x,r)} is the number of points of \eqn{\Phi}{Phi}, other than \eqn{x} itself, which fall within a distance \eqn{r} of \eqn{x}. This is the three-dimensional generalisation of Ripley's \eqn{K} function for two-dimensional point processes (Ripley, 1977). The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The distance between each pair of distinct points is computed. The empirical cumulative distribution function of these values, with appropriate edge corrections, is renormalised to give the estimate of \eqn{K_3(r)}{K3(r)}. The available edge corrections are: \describe{ \item{\code{"translation"}:}{ the Ohser translation correction estimator (Ohser, 1983; Baddeley et al, 1993) } \item{\code{"isotropic"}:}{ the three-dimensional counterpart of Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993). } } } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. 
\emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rana Moyeed. } \seealso{ \code{\link{F3est}}, \code{\link{G3est}}, \code{\link{pcf3est}} } \examples{ X <- rpoispp3(42) Z <- K3est(X) if(interactive()) plot(Z) } \keyword{spatial} \keyword{nonparametric} spatstat/man/rpoislinetess.Rd0000755000176000001440000000271512237642734016134 0ustar ripleyusers\name{rpoislinetess} \alias{rpoislinetess} \title{Poisson Line Tessellation} \description{ Generate a tessellation delineated by the lines of the Poisson line process } \usage{ rpoislinetess(lambda, win = owin()) } \arguments{ \item{lambda}{ Intensity of the Poisson line process. A positive number. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Currently, the window must be a rectangle. } } \details{ This algorithm generates a realisation of the uniform Poisson line process, and divides the window \code{win} into tiles separated by these lines. The argument \code{lambda} must be a positive number. It controls the intensity of the process. The expected number of lines intersecting a convex region of the plane is equal to \code{lambda} times the perimeter length of the region. The expected total length of the lines crossing a region of the plane is equal to \code{lambda * pi} times the area of the region. } \value{ A tessellation (object of class \code{"tess"}). } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{rpoisline}} to generate the lines only. 
} \examples{ X <- rpoislinetess(3) plot(as.im(X), main="rpoislinetess(3)") plot(X, add=TRUE) } \keyword{spatial} \keyword{datagen} spatstat/man/Extract.fasp.Rd0000755000176000001440000000362612237642732015573 0ustar ripleyusers\name{Extract.fasp} \alias{[.fasp} \title{Extract Subset of Function Array} \description{ Extract a subset of a function array (an object of class \code{"fasp"}). } \usage{ \method{[}{fasp}(x, I, J, drop=TRUE,\dots) } \arguments{ \item{x}{ A function array. An object of class \code{"fasp"}. } \item{I}{ any valid expression for a subset of the row indices of the array. } \item{J}{ any valid expression for a subset of the column indices of the array. } \item{drop}{ Logical. When the selected subset consists of only one cell of the array, if \code{drop=FALSE} the result is still returned as a \eqn{1 \times 1}{1 * 1} array of functions (class \code{"fasp"}) while if \code{drop=TRUE} it is returned as a function (class \code{"fv"}). } \item{\dots}{Ignored.} } \value{ A function array (of class \code{"fasp"}). Exceptionally, if the array has only one cell, and if \code{drop=TRUE}, then the result is a function value table (class \code{"fv"}). } \details{ A function array can be regarded as a matrix whose entries are functions. See \code{\link{fasp.object}} for an explanation of function arrays. This routine extracts a sub-array according to the usual conventions for matrix indexing. 
} \seealso{ \code{\link{fasp.object}} } \examples{ # Lansing woods data - multitype points with 6 types data(lansing) \testonly{ # smaller dataset lansing <- lansing[ seq(1,lansing$n,by=45), ] } # compute 6 x 6 array of all cross-type K functions a <- alltypes(lansing, "K") # extract first three marks only b <- a[1:3,1:3] \dontrun{plot(b)} # subset pertaining to hickories h <- a[levels(lansing$marks) == "hickory", ] \dontrun{plot(h)} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/bermantest.Rd0000755000176000001440000001411412237642732015367 0ustar ripleyusers\name{bermantest} %DontDeclareMethods \alias{bermantest} \alias{bermantest.ppm} \alias{bermantest.ppp} \alias{bermantest.lppm} \alias{bermantest.lpp} \title{Berman's Tests for Point Process Model} \description{ Tests the goodness-of-fit of a Poisson point process model using methods of Berman (1986). } \usage{ bermantest(...) \method{bermantest}{ppp}(X, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{bermantest}{ppm}(model, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{bermantest}{lpp}(X, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) \method{bermantest}{lppm}(model, covariate, which = c("Z1", "Z2"), alternative = c("two.sided", "less", "greater"), ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}). } \item{model}{ A fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{covariate}{ The spatial covariate on which the test will be based. An image (object of class \code{"im"}) or a function. } \item{which}{ Character string specifying the choice of test. } \item{alternative}{ Character string specifying the alternative hypothesis. } \item{\dots}{ Ignored. 
} } \details{ These functions perform a goodness-of-fit test of a Poisson point process model fitted to point pattern data. The observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same values under the model, are compared using either of two test statistics \eqn{Z_1}{Z[1]} and \eqn{Z_2}{Z[2]} proposed by Berman (1986). The function \code{bermantest} is generic, with methods for point patterns (\code{"ppp"} or \code{"lpp"}) and point process models (\code{"ppm"} or \code{"lppm"}). \itemize{ \item If \code{X} is a point pattern dataset (object of class \code{"ppp"} or \code{"lpp"}), then \code{bermantest(X, ...)} performs a goodness-of-fit test of the uniform Poisson point process (Complete Spatial Randomness, CSR) for this dataset. \item If \code{model} is a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}) then \code{bermantest(model, ...)} performs a test of goodness-of-fit for this fitted model. In this case, \code{model} should be a Poisson point process. } The test is performed by comparing the observed distribution of the values of a spatial covariate at the data points, and the predicted distribution of the same covariate under the model. Thus, you must nominate a spatial covariate for this test. The argument \code{covariate} should be either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) containing the values of a spatial function. If \code{covariate} is an image, it should have numeric values, and its domain should cover the observation window of the \code{model}. If \code{covariate} is a function, it should expect two arguments \code{x} and \code{y} which are vectors of coordinates, and it should return a numeric vector of the same length as \code{x} and \code{y}. First the original data point pattern is extracted from \code{model}. The values of the \code{covariate} at these data points are collected. 
Next the values of the \code{covariate} at all locations in the observation window are evaluated. The point process intensity of the fitted model is also evaluated at all locations in the window. \itemize{ \item If \code{which="Z1"}, the test statistic \eqn{Z_1}{Z[1]} is computed as follows. The sum \eqn{S} of the covariate values at all data points is evaluated. The predicted mean \eqn{\mu}{mu} and variance \eqn{\sigma^2}{sigma^2} of \eqn{S} are computed from the values of the covariate at all locations in the window. Then we compute \eqn{Z_1 = (S-\mu)/\sigma}{Z[1]=(S-mu)/sigma}. \item If \code{which="Z2"}, the test statistic \eqn{Z_2}{Z[2]} is computed as follows. The values of the \code{covariate} at all locations in the observation window, weighted by the point process intensity, are compiled into a cumulative distribution function \eqn{F}. The probability integral transformation is then applied: the values of the \code{covariate} at the original data points are transformed by the predicted cumulative distribution function \eqn{F} into numbers between 0 and 1. If the model is correct, these numbers are i.i.d. uniform random numbers. The standardised sample mean of these numbers is the statistic \eqn{Z_2}{Z[2]}. } In both cases the null distribution of the test statistic is the standard normal distribution, approximately. The return value is an object of class \code{"htest"} containing the results of the hypothesis test. The print method for this class gives an informative summary of the test outcome. } \value{ An object of class \code{"htest"} (hypothesis test) and also of class \code{"bermantest"}, containing the results of the test. The return value can be plotted (by \code{\link{plot.bermantest}}) or printed to give an informative summary of the test. } \section{Warning}{ The meaning of a one-sided test must be carefully scrutinised: see the printed output. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{kstest}}, \code{\link{quadrat.test}}, \code{\link{ppm}} } \references{ Berman, M. (1986) Testing for spatial association between a point process and another stochastic process. \emph{Applied Statistics} \bold{35}, 54--62. } \examples{ # Berman's data data(copper) X <- copper$SouthPoints L <- copper$SouthLines D <- distmap(L, eps=1) # test of CSR bermantest(X, D) bermantest(X, D, "Z2") } \keyword{htest} \keyword{spatial} spatstat/man/clickppp.Rd0000755000176000001440000000544212237642732015034 0ustar ripleyusers\name{clickppp} \alias{clickppp} \title{Interactively Add Points} \description{ Allows the user to create a point pattern by point-and-click in the display. } \usage{ clickppp(n=NULL, win=square(1), types=NULL, \dots, add=FALSE, main=NULL, hook=NULL) } \arguments{ \item{n}{ Number of points to be added (if this is predetermined). } \item{win}{ Window in which to create the point pattern. An object of class \code{"owin"}. } \item{types}{ Vector of types, when creating a multitype point pattern. } \item{\dots}{ Optional extra arguments to be passed to \code{\link{locator}} to control the display. } \item{add}{ Logical value indicating whether to create a new plot (\code{add=FALSE}) or draw over the existing plot (\code{add=TRUE}). } \item{main}{ Main heading for plot. } \item{hook}{For internal use only. Do not use this argument.} } \value{ A point pattern (object of class \code{"ppp"}). } \details{ This function allows the user to create a point pattern by interactively clicking on the screen display. First the window \code{win} is plotted on the current screen device. Then the user is prompted to point the mouse at any desired locations and click the left mouse button to add each point. 
Interactive input stops after \code{n} clicks (if \code{n} was given) or when the middle mouse button is pressed. The return value is a point pattern containing the locations of all the clicked points inside the original window \code{win}, provided that all of the clicked locations were inside this window. Otherwise, the window is expanded to a box large enough to contain all the points (as well as containing the original window). If the argument \code{types} is given, then a multitype point pattern will be created. The user is prompted to input the locations of points of type \code{type[i]}, for each successive index \code{i}. (If the argument \code{n} was given, there will be \code{n} points of \emph{each} type.) The return value is a multitype point pattern. This function uses the \R{} command \code{\link{locator}} to input the mouse clicks. It only works on screen devices such as \sQuote{X11}, \sQuote{windows} and \sQuote{quartz}. Arguments that can be passed to \code{\link{locator}} through \code{\dots} include \code{pch} (plotting character), \code{cex} (character expansion factor) and \code{col} (colour). See \code{\link{locator}} and \code{\link{par}}. } \seealso{ \code{\link{identify.ppp}}, \code{\link{locator}}, \code{\link{clickpoly}} } \author{Original by Dominic Schuhmacher. Adapted by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{iplot} spatstat/man/rCauchy.Rd0000644000176000001440000001001412251535221014577 0ustar ripleyusers\name{rCauchy} \alias{rCauchy} \title{Simulate Neyman-Scott Point Process with Cauchy cluster kernel} \description{ Generate a random point pattern, a simulated realisation of the Neyman-Scott process with Cauchy cluster kernel. } \usage{ rCauchy(kappa, omega, mu, win = owin(), eps = 0.001) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. 
A single positive number, a function, or a pixel image. } \item{omega}{ Scale parameter for cluster kernel. Determines the size of clusters. A positive number, in the same units as the spatial coordinates. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{eps}{ Threshold below which the values of the cluster kernel will be treated as zero for simulation purposes. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). Additionally, some intermediate results of the simulation are returned as attributes of this point pattern. See \code{\link{rNeymanScott}}. } \details{ This algorithm generates a realisation of the Neyman-Scott process with Cauchy cluster kernel, inside the window \code{win}. The process is constructed by first generating a Poisson point process of ``parent'' points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of points, the number of points in each cluster being random with a Poisson (\code{mu}) distribution, and the points being placed independently and uniformly according to a Cauchy kernel. In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. This model can be fitted to data by the method of minimum contrast, using \code{\link{cauchy.estK}}, \code{\link{cauchy.estpcf}} or \code{\link{kppm}}. The algorithm can also generate spatially inhomogeneous versions of the cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. 
If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007).
} \usage{ \method{density}{psp}(x, sigma, \dots, edge=TRUE) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}) to be smoothed. } \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} which determine the resolution of the resulting image. } \item{edge}{ Logical flag indicating whether to apply edge correction. } } \value{ A pixel image (object of class \code{"im"}). } \details{ This is a method for the generic function \code{\link{density}}. A kernel estimate of the intensity of the line segment pattern is computed. The result is the convolution of the isotropic Gaussian kernel, of standard deviation \code{sigma}, with the line segments. Computation is performed analytically. If \code{edge=TRUE} this result is adjusted for edge effects by dividing it by the convolution of the same Gaussian kernel with the observation window. } \seealso{ \code{\link{psp.object}}, \code{\link{im.object}}, \code{\link{density}} } \examples{ L <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) D <- density(L, sigma=0.03) plot(D, main="density(L)") plot(L, add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/pool.quadrattest.Rd0000644000176000001440000000522612237642733016536 0ustar ripleyusers\name{pool.quadrattest} \alias{pool.quadrattest} \title{ Pool Several Quadrat Tests } \description{ Pool several quadrat tests into a single quadrat test. } \usage{ \method{pool}{quadrattest}(..., df=NULL, df.est=NULL, nsim=1999, Xname=NULL) } \arguments{ \item{\dots}{ Any number of objects, each of which is a quadrat test (object of class \code{"quadrattest"}). } \item{df}{ Optional. Number of degrees of freedom of the test statistic. Relevant only for \eqn{\chi^2}{chi^2} tests. 
Incompatible with \code{df.est}. } \item{df.est}{ Optional. The number of fitted parameters, or the number of degrees of freedom lost by estimation of parameters. Relevant only for \eqn{\chi^2}{chi^2} tests. Incompatible with \code{df}. } \item{nsim}{ Number of simulations, for Monte Carlo test. } \item{Xname}{ Optional. Name of the original data. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"quadrattest"}. An object of class \code{"quadrattest"} represents a \eqn{\chi^2}{chi^2} test or Monte Carlo test of goodness-of-fit for a point process model, based on quadrat counts. Such objects are created by the command \code{\link{quadrat.test}}. Each of the arguments \code{\dots} must be an object of class \code{"quadrattest"}. They must all be the same type of test (chi-squared test or Monte Carlo test, conditional or unconditional) and must all have the same type of alternative hypothesis. The test statistic of the pooled test is the Pearson \eqn{X^2} statistic taken over all cells (quadrats) of all tests. The \eqn{p} value of the pooled test is then computed using either a Monte Carlo test or a \eqn{\chi^2}{chi^2} test. For a pooled \eqn{\chi^2}{chi^2} test, the number of degrees of freedom of the combined test is computed by adding the degrees of freedom of all the tests (equivalent to assuming the tests are independent) unless it is determined by the arguments \code{df} or \code{df.est}. The resulting \eqn{p} value is computed to obtain the pooled test. For a pooled Monte Carlo test, new simulations are performed to determine the pooled Monte Carlo \eqn{p} value. } \value{ Another object of class \code{"quadrattest"}. 
\item{drop}{Logical value determining whether each subset should be returned as a pixel image (\code{drop=FALSE}) or as a one-dimensional vector of pixel values (\code{drop=TRUE}).
} \value{ If \code{drop=FALSE}, a list of pixel images (objects of class \code{"im"}). It is also of class \code{"listof"} so that it can be plotted immediately. If \code{drop=TRUE}, a list of numeric vectors. } \seealso{ \code{\link{by.im}}, \code{\link{tess}}, \code{\link{im}} } \examples{ W <- square(1) X <- as.im(function(x,y){sqrt(x^2+y^2)}, W) Y <- dirichlet(runifpoint(12, W)) plot(split(X,Y)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/pcf.ppp.Rd0000755000176000001440000001513412251747576014604 0ustar ripleyusers\name{pcf.ppp} \alias{pcf.ppp} \title{Pair Correlation Function of Point Pattern} \description{ Estimates the pair correlation function of a point pattern using kernel methods. } \usage{ \method{pcf}{ppp}(X, \dots, r = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction=c("translate", "Ripley"), divisor = c("r", "d")) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{density}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{density}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{density}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See Details. } } \value{ A function value table (object of class \code{"fv"}). 
Essentially a data frame containing the variables \item{r}{the vector of values of the argument \eqn{r} at which the pair correlation function \eqn{g(r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g(r)} for the Poisson process } \item{trans}{vector of values of \eqn{g(r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g(r)} estimated by Ripley isotropic correction } as required. } \details{ The pair correlation function \eqn{g(r)} is a summary of the dependence between points in a spatial point process. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda^2 g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda^2 * g(r) dx dy } where \eqn{\lambda}{lambda} is the intensity of the point process. For a completely random (uniform Poisson) process, \eqn{p(r) = \lambda^2}{p(r) = lambda^2} so \eqn{g(r) = 1}. Formally, the pair correlation function of a stationary point process is defined by \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. This routine computes an estimate of \eqn{g(r)} by kernel smoothing. \itemize{ \item If \code{divisor="r"} (the default), then the standard kernel estimator (Stoyan and Stoyan, 1994, pages 284--285) is used. By default, the recommendations of Stoyan and Stoyan (1994) are followed exactly. 
Stoyan and Stoyan (1994, page 285) recommend using the Epanechnikov kernel with support \eqn{[-h,h]} chosen by the rule of thumb \eqn{h = c/\sqrt{\lambda}}{h = c/sqrt(lambda)}, where \eqn{\lambda}{lambda} is the (estimated) intensity of the point process, and \eqn{c} is a constant in the range from 0.1 to 0.2.
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. If it is specified, \code{r} must be a vector of increasing numbers starting from \code{r[1] = 0}, and \code{max(r)} must not exceed half the diameter of the window. To compute a confidence band for the true value of the pair correlation function, use \code{\link{lohboot}}. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{density}}, \code{\link{lohboot}}. } \examples{ data(simdat) \testonly{ simdat <- simdat[seq(1,simdat$n, by=4)] } p <- pcf(simdat) plot(p, main="pair correlation function for simdat") # indicates inhibition at distances r < 0.3 pd <- pcf(simdat, divisor="d") # compare estimates plot(p, cbind(iso, theo) ~ r, col=c("blue", "red"), ylim.covers=0, main="", lwd=c(2,1), lty=c(1,3), legend=FALSE) plot(pd, iso ~ r, col="green", lwd=2, add=TRUE) legend("center", col=c("blue", "green"), lty=1, lwd=2, legend=c("divisor=r","divisor=d")) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Rolf Turner \email{r.turner@auckland.ac.nz} and Martin Hazelton. } \keyword{spatial} \keyword{nonparametric} spatstat/man/rMaternII.Rd0000755000176000001440000000461112237642734015060 0ustar ripleyusers\name{rMaternII} \alias{rMaternII} \title{Simulate Matern Model II} \description{ Generate a random point pattern, a simulated realisation of the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model II inhibition process. } \usage{ rMaternII(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of proposal points. A single positive number. } \item{r}{ Inhibition distance. } \item{win}{ Window in which to simulate the pattern. 
An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{stationary}{ Logical. Whether to start with a stationary process of proposal points (\code{stationary=TRUE}) or to generate the proposal points only inside the window (\code{stationary=FALSE}). } } \value{ The simulated point pattern (an object of class \code{"ppp"}). } \details{ This algorithm generates a realisation of \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model II inhibition process inside the window \code{win}. The process is constructed by first generating a uniform Poisson point process of ``proposal'' points with intensity \code{kappa}. If \code{stationary = TRUE} (the default), the proposal points are generated in a window larger than \code{win} that effectively means the proposals are stationary. If \code{stationary=FALSE} then the proposal points are only generated inside the window \code{win}. Then each proposal point is marked by an ``arrival time'', a number uniformly distributed in \eqn{[0,1]} independently of other variables. A proposal point is deleted if it lies within \code{r} units' distance of another proposal point \emph{that has an earlier arrival time}. Otherwise it is retained. The retained points constitute \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model II. The difference between \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model I and II is the italicised statement above. Model II has a higher intensity for the same parameter values. 
} \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rMaternI}} } \examples{ X <- rMaternII(20, 0.05) Y <- rMaternII(20, 0.05, stationary=FALSE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Ute Hahn, and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/Gest.Rd0000755000176000001440000002035512237642731014130 0ustar ripleyusers\name{Gest} \alias{Gest} \alias{nearest.neighbour} \title{ Nearest Neighbour Distance Function G } \description{ Estimates the nearest neighbour distance distribution function \eqn{G(r)} from a point pattern in a window of arbitrary shape. } \usage{ Gest(X, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{G(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{G(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{\dots}{Ignored.} \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{G(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing some or all of the following columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{G(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G(r)}, i.e. the empirical distribution of the distances from each point in the pattern \code{X} to the nearest other point of the pattern } \item{han}{the Hanisch correction estimator of \eqn{G(r)} } \item{theo}{the theoretical value of \eqn{G(r)} for a stationary Poisson process of the same estimated intensity. } } \details{ The nearest neighbour distance distribution function (also called the ``\emph{event-to-event}'' or ``\emph{inter-event}'' distribution) of a point process \eqn{X} is the cumulative distribution function \eqn{G} of the distance from a typical random point of \eqn{X} to the nearest other point of \eqn{X}. An estimate of \eqn{G} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). In exploratory analyses, the estimate of \eqn{G} is a useful statistic summarising one aspect of the ``clustering'' of points. For inferential purposes, the estimate of \eqn{G} is usually compared to the true value of \eqn{G} for a completely random (Poisson) point process, which is \deqn{G(r) = 1 - e^{ - \lambda \pi r^2} }{% G(r) = 1 - exp( - lambda * pi * r^2)} where \eqn{\lambda}{lambda} is the intensity (expected number of points per unit area). Deviations between the empirical and theoretical \eqn{G} curves may suggest spatial clustering or spatial regularity. This algorithm estimates the nearest neighbour distance distribution function \eqn{G} from the point pattern \code{X}. 
It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. The estimation of \eqn{G} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The edge corrections implemented here are the border method or ``\emph{reduced sample}'' estimator, the spatial Kaplan-Meier estimator (Baddeley and Gill, 1997) and the Hanisch estimator (Hanisch, 1984). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The estimators are computed from histogram counts. This introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G(r)}. The hazard rate is defined as the derivative \deqn{\lambda(r) = - \frac{d}{dr} \log (1 - G(r))}{% lambda(r) = - (d/dr) log(1 - G(r))} This estimate should be used with caution as \eqn{G} is not necessarily differentiable. 
The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G}. However it is sometimes useful. It can be returned by the algorithm, by selecting \code{correction="none"}. Care should be taken not to use the uncorrected empirical \eqn{G} as if it were an unbiased estimator of \eqn{G}. To simply compute the nearest neighbour distance for each point in the pattern, use \code{\link{nndist}}. To determine which point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263-292. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Hanisch, K.-H. (1984) Some remarks on estimators of the distribution function of nearest-neighbour distance in stationary spatial point patterns. \emph{Mathematische Operationsforschung und Statistik, series Statistics} \bold{15}, 409--412. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The function \eqn{G} does not necessarily have a density. Any valid c.d.f. may appear as the nearest neighbour distance distribution function of a stationary point process. The reduced sample estimator of \eqn{G} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. 
Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{nndist}}, \code{\link{nnwhich}}, \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ data(cells) G <- Gest(cells) plot(G) # P-P style plot plot(G, cbind(km,theo) ~ theo) # the empirical G is below the Poisson G, # indicating an inhibited pattern \dontrun{ plot(G, . ~ r) plot(G, . ~ theo) plot(G, asin(sqrt(.)) ~ asin(sqrt(theo))) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/nnwhich.Rd0000755000176000001440000001335412237642733014667 0ustar ripleyusers\name{nnwhich} \alias{nnwhich} \alias{nnwhich.ppp} \alias{nnwhich.default} \title{Nearest neighbour} \description{ Finds the nearest neighbour of each point in a point pattern. } \usage{ nnwhich(X, \dots) \method{nnwhich}{ppp}(X, \dots, k=1, by=NULL, method="C") \method{nnwhich}{default}(X, Y=NULL, \dots, k=1, by=NULL, method="C") } \arguments{ \item{X,Y}{ Arguments specifying the locations of a set of points. For \code{nnwhich.ppp}, the argument \code{X} should be a point pattern (object of class \code{"ppp"}). For \code{nnwhich.default}, typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components \code{x} and \code{y}, or a matrix with two columns. } \item{\dots}{ Ignored by \code{nnwhich.ppp} and \code{nnwhich.default}. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{by}{ Optional. A factor, which separates \code{X} into groups. The algorithm will find the nearest neighbour in each group. 
} \item{method}{String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } } \value{ Numeric vector or matrix giving, for each point, the index of its nearest neighbour (or \code{k}th nearest neighbour). If \code{k = 1} (the default), the return value is a numeric vector \code{v} giving the indices of the nearest neighbours (the nearest neighbour of the \code{i}th point is the \code{j}th point where \code{j = v[i]}). If \code{k} is a single integer, then the return value is a numeric vector giving the indices of the \code{k}th nearest neighbours. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the index of the \code{k[j]}th nearest neighbour for the \code{i}th data point. If the argument \code{by} is given, then the result is a data frame containing the indices described above, from each point of \code{X}, to the nearest point in each subset of \code{X} defined by the factor \code{by}. } \details{ For each point in the given point pattern, this function finds its nearest neighbour (the nearest other point of the pattern). By default it returns a vector giving, for each point, the index of the point's nearest neighbour. If \code{k} is specified, the algorithm finds each point's \code{k}th nearest neighbour. The function \code{nnwhich} is generic, with methods for point patterns (objects of class \code{"ppp"}) and a default method which are described here, as well as a method for three-dimensional point patterns (objects of class \code{"pp3"}), described in \code{\link{nnwhich.pp3}}. The method \code{nnwhich.ppp} expects a single point pattern argument \code{X}. The default method expects that \code{X} and \code{Y} will determine the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. 
Alternatively \code{Y} may be omitted and \code{X} may be a list with two components named \code{x} and \code{y}, or a matrix or data frame with two columns. The argument \code{k} may be a single integer, or an integer vector. If it is a vector, then the \eqn{k}th nearest neighbours are found for each value of \eqn{k} specified in the vector. If the argument \code{by} is given, it should be a \code{factor}, of length equal to the number of points in \code{X}. This factor effectively partitions \code{X} into subsets, each subset associated with one of the levels of \code{by}. The algorithm will then find, for each point of \code{X}, the nearest neighbour \emph{in each subset}. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. If there is only one point (if \code{x} has length 1), then the nearest neighbour is undefined, and a value of \code{NA} is returned. In general if the number of points is less than or equal to \code{k}, then a vector of \code{NA}'s is returned. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by two to three orders of magnitude and uses much less memory. To evaluate the \emph{distance} between a point and its nearest neighbour, use \code{\link{nndist}}. To find the nearest neighbours from one point pattern to another point pattern, use \code{\link{nncross}}. } \section{Nearest neighbours of each type}{ If \code{X} is a multitype point pattern and \code{by=marks(X)}, then the algorithm will find, for each point of \code{X}, the nearest neighbour of each type. See the Examples. } \section{Warnings}{ A value of \code{NA} is returned if there is only one point in the point pattern. 
} \seealso{ \code{\link{nndist}}, \code{\link{nncross}} } \examples{ data(cells) plot(cells) m <- nnwhich(cells) m2 <- nnwhich(cells, k=2) # plot nearest neighbour links b <- cells[m] arrows(cells$x, cells$y, b$x, b$y, angle=15, length=0.15, col="red") # find points which are the neighbour of their neighbour self <- (m[m] == seq(m)) # plot them A <- cells[self] B <- cells[m[self]] plot(cells) segments(A$x, A$y, B$x, B$y) # nearest neighbours of each type head(nnwhich(ants, by=marks(ants))) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/is.marked.ppp.Rd0000755000176000001440000000353712237642732015705 0ustar ripleyusers\name{is.marked.ppp} \alias{is.marked.ppp} \title{Test Whether A Point Pattern is Marked} \description{ Tests whether a point pattern has ``marks'' attached to the points. } \usage{ \method{is.marked}{ppp}(X, na.action="warn", \dots) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}) } \item{na.action}{ String indicating what to do if \code{NA} values are encountered amongst the marks. Options are \code{"warn"}, \code{"fatal"} and \code{"ignore"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a marked point pattern. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. This function tests whether the point pattern \code{X} contains or involves marked points. It is a method for the generic function \code{\link{is.marked}}. 
The argument \code{na.action} determines what action will be taken if the point pattern has a vector of marks but some or all of the marks are \code{NA}. Options are \code{"fatal"} to cause a fatal error; \code{"warn"} to issue a warning and then return \code{TRUE}; and \code{"ignore"} to take no action except returning \code{TRUE}. } \seealso{ \code{\link{is.marked}}, \code{\link{is.marked.ppm}} } \examples{ data(cells) is.marked(cells) #FALSE data(longleaf) is.marked(longleaf) #TRUE } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/print.psp.Rd0000755000176000001440000000152712237642733015165 0ustar ripleyusers\name{print.psp} \alias{print.psp} \title{Print Brief Details of a Line Segment Pattern Dataset} \description{ Prints a very brief description of a line segment pattern dataset. } \usage{ \method{print}{psp}(x, \dots) } \arguments{ \item{x}{Line segment pattern (object of class \code{"psp"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the line segment pattern \code{x} is printed. This is a method for the generic function \code{\link{print}}. 
} \seealso{ \code{\link{print}}, \code{\link{print.owin}}, \code{\link{summary.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) a } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{print} spatstat/man/is.stationary.Rd0000755000176000001440000000577112237642732016043 0ustar ripleyusers\name{is.stationary} \alias{is.stationary} \alias{is.stationary.ppm} \alias{is.stationary.kppm} \alias{is.stationary.lppm} \alias{is.stationary.slrm} \alias{is.stationary.rmhmodel} \alias{is.poisson} \alias{is.poisson.ppm} \alias{is.poisson.kppm} \alias{is.poisson.lppm} \alias{is.poisson.slrm} \alias{is.poisson.rmhmodel} \alias{is.poisson.interact} \title{ Recognise Stationary and Poisson Point Process Models } \description{ Given a point process model that has been fitted to data, determine whether the model is a stationary point process, and whether it is a Poisson point process. } \usage{ is.stationary(x) \method{is.stationary}{ppm}(x) \method{is.stationary}{kppm}(x) \method{is.stationary}{lppm}(x) \method{is.stationary}{slrm}(x) \method{is.stationary}{rmhmodel}(x) is.poisson(x) \method{is.poisson}{ppm}(x) \method{is.poisson}{kppm}(x) \method{is.poisson}{lppm}(x) \method{is.poisson}{slrm}(x) \method{is.poisson}{rmhmodel}(x) \method{is.poisson}{interact}(x) } \arguments{ \item{x}{ A fitted spatial point process model (object of class \code{"ppm"}, \code{"kppm"}, \code{"lppm"} or \code{"slrm"}) or similar object. } } \details{ The argument \code{x} represents a fitted spatial point process model or a similar object. \code{is.stationary(x)} returns \code{TRUE} if \code{x} represents a stationary point process, and \code{FALSE} if not. \code{is.poisson(x)} returns \code{TRUE} if \code{x} represents a Poisson point process, and \code{FALSE} if not. 
The functions \code{is.stationary} and \code{is.poisson} are generic, with methods for the classes \code{"ppm"} (Gibbs point process models), \code{"kppm"} (cluster or Cox point process models), \code{"slrm"} (spatial logistic regression models) and \code{"rmhmodel"} (model specifications for the Metropolis-Hastings algorithm). Additionally \code{is.poisson} has a method for class \code{"interact"} (interaction structures for Gibbs models). \code{is.poisson.kppm} will return \code{FALSE}, unless the model \code{x} is degenerate: either \code{x} has zero intensity so that its realisations are empty with probability 1, or it is a log-Gaussian Cox process where the log intensity has zero variance. \code{is.poisson.slrm} will always return \code{TRUE}, by convention. } \value{ A logical value. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{is.marked}} to determine whether a model is a marked point process. \code{\link{summary.ppm}} for detailed information. Model-fitting functions \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{lppm}}, \code{\link{slrm}}. } \examples{ data(cells) data(redwood) fit <- ppm(cells, ~x) is.stationary(fit) is.poisson(fit) fut <- kppm(redwood, ~1, "MatClust") is.stationary(fut) is.poisson(fut) fot <- slrm(cells ~ x) is.stationary(fot) is.poisson(fot) } \keyword{spatial} \keyword{models} spatstat/man/LennardJones.Rd0000755000176000001440000001173312237642731015610 0ustar ripleyusers\name{LennardJones} \alias{LennardJones} \title{The Lennard-Jones Potential} \description{ Creates the Lennard-Jones pairwise interaction structure which can then be fitted to point pattern data. } \usage{ LennardJones(sigma0=NA) } \value{ An object of class \code{"interact"} describing the Lennard-Jones interpoint interaction structure. } \arguments{ \item{sigma0}{ Optional. Initial estimate of the parameter \eqn{\sigma}{sigma}. 
A positive number. } } \details{ In a pairwise interaction point process with the Lennard-Jones pair potential (Lennard-Jones, 1924) each pair of points in the point pattern, a distance \eqn{d} apart, contributes a factor \deqn{ v(d) = \exp \left\{ - 4\epsilon \left[ \left( \frac{\sigma}{d} \right)^{12} - \left( \frac{\sigma}{d} \right)^6 \right] \right\} }{ v(d) = exp( - 4 * epsilon * ((sigma/d)^12 - (sigma/d)^6)) } to the probability density, where \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} are positive parameters to be estimated. See \bold{Examples} for a plot of this expression. This potential causes very strong inhibition between points at short range, and attraction between points at medium range. The parameter \eqn{\sigma}{sigma} is called the \emph{characteristic diameter} and controls the scale of interaction. The parameter \eqn{\epsilon}{epsilon} is called the \emph{well depth} and determines the strength of attraction. The potential switches from inhibition to attraction at \eqn{d=\sigma}{d=sigma}. The maximum value of the pair potential is \eqn{\exp(\epsilon)}{exp(epsilon)} occurring at distance \eqn{d = 2^{1/6} \sigma}{d = 2^(1/6) * sigma}. Interaction is usually considered to be negligible for distances \eqn{d > 2.5 \sigma \max\{1,\epsilon^{1/6}\}}{d > 2.5 * sigma * max(1, epsilon^(1/6))}. This potential is used to model interactions between uncharged molecules in statistical physics. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Lennard-Jones pairwise interaction is yielded by the function \code{LennardJones()}. See the examples below. } \section{Rescaling}{ To avoid numerical instability, the interpoint distances \code{d} are rescaled when fitting the model. Distances are rescaled by dividing by \code{sigma0}. 
In the formula for \eqn{v(d)} above, the interpoint distance \eqn{d} will be replaced by \code{d/sigma0}. The rescaling happens automatically by default. If the argument \code{sigma0} is missing or \code{NA} (the default), then \code{sigma0} is taken to be the minimum nearest-neighbour distance in the data point pattern (in the call to \code{\link{ppm}}). If the argument \code{sigma0} is given, it should be a positive number, and it should be a rough estimate of the parameter \eqn{\sigma}{sigma}. The ``canonical regular parameters'' estimated by \code{\link{ppm}} are \eqn{\theta_1 = 4 \epsilon (\sigma/\sigma_0)^{12}}{theta1 = 4 * epsilon * (sigma/sigma0)^12} and \eqn{\theta_2 = 4 \epsilon (\sigma/\sigma_0)^6}{theta2 = 4 * epsilon * (sigma/sigma0)^6}. } \section{Warnings and Errors}{ Fitting the Lennard-Jones model is extremely unstable, because of the strong dependence between the functions \eqn{d^{-12}}{d^(-12)} and \eqn{d^{-6}}{d^(-6)}. The fitting algorithm often fails to converge. Try increasing the number of iterations of the GLM fitting algorithm, by setting \code{gcontrol=list(maxit=1e3)} in the call to \code{\link{ppm}}. Errors are likely to occur if this model is fitted to a point pattern dataset which does not exhibit both short-range inhibition and medium-range attraction between points. The values of the parameters \eqn{\sigma}{sigma} and \eqn{\epsilon}{epsilon} may be \code{NA} (because the fitted canonical parameters have opposite sign, which usually occurs when the pattern is completely random). An absence of warnings does not mean that the fitted model is sensible. A negative value of \eqn{\epsilon}{epsilon} may be obtained (usually when the pattern is strongly clustered); this does not correspond to a valid point process model, but the software does not issue a warning. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \examples{ data(demopat) demopat X <- unmark(demopat) X fit <- ppm(X, ~1, LennardJones(), rbord=500) fit plot(fitin(fit), xlim=c(0,50)) } \references{ Lennard-Jones, J.E. (1924) On the determination of molecular fields. \emph{Proc Royal Soc London A} \bold{106}, 463--477. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/redwoodfull.Rd0000755000176000001440000001006312237642733015551 0ustar ripleyusers\name{redwoodfull} \alias{redwoodfull} \alias{redwoodfull.extra} \docType{data} \title{ California Redwoods Point Pattern (Entire Dataset) } \description{ These data represent the locations of 195 seedlings and saplings of California redwood trees in a square sampling region. They were described and analysed by Strauss (1975). This is the ``\bold{full}'' dataset; most writers have analysed a subset extracted by Ripley (1977) which is available as \code{\link{redwood}}. Strauss (1975) divided the sampling region into two subregions I and II demarcated by a diagonal line. The spatial pattern appears to be slightly regular in region I and strongly clustered in region II. Strauss (1975) writes: \dQuote{It was felt that the seedlings would be scattered fairly randomly, except that a number of tight clusters would form around some of the redwood tree stumps present in the plot. A discontinuity in the soil, very roughly demarked by the diagonal line in the figure, was expected to cause a difference in clustering behaviour between regions I and II. Moreover, almost all the redwood stumps were situated in region II.} The dataset \code{redwoodfull} contains the full point pattern of 195 trees. The window has been rescaled to the unit square. Its physical size is approximately 130 feet across. 
The auxiliary information about the subregions is contained in \code{redwoodfull.extra}, which is a list with entries \tabular{ll}{ \code{rdiag}\tab The coordinates of the diagonal boundary\cr \tab between regions I and II \cr \code{regionI} \tab Region I as a window object \cr \code{regionII} \tab Region II as a window object \cr \code{regionR} \tab Ripley's subrectangle (approximate) \cr \code{plotit} \tab Function to plot the full data and auxiliary markings } Ripley (1977) extracted a subset of these data, containing 62 points, lying within a square subregion which overlaps regions I and II. He rescaled that subset to the unit square. This subset has been re-analysed many times, and is the dataset usually known as ``the redwood data'' in the spatial statistics literature. The exact dataset used by Ripley is supplied in the \pkg{spatstat} library as \code{\link{redwood}}. The approximate position of the square chosen by Ripley within the \code{redwoodfull} pattern is indicated by the window \code{redwoodfull.extra$regionR}. There are some minor inconsistencies with \code{redwood} since it originates from a different digitisation. } \format{ The dataset \code{redwoodfull} is an object of class \code{"ppp"} representing the point pattern of tree locations. See \code{\link{ppp.object}} for details of the format of a point pattern object. The window has been rescaled to the unit square. Its physical size is approximately 128 feet across. 
The dataset \code{redwoodfull.extra} is a list with entries \tabular{ll}{ \code{rdiag}\tab coordinates of endpoints of a line,\cr \tab in format \code{list(x=numeric(2),y=numeric(2))} \cr \code{regionI} \tab a window object \cr \code{regionII} \tab a window object \cr \code{regionR} \tab a window object \cr \code{plotit} \tab Function with no arguments } } \usage{data(redwoodfull)} \examples{ data(redwoodfull) plot(redwoodfull) redwoodfull.extra$plotit() # extract the pattern in region II redwoodII <- redwoodfull[, redwoodfull.extra$regionII] } \source{Strauss (1975). The plot of the data published by Strauss (1975) was scanned and digitised by Sandra Pereira, University of Western Australia, 2002. } \seealso{ \code{\link{redwood}} } \references{ Diggle, P.J. (1983) \emph{Statistical analysis of spatial point patterns}. Academic Press. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B} \bold{39}, 172--212. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \keyword{datasets} \keyword{spatial} spatstat/man/closing.Rd0000755000176000001440000000504612243310060014650 0ustar ripleyusers\name{closing} %DontDeclareMethods \alias{closing} \alias{closing.owin} \alias{closing.ppp} \alias{closing.psp} \title{Morphological Closing} \description{ Perform morphological closing of a window, a line segment pattern or a point pattern. } \usage{ closing(w, r, \dots) \method{closing}{owin}(w, r, \dots, polygonal=NULL) \method{closing}{ppp}(w, r, \dots, polygonal=TRUE) \method{closing}{psp}(w, r, \dots, polygonal=TRUE) } \arguments{ \item{w}{ A window (object of class \code{"owin"}) or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). 
} \item{r}{positive number: the radius of the closing.} \item{\dots}{extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution, if a pixel approximation is used} \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the closing (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the closed region. If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological closing (Serra, 1982) of a set \eqn{W} by a distance \eqn{r > 0} is the set of all points that cannot be separated from \eqn{W} by any circle of radius \eqn{r}. That is, a point \eqn{x} belongs to the closing \eqn{W*} if it is impossible to draw any circle of radius \eqn{r} that has \eqn{x} on the inside and \eqn{W} on the outside. The closing \eqn{W*} contains the original set \eqn{W}. For a small radius \eqn{r}, the closing operation has the effect of smoothing out irregularities in the boundary of \eqn{W}. For larger radii, the closing operation smooths out concave features in the boundary. For very large radii, the closed set \eqn{W*} becomes more and more convex. The algorithm applies \code{\link{dilation}} followed by \code{\link{erosion}}. } \seealso{ \code{\link{opening}} for the opposite operation. \code{\link{dilation}}, \code{\link{erosion}} for the basic operations. \code{\link{owin}}, \code{\link{as.owin}} for information about windows. } \examples{ v <- closing(letterR, 0.25) plot(v, main="closing") plot(letterR, add=TRUE) } \references{ Serra, J. (1982) Image analysis and mathematical morphology. Academic Press. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/pppdist.Rd0000755000176000001440000001746512237642733014723 0ustar ripleyusers\name{pppdist} \alias{pppdist} \title{Distance Between Two Point Patterns} \description{ Given two point patterns, find the distance between them based on optimal point matching. } \usage{ pppdist(X, Y, type = "spa", cutoff = 1, q = 1, matching = TRUE, ccode = TRUE, precision = NULL, approximation = 10, show.rprimal = FALSE, timelag = 0) } \arguments{ \item{X,Y}{Two point patterns (objects of class \code{"ppp"}).} \item{type}{ A character string giving the type of distance to be computed. One of \code{"spa"} (default), \code{"ace"} or \code{"mat"}, indicating whether the algorithm should find the optimal matching based on "subpattern assignment", "assignment only if cardinalities are equal" or "mass transfer". See details below. } \item{cutoff}{ The value \eqn{> 0} at which interpoint distances are cut off. } \item{q}{ The order of the average that is applied to the interpoint distances. May be \code{Inf}, in which case the maximum of the interpoint distances is taken. } \item{matching}{ Logical. Whether to return the optimal matching or only the associated distance. } \item{ccode}{ Logical. If \code{FALSE}, \R code is used which allows for higher precision, but is much slower. } \item{precision}{ Index controlling accuracy of algorithm. The \code{q}-th powers of interpoint distances will be rounded to the nearest multiple of \code{10^(-precision)}. There is a sensible default which depends on \code{ccode}. } \item{approximation}{ If \code{q = Inf}, compute distance based on the optimal matching for the corresponding distance of order \code{approximation}. Can be \code{Inf}, but this makes computations extremely slow. } \item{show.rprimal}{ Logical. 
Whether to display a plot showing the iterative solution of the restricted primal problem. } \item{timelag}{ Time lag, in seconds, between successive displays of the iterative solution of the restricted primal problem. } } \details{ Computes the distance between point patterns \code{X} and \code{Y} based on finding the matching between them which minimizes the average of the distances between matched points (if \code{q=1}), the maximum distance between matched points (if \code{q=Inf}), and in general the \code{q}-th order average (i.e. the \code{1/q}th power of the sum of the \code{q}th powers) of the distances between matched points. Distances between matched points are Euclidean distances cut off at the value of \code{cutoff}. The parameter \code{type} controls the behaviour of the algorithm if the cardinalities of the point patterns are different. For the type \code{"spa"} (subpattern assignment) the subpattern of the point pattern with the larger cardinality \eqn{n} that is closest to the point pattern with the smaller cardinality \eqn{m} is determined; then the \code{q}-th order average is taken over \eqn{n} values: the \eqn{m} distances of matched points and \eqn{n-m} "penalty distances" of value \code{cutoff} for the unmatched points. For the type \code{"ace"} (assignment only if cardinalities equal) the matching is empty and the distance returned is equal to \code{cutoff} if the cardinalities differ. For the type \code{"mat"} (mass transfer) each point pattern is assumed to have total mass \eqn{m} (= the smaller cardinality) distributed evenly among its points; the algorithm finds then the "mass transfer plan" that minimizes the \code{q}-th order weighted average of the distances, where the weights are given by the transferred mass divided by \eqn{m}. The result is a fractional matching (each match of two points has a weight in \eqn{(0,1]}) with the minimized quantity as the associated distance. 
The computations for all three types rely heavily on a specialized primal-dual algorithm (described in Luenberger (2003), Section 5.9) for Hitchcock's problem of optimal transport of a product from a number of suppliers to a number of (e.g. vending) locations. The C implementation used by default can handle patterns with a few hundreds of points, but should not be used with thousands of points. By setting \code{show.rprimal = TRUE}, some insight in the working of the algorithm can be gained. For moderate and large values of \code{q} there can be numerical issues based on the fact that the \code{q}-th powers of distances are taken and some positive values enter the optimization algorithm as zeroes because they are too small in comparison with the larger values. In this case the number of zeroes introduced is given in a warning message, and it is possible then that the matching obtained is not optimal and the associated distance is only a strict upper bound of the true distance. As a general guideline (which can be very wrong in special situations) a small number of zeroes (up to about 50\% of the smaller point pattern cardinality \eqn{m}) usually still results in the right matching, and the number can even be quite a bit higher and usually still provides a highly accurate upper bound for the distance. These numerical problems can be reduced by enforcing (much slower) \R code via the argument \code{ccode = FALSE}. For \code{q = Inf} there is no fast algorithm available, which is why approximation is normally used: for finding the optimal matching, \code{q} is set to the value of \code{approximation}. The resulting distance is still given as the maximum rather than the \code{q}-th order average in the corresponding distance computation. If \code{approximation = Inf}, approximation is suppressed and a very inefficient exhaustive search for the best matching is performed. The value of \code{precision} should normally not be supplied by the user. 
If \code{ccode = TRUE}, this value is preset to the highest exponent of 10 that the C code still can handle (usually \eqn{9}). If \code{ccode = FALSE}, the value is preset according to \code{q} (usually \eqn{15} if \code{q} is small), which can sometimes be changed to obtain less severe warning messages. } \value{ Normally an object of class \code{pppmatching} that contains detailed information about the parameters used and the resulting distance. See \code{\link{pppmatching.object}} for details. If \code{matching = FALSE}, only the numerical value of the distance is returned. } \references{ Hitchcock F.L. (1941) The distribution of a product from several sources to numerous localities. \emph{J. Math. Physics} \bold{20}, 224--230. Luenberger D.G. (2003). \emph{Linear and nonlinear programming.} Second edition. Kluwer. Schuhmacher, D. and Xia, A. (2008) A new metric between distributions of point processes. \emph{Advances in Applied Probability} \bold{40}, 651--672 Schuhmacher, D., Vo, B.-T. and Vo, B.-N. (2008) A consistent metric for performance evaluation of multi-object filters. \emph{IEEE Transactions on Signal Processing} \bold{56}, 3447--3457. 
} \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{pppmatching.object}} \code{\link{matchingdist}} } \examples{ # equal cardinalities X <- runifpoint(100) Y <- runifpoint(100) m <- pppdist(X, Y) m \dontrun{ plot(m) } # differing cardinalities X <- runifpoint(14) Y <- runifpoint(10) m1 <- pppdist(X, Y, type="spa") m2 <- pppdist(X, Y, type="ace") m3 <- pppdist(X, Y, type="mat") summary(m1) summary(m2) summary(m3) \dontrun{ m1$matrix m2$matrix m3$matrix } # q = Inf X <- runifpoint(10) Y <- runifpoint(10) mx1 <- pppdist(X, Y, q=Inf)$matrix mx2 <- pppdist(X, Y, q=Inf, ccode=FALSE, approximation=50)$matrix mx3 <- pppdist(X, Y, q=Inf, approximation=Inf)$matrix ((mx1 == mx2) && (mx2 == mx3)) # TRUE if approximations are good } \keyword{spatial} \keyword{math} spatstat/man/fitted.slrm.Rd0000755000176000001440000000235612237642732015463 0ustar ripleyusers\name{fitted.slrm} \Rdversion{1.1} \alias{fitted.slrm} \title{ Fitted Probabilities for Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel. } \usage{ \method{fitted}{slrm}(object, ...) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link[stats:fitted.values]{fitted}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The algorithm computes the fitted probabilities of the presence of a random point in each pixel. } \value{ A pixel image (object of class \code{"im"}) containing the fitted probability for each pixel. 
} \seealso{ \code{\link{slrm}}, \code{\link[stats:fitted.values]{fitted}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) plot(fitted(fit)) } \author{Adrian Baddeley \email{adrian@maths.uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/as.function.fv.Rd0000755000176000001440000000742512241624725016070 0ustar ripleyusers\name{as.function.fv} \alias{as.function.fv} \alias{as.function.rhohat} \title{ Convert Function Value Table to Function } \description{ Converts an object of class \code{"fv"} to an \R language function. } \usage{ \method{as.function}{fv}(x, ..., value=".y", extrapolate=FALSE) \method{as.function}{rhohat}(x, ..., value=".y", extrapolate=TRUE) } \arguments{ \item{x}{ Object of class \code{"fv"} or \code{"rhohat"}. } \item{\dots}{ Ignored. } \item{value}{ Optional. Character string or character vector selecting one or more of the columns of \code{x} for use as the function value. See Details. } \item{extrapolate}{ Logical, indicating whether to extrapolate the function outside the domain of \code{x}. See Details. } } \details{ A function value table (object of class \code{"fv"}) is a convenient way of storing and plotting several different estimates of the same function. Objects of this class are returned by many commands in \pkg{spatstat}, such as \code{\link{Kest}} which returns an estimate of Ripley's \eqn{K}-function for a point pattern dataset. Sometimes it is useful to convert the function value table to a \code{function} in the \R language. This is done by \code{as.function.fv}. It converts an object \code{x} of class \code{"fv"} to an \R function \code{f}. If \code{f <- as.function(x)} then \code{f} is an \R function that accepts a numeric argument and returns a corresponding value for the summary function by linear interpolation between the values in the table \code{x}. 
Argument values lying outside the range of the table yield an \code{NA} value (if \code{extrapolate=FALSE}) or the function value at the nearest endpoint of the range (if \code{extrapolate = TRUE}). Typically the table \code{x} contains several columns of function values corresponding to different edge corrections. Auxiliary information for the table identifies one of these columns as the \emph{recommended value}. By default, the values of the function \code{f <- as.function(x)} are taken from this column of recommended values. This default can be changed using the argument \code{value}, which can be a character string or character vector of names of columns of \code{x}. Alternatively \code{value} can be one of the abbreviations used by \code{\link{fvnames}}. If \code{value} specifies a single column of the table, then the result is a function \code{f(r)} with a single numeric argument \code{r} (with the same name as the original argument of the function table). If \code{value} specifies several columns of the table, then the result is a function \code{f(r,what)} where \code{r} is the numeric argument and \code{what} is a character string identifying the column of values to be used. The formal arguments of the resulting function are \code{f(r, what=value)}, which means that in a call to this function \code{f}, the permissible values of \code{what} are the entries of the original vector \code{value}; the default value of \code{what} is the first entry of \code{value}. The command \code{as.function.fv} is a method for the generic command \code{\link{as.function}}. } \value{ A \code{function(r)} or \code{function(r,what)} where \code{r} is the name of the original argument of the function table.
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{fv}}, \code{\link{fv.object}}, \code{\link{fvnames}}, \code{\link{plot.fv}}, \code{\link{Kest}} } \examples{ K <- Kest(cells) f <- as.function(K) f f(0.1) g <- as.function(K, value=c("iso", "trans")) g g(0.1, "trans") } \keyword{spatial} \keyword{methods} spatstat/man/unique.ppp.Rd0000755000176000001440000000317012242557163015326 0ustar ripleyusers\name{unique.ppp} \alias{unique.ppp} \alias{unique.ppx} \title{Extract Unique Points from a Spatial Point Pattern} \description{ Removes any points that are identical to other points in a spatial point pattern. } \usage{ \method{unique}{ppp}(x, \dots, warn=FALSE) \method{unique}{ppx}(x, \dots, warn=FALSE) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{duplicated.ppp}} or \code{\link{duplicated.data.frame}}. } \item{warn}{ Logical. If \code{TRUE}, issue a warning message if any duplicated points were found. } } \value{ Another point pattern object. } \details{ These are methods for the generic function \code{unique} for point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}, or class \code{"ppx"}). Two points in a point pattern are deemed to be identical if their \eqn{x,y} coordinates are the same, and their marks are also the same (if they carry marks). The Examples section illustrates how it is possible for a point pattern to contain a pair of identical points. This function removes duplicate points in \code{x}, and returns a point pattern. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{duplicated.ppp}}, \code{\link{multiplicity.ppp}} } \examples{ X <- ppp(c(1,1,0.5), c(2,2,1), window=square(3)) unique(X) unique(X, rule="deldir") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/demohyper.Rd0000644000176000001440000000226412252274531015212 0ustar ripleyusers\name{demohyper} \alias{demohyper} \docType{data} \title{ Demonstration Example of Hyperframe of Spatial Data } \description{ This is an artificially constructed example of a hyperframe of spatial data. The data could have been obtained from an experiment in which there are two groups of experimental units, the response from each unit is a point pattern \code{Points}, and for each unit there is explanatory data in the form of a pixel image \code{Image}. } \usage{data(demohyper)} \format{ A \code{\link{hyperframe}} with 3 rows and 3 columns: \describe{ \item{Points}{ List of spatial point patterns (objects of class \code{"ppp"}) serving as the responses in an experiment. } \item{Image}{ List of images (objects of class \code{"im"}) serving as explanatory variables. } \item{Group}{ Factor with two levels \code{a} and \code{b} serving as an explanatory variable. } } } \source{ Artificially generated by Adrian Baddeley. } \examples{ plot(demohyper, quote({ plot(Image, main=""); plot(Points, add=TRUE) }), parargs=list(mar=rep(1,4))) mppm(Points ~ Group/Image, data=demohyper) } \keyword{datasets} spatstat/man/miplot.Rd0000755000176000001440000000424112237642733014530 0ustar ripleyusers\name{miplot} \alias{miplot} \title{Morishita Index Plot} \description{ Displays the Morishita Index Plot of a spatial point pattern. } \usage{ miplot(X, ...) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link{as.ppp}}. 
} \item{\dots}{Optional arguments to control the appearance of the plot.} } \details{ Morishita (1959) defined an index of spatial aggregation for a spatial point pattern based on quadrat counts. The spatial domain of the point pattern is first divided into \eqn{Q} subsets (quadrats) of equal size and shape. The numbers of points falling in each quadrat are counted. Then the Morishita Index is computed as \deqn{ \mbox{MI} = Q \frac{\sum_{i=1}^Q n_i (n_i - 1)}{N(N-1)} }{ MI = Q * sum(n[i] (n[i]-1))/(N(N-1)) } where \eqn{n_i}{n[i]} is the number of points falling in the \eqn{i}-th quadrat, and \eqn{N} is the total number of points. If the pattern is completely random, \code{MI} should be approximately equal to 1. Values of \code{MI} greater than 1 suggest clustering. The \emph{Morishita Index plot} is a plot of the Morishita Index \code{MI} against the linear dimension of the quadrats. The point pattern dataset is divided into \eqn{2 \times 2}{2 * 2} quadrats, then \eqn{3 \times 3}{3 * 3} quadrats, etc, and the Morishita Index is computed each time. This plot is an attempt to discern different scales of dependence in the point pattern data. } \value{ None. } \references{ M. Morishita (1959) Measuring of the dispersion of individuals and analysis of the distributional patterns. Memoir of the Faculty of Science, Series E2, Kyushu University. Pages 215--235. 
} \seealso{ \code{\link{quadratcount}} } \examples{ data(longleaf) miplot(longleaf) opa <- par(mfrow=c(2,3)) data(cells) data(japanesepines) data(redwood) plot(cells) plot(japanesepines) plot(redwood) miplot(cells) miplot(japanesepines) miplot(redwood) par(opa) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/fv.object.Rd0000755000176000001440000000321012237642732015076 0ustar ripleyusers\name{fv.object} \alias{fv.object} %DoNotExport \title{Function Value Table} \description{ A class \code{"fv"} to support the convenient plotting of several estimates of the same function. } \details{ An object of this class is a convenient way of storing and plotting several different estimates of the same function. It is a data frame with extra attributes indicating the recommended way of plotting the function, and other information. There are methods for \code{print} and \code{plot} for this class. Objects of class \code{"fv"} are returned by \code{\link{Fest}}, \code{\link{Gest}},\code{\link{Jest}}, and \code{\link{Kest}} along with many other functions. } \seealso{ Objects of class \code{"fv"} are returned by \code{\link{Fest}}, \code{\link{Gest}},\code{\link{Jest}}, and \code{\link{Kest}} along with many other functions. See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. \emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}. 
} \examples{ data(cells) K <- Kest(cells) class(K) K # prints a sensible summary plot(K) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{attribute} spatstat/man/affine.psp.Rd0000755000176000001440000000355412237642732015262 0ustar ripleyusers\name{affine.psp} %DontDeclareMethods \alias{affine.psp} \title{Apply Affine Transformation To Line Segment Pattern} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a line segment pattern. } \usage{ \method{affine}{psp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X}{Line Segment pattern (object of class \code{"psp"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Arguments passed to \code{\link{affine.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } } \value{ Another line segment pattern (of class \code{"psp"}) representing the result of applying the affine transformation. } \details{ The line segment pattern, and its window, are subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and are then translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{affine.owin}}, \code{\link{affine.ppp}}, \code{\link{affine.im}}, \code{\link{flipxy}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ oldpar <- par(mfrow=c(2,1)) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(X, main="original") # shear transformation Y <- affine(X, matrix(c(1,0,0.6,1),ncol=2)) plot(Y, main="transformed") par(oldpar) # # rescale y coordinates by factor 0.2 affine(X, diag(c(1,0.2))) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/pcfdot.inhom.Rd0000755000176000001440000001172312237642733015617 0ustar ripleyusers\name{pcfdot.inhom} \alias{pcfdot.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Type-i-To-Any-Type) } \description{ Estimates the inhomogeneous multitype pair correlation function (from type \eqn{i} to any type) for a multitype point pattern. } \usage{ pcfdot.inhom(X, i, lambdaI = NULL, lambdadot = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity function of the points of type \code{i}. 
Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdadot}{ Optional. Values of the estimated intensity function of the point pattern \code{X}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. } \item{breaks}{ Optional. An alternative to the argument \code{r}. Not normally invoked by the user. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdadot} is estimated by kernel smoothing. } } \details{ The inhomogeneous multitype (type \eqn{i} to any type) pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} is a summary of the dependence between different types of points in a multitype spatial point process that does not have a uniform density of points. 
The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and another point of any type at location \eqn{y}, where \eqn{x} and \eqn{y} are separated by a distance \eqn{r}, is equal to \deqn{ p(r) = \lambda_i(x) \lambda(y) g_{i\bullet}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda(y) * g[i.](r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}, and where \eqn{\lambda}{lambda} is the intensity function of the points of all types. For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda(y)}{p(r) = lambda[i](x) * lambda(y)} so \eqn{g_{i\bullet}(r) = 1}{g[i.](r) = 1}. The command \code{pcfdot.inhom} estimates the inhomogeneous multitype pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If the arguments \code{lambdaI} and \code{lambdadot} are missing or null, they are estimated from \code{X} by kernel smoothing using a leave-one-out estimator. } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous multitype pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{i\bullet}(r)}{g[i.](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{i\bullet}(r)}{g[i.](r)} estimated by Ripley isotropic correction } as required.
} \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfdot}}, \code{\link{pcfcross.inhom}} } \examples{ data(amacrine) plot(pcfdot.inhom(amacrine, "on", stoyan=0.1), legendpos="bottom") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/rsyst.Rd0000755000176000001440000000457412237642734014422 0ustar ripleyusers\name{rsyst} \alias{rsyst} \title{Simulate systematic random point pattern} \description{ Generates a \dQuote{systematic random} pattern of points in a window, consisting of a grid of equally-spaced points with a random common displacement. } \usage{ rsyst(win=square(1), nx=NULL, ny=nx, \dots, dx=NULL, dy=dx) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of columns of grid points in the window. Incompatible with \code{dx}. } \item{ny}{Number of rows of grid points in the window. Incompatible with \code{dy}. } \item{\dots}{Ignored.} \item{dx}{Spacing of grid points in \eqn{x} direction. Incompatible with \code{nx}. } \item{dy}{Spacing of grid points in \eqn{y} direction. Incompatible with \code{ny}. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ This function generates a \dQuote{systematic random} pattern of points in the window \code{win}. The pattern consists of a rectangular grid of points with a random common displacement. The grid spacing in the \eqn{x} direction is determined either by the number of columns \code{nx} or by the horizontal spacing \code{dx}. The grid spacing in the \eqn{y} direction is determined either by the number of rows \code{ny} or by the vertical spacing \code{dy}. 
The grid is then given a random displacement (the common displacement of the grid points is a uniformly distributed random vector in the tile of dimensions \code{dx, dy}). Some of the resulting grid points may lie outside the window \code{win}: if they do, they are deleted. The result is a point pattern inside the window \code{win}. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. } \seealso{ \code{\link{rstrat}}, \code{\link{runifpoint}}, \code{\link{quadscheme}} } \examples{ X <- rsyst(nx=10) plot(X) # polygonal boundary data(letterR) X <- rsyst(letterR, 5, 10) plot(X) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/km.rs.Rd0000755000176000001440000000627412237642732014265 0ustar ripleyusers\name{km.rs} \alias{km.rs} \title{Kaplan-Meier and Reduced Sample Estimator using Histograms} \description{ Compute the Kaplan-Meier and Reduced Sample estimators of a survival time distribution function, using histogram techniques } \usage{ km.rs(o, cc, d, breaks) } \arguments{ \item{o}{vector of observed survival times } \item{cc}{vector of censoring times } \item{d}{vector of non-censoring indicators } \item{breaks}{Vector of breakpoints to be used to form histograms. } } \value{ A list with five elements \item{rs}{Reduced-sample estimate of the survival time c.d.f. \eqn{F(t)} } \item{km}{Kaplan-Meier estimate of the survival time c.d.f. 
\eqn{F(t)} } \item{hazard}{corresponding Nelson-Aalen estimate of the hazard rate \eqn{\lambda(t)}{lambda(t)} } \item{r}{values of \eqn{t} for which \eqn{F(t)} is estimated } \item{breaks}{the breakpoints vector } } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the Kaplan-Meier estimator from a huge dataset. Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. The arguments to this function are vectors \code{o}, \code{cc}, \code{d} of observed values of \eqn{\tilde T_i}{T*[i]}, \eqn{C_i}{C[i]} and \eqn{D_i}{D[i]} respectively. The function computes histograms and forms the reduced-sample and Kaplan-Meier estimates of \eqn{F(t)} by invoking the functions \code{\link{kaplan.meier}} and \code{\link{reduced.sample}}. This is efficient if the lengths of \code{o}, \code{cc}, \code{d} (i.e. the number of observations) is large. The vectors \code{km} and \code{hazard} returned by \code{kaplan.meier} are (histogram approximations to) the Kaplan-Meier estimator of \eqn{F(t)} and its hazard rate \eqn{\lambda(t)}{lambda(t)}. Specifically, \code{km[k]} is an estimate of \code{F(breaks[k+1])}, and \code{lambda[k]} is an estimate of the average of \eqn{\lambda(t)}{lambda(t)} over the interval \code{(breaks[k],breaks[k+1])}. This approximation is exact only if the survival times are discrete and the histogram breaks are fine enough to ensure that each interval \code{(breaks[k],breaks[k+1])} contains only one possible value of the survival time. 
The vector \code{rs} is the reduced-sample estimator, \code{rs[k]} being the reduced sample estimate of \code{F(breaks[k+1])}. This value is exact, i.e. the use of histograms does not introduce any approximation error in the reduced-sample estimator. } \seealso{ \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/rshift.psp.Rd0000755000176000001440000001063212237642734015326 0ustar ripleyusers\name{rshift.psp} \alias{rshift.psp} \title{Randomly Shift a Line Segment Pattern} \description{ Randomly shifts the segments in a line segment pattern. } \usage{ \method{rshift}{psp}(X, \dots, group=NULL, which=NULL) } \arguments{ \item{X}{Line segment pattern to be subjected to a random shift. An object of class \code{"psp"}. } \item{\dots}{ Arguments controlling the randomisation and the handling of edge effects. See \code{\link{rshift.ppp}}. } \item{group}{ Optional. Factor specifying a grouping of the line segments of \code{X}, or \code{NULL} indicating that all line segments belong to the same group. Each group will be shifted together, and separately from other groups. } \item{which}{ Optional. Identifies which groups of the pattern will be shifted, while other groups are not shifted. A vector of levels of \code{group}. } } \value{ A line segment pattern (object of class \code{"psp"}). } \details{ This operation randomly shifts the locations of the line segments in a line segment pattern. The function \code{rshift} is generic. This function \code{rshift.psp} is the method for line segment patterns. The line segments of \code{X} are first divided into groups, then the line segments within a group are shifted by a common random displacement vector. Different groups of line segments are shifted independently. 
If the argument \code{group} is present, then this determines the grouping. Otherwise, all line segments belong to a single group. The argument \code{group} should be a factor, of length equal to the number of line segments in \code{X}. Alternatively \code{group} may be \code{NULL}, which specifies that all line segments of \code{X} belong to a single group. By default, every group of line segments will be shifted. The argument \code{which} indicates that only some of the groups should be shifted, while other groups should be left unchanged. \code{which} must be a vector of levels of \code{group} indicating which groups are to be shifted. The displacement vector, i.e. the vector by which the data line segments are shifted, is generated at random. The \emph{default} behaviour is to generate a displacement vector at random with equal probability for all possible displacements. This means that the \eqn{x} and \eqn{y} coordinates of the displacement vector are independent random variables, uniformly distributed over the range of possible coordinates. Alternatively, the displacement vector can be generated by another random mechanism, controlled by the arguments \code{radius}, \code{width} and \code{height}. \describe{ \item{rectangular:}{ if \code{width} and \code{height} are given, then the displacement vector is uniformly distributed in a rectangle of these dimensions, centred at the origin. The maximum possible displacement in the \eqn{x} direction is \code{width/2}. The maximum possible displacement in the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y} displacements are independent. (If \code{width} and \code{height} are actually equal to the dimensions of the observation window, then this is equivalent to the default.) } \item{radial:}{ if \code{radius} is given, then the displacement vector is generated by choosing a random point inside a disc of the given radius, centred at the origin, with uniform probability density over the disc.
Thus the argument \code{radius} determines the maximum possible displacement distance. The argument \code{radius} is incompatible with the arguments \code{width} and \code{height}. } } The argument \code{edge} controls what happens when a shifted line segment lies partially or completely outside the window of \code{X}. Currently the only option is \code{"erode"} which specifies that the segments will be clipped to a smaller window. The optional argument \code{clip} specifies a smaller window to which the pattern should be restricted. } \seealso{ \code{\link{rshift}}, \code{\link{rshift.ppp}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- rshift(X, radius=0.1) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/rstrat.Rd0000755000176000001440000000346712237642734014555 0ustar ripleyusers\name{rstrat} \alias{rstrat} \title{Simulate Stratified Random Point Pattern} \description{ Generates a ``stratified random'' pattern of points in a window, by dividing the window into rectangular tiles and placing \code{k} random points independently in each tile. } \usage{ rstrat(win=square(1), nx, ny=nx, k = 1) } \arguments{ \item{win}{ A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of tiles in each column. } \item{ny}{Number of tiles in each row. } \item{k}{Number of random points to generate in each tile. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ This function generates a random pattern of points in a ``stratified random'' sampling design. It can be useful for generating random spatial sampling points. The bounding rectangle of \code{win} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. 
In each tile, \code{k} random points are generated independently with a uniform distribution in that tile. Some of these grid points may lie outside the window \code{win}: if they do, they are deleted. The result is a point pattern inside the window \code{win}. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. } \seealso{ \code{\link{rsyst}}, \code{\link{runifpoint}}, \code{\link{quadscheme}} } \examples{ X <- rstrat(nx=10) plot(X) # polygonal boundary data(letterR) X <- rstrat(letterR, 5, 10, k=3) plot(X) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/StraussHard.Rd0000755000176000001440000000774412237642731015500 0ustar ripleyusers\name{StraussHard} \alias{StraussHard} \title{The Strauss / Hard Core Point Process Model} \description{ Creates an instance of the ``Strauss/ hard core'' point process model which can then be fitted to point pattern data. } \usage{ StraussHard(r, hc=NA) } \arguments{ \item{r}{The interaction radius of the Strauss interaction} \item{hc}{The hard core distance. Optional.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the ``Strauss/hard core'' process with Strauss interaction radius \eqn{r} and hard core distance \code{hc}. } \details{ A Strauss/hard core process with interaction radius \eqn{r}, hard core distance \eqn{h < r}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is a pairwise interaction point process in which \itemize{ \item distinct points are not allowed to come closer than a distance \eqn{h} apart \item each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the probability density. } This is a hybrid of the Strauss process and the hard core process. 
The probability density is zero if any pair of points is closer than \eqn{h} units apart, and otherwise equals \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case for the Strauss process). If \eqn{\gamma = 1}{gamma = 1}, the process reduces to a classical hard core process. If \eqn{\gamma < 1}{gamma < 1}, the model describes an ``ordered'' or ``inhibitive'' pattern. If \eqn{\gamma > 1}{gamma > 1}, the model is ``ordered'' or ``inhibitive'' up to the distance \eqn{h}, but has an ``attraction'' between points lying at distances in the range between \eqn{h} and \eqn{r}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss/hard core process pairwise interaction is yielded by the function \code{StraussHard()}. See the examples below. The canonical parameter \eqn{\log(\gamma)}{log(gamma)} is estimated by \code{\link{ppm}()}, not fixed in \code{StraussHard()}. If the hard core distance argument \code{hc} is missing or \code{NA}, it will be estimated from the data when \code{\link{ppm}} is called. The estimated value of \code{hc} is the minimum nearest neighbour distance multiplied by \eqn{n/(n+1)}, where \eqn{n} is the number of data points. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. 
\emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{63}, 467--475. } \examples{ StraussHard(r=1,hc=0.02) # prints a sensible description of itself data(cells) \dontrun{ ppm(cells, ~1, StraussHard(r=0.1, hc=0.05)) # fit the stationary Strauss/hard core process to `cells' } ppm(cells, ~ polynom(x,y,3), StraussHard(r=0.1, hc=0.05)) # fit a nonstationary Strauss/hard core process # with log-cubic polynomial trend } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/plot.mppm.Rd0000644000176000001440000000371612241443112015136 0ustar ripleyusers\name{plot.mppm} \alias{plot.mppm} \title{plot a Fitted Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns by \code{\link{mppm}}, compute spatial trend or conditional intensity surface of the model, in a form suitable for plotting, and (optionally) plot this surface. } \usage{ \method{plot}{mppm}(x, ..., trend = TRUE, cif = FALSE, how="image") } \arguments{ \item{x}{ A point process model fitted to multiple point patterns, typically obtained from the model-fitting algorithm \code{\link{mppm}}. An object of class \code{"mppm"}. } \item{\dots}{ Arguments passed to \code{\link[spatstat]{plot.ppm}} or \code{\link[spatstat]{plot.listof}} controlling the plot. } \item{trend}{ logical flag; if \code{TRUE}, the spatial trend surface will be produced. } \item{cif}{ logical flag; if \code{TRUE}, the conditional intensity surface will be produced. } \item{how}{ character string vector indicating the style of plot to be performed. } } \value{ \code{NULL}. 
} \details{ This is the \code{plot} method for the class \code{"mppm"} of point process models fitted to multiple point patterns (see \code{\link{mppm}}). It invokes \code{\link{subfits}} to compute the fitted model for each individual point pattern dataset, then calls \code{\link[spatstat]{plot.ppm}} to plot these individual models. These individual plots are displayed using \code{\link[spatstat]{plot.listof}}, which generates either a series of separate plot frames or an array of plot panels on a single page. } \seealso{ \code{\link[spatstat]{plot.ppm}}, \code{\link{mppm}}, \code{\link[spatstat]{plot.listof}} } \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/is.im.Rd0000755000176000001440000000136412237642732014245 0ustar ripleyusers\name{is.im} \alias{is.im} \title{Test Whether An Object Is A Pixel Image} \description{ Tests whether its argument is a pixel image (object of class \code{"im"}). } \usage{ is.im(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the argument \code{x} is a pixel image object of class \code{"im"}. For details of this class, see \code{\link{im.object}}. The object is determined to be an image if it inherits from class \code{"im"}. } \value{ \code{TRUE} if \code{x} is a pixel image, otherwise \code{FALSE}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/rmhstart.Rd0000755000176000001440000000722212237642734015073 0ustar ripleyusers\name{rmhstart} \alias{rmhstart} \alias{rmhstart.default} \title{Determine Initial State for Metropolis-Hastings Simulation.} \description{ Builds a description of the initial state for the Metropolis-Hastings algorithm. 
} \usage{ rmhstart(start, \dots) \method{rmhstart}{default}(start=NULL, \dots, n.start=NULL, x.start=NULL) } \arguments{ \item{start}{An existing description of the initial state in some format. Incompatible with the arguments listed below. } \item{\dots}{There should be no other arguments.} \item{n.start}{ Number of initial points (to be randomly generated). Incompatible with \code{x.start}. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. } } \value{ An object of class \code{"rmhstart"}, which is essentially a list of parameters describing the initial point pattern and (optionally) the initial state of the random number generator. There is a \code{print} method for this class, which prints a sensible description of the initial state. } \details{ Simulated realisations of many point process models can be generated using the Metropolis-Hastings algorithm implemented in \code{\link{rmh}}. This function \code{rmhstart} creates a full description of the initial state of the Metropolis-Hastings algorithm, \emph{including possibly the initial state of the random number generator}, for use in a subsequent call to \code{\link{rmh}}. It also checks that the initial state is valid. The initial state should be specified \bold{either} by the first argument \code{start} \bold{or} by the other arguments \code{n.start}, \code{x.start} etc. If \code{start} is a list, then it should have components named \code{n.start} or \code{x.start}, with the same interpretation as described below. The arguments are: \describe{ \item{n.start}{ The number of \dQuote{initial} points to be randomly (uniformly) generated in the simulation window \code{w}. Incompatible with \code{x.start}. For a multitype point process, \code{n.start} may be a vector (of length equal to the number of types) giving the number of points of each type to be generated. 
If expansion of the simulation window is selected (see the argument \code{expand} to \code{\link{rmhcontrol}}), then the actual number of starting points in the simulation will be \code{n.start} multiplied by the expansion factor (ratio of the areas of the expanded window and original window). For faster convergence of the Metropolis-Hastings algorithm, the value of \code{n.start} should be roughly equal to (an educated guess at) the expected number of points for the point process inside the window. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. \code{x.start} may be a point pattern (an object of class \code{ppp}), or an object which can be coerced to this class by \code{\link{as.ppp}}, or a dataset containing vectors \code{x} and \code{y}. If \code{x.start} is specified, then expansion of the simulation window (the argument \code{expand} of \code{\link{rmhcontrol}}) is not permitted. } } The parameters \code{n.start} and \code{x.start} are \emph{incompatible}. } \seealso{ \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhmodel}} } \examples{ # 30 random points a <- rmhstart(n.start=30) # a particular point pattern data(cells) b <- rmhstart(x.start=cells) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/Kmeasure.Rd0000755000176000001440000001600612237642731015000 0ustar ripleyusers\name{Kmeasure} \alias{Kmeasure} \title{Reduced Second Moment Measure} \description{ Estimates the reduced second moment measure \eqn{\kappa}{Kappa} from a point pattern in a window of arbitrary shape. } \usage{ Kmeasure(X, sigma, edge=TRUE, ..., varcov=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{\kappa}{Kappa} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. 
} \item{sigma}{ Standard deviation \eqn{\sigma}{sigma} of the Gaussian smoothing kernel. Incompatible with \code{varcov}. } \item{edge}{ logical value indicating whether an edge correction should be applied. } \item{\dots}{Ignored.} \item{varcov}{ Variance-covariance matrix of the Gaussian smoothing kernel. Incompatible with \code{sigma}. } } \value{ A real-valued pixel image (an object of class \code{"im"}, see \code{\link{im.object}}) whose pixel values are estimates of the density of the reduced second moment measure at each location. } \details{ Given a point pattern dataset, this command computes an estimate of the reduced second moment measure \eqn{\kappa}{Kappa} of the point process. The result is a pixel image whose pixel values are estimates of the density of the reduced second moment measure. The reduced second moment measure \eqn{\kappa}{Kappa} can be regarded as a generalisation of the more familiar \eqn{K}-function. An estimate of \eqn{\kappa}{Kappa} derived from a spatial point pattern dataset can be useful in exploratory data analysis. Its advantage over the \eqn{K}-function is that it is also sensitive to anisotropy and directional effects. In a nutshell, the command \code{Kmeasure} computes a smoothed version of the \emph{Fry plot}. As explained under \code{\link{fryplot}}, the Fry plot is a scatterplot of the vectors joining all pairs of points in the pattern. The reduced second moment measure is (essentially) defined as the average of the Fry plot over different realisations of the point process. The command \code{Kmeasure} effectively smooths the Fry plot of a dataset to obtain an estimate of the reduced second moment measure. 
In formal terms, the reduced second moment measure \eqn{\kappa}{Kappa} of a stationary point process \eqn{X} is a measure defined on the two-dimensional plane such that, for a `typical' point \eqn{x} of the process, the expected number of other points \eqn{y} of the process such that the vector \eqn{y - x} lies in a region \eqn{A}, equals \eqn{\lambda \kappa(A)}{lambda * Kappa(A)}. Here \eqn{\lambda}{lambda} is the intensity of the process, i.e. the expected number of points of \eqn{X} per unit area. The \eqn{K}-function is a special case. The function value \eqn{K(t)} is the value of the reduced second moment measure for the disc of radius \eqn{t} centred at the origin; that is, \eqn{K(t) = \kappa(b(0,t))}{K(t) = Kappa(b(0,t))}. The command \code{Kmeasure} computes an estimate of \eqn{\kappa}{Kappa} from a point pattern dataset \code{X}, which is assumed to be a realisation of a stationary point process, observed inside a known, bounded window. Marks are ignored. The algorithm approximates the point pattern and its window by binary pixel images, introduces a Gaussian smoothing kernel and uses the Fast Fourier Transform \code{\link{fft}} to form a density estimate of \eqn{\kappa}{Kappa}. The calculation corresponds to the edge correction known as the ``translation correction''. The Gaussian smoothing kernel may be specified by either of the arguments \code{sigma} or \code{varcov}. If \code{sigma} is a single number, this specifies an isotropic Gaussian kernel with standard deviation \code{sigma} on each coordinate axis. If \code{sigma} is a vector of two numbers, this specifies a Gaussian kernel with standard deviation \code{sigma[1]} on the \eqn{x} axis, standard deviation \code{sigma[2]} on the \eqn{y} axis, and zero correlation between the \eqn{x} and \eqn{y} axes. If \code{varcov} is given, this specifies the variance-covariance matrix of the Gaussian kernel. There do not seem to be any well-established rules for selecting the smoothing kernel in this context. 
The density estimate of \eqn{\kappa}{Kappa} is returned in the form of a real-valued pixel image. Pixel values are estimates of the normalised second moment density at the centre of the pixel. (The uniform Poisson process would have values identically equal to \eqn{1}.) The image \code{x} and \code{y} coordinates are on the same scale as vector displacements in the original point pattern window. The point \code{x=0, y=0} corresponds to the `typical point'. A peak in the image near \code{(0,0)} suggests clustering; a dip in the image near \code{(0,0)} suggests inhibition; peaks or dips at other positions suggest possible periodicity. If desired, the value of \eqn{\kappa(A)}{Kappa(A)} for a region \eqn{A} can be estimated by computing the integral of the pixel image over the domain \eqn{A}, i.e.\ summing the pixel values and multiplying by pixel area, using \code{\link{integral.im}}. One possible application is to compute anisotropic counterparts of the \eqn{K}-function (in which the disc of radius \eqn{t} is replaced by another shape). See Examples. } \section{Warning}{ Some writers use the term \emph{reduced second moment measure} when they mean the \eqn{K}-function. This has caused confusion. As originally defined, the reduced second moment measure is a measure, obtained by modifying the second moment measure, while the \eqn{K}-function is a function obtained by evaluating this measure for discs of increasing radius. In \pkg{spatstat}, the \eqn{K}-function is computed by \code{\link{Kest}} and the reduced second moment measure is computed by \code{Kmeasure}. } \references{ Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \seealso{ \code{\link{Kest}}, \code{\link{fryplot}}, \code{\link{spatstat.options}}, \code{\link{integral.im}}, \code{\link{im.object}} } \examples{ data(cells) plot(Kmeasure(cells, 0.05)) # shows pronounced dip around origin consistent with strong inhibition data(redwood) plot(Kmeasure(redwood, 0.03), col=grey(seq(1,0,length=32))) # shows peaks at several places, reflecting clustering and ?periodicity M <- Kmeasure(cells, 0.05) # evaluate measure on a sector W <- as.owin(M) ang <- as.im(atan2, W) rad <- as.im(function(x,y){sqrt(x^2+y^2)}, W) sector <- solutionset(ang > 0 & ang < 1 & rad < 0.6) integral.im(M[sector, drop=FALSE]) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/convexhull.Rd0000755000176000001440000000172412237642732015415 0ustar ripleyusers\name{convexhull} \alias{convexhull} \title{Convex Hull} \description{ Computes the convex hull of a spatial object. } \usage{ convexhull(x) } \arguments{ \item{x}{ a window (object of class \code{"owin"}), a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), or an object that can be converted to a window by \code{\link{as.owin}}. } } \value{ A window (an object of class \code{"owin"}). } \details{ This function computes the convex hull of the spatial object \code{x}. 
} \seealso{ \code{\link{owin}}, \code{\link{convexhull.xy}}, \code{\link{is.convex}} } \examples{ data(demopat) W <- demopat$window plot(convexhull(W), col="lightblue", border=NA) plot(W, add=TRUE, lwd=2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{utilities} spatstat/man/rescale.im.Rd0000755000176000001440000000346612237642734015257 0ustar ripleyusers\name{rescale.im} \alias{rescale.im} \title{Convert Pixel Image to Another Unit of Length} \description{ Converts a pixel image to another unit of length. } \usage{ \method{rescale}{im}(X, s) } \arguments{ \item{X}{Pixel image (object of class \code{"im"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} } \value{ Another pixel image (of class \code{"im"}), containing the same pixel values, but with pixel coordinates expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates of the pixels in \code{X} will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. The result is a pixel image representing the \emph{same} data but re-expressed in a different unit. Pixel values are unchanged. This may not be what you intended! 
} \seealso{ \code{\link{im}}, \code{\link{rescale}}, \code{\link{unitname}}, \code{\link{eval.im}} } \examples{ # Bramble Canes data: 1 unit = 9 metres data(bramblecanes) # distance transform Z <- distmap(bramblecanes) # convert to metres # first alter the pixel values Zm <- eval.im(9 * Z) # now rescale the pixel coordinates Z <- rescale(Zm, 1/9) # or equivalently Z <- rescale(Zm) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/crossdist.ppx.Rd0000755000176000001440000000350012237642732016043 0ustar ripleyusers\name{crossdist.ppx} \alias{crossdist.ppx} \title{Pairwise Distances Between Two Different Multi-Dimensional Point Patterns} \description{ Computes the distances between pairs of points taken from two different multi-dimensional point patterns. } \usage{ \method{crossdist}{ppx}(X, Y, \dots) } \arguments{ \item{X,Y}{ Multi-dimensional point patterns (objects of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. } \details{ Given two point patterns in multi-dimensional space, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}} for multi-dimensional point patterns (objects of class \code{"ppx"}). This function expects two multidimensional point patterns \code{X} and \code{Y}, and returns the matrix whose \code{[i,j]} entry is the distance from \code{X[i]} to \code{Y[j]}. By default, both spatial and temporal coordinates are extracted.
To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. } \seealso{ \code{\link{crossdist}}, \code{\link{pairdist}}, \code{\link{nndist}} } \examples{ df <- data.frame(x=runif(3),y=runif(3),z=runif(3),w=runif(3)) X <- ppx(data=df) df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5)) Y <- ppx(data=df) d <- crossdist(X, Y) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/psstG.Rd0000755000176000001440000001145412237642733014330 0ustar ripleyusers\name{psstG} \Rdversion{1.1} \alias{psstG} \title{ Pseudoscore Diagnostic For Fitted Model against Saturation Alternative } \description{ Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of saturation type. } \usage{ psstG(object, r = NULL, breaks = NULL, ..., trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. 
Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the Geyer saturation process (see \code{\link{Geyer}}) with saturation parameter 1. The family of alternatives includes models that are more regular than the fitted model, and others that are more clustered than the fitted model. For any point pattern \eqn{x}, and any \eqn{r > 0}, let \eqn{S(x,r)} be the number of points in \eqn{x} whose nearest neighbour (the nearest other point in \eqn{x}) is closer than \eqn{r} units. Then the pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r ) - \int_W \Delta S(u,x,r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x,r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). 
Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} Ege Rubak and Jesper Moller. } \seealso{ Alternative functions: \code{\link{psstA}}, \code{\link{psst}}, \code{\link{Kres}}, \code{\link{Gres}}. } \examples{ X <- rStrauss(200,0.1,0.05) plot(psstG(X)) plot(psstG(X, interaction=Strauss(0.05))) } \keyword{spatial} \keyword{models} spatstat/man/connected.Rd0000755000176000001440000001000412237642732015157 0ustar ripleyusers\name{connected} %DontDeclareMethods \Rdversion{1.1} \alias{connected} \alias{connected.im} \alias{connected.owin} \title{ Connected components } \description{ Finds the topologically-connected components of a spatial object, such as the connected clumps of pixels in a binary image. } \usage{ connected(X, \dots) \method{connected}{owin}(X, \dots, method="C") \method{connected}{im}(X, \dots, background = NA, method="C") } \arguments{ \item{X}{ A spatial object such as a pixel image (object of class \code{"im"}) or a window (object of class \code{"owin"}). } \item{background}{ Optional. Treat pixels with this value as being part of the background. 
} \item{method}{ String indicating the algorithm to be used. Either \code{"C"} or \code{"interpreted"}. See Details. } \item{\dots}{ Other arguments, not recognised by these methods. } } \details{ The function \code{connected} is generic, with methods for pixel images (class \code{"im"}) and windows (class \code{"owin"}) described here. There is also a method for point patterns described in \code{\link{connected.ppp}}. The functions described here compute the connected component transform (Rosenfeld and Pfalz, 1966) of a binary image or binary mask. The argument \code{X} is first converted into a pixel image with logical values. Then the algorithm identifies the connected components (topologically-connected clumps of pixels) in the foreground. Two pixels belong to the same connected component if they have the value \code{TRUE} and if they are neighbours (in the 8-connected sense). This rule is applied repeatedly until it terminates. Then each connected component contains all the pixels that can be reached by stepping from neighbour to neighbour. If \code{method="C"}, the computation is performed by a compiled C language implementation of the classical algorithm of Rosenfeld and Pfalz (1966). If \code{method="interpreted"}, the computation is performed by an \R implementation of the algorithm of Park et al (2000). The result is a factor-valued image, with levels that correspond to the connected components. The Examples show how to extract each connected component as a separate window object. } \value{ A pixel image (object of class \code{"im"}) with factor values. The levels of the factor correspond to the connected components. } \references{ Park, J.-M., Looney, C.G. and Chen, H.-C. (2000) Fast connected component labeling algorithm using a divide and conquer technique. Pages 373-376 in S.Y. 
Shin (ed) \emph{Computers and Their Applications:} Proceedings of the ISCA 15th International Conference on Computers and Their Applications, March 29-31, 2000, New Orleans, Louisiana USA. ISCA 2000, ISBN 1-880843-32-3. Rosenfeld, A. and Pfalz, J.L. (1966) Sequential operations in digital processing. \emph{Journal of the Association for Computing Machinery} \bold{13} 471-494. } \seealso{ \code{\link{connected.ppp}}, \code{\link{im.object}}, \code{\link{tess}} } \section{Warnings}{ It may be hard to distinguish different components in the default plot because the colours of nearby components may be very similar. See the Examples for a randomised colour map. The algorithm for \code{method="interpreted"} can be very slow for large images (or images where the connected components include a large number of pixels). } \examples{ data(cells) d <- distmap(cells, dimyx=256) X <- levelset(d, 0.06) plot(X) Z <- connected(X) plot(Z) # number of components nc <- length(levels(Z)) # plot with randomised colour map plot(Z, col=hsv(h=sample(seq(0,1,length=nc), nc))) # how to extract the components as a list of windows W <- tiles(tess(image=Z)) } \author{ Original \R code by Julian Burgos, University of Washington. Adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/pairdist.psp.Rd0000755000176000001440000000461012237642733015644 0ustar ripleyusers\name{pairdist.psp} \alias{pairdist.psp} \title{Pairwise distances between line segments} \description{ Computes the matrix of distances between all pairs of line segments in a line segment pattern. } \usage{ \method{pairdist}{psp}(X, \dots, method="Fortran", type="Hausdorff") } \arguments{ \item{X}{ A line segment pattern (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{method}{ String specifying which method of calculation to use. 
Values are \code{"Fortran"}, \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{type}{ Type of distance to be computed. Options are \code{"Hausdorff"} and \code{"separation"}. Partial matching is used. } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the line segments numbered \code{i} and \code{j}. } \details{ This function computes the distance between each pair of line segments in \code{X}, and returns the matrix of distances. This is a method for the generic function \code{\link{pairdist}} for the class \code{"psp"}. The distances between line segments are measured in one of two ways: \itemize{ \item if \code{type="Hausdorff"}, distances are computed in the Hausdorff metric. The Hausdorff distance between two line segments is the \emph{maximum} distance from any point on one of the segments to the nearest point on the other segment. \item if \code{type="separation"}, distances are computed as the \emph{minimum} distance from a point on one line segment to a point on the other line segment. For example, line segments which cross over each other have separation zero. } The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="Fortran"} (the default) then Fortran code is used. The Fortran code is somewhat faster. 
} \seealso{ \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{pairdist.ppp}} } \examples{ L <- psp(runif(10), runif(10), runif(10), runif(10), owin()) D <- pairdist(L) S <- pairdist(L, type="sep") } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/Extract.ppx.Rd0000644000176000001440000000325312237642732015442 0ustar ripleyusers\name{Extract.ppx} \alias{[.ppx} \title{Extract Subset of Multidimensional Point Pattern} \description{ Extract a subset of a multidimensional point pattern. } \usage{ \method{[}{ppx}(x, i, ...) } \arguments{ \item{x}{ A multidimensional point pattern (object of class \code{"ppx"}). } \item{i}{ Subset index. A valid subset index in the usual \R sense, indicating which points should be retained. } \item{\dots}{ Ignored. } } \value{ A multidimensional point pattern (of class \code{"ppx"}) in the same domain. } \details{ This function extracts a designated subset of a multidimensional point pattern. The function \code{[.ppx} is a method for \code{\link{[}} for the class \code{"ppx"}. It extracts a designated subset of a point pattern. The argument \code{i} should be a subset index in the usual \R sense: either a numeric vector of positive indices (identifying the points to be retained), a numeric vector of negative indices (identifying the points to be deleted) or a logical vector of length equal to the number of points in the point pattern \code{x}. In the latter case, the points \code{(x$x[i], x$y[i])} for which \code{subset[i]=TRUE} will be retained, and the others will be deleted. Use the function \code{\link{unmark}} to remove marks from a marked point pattern. 
} \seealso{ \code{\link{ppx}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),z=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t")) X[-2] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/applynbd.Rd0000755000176000001440000002007012237642732015032 0ustar ripleyusers\name{applynbd} \alias{applynbd} \title{Apply Function to Every Neighbourhood in a Point Pattern} \description{ Visit each point in a point pattern, find the neighbouring points, and apply a given function to them. } \usage{ applynbd(X, FUN, N=NULL, R=NULL, criterion=NULL, exclude=FALSE, \dots) } \arguments{ \item{X}{ Point pattern. An object of class \code{"ppp"}, or data which can be converted into this format by \code{\link{as.ppp}}. } \item{FUN}{ Function to be applied to each neighbourhood. The arguments of \code{FUN} are described under \bold{Details}. } \item{N}{ Integer. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of the \code{N} points of \code{X} which are closest to it. } \item{R}{ Nonnegative numeric value. If this argument is present, the neighbourhood of a point of \code{X} is defined to consist of all points of \code{X} which lie within a distance \code{R} of it. } \item{criterion}{ Function. If this argument is present, the neighbourhood of a point of \code{X} is determined by evaluating this function. See under \bold{Details}. } \item{exclude}{ Logical. If \code{TRUE} then the point currently being visited is excluded from its own neighbourhood. } \item{\dots}{ extra arguments passed to the function \code{FUN}. They must be given in the form \code{name=value}. } } \value{ Similar to the result of \code{\link{apply}}. If each call to \code{FUN} returns a single numeric value, the result is a vector of dimension \code{X$n}, the number of points in \code{X}. 
If each call to \code{FUN} returns a vector of the same length \code{m}, then the result is a matrix of dimensions \code{c(m,n)}; note the transposition of the indices, as usual for the family of \code{apply} functions. If the calls to \code{FUN} return vectors of different lengths, the result is a list of length \code{X$n}. } \details{ This is an analogue of \code{\link{apply}} for point patterns. It visits each point in the point pattern \code{X}, determines which points of \code{X} are ``neighbours'' of the current point, applies the function \code{FUN} to this neighbourhood, and collects the values returned by \code{FUN}. The definition of ``neighbours'' depends on the arguments \code{N}, \code{R} and \code{criterion}. Also the argument \code{exclude} determines whether the current point is excluded from its own neighbourhood. \itemize{ \item If \code{N} is given, then the neighbours of the current point are the \code{N} points of \code{X} which are closest to the current point (including the current point itself unless \code{exclude=TRUE}). \item If \code{R} is given, then the neighbourhood of the current point consists of all points of \code{X} which lie closer than a distance \code{R} from the current point. \item If \code{criterion} is given, then it must be a function with two arguments \code{dist} and \code{drank} which will be vectors of equal length. The interpretation is that \code{dist[i]} will be the distance of a point from the current point, and \code{drank[i]} will be the rank of that distance (the three points closest to the current point will have rank 1, 2 and 3). This function must return a logical vector of the same length as \code{dist} and \code{drank} whose \code{i}-th entry is \code{TRUE} if the corresponding point should be included in the neighbourhood. See the examples below. 
\item If more than one of the arguments \code{N}, \code{R} and \code{criterion} is given, the neighbourhood is defined as the \emph{intersection} of the neighbourhoods specified by these arguments. For example if \code{N=3} and \code{R=5} then the neighbourhood is formed by finding the 3 nearest neighbours of current point, and retaining only those neighbours which lie closer than 5 units from the current point. } When \code{applynbd} is executed, each point of \code{X} is visited, and the following happens for each point: \itemize{ \item the neighbourhood of the current point is determined according to the chosen rule, and stored as a point pattern \code{Y}; \item the function \code{FUN} is called as: \code{FUN(Y=Y, current=current, dists=dists, dranks=dranks, \dots)} where \code{current} is the location of the current point (in a format explained below), \code{dists} is a vector of distances from the current point to each of the points in \code{Y}, \code{dranks} is a vector of the ranks of these distances with respect to the full point pattern \code{X}, and \code{\dots} are the arguments passed from the call to \code{applynbd}; \item The result of the call to \code{FUN} is stored. } The results of each call to \code{FUN} are collected and returned according to the usual rules for \code{\link{apply}} and its relatives. See the \bold{Value} section of this help file. The format of the argument \code{current} is as follows. If \code{X} is an unmarked point pattern, then \code{current} is a vector of length 2 containing the coordinates of the current point. If \code{X} is marked, then \code{current} is a point pattern containing exactly one point, so that \code{current$x} is its \eqn{x}-coordinate and \code{current$marks} is its mark value. In either case, the coordinates of the current point can be referred to as \code{current$x} and \code{current$y}. Note that \code{FUN} will be called exactly as described above, with each argument named explicitly. 
Care is required when writing the function \code{FUN} to ensure that the arguments will match up. See the Examples. See \code{\link{markstat}} for a common use of this function. To simply tabulate the marks in every \code{R}-neighbourhood, use \code{\link{marktable}}. } \seealso{ \code{\link{ppp.object}}, \code{\link{apply}}, \code{\link{markstat}}, \code{\link{marktable}} } \examples{ data(redwood) # count the number of points within radius 0.2 of each point of X nneighbours <- applynbd(redwood, R=0.2, function(Y, ...){Y$n-1}) # equivalent to: nneighbours <- applynbd(redwood, R=0.2, function(Y, ...){Y$n}, exclude=TRUE) # compute the distance to the second nearest neighbour of each point secondnndist <- applynbd(redwood, N = 2, function(dists, ...){max(dists)}, exclude=TRUE) # marked point pattern data(longleaf) \testonly{ # smaller dataset longleaf <- longleaf[seq(1, longleaf$n, by=80)] } # compute the median of the marks of all neighbours of a point # (see also 'markstat') dbh.med <- applynbd(longleaf, R=90, exclude=TRUE, function(Y, ...) 
{ median(Y$marks)}) # ANIMATION explaining the definition of the K function # (arguments `fullpicture' and 'rad' are passed to FUN) \dontrun{ showoffK <- function(Y, current, dists, dranks, fullpicture,rad) { plot(fullpicture, main="") points(Y, cex=2) u <- current points(u$x,u$y,pch="+",cex=3) theta <- seq(0,2*pi,length=100) polygon(u$x+ rad * cos(theta),u$y+rad*sin(theta)) text(u$x+rad/3,u$y+rad/2,Y$n,cex=3) Sys.sleep(if(runif(1) < 0.1) 1.5 else 0.3) return(Y$n - 1) } applynbd(redwood, R=0.2, showoffK, fullpicture=redwood, rad=0.2, exclude=TRUE) # animation explaining the definition of the G function showoffG <- function(Y, current, dists, dranks, fullpicture) { plot(fullpicture, main="") points(Y, cex=2) u <- current points(u[1],u[2],pch="+",cex=3) v <- c(Y$x[1],Y$y[1]) segments(u[1],u[2],v[1],v[2],lwd=2) w <- (u + v)/2 nnd <- dists[1] text(w[1],w[2],round(nnd,3),cex=2) Sys.sleep(if(runif(1) < 0.1) 1.5 else 0.3) return(nnd) } data(cells) applynbd(cells, N=1, showoffG, exclude=TRUE, fullpicture=cells) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{programming} \keyword{iteration} spatstat/man/setcov.Rd0000755000176000001440000000400412237642734014525 0ustar ripleyusers\name{setcov} \alias{setcov} \title{Set Covariance of a Window} \description{ Computes the set covariance function of a window. } \usage{ setcov(W, V=W, \dots) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). } \item{V}{ Optional. Another window. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to control the pixel resolution. } } \value{ A pixel image (an object of class \code{"im"}) representing the set covariance function of \code{W}, or the cross-covariance of \code{W} and \code{V}. 
} \details{ The set covariance function of a region \eqn{W} in the plane is the function \eqn{C(v)} defined for each vector \eqn{v} as the area of the intersection between \eqn{W} and \eqn{W+v}, where \eqn{W+v} is the set obtained by shifting (translating) \eqn{W} by \eqn{v}. We may interpret \eqn{C(v)} as the area of the set of all points \eqn{x} in \eqn{W} such that \eqn{x+v} also lies in \eqn{W}. This command computes a discretised approximation to the set covariance function of any plane region \eqn{W} represented as a window object (of class \code{"owin"}, see \code{\link{owin.object}}). The return value is a pixel image (object of class \code{"im"}) whose greyscale values are values of the set covariance function. The set covariance is computed using the Fast Fourier Transform, unless \code{W} is a rectangle, when an exact formula is used. If the argument \code{V} is present, then \code{setcov(W,V)} computes the set \emph{cross-covariance} function \eqn{C(x)} defined for each vector \eqn{x} as the area of the intersection between \eqn{W} and \eqn{V+x}. } \seealso{ \code{\link{imcov}}, \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{erosion}} } \examples{ w <- owin(c(0,1),c(0,1)) v <- setcov(w) plot(v) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/methods.box3.Rd0000755000176000001440000000273212237642733015544 0ustar ripleyusers\name{methods.box3} \Rdversion{1.1} \alias{methods.box3} %DoNotExport \alias{print.box3} \alias{unitname.box3} \alias{unitname<-.box3} \title{ Methods for Three-Dimensional Box } \description{ Methods for class \code{"box3"}. } \usage{ \method{print}{box3}(x, ...) \method{unitname}{box3}(x) \method{unitname}{box3}(x) <- value } \arguments{ \item{x}{ Object of class \code{"box3"} representing a three-dimensional box. 
} \item{\dots}{ Other arguments passed to \code{print.default}. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}} and \code{\link{unitname}} for the class \code{"box3"} of three-dimensional boxes. The \code{print} method prints a description of the box, while the \code{unitname} method extracts the name of the unit of length in which the box coordinates are expressed. } \value{ For \code{print.box3} the value is \code{NULL}. For \code{unitname.box3} an object of class \code{"units"}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{box3}}, \code{\link{print}}, \code{\link{unitname}} } \examples{ X <- box3(c(0,10),c(0,10),c(0,5), unitname=c("metre", "metres")) X unitname(X) # Northern European usage unitname(X) <- "meter" } \keyword{spatial} \keyword{methods} spatstat/man/by.ppp.Rd0000755000176000001440000000535312237642732014440 0ustar ripleyusers\name{by.ppp} \alias{by.ppp} \title{Apply a Function to a Point Pattern Broken Down by Factor} \description{ Splits a point pattern into sub-patterns, and applies the function to each sub-pattern. } \usage{ \method{by}{ppp}(data, INDICES=marks(data), FUN, ...) } \arguments{ \item{data}{Point pattern (object of class \code{"ppp"}).} \item{INDICES}{Grouping variable. Either a factor, a pixel image with factor values, or a tessellation.} \item{FUN}{Function to be applied to subsets of \code{data}.} \item{\dots}{Additional arguments to \code{FUN}.} } \details{ This is a method for the generic function \code{\link{by}} for point patterns (class \code{"ppp"}). The point pattern \code{data} is first divided into subsets according to \code{INDICES}. Then the function \code{FUN} is applied to each subset. The results of each computation are returned in a list. 
The argument \code{INDICES} may be \itemize{ \item a factor, of length equal to the number of points in \code{data}. The levels of \code{INDICES} determine the destination of each point in \code{data}. The \code{i}th point of \code{data} will be placed in the sub-pattern \code{split.ppp(data)$l} where \code{l = f[i]}. \item a pixel image (object of class \code{"im"}) with factor values. The pixel value of \code{INDICES} at each point of \code{data} will be used as the classifying variable. \item a tessellation (object of class \code{"tess"}). Each point of \code{data} will be classified according to the tile of the tessellation into which it falls. } If \code{INDICES} is missing, then \code{data} must be a multitype point pattern (a marked point pattern whose marks vector is a factor). Then the effect is that the points of each type are separated into different point patterns. } \value{ A list (also of class \code{"listof"}) containing the results returned from \code{FUN} for each of the subpatterns. } \seealso{ \code{\link{ppp}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}}, \code{\link{tess}}, \code{\link{im}}. 
} \examples{ # multitype point pattern, broken down by type data(amacrine) by(amacrine, FUN=density) by(amacrine, FUN=function(x) { min(nndist(x)) } ) # how to pass additional arguments to FUN by(amacrine, FUN=clarkevans, correction=c("Donnelly","cdf")) # point pattern broken down by tessellation data(swedishpines) tes <- quadrats(swedishpines, 5, 5) B <- by(swedishpines, tes, clarkevans, correction="Donnelly") unlist(lapply(B, as.numeric)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/Jinhom.Rd0000644000176000001440000001345712237642731014454 0ustar ripleyusers\name{Jinhom} \alias{Jinhom} \title{ Inhomogeneous J-function } \description{ Estimates the inhomogeneous \eqn{J} function of a non-stationary point pattern. } \usage{ Jinhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{J} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. 
} \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{J} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ An alternative to the argument \code{r}. Not normally invoked by the user. See Details. } } \details{ This command computes estimates of the inhomogeneous \eqn{J}-function (Van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the \eqn{J} function for homogeneous point patterns computed by \code{\link{Jest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{J} function is computed as \eqn{Jinhom(r) = (1 - Ginhom(r))/(1-Finhom(r))} where \eqn{Ginhom, Finhom} are the inhomogeneous \eqn{G} and \eqn{F} functions computed using the border correction (equations (7) and (6) respectively in Van Lieshout, 2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"}) whose fitted \emph{trend} can be used as the fitted intensity. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. 
The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. 
} \seealso{ \code{\link{Ginhom}}, \code{\link{Finhom}}, \code{\link{Jest}} } \examples{ \dontrun{ plot(Jinhom(swedishpines, sigma=bw.diggle, adjust=2)) } plot(Jinhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/lgcp.estpcf.Rd0000755000176000001440000002017112237642732015433 0ustar ripleyusers\name{lgcp.estpcf} \alias{lgcp.estpcf} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model (with exponential covariance function) to a point pattern dataset by the Method of Minimum Contrast using the pair correlation function. } \usage{ lgcp.estpcf(X, startpar=c(sigma2=1,alpha=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs=list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. } } \details{ This algorithm fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. 
The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical pair correlation function of the LGCP model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (Moller and Waagepetersen, 2003, pp. 72-76). To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The theoretical pair correlation function of the LGCP is \deqn{ g(r) = \exp(C(r)) }{ g(r) = exp(C(r)) } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda= exp(mu + C(0)/2). 
} The covariance function \eqn{C(r)} takes the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm. The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. It may be any of the covariance functions recognised by the command \code{\link[RandomFields:CovarianceFct]{Covariance}} in the \pkg{RandomFields} package. The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha). } The argument \code{covmodel} should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is the string name of one of the covariance models recognised by the command \code{\link[RandomFields:CovarianceFct]{Covariance}} in the \pkg{RandomFields} package, and \code{\dots} are arguments of the form \code{tag=value} giving the values of parameters controlling the shape of these models. For example the exponential covariance is specified by \code{covmodel=list(model="exponential")} while the Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified by \code{covmodel=list(model="matern", nu=0.3)}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. 
Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Moller, J, Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, with modifications by Shen Guochun and Rasmus Waagepetersen \email{rw@math.auc.dk} } \seealso{ \code{\link{lgcp.estK}} for alternative method of fitting LGCP. \code{\link{matclust.estpcf}}, \code{\link{thomas.estpcf}} for other models. \code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link[RandomFields:CovarianceFct]{Covariance}} in the \pkg{RandomFields} package, for covariance function models. \code{\link{pcf}} for the pair correlation function. } \examples{ data(redwood) u <- lgcp.estpcf(redwood, c(sigma2=0.1, alpha=1)) u plot(u) if(require(RandomFields)) { lgcp.estpcf(redwood, covmodel=list(model="matern", nu=0.3)) } } \keyword{spatial} \keyword{models} spatstat/man/runifpointOnLines.Rd0000755000176000001440000000345012237642734016713 0ustar ripleyusers\name{runifpointOnLines} \alias{runifpointOnLines} \title{Generate N Uniform Random Points On Line Segments} \description{ Given a line segment pattern, generate a random point pattern consisting of \code{n} points uniformly distributed on the line segments. } \usage{ runifpointOnLines(n, L) } \arguments{ \item{n}{Number of points to generate.} \item{L}{Line segment pattern (object of class \code{"psp"}) on which the points should lie. } } \details{ This command generates a point pattern consisting of \code{n} independent random points, each point uniformly distributed on the line segment pattern. This means that, for each random point, \itemize{ \item the probability of falling on a particular segment is proportional to the length of the segment; and \item given that the point falls on a particular segment, it has uniform probability density along that segment. } If \code{n} is a single integer, the result is an unmarked point pattern containing \code{n} points. 
If \code{n} is a vector of integers, the result is a marked point pattern, with \code{m} different types of points, where \code{m = length(n)}, in which there are \code{n[j]} points of type \code{j}. } \value{ A point pattern (object of class \code{"ppp"}) with the same window as \code{L}. } \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{pointsOnLines}}, \code{\link{runifpoint}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) Y <- runifpointOnLines(20, X) plot(X, main="") plot(Y, add=TRUE) Z <- runifpointOnLines(c(5,5), X) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/model.depends.Rd0000755000176000001440000000711112237642733015744 0ustar ripleyusers\name{model.depends} \alias{model.depends} \alias{model.is.additive} \alias{model.covariates} \alias{has.offset.term} \alias{has.offset} \title{ Identify Covariates Involved in each Model Term } \description{ Given a fitted model (of any kind), identify which of the covariates is involved in each term of the model. } \usage{ model.depends(object) model.is.additive(object) model.covariates(object, fitted=TRUE, offset=TRUE) has.offset.term(object) has.offset(object) } \arguments{ \item{object}{ A fitted model of any kind. } \item{fitted,offset}{ Logical values determining which type of covariates to include. } } \details{ The \code{object} can be a fitted model of any kind, including models of the classes \code{\link{lm}}, \code{\link{glm}} and \code{\link{ppm}}. To be precise, \code{object} must belong to a class for which there are methods for \code{\link{formula}}, \code{\link{terms}} and \code{\link{model.matrix}}. 
The command \code{model.depends} determines the relationship between the original covariates (the data supplied when \code{object} was fitted) and the canonical covariates (the columns of the design matrix). It returns a logical matrix, with one row for each canonical covariate, and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th canonical covariate depends on the \code{j}th original covariate. If the model formula of \code{object} includes offset terms (see \code{\link{offset}}), then the return value of \code{model.depends} also has an attribute \code{"offset"}. This is a logical value or matrix with one row for each offset term and one column for each of the original covariates, with the \code{i,j} entry equal to \code{TRUE} if the \code{i}th offset term depends on the \code{j}th original covariate. The command \code{model.covariates} returns a character vector containing the names of all (original) covariates that were actually used to fit the model. By default, this includes all covariates that appear in the model formula, including offset terms as well as canonical covariate terms. To omit the offset terms, set \code{offset=FALSE}. To omit the canonical covariate terms, set \code{fitted=FALSE}. The command \code{model.is.additive} determines whether the model is additive, in the sense that there is no canonical covariate that depends on two or more original covariates. It returns a logical value. The command \code{has.offset.term} is a faster way to determine whether the model \emph{formula} includes an \code{offset} term. The functions \code{model.depends} and \code{has.offset.term} only detect \code{offset} terms which are present in the model formula. They do not detect numerical offsets in the model object, that were inserted using the \code{offset} argument in \code{lm}, \code{glm} etc. To detect the presence of offsets of both kinds, use \code{has.offset}. 
} \value{ A logical value or matrix. } \seealso{ \code{\link{ppm}}, \code{\link{model.matrix}} } \examples{ x <- 1:10 y <- 3*x + 2 z <- rep(c(-1,1), 5) fit <- lm(y ~ poly(x,2) + sin(z)) model.depends(fit) model.covariates(fit) model.is.additive(fit) fitoff1 <- lm(y ~ x + offset(z)) fitoff2 <- lm(y ~ x, offset=z) has.offset.term(fitoff1) has.offset(fitoff1) has.offset.term(fitoff2) has.offset(fitoff2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/pixellate.psp.Rd0000755000176000001440000000423112237642733016013 0ustar ripleyusers\name{pixellate.psp} %DontDeclareMethods \alias{pixellate.psp} \title{ Convert Line Segment Pattern to Pixel Image } \description{ Converts a line segment pattern to a pixel image by measuring the length of lines intersecting each pixel. } \usage{ \method{pixellate}{psp}(x, W=NULL, ..., weights = NULL) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}). } \item{W}{ Optional window (object of class \code{"owin"}) determining the pixel resolution. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } \item{weights}{ Optional vector of weights associated with each line segment. } } \details{ This function converts a line segment pattern to a pixel image by computing, for each pixel, the total length of intersection between the pixel and the line segments. This is a method for the generic function \code{\link{pixellate}} for the class of line segment patterns. The pixel raster is determined by \code{W} and the optional arguments \code{\dots}. If \code{W} is missing or \code{NULL}, it defaults to the window containing \code{x}. Then \code{W} is converted to a binary pixel mask using \code{\link{as.mask}}. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. 
If \code{weights} are given, then the length of the intersection between line segment \code{i} and pixel \code{j} is multiplied by \code{weights[i]} before the lengths are summed for each pixel. } \value{ A pixel image (object of class \code{"im"}) with numeric values. } \seealso{ \code{\link{pixellate}}, \code{\link{as.mask}}, \code{\link{as.mask.psp}}. Use \code{\link{as.mask.psp}} if you only want to know which pixels are intersected by lines. } \examples{ X <- psp(runif(10),runif(10), runif(10), runif(10), window=owin()) plot(pixellate(X)) plot(X, add=TRUE) sum(lengths.psp(X)) sum(pixellate(X)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/Kmulti.Rd0000755000176000001440000001717712237642731014503 0ustar ripleyusers\name{Kmulti} \alias{Kmulti} \title{ Marked K-Function } \description{ For a marked point pattern, estimate the multitype \eqn{K} function which counts the expected number of points of subset \eqn{J} within a given distance from a typical point in subset \code{I}. } \usage{ Kmulti(X, I, J, r=NULL, breaks=NULL, correction, \dots, ratio=FALSE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. 
Not normally invoked by the user. See the \bold{Details} section. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. } \item{\dots}{Ignored.} \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of each edge-corrected estimate will also be saved, for use in analysing replicated point patterns. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. If \code{ratio=TRUE} then the return value also has two attributes called \code{"numerator"} and \code{"denominator"} which are \code{"fv"} objects containing the numerators and denominators of each estimate of \eqn{K(r)}. } \details{ The function \code{Kmulti} generalises \code{\link{Kest}} (for unmarked point patterns) and \code{\link{Kdot}} and \code{\link{Kcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. The multitype \eqn{K} function is defined so that \eqn{\lambda_J K_{IJ}(r)}{lambda[J] KIJ(r)} equals the expected number of additional random points of \eqn{X_J}{X[J]} within a distance \eqn{r} of a typical point of \eqn{X_I}{X[I]}. 
Here \eqn{\lambda_J}{lambda[J]} is the intensity of \eqn{X_J}{X[J]} i.e. the expected number of points of \eqn{X_J}{X[J]} per unit area. The function \eqn{K_{IJ}}{KIJ} is determined by the second order moment properties of \eqn{X}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). 
This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The function \eqn{K_{IJ}}{KIJ} is not necessarily differentiable. The border correction (reduced sample) estimator of \eqn{K_{IJ}}{KIJ} used here is pointwise approximately unbiased, but need not be a nondecreasing function of \eqn{r}, while the true \eqn{K_{IJ}}{KIJ} must be nondecreasing. 
} \seealso{ \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kest}}, \code{\link{pcf}} } \examples{ # Longleaf Pine data: marks represent diameter \testonly{ longleaf <- longleaf[seq(1,npoints(longleaf), by=50), ] } K <- Kmulti(longleaf, longleaf$marks <= 15, longleaf$marks >= 25) plot(K) # functions determining subsets f1 <- function(X) { marks(X) <= 15 } f2 <- function(X) { marks(X) >= 15 } K <- Kmulti(longleaf, f1, f2) \testonly{ rm(longleaf) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/Gres.Rd0000755000176000001440000000552112237642731014124 0ustar ripleyusers\name{Gres} \Rdversion{1.1} \alias{Gres} \title{ Residual G Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the residual \eqn{G} function, which serves as a diagnostic for goodness-of-fit of the model. } \usage{ Gres(object, ...) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}), a point pattern (object of class \code{"ppp"}), a quadrature scheme (object of class \code{"quad"}), or the value returned by a previous call to \code{\link{Gcom}}. } \item{\dots}{ Arguments passed to \code{\link{Gcom}}. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes a residual version of the \eqn{G} function of the dataset, which should be approximately zero if the model is a good fit to the data. In normal use, \code{object} is a fitted point process model or a point pattern. Then \code{Gres} first calls \code{\link{Gcom}} to compute both the nonparametric estimate of the \eqn{G} function and its model compensator. Then \code{Gres} computes the difference between them, which is the residual \eqn{G}-function. 
Alternatively, \code{object} may be a function value table (object of class \code{"fv"}) that was returned by a previous call to \code{\link{Gcom}}. Then \code{Gres} computes the residual from this object. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} Ege Rubak and Jesper Moller. } \seealso{ Related functions: \code{\link{Gcom}}, \code{\link{Gest}}. Alternative functions: \code{\link{Kres}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{psst}}. Model-fitting: \code{\link{ppm}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson G0 <- Gres(fit0) plot(G0) # Hanisch correction estimate plot(G0, hres ~ r) # uniform Poisson is clearly not correct fit1 <- ppm(cells, ~1, Strauss(0.08)) plot(Gres(fit1), hres ~ r) # fit looks approximately OK; try adjusting interaction distance plot(Gres(cells, interaction=Strauss(0.12))) # How to make envelopes \dontrun{ E <- envelope(fit1, Gres, interaction=as.interact(fit1), nsim=39) plot(E) } # For computational efficiency Gc <- Gcom(fit1) G1 <- Gres(Gc) } \keyword{spatial} \keyword{models} spatstat/man/runifpoint.Rd0000755000176000001440000000524512237642734015427 0ustar ripleyusers\name{runifpoint} \alias{runifpoint} \title{Generate N Uniform Random Points} \description{ Generate a random point pattern containing \eqn{n} independent uniform random points. } \usage{ runifpoint(n, win=owin(c(0,1),c(0,1)), giveup=1000, warn=TRUE) } \arguments{ \item{n}{ Number of points. } \item{win}{ Window in which to simulate the pattern. 
An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{warn}{ Logical. Whether to issue a warning if \code{n} is very large. See Details. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). } \details{ This function generates \code{n} independent random points, uniformly distributed in the window \code{win}. (For nonuniform distributions, see \code{\link{rpoint}}.) The algorithm depends on the type of window, as follows: \itemize{ \item If \code{win} is a rectangle then \eqn{n} independent random points, uniformly distributed in the rectangle, are generated by assigning uniform random values to their cartesian coordinates. \item If \code{win} is a binary image mask, then a random sequence of pixels is selected (using \code{\link{sample}}) with equal probabilities. Then for each pixel in the sequence we generate a uniformly distributed random point in that pixel. \item If \code{win} is a polygonal window, the algorithm uses the rejection method. It finds a rectangle enclosing the window, generates points in this rectangle, and tests whether they fall in the desired window. It gives up when \code{giveup * n} tests have been performed without yielding \code{n} successes. } The algorithm for binary image masks is faster than the rejection method but involves discretisation. If \code{warn=TRUE}, then a warning will be issued if \code{n} is very large. The threshold is \code{\link{spatstat.options}("huge.npoints")}. This warning has no consequences, but it helps to trap a number of common errors. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{rpoispp}}, \code{\link{rpoint}} } \examples{ # 100 random points in the unit square pp <- runifpoint(100) # irregular window data(letterR) # polygonal pp <- runifpoint(100, letterR) # binary image mask pp <- runifpoint(100, as.mask(letterR)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/affine.Rd0000755000176000001440000000243512237642732014456 0ustar ripleyusers\name{affine} \alias{affine} \title{Apply Affine Transformation} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a plane geometrical object, such as a point pattern or a window. } \usage{ affine(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), a window (object of class \code{"owin"}) or a pixel image (object of class \code{"im"}). } \item{\dots}{Arguments determining the affine transformation.} } \value{ Another object of the same type, representing the result of applying the affine transformation. } \details{ This is generic. Methods are provided for point patterns (\code{\link{affine.ppp}}) and windows (\code{\link{affine.owin}}). 
} \seealso{ \code{\link{affine.ppp}}, \code{\link{affine.psp}}, \code{\link{affine.owin}}, \code{\link{affine.im}}, \code{\link{flipxy}}, \code{\link{reflect}}, \code{\link{rotate}}, \code{\link{shift}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/ppx.Rd0000755000176000001440000000640412240375346014033 0ustar ripleyusers\name{ppx} \Rdversion{1.1} \alias{ppx} \title{ Multidimensional Space-Time Point Pattern } \description{ Creates a multidimensional space-time point pattern with any kind of coordinates and marks. } \usage{ ppx(data, domain=NULL, coord.type=NULL, simplify=FALSE) } \arguments{ \item{data}{ The coordinates and marks of the points. A \code{data.frame} or \code{hyperframe}. } \item{domain}{ Optional. The space-time domain containing the points. An object in some appropriate format, or \code{NULL}. } \item{coord.type}{ Character vector specifying how each column of \code{data} should be interpreted: as a spatial coordinate, a temporal coordinate, a local coordinate or a mark. Entries are partially matched to the values \code{"spatial"}, \code{"temporal"}, \code{"local"} and \code{"mark"}. } \item{simplify}{ Logical value indicating whether to simplify the result in special cases. If \code{simplify=TRUE}, a two-dimensional point pattern will be returned as an object of class \code{"ppp"}, and a three-dimensional point pattern will be returned as an object of class \code{"pp3"}. If \code{simplify=FALSE} (the default) then the result is always an object of class \code{"ppx"}. } } \details{ An object of class \code{"ppx"} represents a marked point pattern in multidimensional space and/or time. There may be any number of spatial coordinates, any number of temporal coordinates, any number of local coordinates, and any number of mark variables. 
The individual marks may be atomic (numeric values, factor values, etc) or objects of any kind. The argument \code{data} should contain the coordinates and marks of the points. It should be a \code{data.frame} or more generally a \code{hyperframe} (see \code{\link{hyperframe}}) with one row of data for each point. Each column of \code{data} is either a spatial coordinate, a temporal coordinate, a local coordinate, or a mark variable. The argument \code{coord.type} determines how each column is interpreted. It should be a character vector, of length equal to the number of columns of \code{data}. It should contain strings that partially match the values \code{"spatial"}, \code{"temporal"}, \code{"local"} and \code{"mark"}. (The first letters will be sufficient.) By default (if \code{coord.type} is missing or \code{NULL}), columns of numerical data are assumed to represent spatial coordinates, while other columns are assumed to be marks. } \value{ Usually an object of class \code{"ppx"}. If \code{simplify=TRUE} the result may be an object of class \code{"ppp"} or \code{"pp3"}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{pp3}}, \code{\link{print.ppx}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4), age=rep(c("old", "new"), 2), size=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t","m","m")) X val <- 20 * runif(4) E <- lapply(val, function(s) { rpoispp(s) }) hf <- hyperframe(t=val, e=as.listof(E)) Z <- ppx(data=hf, domain=c(0,1)) Z } \keyword{spatial} \keyword{datagen} spatstat/man/finpines.Rd0000755000176000001440000000511612237642732015040 0ustar ripleyusers\name{finpines} \alias{finpines} \alias{finpines.extra} \docType{data} \title{ Pine saplings in Finland. } \description{ The data record the locations of 126 pine saplings in a Finnish forest, their heights and their diameters. 
The dataset \code{finpines} is a marked point pattern containing the locations of the saplings marked by their heights and their diameters. Sapling locations are given in metres (to six significant digits); heights are in metres (rounded to the nearest 0.1 metre, except in one case to the nearest 0.05 metres); diameters are in centimetres (rounded to the nearest centimetre). The data were recorded by Professor Seppo Kellomaki, Faculty of Forestry, University of Joensuu, Finland, and subsequently massaged by Professor Antti Penttinen, Department of Statistics, University of Jyv\"askyl\"a, Finland. Originally the point locations were observed in polar coordinates with rather poor angular precision. Hence the coordinates are imprecise for large radius because of rounding errors: indeed the alignments can be observed by eye. The data were manipulated by Prof Penttinen by making small angular perturbations at random. After this transformation, the original data (in a circular plot) were clipped to a square window, for convenience. Professor Penttinen emphasises that the data were intended only for initial experimentation. They have some strange features. For example, if the height is less than 1.3 metres then the diameter can be uncertain. Also there are some very close pairs of points. Some pairs of trees (namely (58,59), (78,79), (96,97) and (102,103)) violate the requirement that the interpoint distance should be greater than half the sum of their diameters. These data have subsequently been analysed by Van Lieshout (2004). } \format{ Object of class \code{"ppp"} representing the point pattern of sapling locations marked by their heights and diameters. See \code{\link{ppp.object}} for details of the format. 
} \usage{data(finpines)} \examples{ data(finpines) plot(unmark(finpines), main="Finnish pines: locations") plot(finpines, which.marks="height", main="heights") plot(finpines, which.marks="diameter", main="diameters") plot(finpines, which.marks="diameter", main="diameters to scale", markscale=1/200) } \source{Prof Antti Penttinen} \references{ Van Lieshout, M.N.M. (2004) A J-function for marked point patterns. Research Report PNA-R0404, June 2004. Centrum voor Wiskunde en Informatica (CWI), Amsterdam, 2004. } \keyword{datasets} \keyword{spatial} spatstat/man/nncross.lpp.Rd0000644000176000001440000000716112237642733015504 0ustar ripleyusers\name{nncross.lpp} \alias{nncross.lpp} \title{Nearest Neighbours on a Linear Network} \description{ Given two point patterns \code{X} and \code{Y} on a linear network, finds the nearest neighbour in \code{Y} of each point of \code{X} using the shortest path in the network. } \usage{ \method{nncross}{lpp}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, method="C") } \arguments{ \item{X,Y}{ Point patterns on a linear network (objects of class \code{"lpp"}). They must lie on the \emph{same} linear network. } \item{iX, iY}{ Optional identifiers, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{\dots}{Ignored.} \item{method}{ Internal use only. } } \details{ Given two point patterns \code{X} and \code{Y} on the same linear network, this function finds, for each point of \code{X}, the nearest point of \code{Y}, measuring distance by the shortest path in the network. The distance between these points is also computed. The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. 
the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. Note that this function is not symmetric in \code{X} and \code{Y}. To find the nearest neighbour in \code{X} of each point in \code{Y}, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. } \value{ By default (if \code{what=c("dist", "which")}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"}, a vector of nearest neighbour distances. If \code{what="which"}, a vector of nearest neighbour indices. } \seealso{ \code{\link{nndist.lpp}} for nearest neighbour distances in a single point pattern. 
} \examples{ # two different point patterns X <- runiflpp(3, simplenet) Y <- runiflpp(5, simplenet) nn <- nncross(X,Y) nn plot(simplenet, main="nncross") plot(X, add=TRUE, col="red") plot(Y, add=TRUE, col="blue") XX <- as.ppp(X) YY <- as.ppp(Y) i <- nn$which arrows(XX$x, XX$y, YY[i]$x, YY[i]$y, length=0.15) # two patterns with some points in common X <- Y[1:2] iX <- 1:2 iY <- 1:5 nncross(X,Y, iX, iY) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz}, } \keyword{spatial} \keyword{math} spatstat/man/methods.linnet.Rd0000755000176000001440000000454412237642733016165 0ustar ripleyusers\name{methods.linnet} \alias{methods.linnet} %DoNotExport \Rdversion{1.1} \alias{as.linnet} \alias{as.linnet.linnet} \alias{as.linnet.lpp} \alias{as.owin.linnet} \alias{as.psp.linnet} \alias{print.linnet} \alias{summary.linnet} \alias{unitname.linnet} \alias{unitname<-.linnet} \title{ Methods for Linear Networks } \description{ These are methods for the class \code{"linnet"} of linear networks. } \usage{ as.linnet(X, ...) \method{as.linnet}{linnet}(X, ...) \method{as.linnet}{lpp}(X, ..., fatal=TRUE) \method{as.owin}{linnet}(W, ...) \method{as.psp}{linnet}(x, ..., fatal=TRUE) \method{print}{linnet}(x, ...) \method{summary}{linnet}(object, ...) \method{unitname}{linnet}(x) \method{unitname}{linnet}(x) <- value } \arguments{ \item{x,X,object,W}{ An object of class \code{"linnet"} representing a linear network. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ A valid name for the unit of length for \code{x}. See \code{\link{unitname}}. } \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). } } \details{ The function \code{as.linnet} is generic. It converts data from some other format into an object of class \code{"linnet"}. 
The method \code{as.linnet.lpp} extracts the linear network information from an \code{lpp} object. The other functions are methods for the generic commands \code{\link{as.owin}}, \code{\link{as.psp}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}} and \code{\link{unitname<-}} for the class \code{"linnet"}. The method \code{as.owin.linnet} extracts the window containing the linear network, and returns it as an object of class \code{"owin"}. The method \code{as.psp.linnet} extracts the lines of the linear network as a line segment pattern (object of class \code{"psp"}). } \value{ For \code{as.linnet} the value is an object of class \code{"linnet"}. For other functions, see the help file for the corresponding generic function. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{linnet}} } \examples{ data(simplenet) simplenet summary(simplenet) unitname(simplenet) <- c("cubit", "cubits") } \keyword{spatial} \keyword{methods} spatstat/man/Linhom.Rd0000755000176000001440000000554312237642731014456 0ustar ripleyusers\name{Linhom} \alias{Linhom} \title{L-function} \description{ Calculates an estimate of the inhomogeneous version of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) for a spatial point pattern. } \usage{ Linhom(...) } \arguments{ \item{\dots}{ Arguments passed to \code{\link{Kinhom}} to estimate the inhomogeneous K-function. } } \details{ This command computes an estimate of the inhomogeneous version of the \eqn{L}-function for a spatial point pattern The original \eqn{L}-function is a transformation (proposed by Besag) of Ripley's \eqn{K}-function, \deqn{L(r) = \sqrt{\frac{K(r)}{\pi}}}{L(r) = sqrt(K(r)/pi)} where \eqn{K(r)} is the Ripley \eqn{K}-function of a spatially homogeneous point pattern, estimated by \code{\link{Kest}}. 
The inhomogeneous \eqn{L}-function is the corresponding transformation of the inhomogeneous \eqn{K}-function, estimated by \code{\link{Kinhom}}. It is appropriate when the point pattern clearly does not have a homogeneous intensity of points. It was proposed by Baddeley, Moller and Waagepetersen (2000). The command \code{Linhom} first calls \code{\link{Kinhom}} to compute the estimate of the inhomogeneous K-function, and then applies the square root transformation. For a Poisson point pattern (homogeneous or inhomogeneous), the theoretical value of the inhomogeneous \eqn{L}-function is \eqn{L(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L} has been estimated } \item{theo}{the theoretical value \eqn{L(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L(r)} obtained by the edge corrections named. } \references{ Baddeley, A., Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. 
} \seealso{ \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Kinhom}}, \code{\link{pcf}} } \examples{ data(japanesepines) X <- japanesepines L <- Linhom(X, sigma=0.1) plot(L, main="Inhomogeneous L function for Japanese Pines") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/Lcross.inhom.Rd0000755000176000001440000001011112237642731015571 0ustar ripleyusers\name{Lcross.inhom} \alias{Lcross.inhom} \title{ Inhomogeneous Cross Type L Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross-type \eqn{L} function. } \usage{ Lcross.inhom(X, i, j, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{\dots}{ Other arguments passed to \code{\link{Kcross.inhom}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{L_{ij}(r)}{Lij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{L_{ij}(r)}{Lij(r)} for a marked Poisson process, identically equal to \code{r} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}(r)}{Lij(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Lcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Linhom}}. All the arguments are passed to \code{\link{Kcross.inhom}}, which estimates the inhomogeneous multitype K function \eqn{K_{ij}(r)}{Kij(r)} for the point pattern. The resulting values are then transformed by taking \eqn{L(r) = \sqrt{K(r)/\pi}}{L(r) = sqrt(K(r)/pi)}. } \references{ Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. 
} \seealso{ \code{\link{Lcross}}, \code{\link{Linhom}}, \code{\link{Kcross.inhom}} } \examples{ # Lansing Woods data data(lansing) lansing <- lansing[seq(1,lansing$n, by=10)] ma <- split(lansing)$maple wh <- split(lansing)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") L <- Lcross.inhom(lansing, "whiteoak", "maple", lambdaW, lambdaM) # method (2): fit parametric intensity model fit <- ppm(lansing, ~marks * polynom(x,y,2)) # evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE) # split according to types of points lambda <- split(inten, lansing$marks) L <- Lcross.inhom(lansing, "whiteoak", "maple", lambda$whiteoak, lambda$maple) # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) L <- Lcross.inhom(X, "A", "B", lambdaI=as.im(50, X$window), lambdaJ=lamB) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/erosion.Rd0000755000176000001440000000636012243310060014664 0ustar ripleyusers\name{erosion} %DontDeclareMethods \alias{erosion} \alias{erosion.owin} \alias{erosion.ppp} \alias{erosion.psp} \title{Morphological Erosion} \description{ Perform morphological erosion of a window, a line segment pattern or a point pattern. 
} \usage{ erosion(w, r, \dots) \method{erosion}{owin}(w, r, shrink.frame=TRUE, \dots, strict=FALSE, polygonal=NULL) \method{erosion}{ppp}(w, r,\dots) \method{erosion}{psp}(w, r,\dots) } \arguments{ \item{w}{ A window (object of class \code{"owin"}) or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of erosion.} \item{shrink.frame}{logical: if \code{TRUE}, erode the bounding rectangle as well.} \item{\dots}{extra arguments to \code{\link{as.mask}} controlling the pixel resolution, if pixel approximation is used.} \item{strict}{Logical flag determining the fate of boundary pixels, if pixel approximation is used. See details.} \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the erosion (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the eroded region (or \code{NULL} if this region is empty). If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological erosion of a set \eqn{W} by a distance \eqn{r > 0} is the subset consisting of all points \eqn{x \in W}{x in W} such that the distance from \eqn{x} to the boundary of \eqn{W} is greater than or equal to \eqn{r}. In other words it is the result of trimming a margin of width \eqn{r} off the set \eqn{W}. If \code{polygonal=TRUE} then a polygonal approximation to the erosion is computed. If \code{polygonal=FALSE} then a pixel approximation to the erosion is computed from the distance map of \code{w}. The arguments \code{"\dots"} are passed to \code{\link{as.mask}} to control the pixel resolution. The erosion consists of all pixels whose distance from the boundary of \code{w} is strictly greater than \code{r} (if \code{strict=TRUE}) or is greater than or equal to \code{r} (if \code{strict=FALSE}). 
When \code{w} is a window, the default (when \code{polygonal=NULL}) is to compute a polygonal approximation if \code{w} is a rectangle or polygonal window, and to compute a pixel approximation if \code{w} is a window of type \code{"mask"}. If \code{shrink.frame} is false, the resulting window is given the same outer, bounding rectangle as the original window \code{w}. If \code{shrink.frame} is true, the original bounding rectangle is also eroded by the same distance \code{r}. To simply compute the area of the eroded window, use \code{\link{eroded.areas}}. } \seealso{ \code{\link{dilation}} for the opposite operation. \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{eroded.areas}} } \examples{ plot(letterR, main="erosion(letterR, 0.2)") plot(erosion(letterR, 0.2), add=TRUE, col="red") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/as.data.frame.ppp.Rd0000755000176000001440000000220612237642732016424 0ustar ripleyusers\name{as.data.frame.ppp} \alias{as.data.frame.ppp} \title{Coerce Point Pattern to a Data Frame} \description{ Extracts the coordinates of the points in a point pattern, and their marks if any, and returns them in a data frame. } \usage{ \method{as.data.frame}{ppp}(x, row.names = NULL, ...) } \arguments{ \item{x}{Point pattern (object of class \code{"ppp"}).} \item{row.names}{Optional character vector of row names.} \item{\dots}{Ignored.} } \details{ This is a method for the generic function \code{\link{as.data.frame}} for the class \code{"ppp"} of point patterns. It extracts the coordinates of the points in the point pattern, and returns them as columns named \code{x} and \code{y} in a data frame. If the points were marked, the marks are returned as a column named \code{marks} with the same type as in the point pattern dataset. } \value{ A data frame. 
} \examples{ data(amacrine) df <- as.data.frame(amacrine) df[1:5,] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/cells.Rd0000755000176000001440000000207212237642732014325 0ustar ripleyusers\name{cells} \alias{cells} \docType{data} \title{ Biological Cells Point Pattern } \description{ The data record the locations of the centres of 42 biological cells observed under optical microscopy in a histological section. The microscope field-of-view has been rescaled to the unit square. The data were recorded by F.H.C. Crick and B.D. Ripley, and analysed in Ripley (1977, 1981) and Diggle (1983). They are often used as a canonical example of an `ordered' point pattern. } \format{ An object of class \code{"ppp"} representing the point pattern of cell centres. See \code{\link{ppp.object}} for details of the format. } \usage{data(cells)} \source{Crick and Ripley, see Ripley (1977)} \references{ Diggle, P.J. (1983) \emph{Statistical analysis of spatial point patterns}. Academic Press. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B} \bold{39}, 172--212. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. } \keyword{datasets} \keyword{spatial} spatstat/man/nnclean.Rd0000755000176000001440000000703212237642733014643 0ustar ripleyusers\name{nnclean} \alias{nnclean} \alias{nnclean.ppp} \alias{nnclean.pp3} \title{ Nearest Neighbour Clutter Removal } \description{ Detect features in a 2D or 3D spatial point pattern using nearest neighbour clutter removal. } \usage{ nnclean(X, k, ...) 
\method{nnclean}{ppp}(X, k, ..., edge.correct = FALSE, wrap = 0.1, convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) \method{nnclean}{pp3}(X, k, ..., convergence = 0.001, plothist = FALSE, verbose = TRUE, maxit = 50) } \arguments{ \item{X}{ A two-dimensional spatial point pattern (object of class \code{"ppp"}) or a three-dimensional point pattern (object of class \code{"pp3"}). } \item{k}{ Degree of neighbour: \code{k=1} means nearest neighbour, \code{k=2} means second nearest, etc. } \item{\dots}{ Ignored. } \item{edge.correct}{ Logical flag specifying whether periodic edge correction should be performed (only implemented in 2 dimensions). } \item{wrap}{ Numeric value specifying the relative size of the margin in which data will be replicated for the periodic edge correction (if \code{edge.correct=TRUE}). A fraction of window width and window height. } \item{convergence}{ Tolerance threshold for testing convergence of EM algorithm. } \item{maxit}{ Maximum number of iterations for EM algorithm. } \item{plothist}{ Logical flag specifying whether to plot a diagnostic histogram of the nearest neighbour distances and the fitted distribution. } \item{verbose}{ Logical flag specifying whether to print progress reports. } } \details{ Byers and Raftery (1998) developed a technique for recognising features in a spatial point pattern in the presence of random clutter. For each point in the pattern, the distance to the \eqn{k}th nearest neighbour is computed. Then the E-M algorithm is used to fit a mixture distribution to the nearest neighbour distances. The mixture components represent the feature and the clutter. The mixture model can be used to classify each point as belonging to one or other component. The function \code{nnclean} is generic, with methods for two-dimensional point patterns (class \code{"ppp"}) and three-dimensional point patterns (class \code{"pp3"}) currently implemented. 
The result is a point pattern (2D or 3D) with two additional columns of marks: \describe{ \item{class}{ A factor, with levels \code{"noise"} and \code{"feature"}, indicating the maximum likelihood classification of each point. } \item{prob}{ Numeric vector giving the estimated probabilities that each point belongs to a feature. } } } \value{ An object of the same kind as \code{X}, obtained by attaching marks to the points of \code{X}. } \references{ Byers, S. and Raftery, A.E. (1998) Nearest-neighbour clutter removal for estimating features in spatial point processes. \emph{Journal of the American Statistical Association} \bold{93}, 577--584. } \author{ Original by Simon Byers and Adrian Raftery. Adapted for \pkg{spatstat} by Adrian Baddeley. } \seealso{ \code{\link{nndist}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}} } \examples{ data(shapley) X <- nnclean(shapley, k=17) plot(X, chars=c(".", "+"), cols=1:2) Y <- split(X) plot(Y, chars="+", cex=0.5) marks(X) <- marks(X)$prob plot(cut(X, breaks=3), chars=c(".", "+", "+"), cols=1:3) } \keyword{spatial} \keyword{classif} spatstat/man/mppm.Rd0000644000176000001440000002014112241443111014147 0ustar ripleyusers\name{mppm} \alias{mppm} \title{Fit Point Process Model to Several Point Patterns} \description{ Fits a Gibbs point process model to several point patterns simultaneously. } \usage{ mppm(formula, data, interaction=Poisson(), ..., iformula=NULL, use.gam = FALSE) } \arguments{ \item{formula}{ A formula describing the systematic part of the model. Variables in the formula are names of columns in \code{data}. } \item{data}{ A hyperframe (object of class \code{"hyperframe"}, see \code{\link[spatstat]{hyperframe}}) containing the point pattern responses and the explanatory variables. } \item{interaction}{ Interpoint interaction(s) appearing in the model. 
Either an object of class \code{"interact"} describing the point process interaction structure, or a hyperframe (with the same number of rows as \code{data}) whose entries are objects of class \code{"interact"}. } \item{\dots}{Arguments passed to \code{\link[spatstat]{ppm}} controlling the fitting procedure. } \item{iformula}{ Optional. A formula (with no left hand side) describing the interaction to be applied to each case. Each variable name in the formula should either be the name of a column in the hyperframe \code{interaction}, or the name of a column in the hyperframe \code{data} that is a vector or factor. } \item{use.gam}{Logical flag indicating whether to fit the model using \code{\link{gam}} or \code{\link{glm}}. } } \details{ This function fits a common point process model to a dataset containing several different point patterns. It extends the capabilities of the function \code{\link[spatstat]{ppm}} to deal with data such as \itemize{ \item replicated observations of spatial point patterns \item two groups of spatial point patterns \item a designed experiment in which the response from each unit is a point pattern. } The syntax of this function is similar to that of standard \R model-fitting functions like \code{\link{lm}} and \code{\link{glm}}. The first argument \code{formula} is an \R formula describing the systematic part of the model. The second argument \code{data} contains the responses and the explanatory variables. Other arguments determine the stochastic structure of the model. Schematically, the data are regarded as the results of a designed experiment involving \eqn{n} experimental units. Each unit has a \sQuote{response}, and optionally some \sQuote{explanatory variables} (covariates) describing the experimental conditions for that unit. In this context, \emph{the response from each unit is a point pattern}. The value of a particular covariate for each unit can be either a single value (numerical, logical or factor), or a spatial covariate. 
A \sQuote{spatial} covariate is a quantity that depends on spatial location, for example, the soil acidity or altitude at each location. For the purposes of \code{mppm}, a spatial covariate must be stored as a pixel image (object of class \code{"im"}) which gives the values of the covariate at a fine grid of locations. The argument \code{data} is a hyperframe (a generalisation of a data frame, see \code{\link[spatstat]{hyperframe}}). This is like a data frame except that the entries can be objects of any class. The hyperframe has one row for each experimental unit, and one column for each variable (response or explanatory variable). The \code{formula} should be an \R formula. The left hand side of \code{formula} determines the \sQuote{response} variable. This should be a single name, which should correspond to a column in \code{data}. The right hand side of \code{formula} determines the spatial trend of the model. It specifies the linear predictor, and effectively represents the \bold{logarithm} of the spatial trend. Variables in the formula must be the names of columns of \code{data}, or one of the reserved names \describe{ \item{x,y}{Cartesian coordinates of location} \item{marks}{Mark attached to point} \item{id}{which is a factor representing the serial number (\eqn{1} to \eqn{n}) of the point pattern, i.e. the row number in the data hyperframe. } } The column of responses in \code{data} must consist of point patterns (objects of class \code{"ppp"}). The individual point pattern responses can be defined in different spatial windows. If some of the point patterns are marked, then they must all be marked, and must have the same type of marks. 
The scope of models that can be fitted to each pattern is the same as the scope of \code{\link[spatstat]{ppm}}, that is, Gibbs point processes with interaction terms that belong to a specified list, including for example the Poisson process, Strauss process, Geyer's saturation model, and piecewise constant pairwise interaction models. The stochastic part of the model is determined by the arguments \code{interaction} and (optionally) \code{iformula}. \itemize{ \item In the simplest case, \code{interaction} is an object of class \code{"interact"}, determining the interpoint interaction structure of the point process model, for all experimental units. \item Alternatively, \code{interaction} may be a hyperframe, whose entries are objects of class \code{"interact"}. It should have the same number of rows as \code{data}. \itemize{ \item If \code{interaction} consists of only one column, then the entry in row \code{i} is taken to be the interpoint interaction for the \code{i}th experimental unit (corresponding to the \code{i}th row of \code{data}). \item If \code{interaction} has more than one column, then the argument \code{iformula} is also required. Each row of \code{interaction} determines several interpoint interaction structures that might be applied to the corresponding row of \code{data}. The choice of interaction is determined by \code{iformula}; this should be an \R formula, without a left hand side. For example if \code{interaction} has two columns called \code{A} and \code{B} then \code{iformula = ~B} indicates that the interpoint interactions are taken from the second column. } } Variables in \code{iformula} typically refer to column names of \code{interaction}. They can also be names of columns in \code{data}, but only for columns of numeric, logical or factor values. 
For example \code{iformula = ~B * group} (where \code{group} is a column of \code{data} that contains a factor) causes the model with interpoint interaction \code{B} to be fitted with different interaction parameters for each level of \code{group}. } \value{ An object of class \code{"mppm"} representing the fitted model. There are methods for \code{print}, \code{summary} and \code{coef} for this class. } \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \references{ Baddeley, A. and Turner, R. Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42} (2000) 283--322. Baddeley, A., Bischof, L., Sintorn, I.-M., Haggarty, S., Bell, M. and Turner, R. Analysis of a designed experiment where the response is a spatial point pattern. In preparation. } \seealso{ \code{\link[spatstat]{ppm}}, \code{\link{print.mppm}}, \code{\link{summary.mppm}}, \code{\link{coef.mppm}}, } \examples{ # Waterstriders data data(waterstriders) H <- hyperframe(Y = waterstriders) mppm(Y ~ 1, data=H) mppm(Y ~ 1, data=H, Strauss(7)) mppm(Y ~ id, data=H) mppm(Y ~ x, data=H) # Synthetic data from known model n <- 10 H <- hyperframe(V=1:n, U=runif(n, min=-1, max=1), M=factor(letters[1 + (1:n) \%\% 3])) H$Z <- setcov(square(1)) H$U <- with(H, as.im(U, as.rectangle(Z))) H$Y <- with(H, rpoispp(eval.im(exp(2+3*Z)), win=as.rectangle(Z))) fit <- mppm(Y ~Z + U + V, data=H) } \keyword{spatial} \keyword{models} spatstat/man/convexhull.xy.Rd0000755000176000001440000000255412237642732016056 0ustar ripleyusers\name{convexhull.xy} \alias{convexhull.xy} \title{Convex Hull of Points} \description{ Computes the convex hull of a set of points in two dimensions. 
} \usage{ convexhull.xy(x, y=NULL) } \arguments{ \item{x}{ vector of \code{x} coordinates of observed points, or a 2-column matrix giving \code{x,y} coordinates, or a list with components \code{x,y} giving coordinates (such as a point pattern object of class \code{"ppp"}.) } \item{y}{(optional) vector of \code{y} coordinates of observed points, if \code{x} is a vector.} } \value{ A window (an object of class \code{"owin"}). } \details{ Given an observed pattern of points with coordinates given by \code{x} and \code{y}, this function computes the convex hull of the points, and returns it as a window. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{convexhull}}, \code{\link{bounding.box.xy}}, \code{\link{ripras}} } \examples{ x <- runif(30) y <- runif(30) w <- convexhull.xy(x,y) plot(owin(), main="convexhull.xy(x,y)", lty=2) plot(w, add=TRUE) points(x,y) X <- rpoispp(30) plot(X, main="convexhull.xy(X)") plot(convexhull.xy(X), add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{utilities} spatstat/man/pairdist.ppx.Rd0000755000176000001440000000255412237642733015656 0ustar ripleyusers\name{pairdist.ppx} \alias{pairdist.ppx} \title{Pairwise Distances in Any Dimensions} \description{ Computes the matrix of distances between all pairs of points in a multi-dimensional point pattern. } \usage{ \method{pairdist}{ppx}(X, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ This is a method for the generic function \code{pairdist}. 
Given a multi-dimensional point pattern \code{X} (an object of class \code{"ppx"}), this function computes the Euclidean distances between all pairs of points in \code{X}, and returns the matrix of distances. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. } \seealso{ \code{\link{pairdist}}, \code{\link{crossdist}}, \code{\link{nndist}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),z=runif(4),w=runif(4)) X <- ppx(data=df) pairdist(X) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/cauchy.estpcf.Rd0000644000176000001440000001364512251535221015755 0ustar ripleyusers\name{cauchy.estpcf} \alias{cauchy.estpcf} \title{Fit the Neyman-Scott cluster process with Cauchy kernel} \description{ Fits the Neyman-Scott Cluster point process with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. } \usage{ cauchy.estpcf(X, startpar=c(kappa=1,eta2=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ..., pcfargs = list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. 
} } \details{ This algorithm fits the Neyman-Scott cluster point process model with Cauchy kernel to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Neyman-Scott cluster point process with Cauchy kernel to \code{X}, by finding the parameters of the Cauchy model which give the closest match between the theoretical pair correlation function of the Cauchy process and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent follow a common distribution described in Jalilian et al (2013). If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. 
If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rCauchy}}. For computational reasons, the optimisation procedure uses the parameter \code{eta2}, which is equivalent to \code{4 * omega^2} where \code{omega} is the scale parameter for the model as used in \code{\link{rCauchy}}. Homogeneous or inhomogeneous Neyman-Scott/Cauchy models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Ghorbani, M. (2012) Cauchy cluster process. \emph{Metrika}, to appear. Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. 
(2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{kppm}}, \code{\link{cauchy.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{vargamma.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rCauchy}} to simulate the model. } \examples{ u <- cauchy.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat/man/eval.fv.Rd0000755000176000001440000000712712237642732014572 0ustar ripleyusers\name{eval.fv} \alias{eval.fv} \title{Evaluate Expression Involving Functions} \description{ Evaluates any expression involving one or more function value (fv) objects, and returns another object of the same kind. } \usage{ eval.fv(expr, envir, dotonly=TRUE) } \arguments{ \item{expr}{An expression.} \item{envir}{Optional. The environment in which to evaluate the expression.} \item{dotonly}{Logical. See Details.} } \details{ This is a wrapper to make it easier to perform pointwise calculations with the summary functions used in spatial statistics. An object of class \code{"fv"} is essentially a data frame containing several different statistical estimates of the same function. Such objects are returned by \code{\link{Kest}} and its relatives. For example, suppose \code{X} is an object of class \code{"fv"} containing several different estimates of the Ripley's K function \eqn{K(r)}, evaluated at a sequence of values of \eqn{r}. Then \code{eval.fv(X+3)} effectively adds 3 to each function estimate in \code{X}, and returns the resulting object. Suppose \code{X} and \code{Y} are two objects of class \code{"fv"} which are compatible (in particular they have the same vector of \eqn{r} values). 
Then \code{eval.fv(X + Y)} will add the corresponding function values in \code{X} and \code{Y}, and return the resulting function. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fv} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fv"}. Each such name is replaced by a vector containing the function values. The expression is then evaluated. The result should be a vector; it is taken as the new vector of function values. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fv"} in the expression. All such objects must be compatible. If \code{dotonly=TRUE} (the default), the expression will be evaluated only for those columns of an \code{"fv"} object that contain values of the function itself (rather than values of the derivative of the function, the hazard rate, etc). If \code{dotonly=FALSE}, the expression will be evaluated for all columns. For example the result of \code{\link{Fest}} includes several columns containing estimates of the empty space function \eqn{F(r)}, but also includes an estimate of the \emph{hazard} \eqn{h(r)} of \eqn{F(r)}. Transformations that are valid for \eqn{F} may not be valid for \eqn{h}. Accordingly, \eqn{h} would normally be omitted from the calculation. The columns of an object \code{x} that represent the function itself are identified by its \dQuote{dot} names, \code{fvnames(x, ".")}. They are the columns normally plotted by \code{\link{plot.fv}} and identified by the symbol \code{"."} in plot formulas in \code{\link{plot.fv}}. } \value{ Another object of class \code{"fv"}. 
} \seealso{ \code{\link{fv.object}}, \code{\link{Kest}} } \examples{ # manipulating the K function X <- rpoispp(42) Ks <- Kest(X) eval.fv(Ks + 3) Ls <- eval.fv(sqrt(Ks/pi)) # manipulating two K functions Y <- rpoispp(20) Kr <- Kest(Y) Kdif <- eval.fv(Ks - Kr) Z <- eval.fv(sqrt(Ks/pi) - sqrt(Kr/pi)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/Finhom.Rd0000644000176000001440000001354012237642731014441 0ustar ripleyusers\name{Finhom} \alias{Finhom} \title{ Inhomogeneous Empty Space Function } \description{ Estimates the inhomogeneous empty space function of a non-stationary point pattern. } \usage{ Finhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, ratio = FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{F} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. 
} \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{F} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ An alternative to the argument \code{r}. Not normally invoked by the user. See Details. } \item{ratio}{ Logical. If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes estimates of the inhomogeneous \eqn{F}-function (van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the empty space function \eqn{F} for homogeneous point patterns computed by \code{\link{Fest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{F} function is computed using the border correction, equation (6) in Van Lieshout (2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"}) whose fitted \emph{trend} can be used as the fitted intensity. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. 
Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. 
} \seealso{ \code{\link{Ginhom}}, \code{\link{Jinhom}}, \code{\link{Fest}} } \examples{ \dontrun{ plot(Finhom(swedishpines, sigma=bw.diggle, adjust=2)) } plot(Finhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. C implementation and R adaptation by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/objsurf.Rd0000644000176000001440000000477612240447360014701 0ustar ripleyusers\name{objsurf} \alias{objsurf} \alias{objsurf.kppm} \alias{objsurf.minconfit} \title{ Objective Function Surface } \description{ For a model that was fitted by optimisation, compute the values of the objective function in a neighbourhood of the optimal value. } \usage{ objsurf(x, \dots) \method{objsurf}{kppm}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE) \method{objsurf}{minconfit}(x, ..., ngrid = 32, ratio = 1.5, verbose = TRUE) } \arguments{ \item{x}{ Some kind of model that was fitted by finding the optimal value of an objective function. An object of class \code{"kppm"} or \code{"minconfit"}. } \item{\dots}{ Extra arguments are usually ignored. } \item{ngrid}{ Number of grid points to evaluate along each axis. Either a single integer, or a pair of integers. For example \code{ngrid=32} would mean a \code{32 * 32} grid. } \item{ratio}{ Number greater than 1 determining the range of parameter values to be considered. If the optimal parameter value is \code{opt} then the objective function will be evaluated for values between \code{opt/ratio} and \code{opt * ratio}. } \item{verbose}{ Logical value indicating whether to print progress reports. } } \details{ The object \code{x} should be some kind of model that was fitted by maximising or minimising the value of an objective function. The objective function will be evaluated on a grid of values of the model parameters. 
Currently the following types of objects are accepted: \itemize{ \item an object of class \code{"kppm"} representing a cluster point process or Cox point process. See \code{\link{kppm}}. \item an object of class \code{"minconfit"} representing a minimum-contrast fit between a summary function and its theoretical counterpart. See \code{\link{mincontrast}}. } The result is an object of class \code{"objsurf"} which can be printed and plotted: see \code{\link{methods.objsurf}}. } \value{ An object of class \code{"objsurf"} which can be printed and plotted. Essentially a list containing entries \code{x}, \code{y}, \code{z} giving the parameter values and objective function values. } \author{ Adrian Baddeley and Ege Rubak. } \seealso{ \code{\link{methods.objsurf}}, \code{\link{kppm}}, \code{\link{mincontrast}} } \examples{ fit <- kppm(redwood, ~1, "Thomas") os <- objsurf(fit) if(interactive()) { plot(os) contour(os, add=TRUE) persp(os) } } \keyword{spatial} \keyword{models} spatstat/man/Jmulti.Rd0000755000176000001440000001406312237642731014471 0ustar ripleyusers\name{Jmulti} \alias{Jmulti} \title{ Marked J Function } \description{ For a marked point pattern, estimate the multitype \eqn{J} function summarising dependence between the points in subset \eqn{I} and those in subset \eqn{J}. } \usage{ Jmulti(X, I, J, eps=NULL, r=NULL, breaks=NULL, \dots, disjoint=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the multitype distance distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset of points of \code{X} from which distances are measured. See Details. } \item{J}{Subset of points in \code{X} to which distances are measured. See Details. } \item{eps}{A positive number. The pixel resolution of the discrete approximation to Euclidean distance (see \code{\link{Jest}}). There is a sensible default. } \item{r}{numeric vector. 
The values of the argument \eqn{r} at which the distribution function \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{\dots}{Ignored.} \item{disjoint}{Optional flag indicating whether the subsets \code{I} and \code{J} are disjoint. If missing, this value will be computed by inspecting the vectors \code{I} and \code{J}. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"Hanisch"} and \code{"best"}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{J_{IJ}(r)}{J[IJ](r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{han}{the Hanisch-style estimator of \eqn{J_{IJ}(r)}{J[IJ](r)} } \item{un}{the uncorrected estimate of \eqn{J_{IJ}(r)}{J[IJ](r)}, formed by taking the ratio of uncorrected empirical estimators of \eqn{1 - G_{IJ}(r)}{1 - G[IJ](r)} and \eqn{1 - F_{J}(r)}{1 - F[J](r)}, see \code{\link{Gdot}} and \code{\link{Fest}}. } \item{theo}{the theoretical value of \eqn{J_{IJ}(r)}{J[IJ](r)} for a marked Poisson process with the same estimated intensity, namely 1. } } \details{ The function \code{Jmulti} generalises \code{\link{Jest}} (for unmarked point patterns) and \code{\link{Jdot}} and \code{\link{Jcross}} (for multitype point patterns) to arbitrary marked point patterns. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are subsets, possibly overlapping, of a marked point process. 
Define \deqn{J_{IJ}(r) = \frac{1 - G_{IJ}(r)}{1 - F_J(r)}}{ J[IJ](r) = (1 - G[IJ](r))/(1 - F[J](r))} where \eqn{F_J(r)}{F[J](r)} is the cumulative distribution function of the distance from a fixed location to the nearest point of \eqn{X_J}{X[J]}, and \eqn{G_{IJ}(r)}{G[IJ](r)} is the distribution function of the distance from a typical point of \eqn{X_I}{X[I]} to the nearest distinct point of \eqn{X_J}{X[J]}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. It is assumed that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Jest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{J_{IJ}(r)}{J[IJ](r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. 
First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jcross}}, \code{\link{Jdot}}, \code{\link{Jest}} } \examples{ data(longleaf) # Longleaf Pine data: marks represent diameter \testonly{ longleaf <- longleaf[seq(1,longleaf$n, by=50), ] } Jm <- Jmulti(longleaf, marks(longleaf) <= 15, marks(longleaf) >= 25) plot(Jm) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/Lcross.Rd0000755000176000001440000000563312237642731014475 0ustar ripleyusers\name{Lcross} \alias{Lcross} \title{Multitype L-function (cross-type)} \description{ Calculates an estimate of the cross-type L-function for a multitype point pattern. } \usage{ Lcross(X, i, j, ...) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type \eqn{L} function \eqn{L_{ij}(r)}{Lij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. 
} \item{\dots}{ Arguments passed to \code{\link{Kcross}}. } } \details{ The cross-type L-function is a transformation of the cross-type K-function, \deqn{L_{ij}(r) = \sqrt{\frac{K_{ij}(r)}{\pi}}}{Lij(r) = sqrt(Kij(r)/pi)} where \eqn{K_{ij}(r)}{Kij(r)} is the cross-type K-function from type \code{i} to type \code{j}. See \code{\link{Kcross}} for information about the cross-type K-function. The command \code{Lcross} first calls \code{\link{Kcross}} to compute the estimate of the cross-type K-function, and then applies the square root transformation. For a marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the L-function is \eqn{L_{ij}(r) = r}{Lij(r) = r}. The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L_{ij}}{Lij} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{ij}}{Lij} has been estimated } \item{theo}{the theoretical value \eqn{L_{ij}(r) = r}{Lij(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{ij}}{Lij} obtained by the edge corrections named. 
} \seealso{ \code{\link{Kcross}}, \code{\link{Ldot}}, \code{\link{Lest}} } \examples{ data(amacrine) L <- Lcross(amacrine, "off", "on") plot(L) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/reduced.sample.Rd0000755000176000001440000000657112237642733016127 0ustar ripleyusers\name{reduced.sample} \alias{reduced.sample} \title{Reduced Sample Estimator using Histogram Data} \description{ Compute the Reduced Sample estimator of a survival time distribution function, from histogram data } \usage{ reduced.sample(nco, cen, ncc, show=FALSE, uppercen=0) } \arguments{ \item{nco}{vector of counts giving the histogram of uncensored observations (those survival times that are less than or equal to the censoring time) } \item{cen}{vector of counts giving the histogram of censoring times } \item{ncc}{vector of counts giving the histogram of censoring times for the uncensored observations only } \item{uppercen}{ number of censoring times greater than the rightmost histogram breakpoint (if there are any) } \item{show}{Logical value controlling the amount of detail returned by the function value (see below) } } \value{ If \code{show = FALSE}, a numeric vector giving the values of the reduced sample estimator. If \code{show=TRUE}, a list with three components which are vectors of equal length, \item{rs}{Reduced sample estimate of the survival time c.d.f. \eqn{F(t)} } \item{numerator}{numerator of the reduced sample estimator } \item{denominator}{denominator of the reduced sample estimator } } \details{ This function is needed mainly for internal use in \pkg{spatstat}, but may be useful in other applications where you want to form the reduced sample estimator from a huge dataset. 
Suppose \eqn{T_i}{T[i]} are the survival times of individuals \eqn{i=1,\ldots,M} with unknown distribution function \eqn{F(t)} which we wish to estimate. Suppose these times are right-censored by random censoring times \eqn{C_i}{C[i]}. Thus the observations consist of right-censored survival times \eqn{\tilde T_i = \min(T_i,C_i)}{T*[i] = min(T[i],C[i])} and non-censoring indicators \eqn{D_i = 1\{T_i \le C_i\}}{D[i] = 1(T[i] <= C[i])} for each \eqn{i}. If the number of observations \eqn{M} is large, it is efficient to use histograms. Form the histogram \code{cen} of all censoring times \eqn{C_i}{C[i]}. That is, \code{cen[k]} counts the number of values \eqn{C_i}{C[i]} in the interval \code{(breaks[k],breaks[k+1]]} for \eqn{k > 1} and \code{[breaks[1],breaks[2]]} for \eqn{k = 1}. Also form the histogram \code{nco} of all uncensored times, i.e. those \eqn{\tilde T_i}{T*[i]} such that \eqn{D_i=1}{D[i]=1}, and the histogram of all censoring times for which the survival time is uncensored, i.e. those \eqn{C_i}{C[i]} such that \eqn{D_i=1}{D[i]=1}. These three histograms are the arguments passed to \code{reduced.sample}. The return value \code{rs} is the reduced-sample estimator of the distribution function \eqn{F(t)}. Specifically, \code{rs[k]} is the reduced sample estimate of \code{F(breaks[k+1])}. The value is exact, i.e. the use of histograms does not introduce any approximation error. Note that, for the results to be valid, either the histogram breaks must span the censoring times, or the number of censoring times that do not fall in a histogram cell must have been counted in \code{uppercen}. 
} \seealso{ \code{\link{kaplan.meier}}, \code{\link{km.rs}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/pixellate.owin.Rd0000755000176000001440000000447212237642733016174 0ustar ripleyusers\name{pixellate.owin} %DontDeclareMethods \Rdversion{1.1} \alias{pixellate.owin} \title{ Convert Window to Pixel Image } \description{ Convert a window to a pixel image by measuring the area of intersection between the window and each pixel in a raster. } \usage{ \method{pixellate}{owin}(x, W = NULL, ...) } \arguments{ \item{x}{ Window (object of class \code{"owin"}) to be converted. } \item{W}{ Optional. Window determining the pixel raster on which the conversion should occur. } \item{\dots}{ Optional. Extra arguments passed to \code{\link{as.mask}} to determine the pixel raster. } } \details{ This is a method for the generic function \code{pixellate}. It converts a window \code{x} into a pixel image, by measuring the \emph{amount} of \code{x} that is inside each pixel. (The related function \code{\link{as.im}} also converts \code{x} into a pixel image, but records only the presence or absence of \code{x} in each pixel.) The pixel raster for the conversion is determined by the argument \code{W} and the extra arguments \code{\dots}. \itemize{ \item If \code{W} is given, and it is a binary mask (a window of type \code{"mask"}) then it determines the pixel raster. \item If \code{W} is given, but it is not a binary mask (it is a window of another type) then it will be converted to a binary mask using \code{as.mask(W, \dots)}. \item If \code{W} is not given, it defaults to \code{as.mask(as.rectangle(x), \dots)} } In the second and third cases it would be common to use the argument \code{dimyx} to control the number of pixels. See the Examples. 
The algorithm then computes the area of intersection of each pixel with the window. The result is a pixel image with pixel entries equal to these intersection areas. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{pixellate.ppp}}, \code{\link{pixellate}}, \code{\link{as.im}} } \examples{ data(letterR) plot(pixellate(letterR, dimyx=15)) W <- grow.rectangle(as.rectangle(letterR), 0.2) plot(pixellate(letterR, W, dimyx=15)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/pixellate.ppp.Rd0000755000176000001440000000655212237642733016020 0ustar ripleyusers\name{pixellate.ppp} %DontDeclareMethods \alias{pixellate.ppp} \alias{as.im.ppp} \title{Convert Point Pattern to Pixel Image} \description{ Converts a point pattern to a pixel image. The value in each pixel is the number of points falling in that pixel, and is typically either 0 or 1. } \usage{ \method{pixellate}{ppp}(x, W=NULL, \dots, weights = NULL, padzero=FALSE) \method{as.im}{ppp}(X, \dots) } \arguments{ \item{x,X}{Point pattern (object of class \code{"ppp"}).} \item{\dots}{Arguments passed to \code{\link{as.mask}} to determine the pixel resolution} \item{W}{Optional window mask (object of class \code{"owin"}) determining the pixel raster. } \item{weights}{Optional vector of weights associated with the points.} \item{padzero}{Logical flag indicating whether to set pixel values to zero outside the window. } } \details{ The functions \code{pixellate.ppp} and \code{as.im.ppp} convert a spatial point pattern \code{x} into a pixel image, by counting the number of points (or the total weight of points) falling in each pixel. Calling \code{as.im.ppp} is equivalent to calling \code{pixellate.ppp} with its default arguments. 
Note that \code{pixellate.ppp} is more general than \code{as.im.ppp} (it has additional arguments for greater flexibility). The functions \code{as.im.ppp} and \code{pixellate.ppp} are methods for the generic functions \code{\link{as.im}} and \code{\link{pixellate}} respectively, for the class of point patterns. The pixel raster (in which points are counted) is determined by the argument \code{W} if it is present (for \code{pixellate.ppp} only). In this case \code{W} should be a binary mask (a window object of class \code{"owin"} with type \code{"mask"}). Otherwise the pixel raster is determined by extracting the window containing \code{x} and converting it to a binary pixel mask using \code{\link{as.mask}}. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. If \code{weights} is \code{NULL}, then for each pixel in the mask, the algorithm counts how many points in \code{x} fall in the pixel. This count is usually either 0 (for a pixel with no data points in it) or 1 (for a pixel containing one data point) but may be greater than 1. The result is an image with these counts as its pixel values. If \code{weights} is given, it should be a numeric vector of the same length as the number of points in \code{x}. For each pixel, the algorithm finds the total weight associated with points in \code{x} that fall in the given pixel. The result is an image with these total weights as its pixel values. By default (if \code{padzero=FALSE}) the resulting pixel image has the same spatial domain as the window of the point pattern \code{x}. If \code{padzero=TRUE} then the resulting pixel image has a rectangular domain; pixels outside the original window are assigned the value zero. } \value{ A pixel image (object of class \code{"im"}). } \seealso{ \code{\link{pixellate}}, \code{\link{im}}, \code{\link{as.im}}, \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}. 
} \examples{ data(humberside) plot(pixellate(humberside)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/dirichlet.Rd0000755000176000001440000000325612237642732015177 0ustar ripleyusers\name{dirichlet} \alias{dirichlet} \title{Dirichlet Tessellation of Point Pattern} \description{ Computes the Dirichlet tessellation of a spatial point pattern. Also known as the Voronoi or Thiessen tessellation. } \usage{ dirichlet(X) } \arguments{ \item{X}{Spatial point pattern (object of class \code{"ppp"}).} } \details{ In a spatial point pattern \code{X}, the Dirichlet tile associated with a particular point \code{X[i]} is the region of space that is closer to \code{X[i]} than to any other point in \code{X}. The Dirichlet tiles divide the two-dimensional plane into disjoint regions, forming a tessellation. The Dirichlet tessellation is also known as the Voronoi or Thiessen tessellation. This function computes the Dirichlet tessellation (within the original window of \code{X}) using the function \code{\link[deldir]{deldir}} in the package \pkg{deldir}. To ensure that there is a one-to-one correspondence between the points of \code{X} and the tiles of \code{dirichlet(X)}, duplicated points in \code{X} should first be removed by \code{X <- unique(X, rule="deldir")}. The tiles of the tessellation will be computed as polygons if the original window is a rectangle or a polygon. Otherwise the tiles will be computed as binary masks. } \value{ A tessellation (object of class \code{"tess"}). 
} \seealso{ \code{\link{tess}}, \code{\link{delaunay}}, \code{\link{ppp}} } \examples{ X <- runifpoint(42) plot(dirichlet(X)) plot(X, add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/Extract.owin.Rd0000644000176000001440000000237312237642732015611 0ustar ripleyusers\name{Extract.owin} \alias{[.owin} \title{Extract Subset of Window} \description{ Extract a subset of a window. } \usage{ \method{[}{owin}(x, i, \dots) } \arguments{ \item{x}{ A spatial window (object of class \code{"owin"}). } \item{i}{ Object defining the subregion. Either a spatial window, or a pixel image with logical values. } \item{\dots}{Ignored.} } \value{ Another spatial window (object of class \code{"owin"}). } \details{ This function computes the intersection between the window \code{x} and the domain specified by \code{i}, using \code{\link{intersect.owin}}. This function is a method for the subset operator \code{"["} for spatial windows (objects of class \code{"owin"}). It is provided mainly for completeness. The index \code{i} may be either a window, or a pixel image with logical values (the \code{TRUE} values of the image specify the spatial domain). } \seealso{ \code{\link{intersect.owin}} } \examples{ W <- owin(c(2.5, 3.2), c(1.4, 2.9)) plot(letterR) plot(letterR[W], add=TRUE, col="red") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/summary.ppp.Rd0000755000176000001440000000322312237642734015517 0ustar ripleyusers\name{summary.ppp} \alias{summary.ppp} \title{Summary of a Point Pattern Dataset} \description{ Prints a useful summary of a point pattern dataset. 
} \usage{ \method{summary}{ppp}(object, \dots, checkdup=TRUE) } \arguments{ \item{object}{ Point pattern (object of class \code{"ppp"}). } \item{\dots}{ Ignored. } \item{checkdup}{ Logical value indicating whether to check for the presence of duplicate points. } } \details{ A useful summary of the point pattern \code{object} is printed. This is a method for the generic function \code{\link{summary}}. If \code{checkdup=TRUE}, the pattern will be checked for the presence of duplicate points, using \code{\link{duplicated.ppp}}. This can be time-consuming if the pattern contains many points, so the checking can be disabled by setting \code{checkdup=FALSE}. If the point pattern was generated by simulation using \code{\link{rmh}}, the parameters of the algorithm are printed. } \seealso{ \code{\link{summary}}, \code{\link{summary.owin}}, \code{\link{print.ppp}} } \examples{ summary(cells) # plain vanilla point pattern # multitype point pattern \testonly{lansing <- lansing[seq(1, lansing$n, length=40)]} summary(lansing) # tabulates frequencies of each mark # numeric marks \testonly{longleaf <- longleaf[seq(1, longleaf$n, length=40)]} summary(longleaf) # prints summary.default(x$marks) # weird polygonal window summary(demopat) # describes it } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/project2segment.Rd0000755000176000001440000000460512237642733016341 0ustar ripleyusers\name{project2segment} \alias{project2segment} \title{Move Point To Nearest Line} \description{ Given a point pattern and a line segment pattern, this function moves each point to the closest location on a line segment. 
} \usage{ project2segment(X, Y) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{Y}{A line segment pattern (object of class \code{"psp"}).} } \details{ For each point \code{x} in the point pattern \code{X}, this function finds the closest line segment \code{y} in the line segment pattern \code{Y}. It then `projects' the point \code{x} onto the line segment \code{y} by finding the position \code{z} along \code{y} which is closest to \code{x}. This position \code{z} is returned, along with supplementary information. } \value{ A list with the following components. Each component has length equal to the number of points in \code{X}, and its entries correspond to the points of \code{X}. \item{Xproj }{ Point pattern (object of class \code{"ppp"} containing the projected points. } \item{mapXY }{ Integer vector identifying the nearest segment to each point. } \item{d}{ Numeric vector of distances from each point of \code{X} to the corresponding projected point. } \item{tp}{ Numeric vector giving the scaled parametric coordinate \eqn{0 \le t_p \le 1}{0 <= tp <= 1} of the position of the projected point along the segment. } For example suppose \code{mapXY[2] = 5} and \code{tp[2] = 0.33}. Then \code{Y[5]} is the line segment lying closest to \code{X[2]}. The projection of the point \code{X[2]} onto the segment \code{Y[5]} is the point \code{Xproj[2]}, which lies one-third of the way between the first and second endpoints of the line segment \code{Y[5]}. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{nearestsegment}} for a faster way to determine which segment is closest to each point. 
} \examples{ X <- rstrat(square(1), 5) Y <- as.psp(matrix(runif(20), 5, 4), window=owin()) plot(Y, lwd=3, col="green") plot(X, add=TRUE, col="red", pch=16) v <- project2segment(X,Y) Xproj <- v$Xproj plot(Xproj, add=TRUE, pch=16) arrows(X$x, X$y, Xproj$x, Xproj$y, angle=10, length=0.15, col="red") } \keyword{spatial} \keyword{math} spatstat/man/pcfinhom.Rd0000755000176000001440000001323112251564230015015 0ustar ripleyusers\name{pcfinhom} \alias{pcfinhom} \title{ Inhomogeneous Pair Correlation Function } \description{ Estimates the inhomogeneous pair correlation function of a point pattern using kernel methods. } \usage{ pcfinhom(X, lambda = NULL, ..., r = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), divisor = c("r", "d"), renormalise = TRUE, normpower=1, reciplambda = NULL, sigma = NULL, varcov = NULL) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g(r)} should be evaluated. There is a sensible default. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{divisor}{ Choice of divisor in the estimation formula: either \code{"r"} (the default) or \code{"d"}. See \code{\link{pcf.ppp}}. 
} \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } } \details{ The inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} is a summary of the dependence between points in a spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda(x) \lambda(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda(x) * lambda(y) * g(r) dx dy } where \eqn{\lambda}{lambda} is the intensity function of the point process. For a Poisson point process with intensity function \eqn{\lambda}{lambda}, this probability is \eqn{p(r) = \lambda(x) \lambda(y)}{p(r) = lambda(x) * lambda(y)} so \eqn{g_{\rm inhom}(r) = 1}{ginhom(r) = 1}. The inhomogeneous pair correlation function is related to the inhomogeneous \eqn{K} function through \deqn{ g_{\rm inhom}(r) = \frac{K'_{\rm inhom}(r)}{2\pi r} }{ ginhom(r) = Kinhom'(r)/ ( 2 * pi * r) } where \eqn{K'_{\rm inhom}(r)}{Kinhom'(r)} is the derivative of \eqn{K_{\rm inhom}(r)}{Kinhom(r)}, the inhomogeneous \eqn{K} function. See \code{\link{Kinhom}} for information about \eqn{K_{\rm inhom}(r)}{Kinhom(r)}. 
The command \code{pcfinhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If \code{renormalise=TRUE} (the default), then the estimates are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). } This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous pair correlation function \eqn{g_{\rm inhom}(r)}{ginhom(r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{\rm inhom}(r)}{ginhom(r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{\rm inhom}(r)}{ginhom(r)} estimated by Ripley isotropic correction } as required. 
} \seealso{ \code{\link{pcf}}, \code{\link{pcf.ppp}}, \code{\link{Kinhom}} } \examples{ data(residualspaper) X <- residualspaper$Fig4b plot(pcfinhom(X, stoyan=0.2, sigma=0.1)) fit <- ppm(X, ~polynom(x,y,2)) plot(pcfinhom(X, lambda=fit, normpower=2)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/im.object.Rd0000755000176000001440000000733712237642732015106 0ustar ripleyusers\name{im.object} \alias{im.object} %DoNotExport \title{Class of Images} \description{ A class \code{"im"} to represent a two-dimensional pixel image. } \details{ An object of this class represents a two-dimensional pixel image. It specifies \itemize{ \item the dimensions of the rectangular array of pixels \item \eqn{x} and \eqn{y} coordinates for the pixels \item a numeric value (``grey value'') at each pixel } If \code{X} is an object of type \code{im}, it contains the following elements: \tabular{ll}{ \code{v} \tab matrix of values \cr \code{dim} \tab dimensions of matrix \code{v} \cr \code{xrange} \tab range of \eqn{x} coordinates of image window \cr \code{yrange} \tab range of \eqn{y} coordinates of image window \cr \code{xstep} \tab width of one pixel \cr \code{ystep} \tab height of one pixel \cr \code{xcol} \tab vector of \eqn{x} coordinates of centres of pixels \cr \code{yrow} \tab vector of \eqn{y} coordinates of centres of pixels } Users are strongly advised not to manipulate these entries directly. Objects of class \code{"im"} may be created by the functions \code{\link{im}} and \code{\link{as.im}}. Image objects are also returned by various functions including \code{\link{distmap}}, \code{\link{Kmeasure}}, \code{\link{setcov}}, \code{\link{eval.im}} and \code{\link{cut.im}}. Image objects may be displayed using the methods \code{\link{plot.im}}, \code{image.im}, \code{\link{persp.im}} and \code{contour.im}. 
There are also methods \code{\link{print.im}} for printing information about an image, \code{\link{summary.im}} for summarising an image, \code{\link{mean.im}} for calculating the average pixel value, \code{\link{hist.im}} for plotting a histogram of pixel values, \code{\link{quantile.im}} for calculating quantiles of pixel values, and \code{\link{cut.im}} for dividing the range of pixel values into categories. Pixel values in an image may be extracted using the subset operator \code{\link{[.im}}. To extract all pixel values from an image object, use \code{\link{as.matrix.im}}. The levels of a factor-valued image can be extracted and changed with \code{levels} and \code{levels<-}. Calculations involving one or more images (for example, squaring all the pixel values in an image, converting numbers to factor levels, or subtracting one image from another) can often be done easily using \code{\link{eval.im}}. To find all pixels satisfying a certain constraint, use \code{\link{solutionset}}. Note carefully that the entry \code{v[i,j]} gives the pixel value at the location \code{(xcol[j],yrow[i])}. That is, the \bold{row} index of the matrix \code{v} corresponds to increasing \bold{y} coordinate, while the column index of \code{v} corresponds to increasing \bold{x} coordinate. Thus \code{yrow} has one entry for each row of \code{v} and \code{xcol} has one entry for each column of \code{v}. Under the usual convention in \R, a correct display of the image would be obtained by transposing the matrix, e.g. \code{image.default(xcol, yrow, t(v))}, if you wanted to do it by hand. } \seealso{ \code{\link{im}}, \code{\link{as.im}}, \code{\link{plot.im}}, \code{\link{persp.im}}, \code{\link{eval.im}}, \code{\link{[.im}} } \section{Warnings}{ The internal representation of images is likely to change in future releases of \pkg{spatstat}. Do not address the entries in an image directly. To extract all pixel values from an image object, use \code{\link{as.matrix.im}}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{attribute} spatstat/man/disc.Rd0000755000176000001440000000360512237642732014150 0ustar ripleyusers\name{disc} \alias{disc} \title{Circular Window} \description{ Creates a circular window } \usage{ disc(radius=1, centre=c(0,0), \dots, mask=FALSE, npoly=128) } \arguments{ \item{radius}{Radius of the circle.} \item{centre}{Coordinates of the centre of the circle.} \item{mask}{Logical flag controlling the type of approximation to a perfect circle. See Details. } \item{npoly}{Number of edges of the polygonal approximation, if \code{mask=FALSE}. } \item{\dots}{Arguments passed to \code{as.mask} determining the pixel resolution, if \code{mask=TRUE}. } } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying a window. } \details{ This command creates a window object representing a disc, with the given radius and centre. By default, the circle is approximated by a polygon with \code{npoly} edges. If \code{mask=TRUE}, then the disc is approximated by a binary pixel mask. The resolution of the mask is controlled by the arguments \code{\dots} which are passed to \code{\link{as.mask}}. } \seealso{ \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.mask}} } \note{This function can also be used to generate regular polygons, by setting \code{npoly} to a small integer value. For example \code{npoly=5} generates a pentagon and \code{npoly=13} a triskaidecagon. 
} \examples{ # unit disc W <- disc() # disc of radius 3 centred at x=10, y=5 W <- disc(3, c(10,5)) # plot(disc()) plot(disc(mask=TRUE)) # nice smooth circle plot(disc(npoly=256)) # how to control the resolution of the mask plot(disc(mask=TRUE, dimyx=256)) # check accuracy of approximation area.owin(disc())/pi area.owin(disc(mask=TRUE))/pi } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/affine.ppp.Rd0000755000176000001440000000331712237642732015254 0ustar ripleyusers\name{affine.ppp} %DontDeclareMethods \alias{affine.ppp} \title{Apply Affine Transformation To Point Pattern} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a point pattern. } \usage{ \method{affine}{ppp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Arguments passed to \code{\link{affine.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } } \value{ Another point pattern (of class \code{"ppp"}) representing the result of applying the affine transformation. } \details{ The point pattern, and its window, are subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and are then translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{affine.owin}}, \code{\link{affine.psp}}, \code{\link{affine.im}}, \code{\link{flipxy}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(cells) # shear transformation X <- affine(cells, matrix(c(1,0,0.6,1),ncol=2)) \dontrun{ plot(X) # rescale y coordinates by factor 1.3 plot(affine(cells, diag(c(1,1.3)))) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/Kest.fft.Rd0000755000176000001440000000623112237642731014707 0ustar ripleyusers\name{Kest.fft} \alias{Kest.fft} \title{K-function using FFT} \description{ Estimates the reduced second moment function \eqn{K(r)} from a point pattern in a window of arbitrary shape, using the Fast Fourier Transform. } \usage{ Kest.fft(X, sigma, r=NULL, breaks=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{K(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{sigma}{ standard deviation of the isotropic Gaussian smoothing kernel. } \item{r}{ vector of values for the argument \eqn{r} at which \eqn{K(r)} should be evaluated. There is a sensible default. } \item{breaks}{ An alternative to the argument \code{r}. Not normally invoked by the user. See Details. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{border}{the estimates of \eqn{K(r)} for these values of \eqn{r} } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} for a stationary Poisson process } } \details{ This is an alternative to the function \code{\link{Kest}} for estimating the \eqn{K} function. It may be useful for very large patterns of points. 
Whereas \code{\link{Kest}} computes the distance between each pair of points analytically, this function discretises the point pattern onto a rectangular pixel raster and applies Fast Fourier Transform techniques to estimate \eqn{K(r)}. The hard work is done by the function \code{\link{Kmeasure}}. The result is an approximation whose accuracy depends on the resolution of the pixel raster. The resolution is controlled by setting the parameter \code{npixel} in \code{\link{spatstat.options}}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. (1995) \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag. Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \seealso{ \code{\link{Kest}}, \code{\link{Kmeasure}}, \code{\link{spatstat.options}} } \examples{ pp <- runifpoint(10000) \dontrun{ spatstat.options(npixel=512) } \testonly{ op <- spatstat.options(npixel=125) } Kpp <- Kest.fft(pp, 0.01) plot(Kpp) \testonly{spatstat.options(op)} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/effectfun.Rd0000755000176000001440000000676612237642732015206 0ustar ripleyusers\name{effectfun} \alias{effectfun} \title{Compute Fitted Effect of a Spatial Covariate in a Point Process Model} \description{ Compute the trend or intensity of a fitted point process model as a function of one of its covariates. } \usage{ effectfun(model, covname, ..., se.fit=FALSE) } \arguments{ \item{model}{ A fitted point process model (object of class \code{"ppm"}). } \item{covname}{ The name of the covariate. A character string. } \item{\dots}{ The fixed values of other covariates (in the form \code{name=value}) if required. } \item{se.fit}{ Logical. If \code{TRUE}, asymptotic standard errors of the estimates will be computed, together with a 95\% confidence interval. } } \details{ The object \code{model} should be an object of class \code{"ppm"} representing a point process model fitted to point pattern data. The model's trend formula should involve a spatial covariate named \code{covname}. This could be \code{"x"} or \code{"y"} representing one of the Cartesian coordinates. More commonly the covariate is another, external variable that was supplied when fitting the model. The command \code{effectfun} computes the fitted trend of the point process \code{model} as a function of the covariate named \code{covname}. The return value can be plotted immediately, giving a plot of the fitted trend against the value of the covariate. 
If the model also involves covariates other than \code{covname}, then these covariates will be held fixed. Values for these other covariates must be provided as arguments to \code{effectfun} in the form \code{name=value}. If \code{se.fit=TRUE}, the algorithm also calculates the asymptotic standard error of the fitted trend, and a (pointwise) asymptotic 95\% confidence interval for the true trend. This command is just a wrapper for the prediction method \code{\link{predict.ppm}}. For more complicated computations about the fitted intensity, use \code{\link{predict.ppm}}. } \section{Trend and intensity}{ For a Poisson point process model, the trend is the same as the intensity of the point process. For a more general Gibbs model, the trend is the first order potential in the model (the first order term in the Gibbs representation). In Poisson or Gibbs models fitted by \code{\link{ppm}}, the trend is the only part of the model that depends on the covariates. } \value{ A data frame containing a column of values of the covariate and a column of values of the fitted trend. If \code{se.fit=TRUE}, there are 3 additional columns containing the standard error and the upper and lower limits of a confidence interval. If the covariate named \code{covname} is numeric (rather than a factor or logical variable), the return value is also of class \code{"fv"} so that it can be plotted immediately. 
} \seealso{ \code{\link{ppm}}, \code{\link{predict.ppm}}, \code{\link{fv.object}} } \examples{ data(copper) X <- copper$SouthPoints D <- distmap(copper$SouthLines) fit <- ppm(X, ~polynom(Z, 5), covariates=list(Z=D)) \donttest{ plot(effectfun(fit, "Z")) } plot(effectfun(fit, "Z", se.fit=TRUE), shade=c("hi", "lo")) fit <- ppm(X, ~x + polynom(Z, 5), covariates=list(Z=D)) plot(effectfun(fit, "Z", x=20)) fit <- ppm(X, ~x) plot(effectfun(fit, "x")) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/methods.ppx.Rd0000755000176000001440000000302612237642733015475 0ustar ripleyusers\name{methods.ppx} \Rdversion{1.1} \alias{methods.ppx} %DoNotExport \alias{print.ppx} \alias{plot.ppx} \alias{unitname.ppx} \alias{unitname<-.ppx} \title{ Methods for Multidimensional Space-Time Point Patterns } \description{ Methods for printing and plotting a general multidimensional space-time point pattern. } \usage{ \method{print}{ppx}(x, ...) \method{plot}{ppx}(x, ...) \method{unitname}{ppx}(x) \method{unitname}{ppx}(x) <- value } \arguments{ \item{x}{ Multidimensional point pattern (object of class \code{"ppx"}). } \item{\dots}{ Additional arguments passed to plot methods. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{plot}}, \code{\link{unitname}} and \code{\link{unitname<-}} for the class \code{"ppx"} of multidimensional point patterns. The \code{print} method prints a description of the point pattern and its spatial domain. The \code{unitname} method extracts the name of the unit of length in which the point coordinates are expressed. The \code{unitname<-} method assigns the name of the unit of length. } \value{ For \code{print.ppx} the value is \code{NULL}. For \code{unitname.ppx} an object of class \code{"units"}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{ppx}}, \code{\link{unitname}} } \keyword{spatial} spatstat/man/as.linim.Rd0000644000176000001440000000422412237642732014733 0ustar ripleyusers\name{as.linim} %DontDeclareMethods \alias{as.linim} \alias{as.linim.linim} \alias{as.linim.default} \title{Convert to Pixel Image on Linear Network} \description{ Converts various kinds of data to a pixel image on a linear network. } \usage{ as.linim(X, \dots) \method{as.linim}{linim}(X, \dots) \method{as.linim}{default}(X, L, \dots) } \arguments{ \item{X}{ Data to be converted to a pixel image on a linear network. } \item{L}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{Additional arguments passed to \code{X} when \code{X} is a function, or arguments passed to \code{\link{as.mask}} which determine the pixel array geometry. } } \details{ This function converts the data \code{X} into a pixel image on a linear network, an object of class \code{"linim"} (see \code{\link{linim}}). The argument \code{X} may be any of the following: \itemize{ \item a pixel image on a linear network, an object of class \code{"linim"}. \item a pixel image, an object of class \code{"im"}. \item any type of data acceptable to \code{\link{as.im}}, such as a function, numeric value, or window. } First \code{X} is converted to a pixel image object \code{Y} (object of class \code{"im"}). The conversion is performed by \code{\link{as.im}} using any relevant arguments specified in \code{\dots}. For example the argument \code{eps} could be used to change the pixel resolution. Next \code{Y} is converted to a pixel image on a linear network using \code{\link{linim}}. The argument \code{L} determines the linear network. 
If \code{L} is missing or \code{NULL}, then \code{X} should be an object of class \code{"linim"}, and \code{L} defaults to the linear network on which \code{X} is defined. } \value{ An image object on a linear network; an object of class \code{"linim"}. } \seealso{ \code{\link{as.im}} } \examples{ f <- function(x,y){ x + y } plot(as.linim(f, simplenet)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/rPoissonCluster.Rd0000644000176000001440000000734312237642734016406 0ustar ripleyusers\name{rPoissonCluster} \alias{rPoissonCluster} \title{Simulate Poisson Cluster Process} \description{ Generate a random point pattern, a realisation of the general Poisson cluster process. } \usage{ rPoissonCluster(kappa, rmax, rcluster, win = owin(c(0,1),c(0,1)), \dots, lmax=NULL) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{rmax}{ Maximum radius of a random cluster. } \item{rcluster}{ A function which generates random clusters. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{rcluster} } \item{lmax}{ Optional. Upper bound on the values of \code{kappa} when \code{kappa} is a function or pixel image. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). Additionally, some intermediate results of the simulation are returned as attributes of this point pattern: see Details. } \details{ This algorithm generates a realisation of the general Poisson cluster process, with the cluster mechanism given by the function \code{rcluster}. The clusters must have a finite maximum possible radius \code{rmax}. 
First, the algorithm generates a Poisson point process of ``parent'' points with intensity \code{kappa}. Here \code{kappa} may be a single positive number, a function \code{kappa(x, y)}, or a pixel image object of class \code{"im"} (see \code{\link{im.object}}). See \code{\link{rpoispp}} for details. Second, each parent point is replaced by a random cluster of points, created by calling the function \code{rcluster}. These clusters are combined together to yield a single point pattern which is then returned as the result of \code{rPoissonCluster}. The function \code{rcluster} should expect to be called as \code{rcluster(xp[i],yp[i],\dots)} for each parent point at a location \code{(xp[i],yp[i])}. The return value of \code{rcluster} should be a list with elements \code{x,y} which are vectors of equal length giving the absolute \eqn{x} and \eqn{y} coordinates of the points in the cluster. If the return value of \code{rcluster} is a point pattern (object of class \code{"ppp"}) then it may have marks. The result of \code{rPoissonCluster} will then be a marked point pattern. If required, the intermediate stages of the simulation (the parents and the individual clusters) can also be extracted from the return value of \code{rPoissonCluster} through the attributes \code{"parents"} and \code{"parentid"}. The attribute \code{"parents"} is the point pattern of parent points. The attribute \code{"parentid"} is an integer vector specifying the parent for each of the points in the simulated pattern. 
} \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rGaussPoisson}}, \code{\link{rMatClust}} } \examples{ # each cluster consist of 10 points in a disc of radius 0.2 nclust <- function(x0, y0, radius, n) { return(runifdisc(n, radius, centre=c(x0, y0))) } plot(rPoissonCluster(10, 0.2, nclust, radius=0.2, n=5)) # multitype Neyman-Scott process (each cluster is a multitype process) nclust2 <- function(x0, y0, radius, n, types=c("a", "b")) { X <- runifdisc(n, radius, centre=c(x0, y0)) M <- sample(types, n, replace=TRUE) marks(X) <- M return(X) } plot(rPoissonCluster(15,0.1,nclust2, radius=0.1, n=5)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/vcov.mppm.Rd0000644000176000001440000000446612241443112015140 0ustar ripleyusers\name{vcov.mppm} \alias{vcov.mppm} \title{Calculate Variance-Covariance Matrix for Fitted Multiple Point Process Model} \description{ Given a fitted multiple point process model, calculate the variance-covariance matrix of the parameter estimates. } \usage{ \method{vcov}{mppm}(object, ..., what="vcov", err="fatal") } \arguments{ \item{object}{ A multiple point process model (object of class \code{"mppm"}). } \item{\dots}{ Ignored. } \item{what}{ Character string indicating which quantity should be calculated. Either \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, or \code{"fisher"} for the Fisher information matrix. } \item{err}{ Character string indicating what action to take if an error occurs. Either \code{"fatal"}, \code{"warn"} or \code{"null"}. } } \details{ This is a method for the generic function \code{\link{vcov}}. The argument \code{object} should be a fitted multiple point process model (object of class \code{"mppm"}) generated by \code{\link{mppm}}. The model must be a Poisson point process. 
The variance-covariance matrix of the parameter estimates is computed using asymptotic theory for maximum likelihood. If \code{what="vcov"} (the default), the variance-covariance matrix is returned. If \code{what="corr"}, the variance-covariance matrix is normalised to yield a correlation matrix, and this is returned. If \code{what="fisher"}, the Fisher information matrix is returned instead. In all three cases, the rows and columns of the matrix correspond to the parameters (coefficients) in the same order as in \code{coef{model}}. These calculations are not available if the model is not Poisson, or if it was computed using \code{gam}. In such cases, the argument \code{err} determines what will happen. If \code{err="fatal"} an error will occur. If \code{err="warn"} a warning will be issued and \code{NA} will be returned. If \code{err="null"}, no warning is issued, but \code{NULL} is returned. } \value{ A numeric matrix (or \code{NA} or \code{NULL}). } \seealso{ \code{\link{vcov}}, \code{\link{mppm}} } \examples{ data(waterstriders) fit <- mppm(Wat ~x, data=hyperframe(Wat=waterstriders)) vcov(fit) } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/Geyer.Rd0000755000176000001440000001003112237642731014267 0ustar ripleyusers\name{Geyer} \alias{Geyer} \title{Geyer's Saturation Point Process Model} \description{ Creates an instance of Geyer's saturation point process model which can then be fitted to point pattern data. } \usage{ Geyer(r,sat) } \arguments{ \item{r}{Interaction radius. A positive real number.} \item{sat}{Saturation threshold. A positive real number.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of Geyer's saturation point process with interaction radius \eqn{r} and saturation threshold \code{sat}. 
} \details{ Geyer (1999) introduced the \dQuote{saturation process}, a modification of the Strauss process (see \code{\link{Strauss}}) in which the total contribution to the potential from each point (from its pairwise interaction with all other points) is trimmed to a maximum value \eqn{s}. This model is implemented in the function \code{\link{Geyer}()}. The saturation point process with interaction radius \eqn{r}, saturation threshold \eqn{s}, and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma}, is the point process in which each point \eqn{x_i}{x[i]} in the pattern \eqn{X} contributes a factor \deqn{ \beta \gamma^{\min(s, t(x_i, X))} }{ beta gamma^min(s, t(x[i],X)) } to the probability density of the point pattern, where \eqn{t(x_i, X)}{t(x[i],X)} denotes the number of \sQuote{close neighbours} of \eqn{x_i}{x[i]} in the pattern \eqn{X}. A close neighbour of \eqn{x_i}{x[i]} is a point \eqn{x_j}{x[j]} with \eqn{j \neq i}{j != i} such that the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]} is less than or equal to \eqn{r}. If the saturation threshold \eqn{s} is set to infinity, this model reduces to the Strauss process (see \code{\link{Strauss}}) with interaction parameter \eqn{\gamma^2}{gamma^2}. If \eqn{s = 0}, the model reduces to the Poisson point process. If \eqn{s} is a finite positive number, then the interaction parameter \eqn{\gamma}{gamma} may take any positive value (unlike the case of the Strauss process), with values \eqn{\gamma < 1}{gamma < 1} describing an \sQuote{ordered} or \sQuote{inhibitive} pattern, and values \eqn{\gamma > 1}{gamma > 1} describing a \sQuote{clustered} or \sQuote{attractive} pattern. The nonstationary saturation process is similar except that the value \eqn{\beta}{beta} is replaced by a function \eqn{\beta(x_i)}{beta(x[i])} of location. 
The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the saturation process interaction is yielded by \code{Geyer(r, sat)} where the arguments \code{r} and \code{sat} specify the Strauss interaction radius \eqn{r} and the saturation threshold \eqn{s}, respectively. See the examples below. Note the only arguments are the interaction radius \code{r} and the saturation threshold \code{sat}. When \code{r} and \code{sat} are fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Geyer()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}}, \code{\link{SatPiece}} } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. 
} \examples{ data(cells) ppm(cells, ~1, Geyer(r=0.07, sat=2)) # fit the stationary saturation process to `cells' } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/methods.layered.Rd0000644000176000001440000000336412237642733016315 0ustar ripleyusers\name{methods.layered} %DontDeclareMethods \Rdversion{1.1} \alias{methods.layered} %DoNotExport \alias{shift.layered} \alias{reflect.layered} \alias{flipxy.layered} \alias{rotate.layered} \alias{affine.layered} \alias{rescale.layered} \alias{scalardilate.layered} \title{ Methods for Layered Objects } \description{ Methods for geometrical transformations of layered objects (class \code{"layered"}). } \usage{ \method{shift}{layered}(X, ...) \method{rotate}{layered}(X, ...) \method{affine}{layered}(X, ...) \method{reflect}{layered}(X) \method{flipxy}{layered}(X) \method{rescale}{layered}(X, s) \method{scalardilate}{layered}(X, ...) } \arguments{ \item{X}{ Object of class \code{"layered"}. } \item{\dots}{ Arguments passed to the relevant methods when applying the operation to each layer of \code{X}. } \item{s}{ Rescaling factor passed to the relevant method for \code{\link{rescale}}. May be missing. } } \details{ These are methods for the generic functions \code{\link{shift}}, \code{\link{rotate}}, \code{\link{reflect}}, \code{\link{affine}}, \code{\link{rescale}}, \code{\link{scalardilate}} and \code{\link{flipxy}} for the class of layered objects. A layered object represents data that should be plotted in successive layers, for example, a background and a foreground. See \code{\link{layered}}. } \value{ Another object of class \code{"layered"}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{layered}} } \examples{ L <- layered(letterR, runifpoint(20, letterR)) plot(L) plot(rotate(L, pi/4)) } \keyword{spatial} \keyword{methods} spatstat/man/vargamma.estpcf.Rd0000644000176000001440000001524612251541120016266 0ustar ripleyusers\name{vargamma.estpcf} \alias{vargamma.estpcf} \title{Fit the Neyman-Scott Cluster Point Process with Variance Gamma kernel} \description{ Fits the Neyman-Scott cluster point process, with Variance Gamma kernel, to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. } \usage{ vargamma.estpcf(X, startpar=c(kappa=1,eta=1), nu.ker = -1/4, lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, nu.pcf=NULL, ..., pcfargs = list()) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the model. } \item{nu.ker}{ Numerical value controlling the shape of the tail of the clusters. A number greater than \code{-1/2}. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{nu.pcf}{ Alternative specification of the shape parameter. See Details. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } \item{pcfargs}{ Optional list containing arguments passed to \code{\link{pcf.ppp}} to control the smoothing in the estimation of the pair correlation function. 
} } \details{ This algorithm fits the Neyman-Scott Cluster point process model with Variance Gamma kernel (Jalilian et al, 2013) to a point pattern dataset by the Method of Minimum Contrast, using the pair correlation function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The pair correlation function of the point pattern will be computed using \code{\link{pcf}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the pair correlation function, and this object should have been obtained by a call to \code{\link{pcf}} or one of its relatives. } } The algorithm fits the Neyman-Scott Cluster point process with Variance Gamma kernel to \code{X}, by finding the parameters of the model which give the closest match between the theoretical pair correlation function of the model and the observed pair correlation function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Neyman-Scott cluster point process with Variance Gamma kernel is described in Jalilian et al (2013). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent have a common distribution described in Jalilian et al (2013). The shape of the kernel is determined by the dimensionless index \code{nu.ker}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). 
Instead of specifying \code{nu.ker} the user can specify \code{nu.pcf} which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13), page 127 of Jalilian et al (2013). These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. Exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. If the argument \code{lambda} is provided, then this is used as the value of the point process intensity \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The corresponding model can be simulated using \code{\link{rVarGamma}}. The parameter \code{eta} appearing in \code{startpar} is equivalent to the scale parameter \code{omega} used in \code{\link{rVarGamma}}. Homogeneous or inhomogeneous Neyman-Scott/VarGamma models can also be fitted using the function \code{\link{kppm}} and the fitted models can be simulated using \code{\link{simulate.kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. 
It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{kppm}}, \code{\link{vargamma.estK}}, \code{\link{lgcp.estpcf}}, \code{\link{thomas.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{mincontrast}}, \code{\link{pcf}}, \code{\link{pcfmodel}}. \code{\link{rVarGamma}} to simulate the model. } \examples{ u <- vargamma.estpcf(redwood) u plot(u, legendpos="topright") } \keyword{spatial} \keyword{models} spatstat/man/deltametric.Rd0000755000176000001440000000603012237642732015516 0ustar ripleyusers\name{deltametric} \Rdversion{1.1} \alias{deltametric} \title{ Delta Metric } \description{ Computes the discrepancy between two sets \eqn{A} and \eqn{B} according to Baddeley's delta-metric. } \usage{ deltametric(A, B, p = 2, c = Inf, ...) } \arguments{ \item{A,B}{ The two sets which will be compared. Windows (objects of class \code{"owin"}), point patterns (objects of class \code{"ppp"}) or line segment patterns (objects of class \code{"psp"}). } \item{p}{ Index of the \eqn{L^p} metric. Either a positive numeric value, or \code{Inf}. } \item{c}{ Distance threshold. Either a positive numeric value, or \code{Inf}. 
} \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution of the distance maps computed by \code{\link{distmap}}. } } \details{ Baddeley (1992a, 1992b) defined a distance between two sets \eqn{A} and \eqn{B} contained in a space \eqn{W} by \deqn{ \Delta(A,B) = \left[ \frac 1 {|W|} \int_W \left| \min(c, d(x,A)) - \min(c, d(x,B)) \right|^p \, {\rm d}x \right]^{1/p} }{ Delta(A,B) = [ (1/|W|) * integral of |min(c, d(x,A))-min(c, d(x,B))|^p dx ]^(1/p) } where \eqn{c \ge 0}{c >= 0} is a distance threshold parameter, \eqn{0 < p \le \infty}{0 < p <= Inf} is the exponent parameter, and \eqn{d(x,A)} denotes the shortest distance from a point \eqn{x} to the set \eqn{A}. Also \code{|W|} denotes the area or volume of the containing space \eqn{W}. This is defined so that it is a \emph{metric}, i.e. \itemize{ \item \eqn{\Delta(A,B)=0}{Delta(A,B)=0} if and only if \eqn{A=B} \item \eqn{\Delta(A,B)=\Delta(B,A)}{Delta(A,B)=Delta(B,A)} \item \eqn{\Delta(A,C) \le \Delta(A,B) + \Delta(B,C)}{Delta(A,C) <= Delta(A,B) + Delta(B,C)} } It is topologically equivalent to the Hausdorff metric (Baddeley, 1992a) but has better stability properties in practical applications (Baddeley, 1992b). If \eqn{p=\infty}{p=Inf} and \eqn{c=\infty}{c=Inf} the Delta metric is equal to the Hausdorff metric. The algorithm uses \code{\link{distmap}} to compute the distance maps \eqn{d(x,A)} and \eqn{d(x,B)}, then approximates the integral numerically. The accuracy of the computation depends on the pixel resolution which is controlled through the extra arguments \code{\dots} passed to \code{\link{as.mask}}. } \value{ A numeric value. } \references{ Baddeley, A.J. (1992a) Errors in binary images and an \eqn{L^p} version of the Hausdorff metric. \emph{Nieuw Archief voor Wiskunde} \bold{10}, 157--183. Baddeley, A.J. (1992b) An error metric for binary images. In W. Foerstner and S. Ruwiedel (eds) \emph{Robust Computer Vision}. Karlsruhe: Wichmann. Pages 59--78. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{distmap}} } \examples{ X <- runifpoint(20) Y <- runifpoint(10) deltametric(X, Y, p=1,c=0.1) } \keyword{spatial} \keyword{math} spatstat/man/collapse.fv.Rd0000755000176000001440000000541112237642732015437 0ustar ripleyusers\name{collapse.fv} \alias{collapse.fv} \title{ Collapse Several Function Tables into One } \description{ Combines several function tables (objects of class \code{"fv"}) into a single function table, merging columns that are identical and relabelling columns that are different. } \usage{ collapse.fv(..., same = NULL, different = NULL) } \arguments{ \item{\dots}{ Arguments which are objects of class \code{"fv"}, or a list of objects of class \code{"fv"}. } \item{same}{ Character string or character vector specifying a column or columns, present in each \code{"fv"} object, that are identical in each object. This column or columns will be included only once. } \item{different}{ Character string or character vector specifying a column or columns, present in each \code{"fv"} object, that contain different values in each object. Each of these columns of data will be included, with labels that distinguish them from each other. } } \details{ This command combines the data in several function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) to make a single function table. It is essentially a smart wrapper for \code{\link{cbind.fv}}. A typical application is to calculate the same summary statistic (such as the \eqn{K} function) for different point patterns, and then to use \code{collapse.fv} to combine the results into a single object that can easily be plotted. See the Examples. 
The arguments \code{\dots} should be function tables (objects of class \code{"fv"}, see \code{\link{fv.object}}) that are compatible in the sense that they have the same values of the function argument. The argument \code{same} identifies any columns that are present in each function table, and which are known to contain exactly the same values in each table. This column or columns will be included only once in the result. The argument \code{different} identifies any columns that are present in each function table, and which contain different numerical values in each table. Each of these columns will be included, with labels to distinguish them. Columns that are not named in \code{same} or \code{different} will not be included. } \value{ Object of class \code{"fv"}. } \seealso{ \code{\link{fv.object}}, \code{\link{cbind.fv}} } \examples{ # generate simulated data X <- replicate(3, rpoispp(100), simplify=FALSE) names(X) <- paste("Simulation", 1:3) # compute K function estimates Klist <- lapply(X, Kest) # collapse K <- collapse.fv(Klist, same="theo", different="iso") K } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/hamster.Rd0000755000176000001440000000313412237642732014666 0ustar ripleyusers\name{hamster} \alias{hamster} \docType{data} \title{Aherne's hamster tumour data} \description{ Point pattern of cell nuclei in hamster kidney, each nucleus classified as either `dividing' or `pyknotic'. A multitype point pattern. } \format{ An object of class \code{"ppp"} representing the point pattern of cell locations. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of cell \cr \code{y} \tab Cartesian \eqn{y}-coordinate of cell \cr \code{marks} \tab factor with levels \code{"dividing"} and \code{"pyknotic"}. } See \code{\link{ppp.object}} for details of the format. 
} \usage{data(hamster)} \source{Dr W. A. Aherne, Department of Pathology, University of Newcastle-upon-Tyne, UK. Data supplied by Prof. Peter Diggle} \section{Notes}{ These data were presented and analysed by Diggle (1983, section 7.3). The data give the positions of the centres of the nuclei of certain cells in a histological section of tissue from a laboratory-induced metastasising lymphoma in the kidney of a hamster. The nuclei are classified as either "pyknotic" (corresponding to dying cells) or "dividing" (corresponding to cells arrested in metaphase, i.e. in the act of dividing). The background void is occupied by unrecorded, interphase cells in relatively large numbers. The sampling window is a square, originally about 0.25 mm square in real units, which has been rescaled to the unit square. } \references{ Diggle, P.J. (1983) \emph{Statistical analysis of spatial point patterns}. Academic Press. } \keyword{datasets} \keyword{spatial} spatstat/man/nsegments.Rd0000755000176000001440000000147012237642733015230 0ustar ripleyusers\name{nsegments} \alias{nsegments} \alias{nsegments.psp} \title{ Number of Line Segments in a Line Segment Pattern } \description{ Returns the number of line segments in a line segment pattern. } \usage{ nsegments(x) \method{nsegments}{psp}(x) } \arguments{ \item{x}{ A line segment pattern, i.e. an object of class \code{psp}. } } \details{ This function is generic, but there is at present only one method, that for class \code{psp}. } \value{ Integer. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{npoints}()}, \code{\link{psp.object}()} } \examples{ data(copper) nsegments(copper$Lines) nsegments(copper$SouthLines) } \keyword{ spatial } \keyword{ manip } spatstat/man/MultiHard.Rd0000755000176000001440000000541012237642731015112 0ustar ripleyusers\name{MultiHard} \alias{MultiHard} \title{The Multitype Hard Core Point Process Model} \description{ Creates an instance of the multitype hard core point process model which can then be fitted to point pattern data. } \usage{ MultiHard(types=NULL, hradii) } \arguments{ \item{types}{Optional; vector of all possible types (i.e. the possible levels of the \code{marks} variable in the data)} \item{hradii}{Matrix of hard core radii} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the multitype hard core process with hard core radii \eqn{hradii[i,j]}. } \details{ This is a multitype version of the hard core process. A pair of points of types \eqn{i} and \eqn{j} must not lie closer than \eqn{h_{ij}}{h[i,j]} units apart. The argument \code{types} need not be specified in normal use. It will be determined automatically from the point pattern data set to which the MultiStrauss interaction is applied, when the user calls \code{\link{ppm}}. However, the user should be confident that the ordering of types in the dataset corresponds to the ordering of rows and columns in the matrix \code{hradii}. The matrix \code{hradii} must be symmetric, with entries which are either positive numbers or \code{NA}. A value of \code{NA} indicates that no distance constraint should be applied for this combination of types. Note that only the hardcore radii are specified in \code{MultiHard}. The canonical parameters \eqn{\log(\beta_j)}{log(beta[j])} are estimated by \code{\link{ppm}()}, not fixed in \code{MultiHard()}. 
} \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Strauss}} } \examples{ h <- matrix(c(1,2,2,1), nrow=2,ncol=2) # prints a sensible description of itself MultiHard(hradii=h) # Fit the stationary multitype hardcore process to `amacrine' # with hard core operating only between cells of the same type. h <- 0.02 * matrix(c(1, NA, NA, 1), nrow=2,ncol=2) ppm(amacrine, ~1, MultiHard(,h)) # Note the comma; needed since "types" is not specified. } \section{Warnings}{ In order that \code{\link{ppm}} can fit the multitype hard core model correctly to a point pattern \code{X}, this pattern must be marked, with \code{markformat} equal to \code{vector} and the mark vector \code{marks(X)} must be a factor. If the argument \code{types} is specified it is interpreted as a set of factor levels and this set must equal \code{levels(marks(X))}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/tess.Rd0000755000176000001440000001026712237642734014210 0ustar ripleyusers\name{tess} \alias{tess} \title{Create a Tessellation} \description{ Creates an object of class \code{"tess"} representing a tessellation of a spatial region. } \usage{ tess(..., xgrid = NULL, ygrid = NULL, tiles = NULL, image = NULL, window=NULL, keepempty=FALSE) } \arguments{ \item{\dots}{Ignored.} \item{xgrid,ygrid}{Cartesian coordinates of vertical and horizontal lines determining a grid of rectangles. Incompatible with other arguments. } \item{tiles}{List of tiles in the tessellation. A list, each of whose elements is a window (object of class \code{"owin"}). Incompatible with other arguments. } \item{image}{ Pixel image which specifies the tessellation. Incompatible with other arguments. } \item{window}{ Optional. The spatial region which is tessellated (i.e. 
the union of all the tiles). An object of class \code{"owin"}. } \item{keepempty}{ Logical flag indicating whether empty tiles should be retained or deleted. } } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. This command creates an object of class \code{"tess"} that represents a tessellation. Three types of tessellation are supported: \describe{ \item{rectangular:}{ tiles are rectangles, with sides parallel to the \code{x} and \code{y} axes. They may or may not have equal size and shape. The arguments \code{xgrid} and \code{ygrid} determine the positions of the vertical and horizontal grid lines, respectively. (See \code{\link{quadrats}} for another way to do this.) } \item{tile list:}{ tiles are arbitrary spatial regions. The argument \code{tiles} is a list of these tiles, which are objects of class \code{"owin"}. } \item{pixel image:}{ Tiles are subsets of a fine grid of pixels. The argument \code{image} is a pixel image (object of class \code{"im"}) with factor values. Each level of the factor represents a different tile of the tessellation. The pixels that have a particular value of the factor constitute a tile. } } The optional argument \code{window} specifies the spatial region formed by the union of all the tiles. In other words it specifies the spatial region that is divided into tiles by the tessellation. If this argument is missing or \code{NULL}, it will be determined by computing the set union of all the tiles. This is a time-consuming computation. For efficiency it is advisable to specify the window. Note that the validity of the window will not be checked. Empty tiles may occur, either because one of the entries in the list \code{tiles} is an empty window, or because one of the levels of the factor-valued pixel image \code{image} does not occur in the pixel data. When \code{keepempty=TRUE}, empty tiles are permitted. 
When \code{keepempty=FALSE} (the default), tiles are not allowed to be empty, and any empty tiles will be removed from the tessellation. There are methods for \code{print}, \code{plot}, \code{[} and \code{[<-} for tessellations. Use \code{\link{tiles}} to extract the list of tiles in a tessellation, or \code{\link{tile.areas}} to compute their areas. Tessellations can be used to classify the points of a point pattern, in \code{\link{split.ppp}}, \code{\link{cut.ppp}} and \code{\link{by.ppp}}. } \value{ An object of class \code{"tess"} representing the tessellation. } \seealso{ \code{\link{plot.tess}}, \code{\link{[.tess}}, \code{\link{as.tess}}, \code{\link{tiles}}, \code{\link{intersect.tess}}, \code{\link{split.ppp}}, \code{\link{cut.ppp}}, \code{\link{by.ppp}}, \code{\link{quadrats}}, \code{\link{bdist.tiles}}, \code{\link{tile.areas}}. } \examples{ A <- tess(xgrid=0:4,ygrid=0:4) A B <- A[c(1, 2, 5, 7, 9)] B v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin()) levels(v) <- letters[seq(length(levels(v)))] E <- tess(image=v) E } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/crossdist.psp.Rd0000755000176000001440000000517712237642732016052 0ustar ripleyusers\name{crossdist.psp} %DontDeclareMethods \alias{crossdist.psp} \title{Pairwise distances between two different line segment patterns} \description{ Computes the distances between all pairs of line segments taken from two different line segment patterns. } \usage{ \method{crossdist}{psp}(X, Y, \dots, method="Fortran", type="Hausdorff") } \arguments{ \item{X,Y}{ Line segment patterns (objects of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{method}{String specifying which method of calculation to use. Values are \code{"Fortran"}, \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{type}{ Type of distance to be computed. 
Options are \code{"Hausdorff"} and \code{"separation"}. Partial matching is used. } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th line segment in \code{X} to the \code{j}-th line segment in \code{Y}. } \details{ This is a method for the generic function \code{\link{crossdist}}. Given two line segment patterns, this function computes the distance from each line segment in the first pattern to each line segment in the second pattern, and returns a matrix containing these distances. The distances between line segments are measured in one of two ways: \itemize{ \item if \code{type="Hausdorff"}, distances are computed in the Hausdorff metric. The Hausdorff distance between two line segments is the \emph{maximum} distance from any point on one of the segments to the nearest point on the other segment. \item if \code{type="separation"}, distances are computed as the \emph{minimum} distance from a point on one line segment to a point on the other line segment. For example, line segments which cross over each other have separation zero. } The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="Fortran"} (the default) then Fortran code is used. The Fortran code is several times faster. 
} \seealso{ \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ L1 <- psp(runif(5), runif(5), runif(5), runif(5), owin()) L2 <- psp(runif(10), runif(10), runif(10), runif(10), owin()) D <- crossdist(L1, L2) #result is a 5 x 10 matrix S <- crossdist(L1, L2, type="sep") } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/lansing.Rd0000755000176000001440000000606312237642732014662 0ustar ripleyusers\name{lansing} \alias{lansing} \docType{data} \title{ Lansing Woods Point Pattern } \description{ Locations and botanical classification of trees in Lansing Woods. The data come from an investigation of a 924 ft x 924 ft (19.6 acre) plot in Lansing Woods, Clinton County, Michigan USA by D.J. Gerrard. The data give the locations of 2251 trees and their botanical classification (into hickories, maples, red oaks, white oaks, black oaks and miscellaneous trees). The original plot size (924 x 924 feet) has been rescaled to the unit square. Note that the data contain duplicated points (two points at the same location). To determine which points are duplicates, use \code{\link{duplicated.ppp}}. To remove the duplication, use \code{\link{unique.ppp}}. } \format{ An object of class \code{"ppp"} representing the point pattern of tree locations. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of tree \cr \code{y} \tab Cartesian \eqn{y}-coordinate of tree \cr \code{marks} \tab factor with levels indicating species of each tree } The levels of \code{marks} are \code{blackoak}, \code{hickory}, \code{maple}, \code{misc}, \code{redoak} and \code{whiteoak}. See \code{\link{ppp.object}} for details of the format of a point pattern object. } \usage{data(lansing)} \examples{ data(lansing) plot(lansing) summary(lansing) plot(split(lansing)) plot(split(lansing)$maple) } \references{ Besag, J. 
(1978) Some methods of statistical analysis for spatial data. \emph{Bull. Internat. Statist. Inst.} \bold{44}, 77--92. Cox, T.F. (1976) The robust estimation of the density of a forest stand using a new conditioned distance method. \emph{Biometrika} \bold{63}, 493--500. Cox, T.F. (1979) A method for mapping the dense and sparse regions of a forest stand. \emph{Applied Statistics} \bold{28}, 14--19. Cox, T.F. and Lewis, T. (1976) A conditioned distance ratio method for analysing spatial patterns. \emph{Biometrika} \bold{63}, 483--492. Diggle, P.J. (1979a) The detection of random heterogeneity in plant populations. \emph{Biometrics} \bold{33}, 390--394. Diggle, P.J. (1979b) Statistical methods for spatial point patterns in ecology. \emph{Spatial and temporal analysis in ecology}. R.M. Cormack and J.K. Ord (eds.) Fairland: International Co-operative Publishing House. pages 95--150. Diggle, P.J. (1981) Some graphical methods in the analysis of spatial point patterns. In \emph{Interpreting Multivariate Data}. V. Barnett (eds.) John Wiley and Sons. Pages 55--73. Diggle, P.J. (1983) \emph{Statistical analysis of spatial point patterns}. Academic Press. Gerrard, D.J. (1969) Competition quotient: a new measure of the competition affecting individual forest trees. Research Bulletin 20, Agricultural Experiment Station, Michigan State University. Lotwick, H.W. (1981) \emph{Spatial stochastic point processes}. PhD thesis, University of Bath, UK. Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. } \keyword{datasets} \keyword{spatial} spatstat/man/rmhmodel.ppm.Rd0000755000176000001440000001164712237642734015637 0ustar ripleyusers\name{rmhmodel.ppm} \alias{rmhmodel.ppm} \title{Interpret Fitted Model for Metropolis-Hastings Simulation.} \description{ Converts a fitted point process model into a format that can be used to simulate the model by the Metropolis-Hastings algorithm. 
} \usage{ \method{rmhmodel}{ppm}(model, win, ..., verbose=TRUE, project=TRUE, control=rmhcontrol()) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{win}{ Optional. Window in which the simulations should be generated. } \item{\dots}{Ignored.} \item{verbose}{ Logical flag indicating whether to print progress reports while the model is being converted. } \item{project}{Logical flag indicating what to do if the fitted model does not correspond to a valid point process. See Details.} \item{control}{ Parameters determining the iterative behaviour of the simulation algorithm. Passed to \code{\link{rmhcontrol}}. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.ppm} is the method for the class \code{"ppm"} of fitted point process models. The argument \code{model} should be a fitted point process model (object of class \code{"ppm"}) typically obtained from the model-fitting function \code{\link{ppm}}. This will be converted into an object of class \code{"rmhmodel"}. The optional argument \code{win} specifies the window in which the pattern is to be generated. If specified, it must be in a form which can be coerced to an object of class \code{owin} by \code{\link{as.owin}}. Not all fitted point process models obtained from \code{\link{ppm}} can be simulated. We have not yet implemented simulation code for the \code{\link{LennardJones}} and \code{\link{OrdThresh}} models. 
It is also possible that a fitted point process model obtained from \code{\link{ppm}} may not correspond to a valid point process. For example a fitted model with the \code{\link{Strauss}} interpoint interaction may have any value of the interaction parameter \eqn{\gamma}{gamma}; however the Strauss process is not well-defined for \eqn{\gamma > 1}{gamma > 1} (Kelly and Ripley, 1976). The argument \code{project} determines what to do in such cases. If \code{project=FALSE}, a fatal error will occur. If \code{project=TRUE}, the fitted model parameters will be adjusted to the nearest values which do correspond to a valid point process. For example a Strauss process with \eqn{\gamma > 1}{gamma > 1} will be projected to a Strauss process with \eqn{\gamma = 1}{gamma = 1}, equivalent to a Poisson process. } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. 
} \seealso{ \code{\link{rmhmodel}}, \code{\link{rmhmodel.list}}, \code{\link{rmhmodel.default}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{Hybrid}}, \code{\link{LennardJones}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Triplets}} } \examples{ data(cells) fit1 <- ppm(cells, ~1, Strauss(0.07)) mod1 <- rmhmodel(fit1) fit2 <- ppm(cells, ~x, Geyer(0.07, 2)) mod2 <- rmhmodel(fit2) fit3 <- ppm(cells, ~x, Hardcore(0.07)) mod3 <- rmhmodel(fit3) # Then rmh(mod1), etc } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/ponderosa.Rd0000755000176000001440000000417412237642733015223 0ustar ripleyusers\name{ponderosa} \alias{ponderosa} \alias{ponderosa.extra} \docType{data} \title{ Ponderosa Pine Tree Point Pattern } \description{ The data record the locations of 108 Ponderosa Pine (\emph{Pinus ponderosa}) trees in a 120 metre square region in the Klamath National Forest in northern California, published as Figure 2 of Getis and Franklin (1987). Franklin et al. (1985) determined the locations of approximately 5000 trees from United States Forest Service aerial photographs and digitised them for analysis. Getis and Franklin (1987) selected a 120 metre square subregion that appeared to exhibit clustering. This subregion is the \code{ponderosa} dataset. In principle these data are equivalent to Figure 2 of Getis and Franklin (1987) but they are not exactly identical; some of the spatial locations appear to be slightly perturbed. 
The data points identified as A, B, C on Figure 2 of Getis and Franklin (1987) correspond to points numbered 42, 7 and 77 in the dataset respectively. } \format{ Typing \code{data(ponderosa)} gives access to two objects, \code{ponderosa} and \code{ponderosa.extra}. The dataset \code{ponderosa} is a spatial point pattern (object of class \code{"ppp"}) representing the point pattern of tree positions. See \code{\link{ppp.object}} for details of the format. The dataset \code{ponderosa.extra} is a list containing supplementary data. The entry \code{id} contains the index numbers of the three special points A, B, C in the point pattern. The entry \code{plotit} is a function that can be called to produce a nice plot of the point pattern. } \usage{data(ponderosa)} \source{Prof. Janet Franklin, University of California, Santa Barbara} \examples{ data(ponderosa) ponderosa.extra$plotit() } \references{ Franklin, J., Michaelsen, J. and Strahler, A.H. (1985) Spatial analysis of density dependent pattern in coniferous forest stands. \emph{Vegetatio} \bold{64}, 29--36. Getis, A. and Franklin, J. (1987) Second-order neighbourhood analysis of mapped point patterns. \emph{Ecology} \bold{68}, 473--477. } \keyword{datasets} \keyword{spatial} spatstat/man/Triplets.Rd0000644000176000001440000000651612237642731015034 0ustar ripleyusers\name{Triplets} \alias{Triplets} \title{The Triplet Point Process Model} \description{ Creates an instance of Geyer's triplet interaction point process model which can then be fitted to point pattern data. } \usage{ Triplets(r) } \arguments{ \item{r}{The interaction radius of the Triplets process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Triplets process with interaction radius \eqn{r}. 
} \details{ The (stationary) Geyer triplet process (Geyer, 1999) with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each triplet of close points contributes a factor \eqn{\gamma}{gamma} to the density. A triplet of close points is a group of 3 points, each pair of which is closer than \eqn{r} units apart. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of unordered triples of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. The nonstationary Triplets process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Triplets process pairwise interaction is yielded by the function \code{Triplets()}. See the examples below. Note the only argument is the interaction radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Triplets()}. 
} \seealso{ \code{\link{ppm}}, \code{\link{triplet.family}}, \code{\link{ppm.object}} } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \examples{ Triplets(r=0.1) # prints a sensible description of itself \dontrun{ ppm(cells, ~1, Triplets(r=0.1)) # fit the stationary Triplets process to `cells' } ppm(cells, ~polynom(x,y,3), Triplets(r=0.1)) # fit a nonstationary Triplets process with log-cubic polynomial trend } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/Extract.im.Rd0000755000176000001440000001610612237642732015244 0ustar ripleyusers\name{Extract.im} \alias{[.im} \title{Extract Subset of Image} \description{ Extract a subset or subregion of a pixel image. } \usage{ \method{[}{im}(x, i, j, \dots, drop=TRUE, raster=NULL, rescue=is.owin(i)) } \arguments{ \item{x}{ A two-dimensional pixel image. An object of class \code{"im"}. } \item{i}{ Object defining the subregion or subset to be extracted. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or a point pattern (an object of class \code{"ppp"}), or any type of index that applies to a matrix, or something that can be converted to a point pattern by \code{\link{as.ppp}} (using the window of \code{x}). } \item{j}{ An integer or logical vector serving as the column index if matrix indexing is being used. Ignored if \code{i} is a spatial object. } \item{\dots}{Ignored.} \item{drop}{ Logical value. 
Locations in \code{i} that lie outside the spatial domain of the image \code{x} return a pixel value of \code{NA} if \code{drop=FALSE}, and are omitted if \code{drop=TRUE}. } \item{raster}{ Optional. An object of class \code{"owin"} or \code{"im"} determining a pixel grid. } \item{rescue}{ Logical value indicating whether rectangular blocks of data should always be returned as pixel images. } } \value{ Either a pixel image or a vector of pixel values. See Details. } \details{ This function extracts a subset of the pixel values in a pixel image. (To reassign the pixel values, see \code{\link{[<-.im}}). The image \code{x} must be an object of class \code{"im"} representing a pixel image defined inside a rectangle in two-dimensional space (see \code{\link{im.object}}). The subset to be extracted is determined by the arguments \code{i,j} according to the following rules (which are checked in this order): \enumerate{ \item \code{i} is a spatial object such as a window, a pixel image with logical values, or a point pattern; or \item \code{i,j} are indices for the matrix \code{as.matrix(x)}; or \item \code{i} can be converted to a point pattern by \code{\link{as.ppp}(i, W=as.owin(x))}, and \code{i} is not a matrix. } If \code{i} is a spatial window (an object of class \code{"owin"}), the values of the image inside this window are extracted (after first clipping the window to the spatial domain of the image if necessary). If \code{i} is a pixel image with logical values, it is interpreted as a spatial window (with \code{TRUE} values inside the window and \code{FALSE} outside). If \code{i} is a point pattern (an object of class \code{"ppp"}), then the values of the pixel image at the points of this pattern are extracted. This is a simple way to read the pixel values at a given spatial location. At locations outside the spatial domain of the image, the pixel value is undefined, and is taken to be \code{NA}. 
The logical argument \code{drop} determines whether such \code{NA} values will be returned or omitted. It also influences the format of the return value. If \code{i} is a point pattern (or something that can be converted to a point pattern), then \code{X[i, drop=FALSE]} is a numeric vector containing the pixel values at each of the points of the pattern. Its length is equal to the number of points in the pattern \code{i}. It may contain \code{NA}s corresponding to points which lie outside the spatial domain of the image \code{x}. By contrast, \code{X[i]} or \code{X[i, drop=TRUE]} contains only those pixel values which are not \code{NA}. It may be shorter. If \code{i} is a spatial window then \code{X[i, drop=FALSE]} is another pixel image of the same dimensions as \code{x} obtained by setting all pixels outside the window \code{i} to have value \code{NA}. When the result is displayed by \code{\link{plot.im}} the effect is that the pixel image \code{x} is clipped to the window \code{i}. If \code{i} is a spatial window then \code{X[i, drop=TRUE]} is either: \itemize{ \item a numeric vector containing the pixel values for all pixels that lie inside the window \code{i}. This happens if \code{i} is \emph{not} a rectangle (i.e. \code{i$type != "rectangle"}) or if \code{rescue=FALSE}. \item a pixel image. This happens only if \code{i} is a rectangle (\code{i$type = "rectangle"}) and \code{rescue=TRUE} (the default). } If the optional argument \code{raster} is given, then it should be a binary image mask or a pixel image. Then \code{x} will first be converted to an image defined on the pixel grid implied by \code{raster}, before the subset operation is carried out. In particular, \code{x[i, raster=i, drop=FALSE]} will return an image defined on the same pixel array as the object \code{i}. If \code{i} does not satisfy any of the conditions above, then the algorithm attempts to interpret \code{i} and \code{j} as indices for the matrix \code{as.matrix(x)}. 
Either \code{i} or \code{j} may be missing or blank. The result is usually a vector or matrix of pixel values. Exceptionally the result is a pixel image if \code{i,j} determines a rectangular subset of the pixel grid, and if the user specifies \code{rescue=TRUE}. Finally, if none of the above conditions is met, the object \code{i} may also be a data frame or list of \code{x,y} coordinates which will be converted to a point pattern, taking the observation window to be \code{as.owin(x)}. Then the pixel values at these points will be extracted as a vector. } \section{Warnings}{ If you have a 2-column matrix containing the \eqn{x,y} coordinates of point locations, then to prevent this being interpreted as an array index, you should convert it to a \code{data.frame} or to a point pattern. If \code{W} is a window or a pixel image, then \code{x[W, drop=FALSE]} will return an image defined on the same pixel array as the original image \code{x}. If you want to obtain an image whose pixel dimensions agree with those of \code{W}, use the \code{raster} argument, \code{x[W, raster=W, drop=FALSE]}. 
} \seealso{ \code{\link{im.object}}, \code{\link{[<-.im}}, \code{\link{ppp.object}}, \code{\link{as.ppp}}, \code{\link{owin.object}}, \code{\link{plot.im}} } \examples{ # make up an image X <- setcov(unit.square()) plot(X) # a rectangular subset W <- owin(c(0,0.5),c(0.2,0.8)) Y <- X[W] plot(Y) # a polygonal subset data(letterR) R <- affine(letterR, diag(c(1,1)/2), c(-2,-0.7)) Y <- X[R, drop=FALSE] plot(Y) # a point pattern P <- rpoispp(20) Y <- X[P] # look up a specified location X[list(x=0.1,y=0.2)] # 10 x 10 pixel array X <- as.im(function(x,y) { x + y }, owin(c(-1,1),c(-1,1)), dimyx=10) # 100 x 100 W <- as.mask(disc(1, c(0,0)), dimyx=100) # 10 x 10 raster X[W,drop=FALSE] # 100 x 100 raster X[W, raster=W, drop=FALSE] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/compatible.im.Rd0000755000176000001440000000234012237642732015744 0ustar ripleyusers\name{compatible.im} %DontDeclareMethods \alias{compatible.im} \title{Test Whether Pixel Images Are Compatible} \description{ Tests whether two or more pixel image objects have compatible dimensions. } \usage{ \method{compatible}{im}(A, B, \dots, tol=1e-6) } \arguments{ \item{A,B,\dots}{Two or more pixel images (objects of class \code{"im"}).} \item{tol}{Tolerance factor} } \details{ This function tests whether the pixel images \code{A} and \code{B} (and any additional images \code{\dots}) have compatible pixel dimensions. They are compatible if they have the same number of rows and columns, the same physical pixel dimensions, and occupy the same rectangle in the plane. The argument \code{tol} specifies the maximum tolerated error in the pixel coordinates, expressed as a fraction of the dimensions of a single pixel. } \value{ Logical value: \code{TRUE} if the images are compatible, and \code{FALSE} if they are not. 
} \seealso{ \code{\link{eval.im}}, \code{\link{harmonise.im}}, \code{\link{commonGrid}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/psp.object.Rd0000755000176000001440000000636012237642733015277 0ustar ripleyusers\name{psp.object} \alias{psp.object} %DoNotExport \title{Class of Line Segment Patterns} \description{ A class \code{"psp"} to represent a spatial pattern of line segments in the plane. Includes information about the window in which the pattern was observed. Optionally includes marks. } \details{ An object of this class represents a two-dimensional pattern of line segments. It specifies \itemize{ \item the locations of the line segments (both endpoints) \item the window in which the pattern was observed \item optionally, a ``mark'' attached to each line segment (extra information such as a type label). } If \code{X} is an object of type \code{psp}, it contains the following elements: \tabular{ll}{ \code{ends} \tab data frame with entries \code{x0, y0, x1, y1} \cr \tab giving coordinates of segment endpoints \cr \code{window} \tab window of observation \cr \tab (an object of class \code{\link{owin}}) \cr \code{n} \tab number of line segments \cr \code{marks} \tab optional vector or data frame of marks \cr \code{markformat} \tab character string specifying the format of the \cr \tab marks; \dQuote{none}, \dQuote{vector}, or \dQuote{dataframe} } Users are strongly advised not to manipulate these entries directly. Objects of class \code{"psp"} may be created by the function \code{\link{psp}} and converted from other types of data by the function \code{\link{as.psp}}. Note that you must always specify the window of observation; there is intentionally no default action of ``guessing'' the window dimensions from the line segments alone. 
Subsets of a line segment pattern may be obtained by the functions \code{\link{[.psp}} and \code{\link{clip.psp}}. Line segment pattern objects can be plotted just by typing \code{plot(X)} which invokes the \code{plot} method for line segment pattern objects, \code{\link{plot.psp}}. See \code{\link{plot.psp}} for further information. There are also methods for \code{summary} and \code{print} for line segment patterns. Use \code{summary(X)} to see a useful description of the data. Utilities for line segment patterns include \code{\link{midpoints.psp}} (to compute the midpoints of each segment), \code{\link{lengths.psp}}, (to compute the length of each segment), \code{\link{angles.psp}}, (to compute the angle of orientation of each segment), and \code{\link{distmap.psp}} to compute the distance map of a line segment pattern. } \seealso{ \code{\link{psp}}, \code{\link{as.psp}}, \code{\link{[.psp}} } \examples{ # creating a <- psp(runif(20),runif(20),runif(20),runif(20), window=owin()) # converting from other formats a <- as.psp(matrix(runif(80), ncol=4), window=owin()) a <- as.psp(data.frame(x0=runif(20), y0=runif(20), x1=runif(20), y1=runif(20)), window=owin()) # clipping w <- owin(c(0.1,0.7), c(0.2, 0.8)) b <- clip.psp(a, w) b <- a[w] # the last two lines are equivalent. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{attribute} spatstat/man/spatstat-internal.Rd0000755000176000001440000011247712251563340016704 0ustar ripleyusers\name{spatstat-internal} \title{Internal spatstat functions} \alias{[.hyperframe} \alias{[<-.hyperframe} \alias{$.hyperframe} \alias{$<-.hyperframe} \alias{[.pp3} \alias{[.localpcfmatrix} \alias{[.splitppx} \alias{[<-.splitppx} \alias{acedist.show} \alias{acedist.noshow} \alias{active.interactions} \alias{adjustthinrange} \alias{affinexy} \alias{affinexypolygon} \alias{anycrossing.psp} \alias{apply23sum} \alias{applytolayers} \alias{area.xypolygon} \alias{areadelta2} \alias{areaGain.diri} \alias{areaGain.grid} \alias{areaLoss.diri} \alias{areaLoss.grid} \alias{assemble.plot.objects} \alias{AsymmDistance.psp} \alias{as.breakpts} \alias{as.data.frame.bw.optim} \alias{as.data.frame.fv} \alias{as.im.scan.test} \alias{as.im.linim} \alias{as.list.hyperframe} \alias{as.listof} \alias{as.units} \alias{badprobability} \alias{bermantestEngine} \alias{bdrylength.xypolygon} \alias{bdry.mask} \alias{bind.ratfv} \alias{blankcoefnames} \alias{bounding.box3} \alias{break.holes} \alias{breakpts} \alias{breakpts.from.r} \alias{bt.frame} \alias{bw.optim} \alias{cannot.update} \alias{cartesian} \alias{cat.factor} \alias{cellmiddles} \alias{censtimeCDFest} \alias{change.default.expand} \alias{checkfields} \alias{check.finite} \alias{check.hist.lengths} \alias{check.named.list} \alias{check.named.thing} \alias{check.named.vector} \alias{check.nvector} \alias{check.nmatrix} \alias{check.1.integer} \alias{check.1.real} \alias{check.range} \alias{check.testfun} \alias{clarkevansCalc} \alias{clip.psp} \alias{cliprect.psp} \alias{clippoly.psp} \alias{closethresh} \alias{coef.summary.ppm} \alias{compatible.rat} \alias{complaining} \alias{compileCDF} \alias{compileK} \alias{compilepcf} 
\alias{conform.ratfv} \alias{crosspairquad} \alias{cobble.xy} \alias{codetime} \alias{commasep} \alias{conform.imagelist} \alias{countingweights} \alias{damaged.ppm} \alias{data.mppm} \alias{datagen.runifpointOnLines} \alias{datagen.runifpoisppOnLines} \alias{datagen.rpoisppOnLines} \alias{default.clipwindow} \alias{default.n.tiling} \alias{default.ntile} \alias{deltasuffstat} \alias{dflt.redraw} \alias{densitypointsEngine} \alias{diagnose.ppm.engine} \alias{digital.volume} \alias{dilate.owin} \alias{dim.fasp} \alias{dim.hyperframe} \alias{dim.im} \alias{dim.msr} \alias{dimnames.fasp} \alias{dimnames<-.fasp} \alias{dimnames.msr} \alias{distpl} \alias{distppl} \alias{distppll} \alias{distppllmin} \alias{distributecbind} \alias{dist2dpath} \alias{divisors} \alias{do.as.im} \alias{do.call.matched} \alias{do.istat} \alias{dotexpr.to.call} \alias{dropifsingle} \alias{emptywindow} \alias{ensure2vector} \alias{ensure3Darray} \alias{envelopeEngine} \alias{envelopeProgressData} \alias{envelopeTest} \alias{envelope.matrix} \alias{equalpairs} \alias{equalpairs.quad} \alias{equals.quad} \alias{equalsfun.quad} \alias{eratosthenes} \alias{erodemask} \alias{erode.owin} \alias{evalCovar} \alias{evalCovar.ppm} \alias{evalCovar.lppm} \alias{evalCovariate} \alias{evalInteraction} \alias{evalInterEngine} \alias{evalPairPotential} \alias{even.breaks.owin} \alias{evenly.spaced} \alias{exactdt} \alias{exactPdt} \alias{explain.ifnot} \alias{extractAIC.slrm} \alias{extractAtomicQtests} \alias{fakecallstring} \alias{fave.order} \alias{f3engine} \alias{f3Cengine} \alias{fasp} \alias{findbestlegendpos} \alias{findCovariate} \alias{findcbind} \alias{firstfactor} \alias{fii} \alias{fillNA} \alias{flipxypolygon} \alias{forbidNA} \alias{forbid.logi} \alias{FormatFaspFormulae} \alias{fvlabels} \alias{fvlabels<-} \alias{fvlabelmap} \alias{fvlegend} \alias{g3engine} \alias{g3Cengine} \alias{greatest.common.divisor} \alias{getdataname} \alias{getfields} \alias{getglmdata} \alias{getglmfit} 
\alias{getglmsubset} \alias{getlambda.lpp} \alias{getppmdatasubset} \alias{getSumFun} \alias{geyercounts} \alias{geyerdelta2} \alias{GLMpredict} \alias{good.names} \alias{good.correction.K} \alias{gridindex} \alias{grid1index} \alias{grow.rectangle} \alias{gsubdot} \alias{handle.r.b.args} \alias{handle.rshift.args} \alias{ho.engine} \alias{hsvNA} \alias{identical.formulae} \alias{idorempty} \alias{ifelseAB} \alias{ifelseAX} \alias{ifelseXB} \alias{ifelseXY} \alias{ifelse1NA} \alias{ifelse0NA} \alias{ifelseNegPos} \alias{illegal.iformula} \alias{implemented.for.K} \alias{impliedpresence} \alias{impliedcoefficients} \alias{inpoint} \alias{inside.range} \alias{inside.triangle} \alias{inside.xypolygon} \alias{instantiate.interact} \alias{intersect.ranges} \alias{intX.owin} \alias{intX.xypolygon} \alias{intY.owin} \alias{intY.xypolygon} \alias{is.atomicQtest} \alias{is.cadlag} \alias{is.data} \alias{is.expandable} \alias{is.expandable.ppm} \alias{is.expandable.rmhmodel} \alias{is.fv} \alias{is.hole.xypolygon} \alias{is.hyperframe} \alias{is.infline} \alias{is.interact} \alias{is.marked.default} \alias{is.marked.psp} \alias{is.marked.quad} \alias{is.mppm} \alias{is.multitype.quad} \alias{is.multitype.default} \alias{is.parseable} \alias{is.poisson.mppm} \alias{is.pp3} \alias{is.ppx} \alias{is.prime} \alias{is.psp} \alias{is.tess} \alias{k3engine} \alias{Kborder.engine} \alias{Knone.engine} \alias{Kount} \alias{Kwtsum} \alias{Kpcf.kppm} \alias{killinteraction} \alias{km.rs.opt} \alias{kppmMinCon} \alias{kppmComLik} \alias{labels.ppm} \alias{least.common.multiple} \alias{levels.im} \alias{levels<-.im} \alias{lhs.of.formula} \alias{linequad} \alias{linearKengine} \alias{linearKmulti} \alias{linearKmulti.inhom} \alias{linearKmultiEngine} \alias{linearpcfengine} \alias{linearpcfmulti} \alias{linearpcfmulti.inhom} \alias{linearPCFmultiEngine} \alias{listof} \alias{localKengine} \alias{localpcfengine} \alias{localpcfmatrix} \alias{local2lpp} \alias{logi.dummy} 
\alias{logi.engine} \alias{passthrough} \alias{paste.expr} \alias{prettyinside} \alias{lookup.im} \alias{majorminorversion} \alias{make.even.breaks} \alias{make.parseable} \alias{makeunits} \alias{markappend} \alias{markcbind} \alias{markformat} \alias{markformat.ppp} \alias{markformat.ppx} \alias{markformat.psp} \alias{markformat.default} \alias{mark.scale.default} \alias{markspace.integral} \alias{marks.default} \alias{marks.quad} \alias{\%mapp\%} %DoNotExport %NAMESPACE export("%mapp%") \alias{markappendop} \alias{marksubset} \alias{markreplicateop} \alias{\%mrep\%} %DoNotExport %NAMESPACE export("%mrep%") \alias{marksubsetop} \alias{\%msub\%} %DoNotExport %NAMESPACE export("%msub%") \alias{mask2df} \alias{matcolall} \alias{matcolany} \alias{matcolsum} \alias{matrixsample} \alias{matrowall} \alias{matrowany} \alias{matrowsum} \alias{maxflow} \alias{meanlistfv} \alias{meanX.owin} \alias{meanY.owin} \alias{model.se.image} \alias{mpl.engine} \alias{mpl.get.covariates} \alias{mpl.prepare} \alias{MultiPair.checkmatrix} \alias{multiply.only.finite.entries} \alias{multiplicity.data.frame} \alias{na.handle.im} \alias{names.hyperframe} \alias{names<-.hyperframe} \alias{nearest.pixel} \alias{nearest.valid.pixel} \alias{newstyle.coeff.handling} \alias{niceround} \alias{nncleanEngine} \alias{nndcumfun} \alias{no.trend.ppm} \alias{nobjects} \alias{nobjects.ppp} \alias{nobjects.ppx} \alias{nobjects.psp} \alias{n.quad} \alias{numalign} \alias{numeric.columns} \alias{nzpaste} \alias{objsurfEngine} \alias{offsetsinformula} \alias{onecolumn} \alias{optimStatus} \alias{ordinal} \alias{\%orifnull\%} %DoNotExport %NAMESPACE export("%orifnull%") \alias{outdated.interact} \alias{overlap.trapezium} \alias{overlap.xypolygon} \alias{oversize.quad} \alias{owinpolycheck} \alias{owinpoly2mask} \alias{pairs.listof} \alias{param.quad} \alias{paren} \alias{partialModelMatrix} \alias{pcf3engine} \alias{pcfmulti.inhom} \alias{pickoption} \alias{ploterodewin} \alias{ploterodeimage} 
\alias{plot.addvar} \alias{plot.barplotdata} \alias{plot.bw.frac} \alias{plot.bw.optim} \alias{plot.localpcfmatrix} \alias{plot.minconfit} \alias{plot.parres} \alias{plot.plotpairsim} \alias{plot.pppmatching} \alias{plot.profilepl} \alias{plot.qqppm} \alias{plot.quadratcount} \alias{plot.quadrattest} \alias{plot.scan.test} \alias{polynom} \alias{ppllengine} \alias{ppmCovariates} \alias{ppm.influence} \alias{pppdist.mat} \alias{pppdist.prohorov} \alias{ppsubset} \alias{prange} \alias{prefixfv} \alias{primefactors} \alias{primesbelow} \alias{printStatus} \alias{print.addvar} \alias{print.autoexec} \alias{print.bt.frame} \alias{print.bw.frac} \alias{print.bw.optim} \alias{print.colourmap} \alias{print.diagppm} \alias{print.distfun} \alias{print.envelope} \alias{print.fasp} \alias{print.fv} \alias{print.fvfun} \alias{print.funxy} \alias{print.hyperframe} \alias{print.influence.ppm} \alias{print.interact} \alias{print.isf} \alias{print.layered} \alias{print.leverage.ppm} \alias{print.linim} \alias{print.localpcfmatrix} \alias{print.lut} \alias{print.minconfit} \alias{print.mppm} \alias{print.msr} \alias{print.nnfun} \alias{print.parres} \alias{print.plotpairsim} \alias{print.plotppm} \alias{print.pppmatching} \alias{print.profilepl} \alias{print.quadrattest} \alias{print.qqppm} \alias{print.rat} \alias{print.rmhcontrol} \alias{print.rmhexpand} \alias{print.rmhmodel} \alias{print.rmhstart} \alias{print.rmhInfoList} \alias{print.splitppp} \alias{print.simplepanel} \alias{print.splitppx} \alias{print.summary.hyperframe} \alias{print.summary.listof} \alias{print.summary.logiquad} \alias{print.summary.lut} \alias{print.summary.mppm} \alias{print.summary.owin} \alias{print.summary.ppp} \alias{print.summary.psp} \alias{print.summary.rmhexpand} \alias{print.summary.splitppp} \alias{print.summary.splitppx} \alias{print.summary.units} \alias{print.tess} \alias{print.timed} \alias{prolongseq} \alias{quad} \alias{quad.mppm} \alias{RandomFieldsSafe} \alias{ratfv} 
\alias{rectquadrat.breaks} \alias{rectquadrat.countEngine} \alias{reduceformula} \alias{repair.image.xycoords} \alias{resolveEinfo} \alias{resolve.vargamma.shape} \alias{rgbNA} \alias{rhs.of.formula} \alias{rhohatEngine} \alias{rhohatCalc} \alias{RmhExpandRule} \alias{rmhsnoop} \alias{quadrat.testEngine} \alias{quadscheme.replicated} \alias{quadscheme.spatial} \alias{pointgrid} \alias{rastersample} \alias{rasterx.im} \alias{rastery.im} \alias{rasterxy.im} \alias{rebadge.fv} \alias{rebound} \alias{rebound.im} \alias{rebound.ppp} \alias{rebound.psp} \alias{rebound.owin} \alias{reconcile.fv} \alias{repair.old.factor.image} \alias{reincarnate.interact} \alias{resid4plot} \alias{resid1plot} \alias{resid1panel} \alias{resolve.defaults} \alias{resolve.1.default} \alias{resolve.2D.kernel} \alias{restrict.mask} \alias{reverse.xypolygon} \alias{revcumsum} \alias{rmax.rule} \alias{rotxy} \alias{rotxypolygon} \alias{row.names.hyperframe} \alias{row.names<-.hyperframe} \alias{runifpoispp} \alias{runifpoisppOnLines} \alias{runifrect} \alias{rmhResolveControl} \alias{rmhResolveExpansion} \alias{rmhResolveTypes} \alias{rmhSnoopEnv} \alias{rmhcontrol.rmhcontrol} \alias{rmhcontrol.list} \alias{rmhEngine} \alias{rmhmodel.rmhmodel} \alias{rmhstart.rmhstart} \alias{rmhstart.list} \alias{rmpoint.I.allim} \alias{rpoint.multi} \alias{safelookup} \alias{samefunction} \alias{scanmeasure} \alias{scanmeasure.ppp} \alias{scanmeasure.im} \alias{scanBinomLRTS} \alias{scanPoisLRTS} \alias{scanLRTS} \alias{second.moment.calc} \alias{second.moment.engine} \alias{sensiblevarname} \alias{sewpcf} \alias{sewsmod} \alias{shiftxy} \alias{shiftxypolygon} \alias{short.deparse} \alias{signalStatus} \alias{simplify.xypolygon} \alias{simulrecipe} \alias{singlestring} \alias{slr.prepare} \alias{slrAssemblePixelData} \alias{smoothpointsEngine} \alias{sort.im} \alias{spatstat.rawdata.location} \alias{spatstatClusterModelInfo} \alias{spatstatRmhInfo} \alias{spatialCDFframe} \alias{spatialCDFtest} 
\alias{splitHybridInteraction} \alias{sp.foundclass} \alias{sp.foundclasses} \alias{sphere.volume} \alias{store.versionstring.spatstat} \alias{str.hyperframe} \alias{strausscounts} \alias{suffloc} \alias{suffstat.generic} \alias{suffstat.poisson} \alias{summarise.trend} \alias{summary.envelope} \alias{summary.hyperframe} \alias{summary.logiquad} \alias{summary.lut} \alias{summary.mppm} \alias{summary.profilepl} \alias{summary.pppmatching} \alias{summary.ppx} \alias{summary.splitppx} \alias{summary.rmhexpand} \alias{sumsymouter} \alias{superimposeMarks} \alias{sympoly} \alias{termsinformula} \alias{test.crossing.psp} \alias{test.selfcrossing.psp} \alias{tilecentroids} \alias{trap.extra.arguments} \alias{trianglediameters} \alias{trim.mask} \alias{tweak.fv.entry} \alias{unitname.default} \alias{unitname<-.default} \alias{unparen} \alias{update.interact} \alias{update.rmhstart} \alias{validradius} \alias{validate.mask} \alias{validate.quad} \alias{validposint} \alias{vanilla.fv} \alias{variablesinformula} \alias{verbalogic} \alias{versionstring.interact} \alias{versionstring.ppm} \alias{versionstring.spatstat} \alias{verifyclass} \alias{verify.xypolygon} \alias{warn.ignored.args} \alias{weighted.var} \alias{windows.mppm} \alias{with.msr} \alias{w.quad} \alias{x.quad} \alias{y.quad} \alias{xy.grid} \alias{X2testEngine} \alias{xtfrm.im} \alias{xypolygon2psp} \alias{xypolyselfint} \description{ Internal spatstat functions. 
} \usage{ \method{[}{hyperframe}(x, i, j, drop=FALSE, \dots) \method{[}{hyperframe}(x, i, j) <- value \method{$}{hyperframe}(x, name) \method{$}{hyperframe}(x, i) <- value \method{[}{splitppx}(x, \dots) \method{[}{splitppx}(x, \dots) <- value acedist.show(X, Y, n, d, timelag = 0) acedist.noshow(X, Y, n, d) active.interactions(object) adjustthinrange(ur,vstep,vr) affinexy(X, mat = diag(c(1, 1)), vec = c(0, 0), invert=FALSE) affinexypolygon(p, mat, vec, detmat) anycrossing.psp(A,B) apply23sum(x) applytolayers(L, FUN, \dots) area.xypolygon(polly) areadelta2(X, r, \dots) areaGain.diri(u, X, r, \dots, W=as.owin(X)) areaGain.grid(u, X, r, \dots, W=NULL, ngrid=spatstat.options("ngrid.disc")) areaLoss.diri(X, r, \dots, W=as.owin(X), subset=NULL) areaLoss.grid(X, r, \dots, W=as.owin(X), subset=NULL, method = c("count", "distmap"), ngrid = spatstat.options("ngrid.disc"), exact = FALSE) assemble.plot.objects(xlim, ylim, \dots, lines, polygon) AsymmDistance.psp(X, Y, metric, method) as.breakpts(\dots) \method{as.data.frame}{fv}(x, \dots) \method{as.data.frame}{bw.optim}(x, \dots) \method{as.list}{hyperframe}(x, \dots) \method{as.im}{scan.test}(X, \dots) \method{as.im}{linim}(X, \dots) as.listof(x) as.units(s) badprobability(x, NAvalue) bermantestEngine(model, covariate, which, alternative, \dots, modelname, covname, dataname) bdrylength.xypolygon(polly) bdry.mask(W) bind.ratfv(x, numerator, denominator, labl, desc, preferred, ratio) blankcoefnames(x) bounding.box3(\dots) break.holes(x, splitby, depth, maxdepth) breakpts(val, maxi, even = FALSE, npos = NULL, step = NULL) breakpts.from.r(r) bt.frame(Q, trend=~1, interaction=NULL, \dots, covariates=NULL, correction="border", rbord=0, use.gam=FALSE, allcovar=FALSE) bw.optim(cv, h, iopt, \dots, cvname, hname, criterion) cannot.update(\dots) cartesian(pp, markset, fac = TRUE) cat.factor(\dots, recursive=FALSE) cellmiddles(W, nx, ny, npix, gi) censtimeCDFest(o, cc, d, breaks, \dots, KM, RS, HAN, RAW, han.denom, tt, pmax) 
change.default.expand(x, newdefault) checkfields(X,L) check.finite(x, context, xname, fatal, usergiven) check.hist.lengths(hist,breaks) check.named.list(x, nam, context, namopt) check.named.vector(x, nam, context, namopt) check.named.thing(x, nam, namopt, xtitle, valid, type, context, fatal) check.nvector(v, npoints, fatal=TRUE, things="data points", naok=FALSE) check.nmatrix(m, npoints, fatal=TRUE, things="data points", naok=FALSE, squarematrix=TRUE, matchto="nrow") check.1.integer(x, context, fatal) check.1.real(x, context, fatal) check.range(x, fatal) check.testfun(f, f1, X) clarkevansCalc(X, correction, clipregion, working) clip.psp(x, window, check=TRUE) cliprect.psp(x, window) clippoly.psp(s, window) closethresh(X,R,S,ordered) \method{coef}{summary.ppm}(object, \dots) \method{compatible}{rat}(A, B, \dots) %DontDeclare complaining(whinge, fatal, value) compileCDF(D, B, r, \dots, han.denom, check) compileK(D, r, weights, denom, check, ratio) compilepcf(D, r, weights, denom, check, endcorrect, \dots) conform.ratfv(x) crosspairquad(Q,rmax,what) cobble.xy(x, y, f, fatal, \dots) codetime(x, hms, what) commasep(x, join) conform.imagelist(X, Zlist) countingweights(id, areas, check = TRUE) damaged.ppm(object) data.mppm(x) datagen.runifpointOnLines(n, L) datagen.runifpoisppOnLines(lambda, L) datagen.rpoisppOnLines(lambda, L, lmax, \dots, check=TRUE) default.clipwindow(object, epsilon) default.n.tiling(X, nd, ntile, npix, eps, verbose) default.ntile(X) deltasuffstat(model, \dots, restrict, dataonly, force) dflt.redraw(button, name, env) densitypointsEngine(x, sigma, \dots, weights, edge, varcov, leaveoneout, diggle, sorted) diagnose.ppm.engine(object, \dots, type="eem", typename, opt, sigma=NULL, rbord = reach(object), compute.sd=TRUE, compute.cts=TRUE, rv=NULL, oldstyle=FALSE) digital.volume(range, nval, vside) dilate.owin(\dots) \method{dim}{fasp}(x) \method{dim}{hyperframe}(x) \method{dim}{im}(x) \method{dim}{msr}(x) \method{dimnames}{fasp}(x) 
\method{dimnames}{fasp}(x) <- value \method{dimnames}{msr}(x) distpl(p, l) distppl(p, l) distppll(p, l, mintype, method, listit) distppllmin(p, l, big) distributecbind(x) dist2dpath(dist, method="C") divisors(n) do.as.im(x, action, \dots, W, eps, dimyx, xy, na.replace) do.call.matched(fun, arglist, funargs, extrargs, sieve) do.istat(panel) dotexpr.to.call(expr, dot, evaluator) dropifsingle(x) emptywindow(w) ensure2vector(x) ensure3Darray(x) envelopeEngine(X, fun, simul, nsim=99, nrank=1, \dots, verbose=TRUE, clipdata=TRUE, transform=NULL, global=FALSE, ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, saveresultof=NULL, weights=NULL, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, maxnerr=nsim, internal=NULL, cl=NULL, envir.user=envir.user, expected.arg="r", do.pwrong=FALSE) envelopeProgressData(X, fun, \dots, expo, normalize, deflate) envelopeTest(X, \dots, power, rinterval, use.theo, tie.rule, save.envelope, savefuns, savepatterns, Xname, verbose, internal) \method{envelope}{matrix}(Y, \dots, rvals, observed, theory, funX, nsim, nsim2, jsim, jsim.mean, type, csr, use.theory, nrank, ginterval, nSD, savefuns, check, Yname, do.pwrong, weights, precomputed) equalpairs(U, X, marked=FALSE) equalpairs.quad(Q) equals.quad(Q) equalsfun.quad(Q) eratosthenes(nmax, startset) erodemask(w,r,strict) erode.owin(\dots) evalCovar(model, covariate, \dots) \method{evalCovar}{ppm}(model, covariate, \dots, dimyx, eps, jitter, modelname, covname, dataname) \method{evalCovar}{lppm}(model, covariate, \dots, eps, nd, jitter, modelname, covname, dataname) evalCovariate(covariate, locations) evalInteraction(X,P,E,interaction,correction,\dots,precomputed,savecomputed) evalInterEngine(X,P,E,interaction,correction,\dots,precomputed,savecomputed) evalPairPotential(X,P,E,pairpot,potpars,R) even.breaks.owin(w) evenly.spaced(x, tol) exactdt(X, \dots) exactPdt(w) explain.ifnot(expr, context) \method{extractAIC}{slrm}(fit, scale = 0, k = 2, \dots) extractAtomicQtests(x) fakecallstring(fname, 
parlist) fave.order(x) f3engine(x, y, z, box, vside, range, nval, correction) f3Cengine(x, y, z, box, vside, rmax, nrval) fasp(fns, which, formulae, dataname, title, rowNames, colNames, checkfv) findbestlegendpos(\dots) findCovariate(covname, scope, scopename=NULL) findcbind(root, depth, maxdepth) firstfactor(x) fii(interaction, coefs, Vnames, IsOffset) fillNA(x, value) flipxypolygon(p) forbidNA(x, context, xname, fatal, usergiven) forbid.logi(object) FormatFaspFormulae(f, argname) fvlabels(x, expand=FALSE) fvlabels(x) <- value fvlabelmap(x, dot=TRUE) fvlegend(object, elang) g3engine(x, y, z, box, rmax, nrval, correction) g3Cengine(x, y, z, box, rmax, nrval) greatest.common.divisor(n,m) getdataname(defaultvalue, \dots, dataname) getfields(X, L, fatal = TRUE) getglmdata(object, drop=FALSE) getglmfit(object) getglmsubset(object) getlambda.lpp(lambda, X, \dots) getppmdatasubset(object) getSumFun(abbreviation, classname, ismarked, fatal) geyercounts(U,X,r,sat,Xcounts,EqualPairs) geyerdelta2(X,r,sat) GLMpredict(fit, data, coefs, changecoef) good.names(nama, defaults, suffices) good.correction.K(X) gridindex(x, y, xrange, yrange, nx, ny) grid1index(x, xrange, nx) grow.rectangle(W, xmargin=0, ymargin=xmargin) gsubdot(replacement, x) handle.r.b.args(r = NULL, breaks = NULL, window, eps = NULL, rmaxdefault) handle.rshift.args(W, \dots, radius, width, height, edge, clip, edgedefault) ho.engine(model, \dots, nsim, nrmh, start, control, verb) hsvNA(h, s, v, \dots) identical.formulae(x,y) idorempty(w, r, caller) ifelseAB(test, a, b) ifelseAX(test, a, x) ifelseXB(test, x, b) ifelseXY(test, x, y) ifelse1NA(test) ifelse0NA(test) ifelseNegPos(test, x) illegal.iformula(ifmla, itags, dfvarnames) implemented.for.K(correction, windowtype, explicit) impliedpresence(tags, formula, df, extranames=character(0)) impliedcoefficients(object, tag) inpoint(W) inside.range(x, r) inside.triangle(x, y, xx, yy) inside.xypolygon(pts, polly, test01, method) instantiate.interact(x, par) 
intersect.ranges(a,b,fatal) intX.owin(w) intX.xypolygon(polly) intY.owin(w) intY.xypolygon(polly) is.atomicQtest(x) is.cadlag(s) is.data(Q) is.expandable(x) \method{is.expandable}{ppm}(x) %DontDeclare \method{is.expandable}{rmhmodel}(x) %DontDeclare is.fv(x) is.hole.xypolygon(polly) is.hyperframe(x) is.infline(x) is.interact(x) \method{is.marked}{default}(\dots) \method{is.marked}{psp}(X, \dots) \method{is.marked}{quad}(X, na.action="warn", \dots) is.mppm(x) \method{is.multitype}{default}(X, \dots) \method{is.multitype}{quad}(X, na.action="warn", \dots) is.parseable(x) \method{is.poisson}{mppm}(x) is.pp3(x) is.ppx(x) is.prime(n) is.psp(x) is.tess(x) k3engine(x, y, z, box, rmax, nrval, correction) Kborder.engine(X, rmax, nr, correction, weights, ratio) Knone.engine(X, rmax, nr, weights, ratio) Kount(dIJ, bI, b, breaks) Kwtsum(dIJ, bI, wIJ, b, w, breaks) Kpcf.kppm(model, what) killinteraction(model) km.rs.opt(o, cc, d, breaks, KM, RS) kppmMinCon(X, Xname, po, clusters, statistic, statargs, \dots) kppmComLik(X, Xname, po, clusters, control, weightfun, rmax, \dots) \method{labels}{ppm}(object, \dots) least.common.multiple(n,m) \method{levels}{im}(x) \method{levels}{im}(x) <- value lhs.of.formula(x) linequad(X, Y, \dots, eps, nd) linearKengine(X, \dots, r, reweight, denom, correction, showworking) linearKmulti(X, I, J, r, \dots, correction) linearKmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots, correction, normalise) linearpcfengine(X, \dots, r, reweight, denom, correction) linearpcfmulti(X, I, J, r, \dots, correction) linearpcfmulti.inhom(X, I, J, lambdaI, lambdaJ, r, \dots, correction, normalise) linearKmultiEngine(X, I, J, \dots, r, reweight, denom, correction, showworking) linearPCFmultiEngine(X, I, J, \dots, r, reweight, denom, correction, showworking) listof(\dots) localKengine(X, \dots, wantL, lambda, correction, verbose, rvalue) localpcfengine(X, \dots, delta, rmax, nr, stoyan, lambda) localpcfmatrix(X, i, \dots, lambda, delta, rmax, nr, stoyan) local2lpp(L, 
seg, tp, X) logi.dummy(X, dummytype, nd, mark.repeat, \dots) logi.engine(Q, trend, interaction, \dots, covariates, correction, rbord, covfunargs, allcovar, vnamebase, vnameprefix, justQ, savecomputed, precomputed) passthrough(.Fun, \dots, .Fname) paste.expr(x) prettyinside(x, \dots) \method{print}{localpcfmatrix}(x, \dots) \method{plot}{localpcfmatrix}(x, \dots) \method{[}{localpcfmatrix}(x, i, \dots) \method{[}{pp3}(x, i, \dots) lookup.im(Z, x, y, naok, strict) majorminorversion(v) make.even.breaks(bmax, npos, bstep) make.parseable(x) makeunits(sing, plur, mul) markappend(\dots) markcbind(\dots) markformat(x) \method{markformat}{ppp}(x) %DontDeclare \method{markformat}{ppx}(x) %DontDeclare \method{markformat}{psp}(x) %DontDeclare \method{markformat}{default}(x) %DontDeclare mark.scale.default(marx, w, markscale=NULL, maxsize=NULL) markspace.integral(X) \method{marks}{default}(x, \dots) \method{marks}{quad}(x, dfok=FALSE, \dots) markappendop(x, y) x \%mapp\% y marksubset(x, index, format) marksubsetop(x, i) x \%msub\% i markreplicateop(x, n) x \%mrep\% n mask2df(w) matcolall(x) matcolany(x) matcolsum(x) matrixsample(mat, newdim, phase, scale, na.value) matrowall(x) matrowany(x) matrowsum(x) maxflow(costm) meanlistfv(z) meanX.owin(w) meanY.owin(w) model.se.image(fit, W, \dots, what) mpl.engine(Q, trend, interaction, \dots, covariates, covfunargs, correction, rbord, use.gam, gcontrol, famille, forcefit, nd, eps, allcovar, callstring, precomputed, savecomputed, preponly, rename.intercept, justQ, weightfactor) mpl.get.covariates(covariates, locations, type, covfunargs) mpl.prepare(Q, X, P, trend, interaction, covariates, want.trend, want.inter, correction, rbord, Pname, callstring, \dots, covfunargs, allcovar, precomputed, savecomputed, vnamebase, vnameprefix, warn.illegal, warn.unidentifiable, weightfactor, skip.border) MultiPair.checkmatrix(mat, n, matname, naok, zerook) \method{multiplicity}{data.frame}(x) multiply.only.finite.entries(x, a) na.handle.im(X, 
na.replace) \method{names}{hyperframe}(x) \method{names}{hyperframe}(x) <- value nearest.pixel(x, y, im) nearest.valid.pixel(x, y, im) newstyle.coeff.handling(object) niceround(x, m) nncleanEngine(kthNND, k, d, \dots, tol, plothist, verbose, maxit) nndcumfun(X, \dots, r) no.trend.ppm(x) nobjects(x) \method{nobjects}{ppp}(x) %DontDeclare \method{nobjects}{ppx}(x) %DontDeclare \method{nobjects}{psp}(x) %DontDeclare n.quad(Q) numalign(i, nmax, zero) numeric.columns(M, logical, others) nzpaste(\dots, sep, collapse) objsurfEngine(objfun, optpar, objargs, \dots, dotargs, objname, ngrid, ratio, verbose) offsetsinformula(x) onecolumn(m) optimStatus(x, call) printStatus(x, errors.only) signalStatus(x, errors.only) ordinal(k) a \%orifnull\% b outdated.interact(object) overlap.trapezium(xa, ya, xb, yb, verb = FALSE) overlap.xypolygon(P, Q) oversize.quad(Q, \dots, nU, nX) owinpolycheck(W, verbose=TRUE) owinpoly2mask(w, rasta, check=TRUE) \method{pairs}{listof}(\dots, plot=TRUE) param.quad(Q) paren(x, type) partialModelMatrix(X,D,model,callstring,\dots) pcf3engine(x, y, z, box, rmax, nrval, correction, delta) pcfmulti.inhom(X, I, J, lambdaI = NULL, lambdaJ = NULL, \dots, r = NULL, breaks = NULL, kernel = "epanechnikov", bw = NULL, stoyan = 0.15, correction = c("translate", "Ripley"), sigma = NULL, varcov = NULL, Iname = "points satisfying condition I", Jname = "points satisfying condition J") pickoption(what="option", key, keymap, \dots, exact=FALSE, list.on.err=TRUE, die=TRUE, multi=FALSE) ploterodewin(W1, W2, col.edge, col.inside, \dots) ploterodeimage(W, Z, \dots, Wcol, rangeZ, colsZ) \method{plot}{addvar}(x, \dots, do.points=FALSE) \method{plot}{barplotdata}(x, \dots) \method{plot}{bw.frac}(x, \dots) \method{plot}{bw.optim}(x, \dots, showopt, optargs) \method{plot}{minconfit}(x, \dots) \method{plot}{parres}(x, \dots) \method{plot}{pppmatching}(x, addmatch = NULL, main = NULL, \dots) \method{plot}{plotpairsim}(x, \dots) \method{plot}{profilepl}(x, \dots, add=FALSE, 
main=NULL, tag=TRUE, coeff=NULL, xvariable=NULL) \method{plot}{qqppm}(x, \dots, limits=TRUE, monochrome=FALSE, limcol=if(monochrome) "black" else "red") \method{plot}{quadratcount}(x, \dots, add, entries, dx, dy, show.tiles) \method{plot}{quadrattest}(x, \dots) \method{plot}{scan.test}(x, \dots, do.window) polynom(x, \dots) ppllengine(X, Y, action="project", check=FALSE) ppmCovariates(model) ppm.influence(fit, what, \dots, iScore, iHessian, iArgs, drop, method, precomputed) pppdist.mat(X, Y, cutoff = 1, q = 1, matching = TRUE, precision = 9, approximation = 10) pppdist.prohorov(X, Y, n, dfix, type, cutoff = 1, matching = TRUE, ccode = TRUE, precision = 9, approximation = 10) ppsubset(X, I) prange(x) prefixfv(x, tagprefix, descprefix, lablprefix, whichtags) primefactors(n, prmax) primesbelow(nmax) \method{print}{addvar}(x, \dots) \method{print}{autoexec}(x, \dots) \method{print}{bt.frame}(x, \dots) \method{print}{bw.frac}(x, \dots) \method{print}{bw.optim}(x, \dots) \method{print}{colourmap}(x, \dots) \method{print}{diagppm}(x, \dots) \method{print}{distfun}(x, \dots) \method{print}{envelope}(x, \dots) \method{print}{fasp}(x, \dots) \method{print}{funxy}(x, \dots) \method{print}{fv}(x, \dots) \method{print}{fvfun}(x, \dots) \method{print}{hyperframe}(x, \dots) \method{print}{influence.ppm}(x, \dots) \method{print}{interact}(x, \dots, family=TRUE, brief=FALSE) \method{print}{isf}(x, \dots) \method{print}{layered}(x, \dots) \method{print}{leverage.ppm}(x, \dots) \method{print}{linim}(x, \dots) \method{print}{lut}(x, \dots) \method{print}{minconfit}(x, \dots) \method{print}{mppm}(x, \dots) \method{print}{msr}(x, \dots) \method{print}{nnfun}(x, \dots) \method{print}{parres}(x, \dots) \method{print}{plotppm}(x, \dots) \method{print}{plotpairsim}(x, \dots) \method{print}{pppmatching}(x, \dots) \method{print}{profilepl}(x, \dots) \method{print}{quadrattest}(x, \dots) \method{print}{qqppm}(x, \dots) \method{print}{rat}(x, \dots) \method{print}{rmhcontrol}(x, \dots) 
\method{print}{rmhexpand}(x, \dots, prefix=TRUE) \method{print}{rmhmodel}(x, \dots) \method{print}{rmhstart}(x, \dots) \method{print}{rmhInfoList}(x, \dots) \method{print}{simplepanel}(x, \dots) \method{print}{splitppp}(x, \dots) \method{print}{splitppx}(x, \dots) \method{print}{summary.hyperframe}(x, \dots) \method{print}{summary.listof}(x, \dots) \method{print}{summary.logiquad}(x, \dots, dp=3) \method{print}{summary.lut}(x, \dots) \method{print}{summary.mppm}(x, \dots, brief) \method{print}{summary.owin}(x, \dots) \method{print}{summary.ppp}(x, \dots, dp=3) \method{print}{summary.psp}(x, \dots) \method{print}{summary.rmhexpand}(x, \dots) \method{print}{summary.splitppp}(x, \dots) \method{print}{summary.splitppx}(x, \dots) \method{print}{summary.units}(x, \dots) \method{print}{tess}(x, \dots, brief=FALSE) \method{print}{timed}(x, \dots) prolongseq(x, newrange) quad(data, dummy, w, param) quad.mppm(x) RandomFieldsSafe() ratfv(df, numer, denom, \dots, ratio) rectquadrat.breaks(xr, yr, nx = 5, ny = nx, xbreaks = NULL, ybreaks = NULL) rectquadrat.countEngine(x, y, xbreaks, ybreaks, weights) reduceformula(fmla, deletevar, verbose) repair.image.xycoords(x) resolveEinfo(x, what, fallback, warn) resolve.vargamma.shape(\dots, nu.ker, nu.pcf) rgbNA(red, green, blue, \dots) rhs.of.formula(x, tilde) rhohatEngine(model, covariate, reference, volume, \dots, method, smoother, resolution, n, bw, adjust, from, to, bwref, covname, covunits, confidence, modelcall, callstring) rhohatCalc(ZX, Zvalues, lambda, baseline, \dots, method, smoother, n, bw, adjust, from, to, bwref, covname, confidence, covunits, modelcall, callstring, savestuff) RmhExpandRule(nama) rmhsnoop(\dots, Wsim, Wclip, R, xcoords, ycoords, mlevels, mcodes, irep, itype, proptype, proplocn, propmark, propindx, numerator, denominator) quadrat.testEngine(X, nx, ny, alternative, method, conditional, \dots, nsim, Xcount, xbreaks, ybreaks, tess, fit, Xname, fitname) quadscheme.replicated(data, dummy, method = "grid", 
\dots) quadscheme.spatial(data, dummy, method = "grid", \dots) pointgrid(W, ngrid) rastersample(X, Y) rasterx.im(x) rastery.im(x) rasterxy.im(x, drop) rebadge.fv(x, new.ylab, new.fname, tags, new.desc, new.labl, new.yexp, new.dotnames, new.preferred, new.formula, new.tags) rebound(x, rect) \method{rebound}{im}(x, rect) %DontDeclare \method{rebound}{ppp}(x, rect) %DontDeclare \method{rebound}{psp}(x, rect) %DontDeclare \method{rebound}{owin}(x, rect) %DontDeclare reconcile.fv(\dots) repair.old.factor.image(x) reincarnate.interact(object) resid4plot(RES, plot.neg="image", plot.smooth="imagecontour", spacing=0.1, srange=NULL,monochrome=FALSE, main=NULL, \dots) resid1plot(RES, opt, plot.neg="image", plot.smooth="imagecontour", srange=NULL, monochrome=FALSE, main=NULL, \dots) resid1panel(observedX, observedV, theoreticalX, theoreticalV, theoreticalSD, xlab,ylab, \dots) resolve.defaults(\dots, .MatchNull=TRUE, .StripNull=FALSE) resolve.1.default(.A, \dots) resolve.2D.kernel(\dots, sigma, varcov, x, mindist, adjust, bwfun, allow.zero) restrict.mask(M, W) reverse.xypolygon(p, adjust=FALSE) revcumsum(x) rmax.rule(fun, W, lambda) rotxy(X, angle = pi/2) rotxypolygon(p, angle = pi/2) rmhResolveControl(control, model) rmhResolveExpansion(win, control, imagelist, itype) rmhResolveTypes(model, start, control) rmhSnoopEnv(Xinit, Wclip, R) \method{rmhcontrol}{rmhcontrol}(\dots) %DontDeclare \method{rmhcontrol}{list}(\dots) %DontDeclare rmhEngine(InfoList, \dots, verbose, kitchensink, preponly, snoop) \method{rmhmodel}{rmhmodel}(model, \dots) %DontDeclare \method{rmhstart}{rmhstart}(start, \dots) %DontDeclare \method{rmhstart}{list}(start, \dots) %DontDeclare rmpoint.I.allim(n, f, types) \method{row.names}{hyperframe}(x) \method{row.names}{hyperframe}(x) <- value rpoint.multi(n, f, fmax, marks, win, giveup, verbose, warn) runifpoispp(lambda, win = owin(c(0, 1), c(0, 1))) runifpoisppOnLines(lambda, L) runifrect(n, win = owin(c(0, 1), c(0, 1))) safelookup(Z, X, factor, warn) 
samefunction(f, g) scanmeasure(X, \dots) \method{scanmeasure}{ppp}(X, r, \dots, method) %DontDeclare \method{scanmeasure}{im}(X, r, \dots) %DontDeclare scanPoisLRTS(nZ, nG, muZ, muG, alternative) scanBinomLRTS(nZ, nG, muZ, muG, alternative) scanLRTS(X, r, \dots, method, baseline, case, alternative) second.moment.calc(x, sigma=NULL, edge=TRUE, what="Kmeasure", debug=FALSE, \dots, varcov=NULL, expand=FALSE) second.moment.engine(x, sigma, edge, what, \dots, obswin, varcov, npts, debug) sensiblevarname(guess, fallback, maxlen) sewpcf(d, w, denargs, lambda2area, divisor) sewsmod(d, ff, wt, Ef, rvals, method="smrep", \dots, nwtsteps=500) shiftxy(X, vec = c(0, 0)) shiftxypolygon(p, vec = c(0, 0)) short.deparse(x, maxlen) simplify.xypolygon(p, dmin) simulrecipe(type, expr, envir, csr, pois) singlestring(s, coll) slr.prepare(CallInfo, envir, data, dataAtPoints, splitby, clip) slrAssemblePixelData(Y, Yname, W, covimages, dataAtPoints, pixelarea) smoothpointsEngine(x, values, sigma, \dots, weights, varcov, leaveoneout, sorted) \method{sort}{im}(x, \dots) spatstat.rawdata.location(\dots) spatstatClusterModelInfo(name) spatstatRmhInfo(cifname) spatialCDFframe(model, covariate, \dots) spatialCDFtest(model, covariate, test, \dots, dimyx, eps, jitter, modelname, covname, dataname) sphere.volume(range, nval = 10) splitHybridInteraction(coeffs, inte) sp.foundclass(cname, inlist, formalname, argsgiven) sp.foundclasses(cnames, inlist, formalname, argsgiven) store.versionstring.spatstat() \method{str}{hyperframe}(object, \dots) strausscounts(U,X,r,EqualPairs) suffloc(object) suffstat.generic(model, X, callstring) suffstat.poisson(model, X, callstring) summarise.trend(trend, w, a) \method{summary}{envelope}(object,\dots) \method{summary}{hyperframe}(object, \dots, brief=FALSE) \method{summary}{logiquad}(object, \dots, checkdup=FALSE) \method{summary}{lut}(object, \dots) \method{summary}{mppm}(object, \dots, brief=FALSE) \method{summary}{profilepl}(object, \dots) 
\method{summary}{pppmatching}(object, \dots) \method{summary}{ppx}(object, \dots) \method{summary}{rmhexpand}(object, \dots) \method{summary}{splitppx}(object, \dots) sumsymouter(x, w) superimposeMarks(arglist, nobj) sympoly(x, y, n) termsinformula(x) test.crossing.psp(A,B) test.selfcrossing.psp(A) tilecentroids(W, nx, ny) trap.extra.arguments(\dots, .Context, .Fatal) trianglediameters(iedge, jedge, edgelength, \dots, nvert, check) trim.mask(M, R, tolerant) tweak.fv.entry(x, current.tag, new.labl=NULL, new.desc=NULL, new.tag=NULL) \method{unitname}{default}(x) %DontDeclare \method{unitname}{default}(x) <- value %DontDeclare unparen(x) \method{update}{interact}(object, \dots) \method{update}{rmhstart}(object, \dots) validradius(r, caller) validate.mask(w, fatal=TRUE) validate.quad(Q, fatal, repair, announce) validposint(n, caller, fatal) vanilla.fv(x) variablesinformula(x) verbalogic(x, op) versionstring.interact(object) versionstring.ppm(object) versionstring.spatstat() verifyclass(X, C, N = deparse(substitute(X)), fatal = TRUE) verify.xypolygon(p, fatal=TRUE) warn.ignored.args(\dots, context) weighted.var(x, w, na.rm) windows.mppm(x) \method{with}{msr}(data, expr, \dots) w.quad(Q) x.quad(Q) y.quad(Q) xy.grid(xr, yr, nx, ny, dx, dy) X2testEngine(OBS, EXP, \dots, method, df, nsim, conditional, alternative, testname, dataname) \method{xtfrm}{im}(x) xypolyselfint(p, eps, proper, yesorno, checkinternal) xypolygon2psp(p, w, check) } \details{ These internal \pkg{spatstat} functions are not usually called directly by the user. Their names and capabilities may change without warning from one version of \pkg{spatstat} to the next. 
} \keyword{internal} spatstat/man/methods.lpp.Rd0000755000176000001440000000471712237642733015471 0ustar ripleyusers\name{methods.lpp} \alias{methods.lpp} %DoNotExport \Rdversion{1.1} \alias{as.ppp.lpp} \alias{as.psp.lpp} \alias{marks<-.lpp} \alias{print.lpp} \alias{print.summary.lpp} \alias{summary.lpp} \alias{unitname.lpp} \alias{unitname<-.lpp} \alias{unmark.lpp} \title{ Methods for Point Patterns on a Linear Network } \description{ These are methods specifically for the class \code{"lpp"} of point patterns on linear networks. } \usage{ \method{as.ppp}{lpp}(X, ..., fatal=TRUE) \method{as.psp}{lpp}(x, ..., fatal=TRUE) \method{marks}{lpp}(x, ...) <- value \method{print}{lpp}(x, ...) \method{print}{summary.lpp}(x, ...) \method{summary}{lpp}(object, ...) \method{unitname}{lpp}(x) \method{unitname}{lpp}(x) <- value \method{unmark}{lpp}(X) } \arguments{ \item{x,X,object}{ An object of class \code{"lpp"} representing a point pattern on a linear network. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ Replacement value for the \code{marks} or \code{unitname} of \code{x}. See Details. } \item{fatal}{ Logical value indicating whether data in the wrong format should lead to an error (\code{fatal=TRUE}) or a warning (\code{fatal=FALSE}). } } \details{ These are methods for the generic functions \code{\link{as.ppp}}, \code{\link{as.psp}}, \code{\link{marks<-}}, \code{\link{print}}, \code{\link{summary}}, \code{\link{unitname}}, \code{\link{unitname<-}} and \code{\link{unmark}} for objects of the class \code{"lpp"}. For \code{"marks<-.lpp"} the replacement \code{value} should be either \code{NULL}, or a vector of length equal to the number of points in \code{x}, or a data frame with one row for each point in \code{x}. For \code{"unitname<-.lpp"} the replacement \code{value} should be a valid name for the unit of length, as described in \code{\link{unitname}}. 
} \section{Other methods}{ An object of class \code{"lpp"} also inherits the class \code{"ppx"} for which many other methods are available. See \code{\link[spatstat:methods.ppx]{methods.ppx}}. } \value{ See the documentation on the corresponding generic function. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{lpp}}, \code{\link{intensity.lpp}}, \code{\link[spatstat:methods.ppx]{methods.ppx}} } \examples{ example(lpp) X as.ppp(X) summary(X) unitname(X) <- c("furlong", "furlongs") } \keyword{spatial} \keyword{methods} spatstat/man/solutionset.Rd0000755000176000001440000000517612237642734015615 0ustar ripleyusers\name{solutionset} \alias{solutionset} \title{Evaluate Logical Expression Involving Pixel Images and Return Region Where Expression is True} \description{ Given a logical expression involving one or more pixel images, find all pixels where the expression is true, and assemble these pixels into a window. } \usage{ solutionset(\dots, envir) } \arguments{ \item{\dots}{An expression in the \R language, involving one or more pixel images.} \item{envir}{Optional. The environment in which to evaluate the expression.} } \details{ Given a logical expression involving one or more pixel images, this function will find all pixels where the expression is true, and assemble these pixels into a spatial window. Pixel images in \code{spatstat} are represented by objects of class \code{"im"} (see \code{\link{im.object}}). These are essentially matrices of pixel values, with extra attributes recording the pixel dimensions, etc. Suppose \code{X} is a pixel image. Then \code{solutionset(abs(X) > 3)} will find all the pixels in \code{X} for which the pixel value is greater than 3 in absolute value, and return a window containing all these pixels. 
Suppose \code{X} and \code{Y} are two pixel images with compatible dimensions: they have the same number of pixels, the same physical size of pixels, and the same bounding box. Then \code{solutionset(X > Y)} will find all pixels for which the pixel value of \code{X} is greater than the corresponding pixel value of \code{Y}, and return a window containing these pixels. In general, \code{expr} can be any logical expression involving (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. See the Examples. The expression \code{expr} is evaluated by \code{\link{eval.im}}. The expression \code{expr} must be vectorised. There must be at least one pixel image in the expression. All images must have compatible dimensions. } \value{ A spatial window (object of class \code{"owin"}, see \code{\link{owin.object}}). } \seealso{ \code{\link{im.object}}, \code{\link{owin.object}}, \code{\link{eval.im}}, \code{\link{levelset}} } \examples{ # test images X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) Y <- as.im(function(x,y) { 3 * x + y - 1}, unit.square()) W <- solutionset(abs(X) > 0.1) W <- solutionset(X > Y) W <- solutionset(X + Y >= 1) area.owin(solutionset(X < Y)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{programming} \keyword{manip} spatstat/man/expand.owin.Rd0000755000176000001440000000251412237642732015456 0ustar ripleyusers\name{expand.owin} \alias{expand.owin} \title{Apply Expansion Rule} \description{ Applies an expansion rule to a window. } \usage{ expand.owin(W, \dots) } \arguments{ \item{W}{A window.} \item{\dots}{ Arguments passed to \code{\link{rmhexpand}} to determine an expansion rule. } } \value{ A window (object of class \code{"owin"}). } \details{ The argument \code{W} should be a window (an object of class \code{"owin"}). 
This command applies the expansion rule specified by the arguments \code{\dots} to the window \code{W}, yielding another window. The arguments \code{\dots} are passed to \code{\link{rmhexpand}} to determine the expansion rule. For other transformations of the scale, location and orientation of a window, see \code{\link{shift}}, \code{\link{affine}} and \code{\link{rotate}}. } \seealso{ \code{\link{rmhexpand}} about expansion rules. \code{\link{shift}}, \code{\link{rotate}}, \code{\link{affine}} for other types of manipulation. } \examples{ expand.owin(square(1), 9) expand.owin(square(1), distance=0.5) expand.owin(letterR, length=2) expand.owin(letterR, distance=0.1) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/discpartarea.Rd0000755000176000001440000000413012237642732015662 0ustar ripleyusers\name{discpartarea} \Rdversion{1.1} \alias{discpartarea} \title{ Area of Part of Disc } \description{ Compute area of intersection between a disc and a window } \usage{ discpartarea(X, r, W=as.owin(X)) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}) specifying the centres of the discs. Alternatively, \code{X} may be in any format acceptable to \code{\link{as.ppp}}. } \item{r}{ Matrix, vector or numeric value specifying the radii of the discs. } \item{W}{ Window (object of class \code{"owin"}) with which the discs should be intersected. } } \details{ This algorithm computes the exact area of the intersection between a window \code{W} and a disc (or each of several discs). The centres of the discs are specified by the point pattern \code{X}, and their radii are specified by \code{r}. 
If \code{r} is a single numeric value, then the algorithm computes the area of intersection between \code{W} and the disc of radius \code{r} centred at each point of \code{X}, and returns a one-column matrix containing one entry for each point of \code{X}. If \code{r} is a vector of length \code{m}, then the algorithm returns an \code{n * m} matrix in which the entry on row \code{i}, column \code{j} is the area of the intersection between \code{W} and the disc centred at \code{X[i]} with radius \code{r[j]}. If \code{r} is a matrix, it should have one row for each point in \code{X}. The algorithm returns a matrix in which the entry on row \code{i}, column \code{j} is the area of the intersection between \code{W} and the disc centred at \code{X[i]} with radius \code{r[i,j]}. Areas are computed by analytic geometry. } \value{ Numeric matrix, with one row for each point of \code{X}. } \seealso{ \code{\link{owin}}, \code{\link{disc}} } \examples{ data(letterR) X <- runifpoint(3, letterR) discpartarea(X, 0.2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/linearpcf.Rd0000755000176000001440000000502412237642732015166 0ustar ripleyusers\name{linearpcf} \alias{linearpcf} \title{ Linear Pair Correlation Function } \description{ Computes an estimate of the linear pair correlation function for a point pattern on a linear network. } \usage{ linearpcf(X, r=NULL, ..., correction="Ang") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{density.default}} to control the smoothing. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. 
} } \details{ This command computes the linear pair correlation function from point pattern data on a linear network. The pair correlation function is estimated from the shortest-path distances between each pair of data points, using the fixed-bandwidth kernel smoother \code{\link{density.default}}, with a bias correction at each end of the interval of \eqn{r} values. To switch off the bias correction, set \code{endcorrect=FALSE}. If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. The result is an estimate of the first derivative of the network \eqn{K} function defined by Okabe and Yamada (2001). If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). The result is an estimate of the pair correlation function in the linear network. } \value{ Function value table (object of class \code{"fv"}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. 
} \seealso{ \code{\link{linearK}}, \code{\link{linearpcfinhom}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) linearpcf(X) linearpcf(X, correction="none") } \keyword{spatial} \keyword{nonparametric} spatstat/man/diameter.owin.Rd0000755000176000001440000000225112237642732015767 0ustar ripleyusers\name{diameter.owin} %DontDeclareMethods \alias{diameter.owin} \title{Diameter of a Window} \description{ Computes the diameter of a window. } \usage{ \method{diameter}{owin}(x) } \arguments{ \item{x}{ A window whose diameter will be computed. } } \value{ The numerical value of the diameter of the window. } \details{ This function computes the diameter of a window of arbitrary shape, i.e. the maximum distance between any two points in the window. The argument \code{x} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. The function \code{diameter} is generic. This function is the method for the class \code{"owin"}. } \seealso{ \code{\link{area.owin}}, \code{\link{perimeter}}, \code{\link{owin}}, \code{\link{as.owin}} } \examples{ w <- owin(c(0,1),c(0,1)) diameter(w) # returns sqrt(2) data(letterR) diameter(letterR) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/print.ppp.Rd0000755000176000001440000000174012237642733015157 0ustar ripleyusers\name{print.ppp} \alias{print.ppp} \title{Print Brief Details of a Point Pattern Dataset} \description{ Prints a very brief description of a point pattern dataset. } \usage{ \method{print}{ppp}(x, \dots) } \arguments{ \item{x}{Point pattern (object of class \code{"ppp"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the point pattern \code{x} is printed. This is a method for the generic function \code{\link{print}}. 
} \seealso{ \code{\link{print}}, \code{\link{print.owin}}, \code{\link{summary.ppp}} } \examples{ data(cells) # plain vanilla point pattern cells data(lansing) # multitype point pattern lansing data(longleaf) # numeric marks longleaf data(demopat) # weird polygonal window demopat } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{print} spatstat/man/corners.Rd0000755000176000001440000000212212237642732014672 0ustar ripleyusers\name{corners} \alias{corners} \title{Corners of a rectangle} \description{ Returns the four corners of a rectangle } \usage{ corners(window) } \arguments{ \item{window}{A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } } \value{ A list with two components \code{x} and \code{y}, which are numeric vectors of length 4 giving the coordinates of the four corner points of the (bounding rectangle of the) window. } \details{ This trivial function is occasionally convenient. If \code{window} is of type \code{"rectangle"} this returns the four corners of the window itself; otherwise, it returns the corners of the bounding rectangle of the window. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}} } \examples{ w <- unit.square() corners(w) # returns list(x=c(0,1,0,1),y=c(0,0,1,1)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{utilities} spatstat/man/lineardisc.Rd0000755000176000001440000000543412237642732015345 0ustar ripleyusers\name{lineardisc} \alias{lineardisc} \alias{countends} \title{ Compute Disc of Given Radius in Linear Network } \description{ Computes the \sQuote{disc} of given radius and centre in a linear network. 
}
\usage{
  lineardisc(L, x = locator(1), r,
             plotit = TRUE, cols=c("blue", "red","green"))
  countends(L, x = locator(1), r)
}
\arguments{
  \item{L}{
    Linear network (object of class \code{"linnet"}).
  }
  \item{x}{
    Location of centre of disc.
    Either a point pattern (object of class \code{"ppp"}) containing
    exactly 1 point, or a numeric vector of length 2.
  }
  \item{r}{
    Radius of disc.
  }
  \item{plotit}{
    Logical. Whether to plot the disc.
  }
  \item{cols}{
    Colours for plotting the disc. A numeric or character vector of
    length 3 specifying the colours of the disc centre, disc lines and
    disc endpoints respectively.
  }
}
\details{
  The \sQuote{disc} \eqn{B(x,r)} of centre \eqn{x} and radius \eqn{r}
  in a linear network \eqn{L} is the set of all points
  \eqn{u} in \eqn{L} such that the shortest path distance from \eqn{x}
  to \eqn{u} is less than or equal to \eqn{r}. This is a union of line
  segments contained in \eqn{L}.

  The \emph{relative boundary} of the disc \eqn{B(x,r)}
  is the set of points \eqn{v} such that the shortest path distance from
  \eqn{x} to \eqn{v} is \emph{equal} to \eqn{r}.

  The function \code{lineardisc} computes the
  disc of radius \eqn{r} and its relative boundary,
  optionally plots them, and returns them.

  The faster function \code{countends} simply counts the number of
  points in the relative boundary.
}
\value{
  The value of \code{lineardisc} is a list with two entries:
  \item{lines }{Line segment pattern (object of class \code{"psp"})
    representing the interior disc}
  \item{endpoints}{Point pattern (object of class \code{"ppp"})
    representing the relative boundary of the disc.
  }

  The value of \code{countends} is an integer giving the number of
  points in the relative boundary.
}
\author{
  Ang Qi Wei \email{aqw07398@hotmail.com} and
  Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au}
  \url{http://www.maths.uwa.edu.au/~adrian/}
}
\seealso{
  \code{\link{linnet}}
}
\references{
Ang, Q.W. (2010)
\emph{Statistical methodology for events on a network}.
Master's thesis, School of Mathematics and Statistics,
University of Western Australia.

Ang, Q.W., Baddeley, A. and Nair, G. (2012)
Geometrically corrected second-order analysis of events on a linear
network, with applications to ecology and criminology.
To appear in \emph{Scandinavian Journal of Statistics}.
}
\examples{
  example(linnet)
  lineardisc(letterA, c(0,3), 1.6)
  # count the endpoints
  countends(letterA, c(0,3), 1.6)
  # cross-check (slower)
  lineardisc(letterA, c(0,3), 1.6, plotit=FALSE)$endpoints$n
}
\keyword{spatial}
spatstat/man/Ldot.Rd0000755000176000001440000000520112237642731014121 0ustar ripleyusers\name{Ldot}
\alias{Ldot}
\title{Multitype L-function (i-to-any)}
\description{
  Calculates an estimate of the multitype L-function
  (from type \code{i} to any type)
  for a multitype point pattern.
}
\usage{
  Ldot(X, i, ...)
}
\arguments{
  \item{X}{The observed point pattern,
    from which an estimate of the dot-type \eqn{L} function
    \eqn{L_{i\bullet}(r)}{Li.(r)} will be computed.
    It must be a multitype point pattern (a marked point pattern
    whose marks are a factor). See under Details.
  }
  \item{i}{The type (mark value)
    of the points in \code{X} from which distances are measured.
    A character string (or something that will be converted to a
    character string).
    Defaults to the first level of \code{marks(X)}.
  }
  \item{\dots}{
    Arguments passed to \code{\link{Kdot}}.
  }
}
\details{
  This command computes
  \deqn{L_{i\bullet}(r) = \sqrt{\frac{K_{i\bullet}(r)}{\pi}}}{Li.(r) = sqrt(Ki.(r)/pi)}
  where \eqn{K_{i\bullet}(r)}{Ki.(r)} is the multitype \eqn{K}-function
  from points of type \code{i} to points of any type.
  See \code{\link{Kdot}} for information
  about \eqn{K_{i\bullet}(r)}{Ki.(r)}.

  The command \code{Ldot} first calls \code{\link{Kdot}}
  to compute the estimate of the \code{i}-to-any \eqn{K}-function,
  and then applies the square root transformation.

  For a marked Poisson point process,
  the theoretical value of the L-function is
  \eqn{L_{i\bullet}(r) = r}{Li.(r) = r}.
The square root also has the effect of stabilising the variance of the estimator, so that \eqn{L_{i\bullet}}{Li.} is more appropriate for use in simulation envelopes and hypothesis tests. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{L_{i\bullet}}{Li.} has been estimated } \item{theo}{the theoretical value \eqn{L_{i\bullet}(r) = r}{Li.(r) = r} for a stationary Poisson process } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{L_{i\bullet}}{Li.} obtained by the edge corrections named. } \seealso{ \code{\link{Kdot}}, \code{\link{Lcross}}, \code{\link{Lest}} } \examples{ data(amacrine) L <- Ldot(amacrine, "off") plot(L) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/is.convex.Rd0000755000176000001440000000220312237642732015133 0ustar ripleyusers\name{is.convex} \alias{is.convex} \title{Test Whether a Window is Convex} \description{ Determines whether a window is convex. } \usage{ is.convex(x) } \arguments{ \item{x}{ Window (object of class \code{"owin"}). } } \value{ Logical value, equal to \code{TRUE} if \code{x} is convex. } \details{ If \code{x} is a rectangle, the result is TRUE. If \code{x} is polygonal, the result is TRUE if \code{x} consists of a single polygon and this polygon is equal to the minimal convex hull of its vertices computed by \code{\link[grDevices]{chull}}. If \code{x} is a mask, the algorithm first extracts all boundary pixels of \code{x} using \code{\link{vertices}}. 
Then it computes the (polygonal) convex hull \eqn{K} of the boundary pixels. The result is TRUE if every boundary pixel lies within one pixel diameter of an edge of \eqn{K}. } \seealso{ \code{\link{owin}}, \code{\link{convexhull.xy}}, \code{\link{vertices}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/DiggleGatesStibbard.Rd0000755000176000001440000000526212237642731017060 0ustar ripleyusers\name{DiggleGatesStibbard} \alias{DiggleGatesStibbard} \title{Diggle-Gates-Stibbard Point Process Model} \description{ Creates an instance of the Diggle-Gates-Stibbard point process model which can then be fitted to point pattern data. } \usage{ DiggleGatesStibbard(rho) } \arguments{ \item{rho}{Interaction range} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Diggle-Gates-Stibbard process with interaction range \code{rho}. } \details{ Diggle, Gates and Stibbard (1987) proposed a pairwise interaction point process in which each pair of points separated by a distance \eqn{d} contributes a factor \eqn{e(d)} to the probability density, where \deqn{ e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right) }{ e(d) = sin^2((pi * d)/(2 * rho)) } for \eqn{d < \rho}{d < rho}, and \eqn{e(d)} is equal to 1 for \eqn{d \ge \rho}{d >= rho}. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Diggle-Gates-Stibbard pairwise interaction is yielded by the function \code{DiggleGatesStibbard()}. See the examples below. Note that this model does not have any regular parameters (as explained in the section on Interaction Parameters in the help file for \code{\link{ppm}}). 
The parameter \eqn{\rho}{rho} is not estimated by \code{\link{ppm}}.
}
\seealso{
  \code{\link{ppm}},
  \code{\link{pairwise.family}},
  \code{\link{DiggleGratton}},
  \code{\link{rDGS}},
  \code{\link{ppm.object}}
}
\references{
  Baddeley, A. and Turner, R. (2000)
  Practical maximum pseudolikelihood for spatial point patterns.
  \emph{Australian and New Zealand Journal of Statistics}
  \bold{42}, 283--322.

  Ripley, B.D. (1981)
  \emph{Spatial statistics}.
  John Wiley and Sons.

  Diggle, P.J., Gates, D.J., and Stibbard, A. (1987)
  A nonparametric estimator for pairwise-interaction point processes.
  \emph{Biometrika} \bold{74}, 763 -- 770.

  Geyer, C.J. and Moller, J. (1994)
  Simulation procedures and likelihood inference for spatial point
  processes.
  \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373.
}
\examples{
   DiggleGatesStibbard(0.02)
   # prints a sensible description of itself

   data(cells)
   \dontrun{
   ppm(cells, ~1, DiggleGatesStibbard(0.05))
   # fit the stationary D-G-S process to `cells'
   }

   ppm(cells, ~ polynom(x,y,3), DiggleGatesStibbard(0.05))
   # fit a nonstationary D-G-S process
   # with log-cubic polynomial trend
}
\author{Adrian Baddeley
  \email{Adrian.Baddeley@uwa.edu.au}
  \url{http://www.maths.uwa.edu.au/~adrian/}
  and Rolf Turner
  \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{models}
spatstat/man/crossdist.pp3.Rd0000755000176000001440000000423412237642732015743 0ustar ripleyusers\name{crossdist.pp3}
%DontDeclareMethods
\alias{crossdist.pp3}
\title{Pairwise distances between two different three-dimensional point patterns}
\description{
  Computes the distances between pairs of points
  taken from two different three-dimensional point patterns.
}
\usage{
  \method{crossdist}{pp3}(X, Y, \dots, periodic=FALSE, squared=FALSE)
}
\arguments{
  \item{X,Y}{
    Point patterns in three dimensions
    (objects of class \code{"pp3"}).
  }
  \item{\dots}{
    Ignored.
  }
  \item{periodic}{
    Logical. Specifies whether to apply a periodic edge correction.
  }
  \item{squared}{
    Logical. If \code{squared=TRUE}, the squared distances are
    returned instead (this computation is faster).
  }
}
\value{
  A matrix whose \code{[i,j]} entry is the distance
  from the \code{i}-th point in \code{X}
  to the \code{j}-th point in \code{Y}.
}
\details{
  Given two point patterns in three-dimensional space,
  this function computes the Euclidean distance from each point
  in the first pattern to each point in the second pattern,
  and returns a matrix containing these distances.

  This is a method for the generic function \code{\link{crossdist}}
  for three-dimensional point patterns (objects of class \code{"pp3"}).

  This function expects two
  point patterns \code{X} and \code{Y}, and returns the matrix
  whose \code{[i,j]} entry is the distance from \code{X[i]} to
  \code{Y[j]}.

  Alternatively if \code{periodic=TRUE}, then provided the windows
  containing \code{X} and \code{Y} are identical and are rectangular,
  then the distances will be computed in the `periodic' sense
  (also known as `torus' distance): opposite edges of the rectangle
  are regarded as equivalent.
  This is meaningless if the window is not a rectangle.
}
\seealso{
  \code{\link{crossdist}},
  \code{\link{pairdist}},
  \code{\link{nndist}},
  \code{\link{G3est}}
}
\examples{
   X <- runifpoint3(20)
   Y <- runifpoint3(30)
   d <- crossdist(X, Y)
   d <- crossdist(X, Y, periodic=TRUE)
}
\author{
  Adrian Baddeley
  \email{Adrian.Baddeley@uwa.edu.au}
  \url{http://www.maths.uwa.edu.au/~adrian/}
  based on code for two dimensions by Pavel Grabarnik.
}
\keyword{spatial}
\keyword{math}
spatstat/man/bw.smoothppp.Rd0000755000176000001440000000601112237642732015660 0ustar ripleyusers\name{bw.smoothppp}
\alias{bw.smoothppp}
\title{
  Cross Validated Bandwidth Selection for Spatial Smoothing
}
\description{
  Uses least-squares cross-validation to select a smoothing bandwidth
  for spatial smoothing of marks.
}
\usage{
   bw.smoothppp(X, nh = spatstat.options("n.bandwidth"),
   hmin=NULL, hmax=NULL, warn=TRUE)
}
\arguments{
  \item{X}{
    A marked point pattern with numeric marks.
  }
  \item{nh}{
    Number of trial values of smoothing bandwidth \code{sigma}
    to consider.
    The default is 32.
  }
  \item{hmin, hmax}{
    Optional. Numeric values.
    Range of trial values of smoothing bandwidth \code{sigma}
    to consider.
    There is a sensible default.
  }
  \item{warn}{
    Logical. If \code{TRUE}, issue a warning if the minimum of
    the cross-validation criterion occurs at one of the ends of the
    search interval.
  }
}
\details{
  This function selects an appropriate bandwidth for the nonparametric
  smoothing of mark values using \code{\link{Smooth.ppp}}.

  The argument \code{X} must be a marked point pattern
  with a vector or data frame of marks. All mark values must be numeric.

  The bandwidth is selected by least-squares cross-validation.
  Let \eqn{y_i}{y[i]} be the mark value at the
  \eqn{i}th data point.
  For a particular choice of smoothing bandwidth,
  let \eqn{\hat y_i}{y*[i]} be the smoothed value at the
  \eqn{i}th data point.
  Then the bandwidth is chosen to minimise
  the squared error of the smoothed values
  \eqn{\sum_i (y_i - \hat y_i)^2}{sum (y[i] - y*[i])^2}.

  The result of \code{bw.smoothppp}
  is a numerical value giving the selected bandwidth \code{sigma}.
  The result also belongs to the class \code{"bw.optim"}
  allowing it to be printed and plotted.
  The plot shows the cross-validation criterion
  as a function of bandwidth.

  The range of values for the smoothing bandwidth \code{sigma}
  is set by the arguments \code{hmin, hmax}. There is a sensible default,
  based on the nearest neighbour distances.

  If the optimal bandwidth is achieved at an endpoint of the
  interval \code{[hmin, hmax]}, the algorithm will issue a warning
  (unless \code{warn=FALSE}). If this occurs, then it is probably advisable
  to expand the interval by changing the arguments \code{hmin, hmax}.

  Computation time depends on the number \code{nh} of trial values
  considered, and also on the range \code{[hmin, hmax]} of values
  considered, because larger values of \code{sigma} require
  calculations involving more pairs of data points.
}
\value{
  A numerical value giving the selected bandwidth.
The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{Smooth.ppp}} } \examples{ data(longleaf) \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.smoothppp(longleaf) b plot(b) \testonly{spatstat.options(op)} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/rlinegrid.Rd0000755000176000001440000000175012237642734015206 0ustar ripleyusers\name{rlinegrid} \alias{rlinegrid} \title{Generate grid of parallel lines with random displacement} \description{ Generates a grid of parallel lines, equally spaced, inside the specified window. } \usage{ rlinegrid(angle = 45, spacing = 0.1, win = owin()) } \arguments{ \item{angle}{Common orientation of the lines, in degrees anticlockwise from the x axis. } \item{spacing}{Spacing between successive lines.} \item{win}{Window in which to generate the lines. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } } \details{ The grid is randomly displaced from the origin. } \value{ A line segment pattern (object of class \code{"psp"}). } \seealso{ \code{\link{psp}}, \code{\link{rpoisline}} } \examples{ plot(rlinegrid(30, 0.05)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/spatstat.options.Rd0000755000176000001440000002441612237642734016570 0ustar ripleyusers\name{spatstat.options} \alias{spatstat.options} \alias{reset.spatstat.options} \title{Internal Options in Spatstat Package} \description{ Allows the user to examine and reset the values of global parameters which control actions in the \pkg{spatstat} package. } \usage{ spatstat.options(...) 
reset.spatstat.options() } \arguments{ \item{\dots}{ Either empty, or a succession of parameter names in quotes, or a succession of \code{name=value} pairs. See below for the parameter names. } } \value{ Either a list of parameters and their values, or a single value. See Details. } \details{ The function \code{spatstat.options} allows the user to examine and reset the values of global parameters which control actions in the \pkg{spatstat} package. It is analogous to the system function \code{\link{options}}. The function \code{reset.spatstat.options} resets all the global parameters in \pkg{spatstat} to their original, default values. The global parameters are: \describe{ \item{scalable}{ Logical flag indicating whether the new code in \code{rmh.default} which makes the results scalable (invariant to change of units) should be used. In order to recover former behaviour (so that previous results can be reproduced) set this option equal to \code{FALSE}. See the \dQuote{Warning} section in the help for \code{\link{rmh}()} for more detail. } \item{npixel}{ Default number of pixels in a binary mask or pixel image. Either an integer, or a pair of integers, giving the number of pixels in the \code{x} and \code{y} directions respectively. } \item{maxedgewt}{ Edge correction weights will be trimmed so as not to exceed this value. This applies to the weights computed by \code{\link{edge.Trans}} or \code{\link{edge.Ripley}} and used in \code{\link{Kest}} and its relatives. } \item{par.binary}{ List of arguments to be passed to the function \code{\link{image}} when displaying a binary image mask (in \code{\link{plot.owin}} or \code{\link{plot.ppp}}). Typically used to reset the colours of foreground and background. } \item{par.persp}{ List of arguments to be passed to the function \code{\link{persp}} when displaying a real-valued image, such as the fitted surfaces in \code{\link{plot.ppm}}. 
} \item{par.points}{ List of arguments controlling the plotting of point patterns by \code{\link{plot.ppp}}. } \item{par.contour}{ List of arguments controlling contour plots of pixel images by \code{\link{contour.im}}. } \item{par.fv}{ List of arguments controlling the plotting of functions by \code{\link{plot.fv}} and its relatives. } \item{ndummy.min}{ The minimum number of dummy points in a quadrature scheme created by \code{\link{default.dummy}}. Either an integer or a pair of integers giving the minimum number of dummy points in the \code{x} and \code{y} directions respectively. } \item{ngrid.disc}{ Number of points in the square grid used to compute a discrete approximation to the areas of discs in \code{\link{areaLoss}} and \code{\link{areaGain}} when exact calculation is not available. A single integer. } \item{image.colfun}{ Function determining the default colour map for \code{\link{plot.im}}. When called with one integer argument \code{n}, this function should return a character vector of length \code{n} specifying \code{n} different colours. } \item{progress}{ Character string determining the style of progress reports printed by \code{\link{progressreport}}. Either \code{"tty"} or \code{"txtbar"}. } \item{checkpolygons}{ Logical flag indicating whether the functions \code{\link{owin}} and \code{\link{as.owin}} should check the validity of polygon data. It is advisable to leave this set to \code{TRUE}. If you set \code{checkpolygons=FALSE}, no checking will be performed, making it possible to create window objects whose topology is garbled. This can be useful for inspecting spatial data that contain errors, for example, when converting data from shapefiles. However, other functions in \pkg{spatstat} may return incorrect answers on these data. 
} \item{checksegments}{ Logical flag indicating whether the functions \code{\link{psp}} and \code{\link{as.psp}} should check the validity of line segment data (in particular, checking that the endpoints of the line segments are inside the specified window). It is advisable to leave this flag set to \code{TRUE}. } \item{maxmatrix}{ The maximum permitted size (rows times columns) of matrices generated by \pkg{spatstat}'s internal code. Used by \code{\link{ppm}} and \code{\link{predict.ppm}} (for example) to decide when to split a large calculation into blocks. Defaults to \code{2^24=16777216}. } \item{huge.npoints}{ The maximum value of \code{n} for which \code{runif(n)} will not generate an error (possible errors include failure to allocate sufficient memory, and integer overflow of \code{n}). An attempt to generate more than this number of random points triggers a warning from \code{\link{runifpoint}} and other functions. Defaults to \code{1e6}. } \item{expand}{ The default expansion factor (area inflation factor) for expansion of the simulation window in \code{\link{rmh}} (see \code{\link{rmhcontrol}}). Initialised to \code{2}. } \item{fasteval}{ One of the strings \code{'off'}, \code{'on'} or \code{'test'} determining whether to use accelerated C code to evaluate the conditional intensity of a Gibbs model. Initialised to \code{'on'}. } \item{density}{ Logical. Indicates whether to use accelerated C code to evaluate \code{density.ppp(X, at="points")} Initialised to \code{TRUE}. } \item{n.bandwidth}{ Integer. Number of trial values of smoothing bandwidth to use for cross-validation in \code{\link{bw.relrisk}} and similar functions. } \item{psstG.remove.zeroes}{ Logical value, determining whether the algorithm in \code{\link{psstG}} removes or retains the contributions to the function from pairs of points that are identical. If these are retained then the function has a jump at \eqn{r=0}. Initialised to \code{TRUE}. 
  }
  \item{Kcom.remove.zeroes}{
    Logical value, determining whether the algorithm in
    \code{\link{Kcom}} and \code{\link{Kres}}
    removes or retains the contributions to the function
    from pairs of points that are identical. If these are retained
    then the function has a jump at \eqn{r=0}.
    Initialised to \code{TRUE}.
  }
  \item{psstA.ngrid}{
    Single integer,
    controlling the accuracy of the discrete approximation of
    areas computed in the function \code{\link{psstA}}.
    The area of a disc is approximated by counting points on
    an \eqn{n \times n}{n * n} grid.
    Initialised to 32.
  }
  \item{psstA.nr}{
    Single integer,
    determining the number of distances \eqn{r}
    at which the function \code{\link{psstA}} will be evaluated
    (in the default case where argument \code{r} is absent).
    Initialised to 30.
  }
  \item{project.fast}{
    Logical. If \code{TRUE}, the algorithm of \code{\link{project.ppm}}
    will be accelerated using a shortcut.
    Initialised to \code{FALSE}.
  }
  \item{exactdt.checks.data,closepairs.newcode}{
    Logical.
    For software development purposes only.
    Do not change these values, unless you are Adrian Baddeley.
  }
  \item{rmh.p, rmh.q, rmh.nrep}{
    New default values for the parameters \code{p}, \code{q}
    and \code{nrep} in the Metropolis-Hastings simulation
    algorithm. These override the defaults
    in \code{\link{rmhcontrol.default}}.
  }
  \item{print.ppm.SE}{
    Default rule used by \code{\link{print.ppm}}
    to decide whether to calculate and print standard errors
    of the estimated coefficients of the model.
    One of the strings \code{"always"}, \code{"never"} or
    \code{"poisson"} (the latter indicating that standard errors
    will be calculated only for Poisson models).
    The default is \code{"poisson"} because the calculation
    for non-Poisson models can take a long time.
  }
  \item{nvoxel}{
    Default number of voxels in a 3D image,
    typically for calculating the distance transform in \code{\link{F3est}}.
    Initialised to 4 megavoxels:
    \code{nvoxel = 2^22 = 4194304}.
  }
  \item{fastK.lgcp}{
    Logical.
Whether to use fast or slow algorithm to compute the (theoretical) \eqn{K}-function of a log-Gaussian Cox process for use in \code{\link{lgcp.estK}} or \code{\link{Kmodel}}. The slow algorithm uses accurate numerical integration; the fast algorithm uses Simpson's Rule for numerical integration, and is about two orders of magnitude faster. Initialised to \code{FALSE}. } } If no arguments are given, the current values of all parameters are returned, in a list. If one parameter name is given, the current value of this parameter is returned (\bold{not} in a list, just the value). If several parameter names are given, the current values of these parameters are returned, in a list. If \code{name=value} pairs are given, the named parameters are reset to the given values, and the \bold{previous} values of these parameters are returned, in a list. } \seealso{ \code{\link{options}} } \examples{ # save current values oldopt <- spatstat.options() spatstat.options("npixel") spatstat.options(npixel=150) spatstat.options(npixel=c(100,200)) spatstat.options(par.binary=list(col=grey(c(0.5,1)))) spatstat.options(par.persp=list(theta=-30,phi=40,d=4)) # see help(persp.default) for other options # revert spatstat.options(oldopt) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} spatstat/man/marks.Rd0000755000176000001440000000665612237642733014355 0ustar ripleyusers\name{marks} \alias{marks} \alias{marks.ppp} \alias{marks.ppx} \alias{marks<-} \alias{marks<-.ppp} \alias{marks<-.ppx} \alias{setmarks} \alias{\%mark\%} %DoNotExport %NAMESPACE export("%mark%") \title{Marks of a Point Pattern} \description{ Extract or change the marks attached to a point pattern dataset. 
} \usage{ marks(x, \dots) \method{marks}{ppp}(x, \dots, dfok=TRUE) \method{marks}{ppx}(x, \dots, drop=TRUE) marks(x, \dots) <- value \method{marks}{ppp}(x, \dots, dfok=TRUE) <- value \method{marks}{ppx}(x, \dots) <- value setmarks(x, value) x \%mark\% value } \arguments{ \item{x}{ Point pattern dataset (object of class \code{"ppp"} or \code{"ppx"}). } \item{\dots}{ Ignored. } \item{dfok}{ Logical. If \code{FALSE}, data frames of marks are not permitted and will generate an error. } \item{drop}{ Logical. If \code{TRUE}, a data frame consisting of a single column of marks will be converted to a vector or factor. } \item{value}{ Vector, data frame or hyperframe of mark values, or \code{NULL}. } } \value{ For \code{marks(x)}, the result is a vector, factor, data frame or hyperframe, containing the mark values attached to the points of \code{x}. For \code{marks(x) <- value}, the result is the updated point pattern \code{x} (with the side-effect that the dataset \code{x} is updated in the current environment). For \code{setmarks(x,value)} and \code{x \%mark\% value}, the return value is the point pattern obtained by replacing the marks of \code{x} by \code{value}. } \details{ These functions extract or change the marks attached to the points of the point pattern \code{x}. The expression \code{marks(x)} extracts the marks of \code{x}. The assignment \code{marks(x) <- value} assigns new marks to the dataset \code{x}, and updates the dataset \code{x} in the current environment. The expression \code{setmarks(x,value)} or equivalently \code{x \%mark\% value} returns a point pattern obtained by replacing the marks of \code{x} by \code{value}, but does not change the dataset \code{x} itself. For point patterns in two-dimensional space (objects of class \code{"ppp"}) the marks can be a vector, a factor, or a data frame. For general point patterns (objects of class "ppx") the marks can be a vector, a factor, a data frame or a hyperframe. 
For the assignment \code{marks(x) <- value}, the \code{value} should be a vector or factor of length equal to the number of points in \code{x}, or a data frame or hyperframe with as many rows as there are points in \code{x}. If \code{value} is a single value, or a data frame or hyperframe with one row, then it will be replicated so that the same marks will be attached to each point. To remove marks, use \code{marks(x) <- NULL} or \code{\link{unmark}(x)}. Use \code{\link{ppp}} or \code{\link{ppx}} to create point patterns in more general situations. } \seealso{ \code{\link{ppp.object}}, \code{\link{ppx}}, \code{\link{unmark}}, \code{\link{hyperframe}} } \examples{ data(amacrine) # extract marks m <- marks(amacrine) # recode the mark values "off", "on" as 0, 1 marks(amacrine) <- as.integer(m == "on") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/rmpoispp.Rd0000755000176000001440000001626212237642734015104 0ustar ripleyusers\name{rmpoispp} \alias{rmpoispp} \title{Generate Multitype Poisson Point Pattern} \description{ Generate a random point pattern, a realisation of the (homogeneous or inhomogeneous) multitype Poisson process. } \usage{ rmpoispp(lambda, lmax=NULL, win, types, \dots) } \arguments{ \item{lambda}{ Intensity of the multitype Poisson process. Either a single positive number, a vector, a \code{function(x,y,m, \dots)}, a pixel image, a list of functions \code{function(x,y, \dots)}, or a list of pixel images. } \item{lmax}{ An upper bound for the value of \code{lambda}. May be omitted } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Ignored if \code{lambda} is a pixel image or list of images. } \item{types}{ All the possible types for the multitype pattern. 
} \item{\dots}{ Arguments passed to \code{lambda} if it is a function. } } \value{ The simulated multitype point pattern (an object of class \code{"ppp"} with a component \code{marks} which is a factor). } \details{ This function generates a realisation of the marked Poisson point process with intensity \code{lambda}. Note that the intensity function \eqn{\lambda(x,y,m)}{lambda(x,y,m)} is the average number of points \bold{of type m} per unit area near the location \eqn{(x,y)}. Thus a marked point process with a constant intensity of 10 and three possible types will have an average of 30 points per unit area, with 10 points of each type on average. The intensity function may be specified in any of the following ways. \describe{ \item{single number:}{ If \code{lambda} is a single number, then this algorithm generates a realisation of the uniform marked Poisson process inside the window \code{win} with intensity \code{lambda} for each type. The total intensity of points of all types is \code{lambda * length(types)}. The argument \code{types} must be given and determines the possible types in the multitype pattern. } \item{vector:}{ If \code{lambda} is a numeric vector, then this algorithm generates a realisation of the stationary marked Poisson process inside the window \code{win} with intensity \code{lambda[i]} for points of type \code{types[i]}. The total intensity of points of all types is \code{sum(lambda)}. The argument \code{types} defaults to \code{seq(lambda)}. } \item{function:}{ If \code{lambda} is a function, the process has intensity \code{lambda(x,y,m,\dots)} at spatial location \code{(x,y)} for points of type \code{m}. The function \code{lambda} must work correctly with vectors \code{x}, \code{y} and \code{m}, returning a vector of function values. (Note that \code{m} will be a factor with levels equal to \code{types}.) 
The value \code{lmax}, if present, must be an upper bound on the values of \code{lambda(x,y,m,\dots)} for all locations \code{(x, y)} inside the window \code{win} and all types \code{m}. The argument \code{types} must be given. } \item{list of functions:}{ If \code{lambda} is a list of functions, the process has intensity \code{lambda[[i]](x,y,\dots)} at spatial location \code{(x,y)} for points of type \code{types[i]}. The function \code{lambda[[i]]} must work correctly with vectors \code{x} and \code{y}, returning a vector of function values. The value \code{lmax}, if given, must be an upper bound on the values of \code{lambda(x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. The argument \code{types} defaults to \code{seq(lambda)}. } \item{pixel image:}{ If \code{lambda} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), the intensity at a location \code{(x,y)} for points of any type is equal to the pixel value of \code{lambda} for the pixel nearest to \code{(x,y)}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} must be given. } \item{list of pixel images:}{ If \code{lambda} is a list of pixel images, then the image \code{lambda[[i]]} determines the intensity of points of type \code{types[i]}. The argument \code{win} is ignored; the window of the pixel image is used instead. The argument \code{types} defaults to \code{seq(lambda)}. } } If \code{lmax} is missing, an approximate upper bound will be calculated. To generate an inhomogeneous Poisson process the algorithm uses ``thinning'': it first generates a uniform Poisson process of intensity \code{lmax} for points of each type \code{m}, then randomly deletes or retains each point independently, with retention probability \eqn{p(x,y,m) = \lambda(x,y,m)/\mbox{lmax}}{p(x,y,m) = lambda(x,y)/lmax}. 
} \seealso{ \code{\link{rpoispp}} for unmarked Poisson point process; \code{\link{rmpoint}} for a fixed number of random marked points; \code{\link{ppp.object}}, \code{\link{owin.object}}. } \examples{ # uniform bivariate Poisson process with total intensity 100 in unit square pp <- rmpoispp(50, types=c("a","b")) # stationary bivariate Poisson process with intensity A = 30, B = 70 pp <- rmpoispp(c(30,70), types=c("A","B")) pp <- rmpoispp(c(30,70)) # works in any window data(letterR) pp <- rmpoispp(c(30,70), win=letterR, types=c("A","B")) # inhomogeneous lambda(x,y,m) # note argument 'm' is a factor lam <- function(x,y,m) { 50 * (x^2 + y^3) * ifelse(m=="A", 2, 1)} pp <- rmpoispp(lam, win=letterR, types=c("A","B")) # extra arguments lam <- function(x,y,m,scal) { scal * (x^2 + y^3) * ifelse(m=="A", 2, 1)} pp <- rmpoispp(lam, win=letterR, types=c("A","B"), scal=50) # list of functions lambda[[i]](x,y) lams <- list(function(x,y){50 * x^2}, function(x,y){20 * abs(y)}) pp <- rmpoispp(lams, win=letterR, types=c("A","B")) pp <- rmpoispp(lams, win=letterR) # functions with extra arguments lams <- list(function(x,y,scal){5 * scal * x^2}, function(x,y, scal){2 * scal * abs(y)}) pp <- rmpoispp(lams, win=letterR, types=c("A","B"), scal=10) pp <- rmpoispp(lams, win=letterR, scal=10) # florid example lams <- list(function(x,y){ 100*exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend , function(x,y){ 100*exp(-0.6*x+0.5*y) } # log linear trend ) X <- rmpoispp(lams, win=unit.square(), types=c("on", "off")) # pixel image Z <- as.im(function(x,y){30 * (x^2 + y^3)}, letterR) pp <- rmpoispp(Z, types=c("A","B")) # list of pixel images ZZ <- list( as.im(function(x,y){20 * (x^2 + y^3)}, letterR), as.im(function(x,y){40 * (x^3 + y^2)}, letterR)) pp <- rmpoispp(ZZ, types=c("A","B")) pp <- rmpoispp(ZZ) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} 
\keyword{datagen} spatstat/man/linearpcfinhom.Rd0000755000176000001440000000637312237642732016231 0ustar ripleyusers\name{linearpcfinhom} \alias{linearpcfinhom} \title{ Inhomogeneous Linear Pair Correlation Function } \description{ Computes an estimate of the inhomogeneous linear pair correlation function for a point pattern on a linear network. } \usage{ linearpcfinhom(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{lambda}{ Intensity values for the point pattern. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{ Optional. Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{density.default}} to control the smoothing. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the data points), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \details{ This command computes the inhomogeneous version of the linear pair correlation function from point pattern data on a linear network. If \code{lambda = NULL} the result is equivalent to the homogeneous pair correlation function \code{\link{linearpcf}}. If \code{lambda} is given, then it is expected to provide estimated values of the intensity of the point process at each point of \code{X}. 
The argument \code{lambda} may be a numeric vector (of length equal to the number of points in \code{X}), or a \code{function(x,y)} that will be evaluated at the points of \code{X} to yield numeric values, or a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). } \value{ Function value table (object of class \code{"fv"}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. Okabe, A. and Yamada, I. (2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \seealso{ \code{\link{linearpcf}}, \code{\link{linearKinhom}}, \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) fit <- lppm(X, ~x) K <- linearpcfinhom(X, lambda=fit) plot(K) } \keyword{spatial} \keyword{nonparametric} spatstat/man/simplify.owin.Rd0000755000176000001440000000303012237642734016027 0ustar ripleyusers\name{simplify.owin} \Rdversion{1.1} \alias{simplify.owin} \title{ Approximate a Polygon by a Simpler Polygon } \description{ Given a polygonal window, this function finds a simpler polygon that approximates it. } \usage{ simplify.owin(W, dmin) } \arguments{ \item{W}{ The polygon which is to be simplied. An object of class \code{"owin"}. } \item{dmin}{ Numeric value. 
The smallest permissible length of an edge. } } \details{ This function simplifies a polygon \code{W} by recursively deleting the shortest edge of \code{W} until all remaining edges are longer than the specified minimum length \code{dmin}, or until there are only three edges left. The argument \code{W} must be a window (object of class \code{"owin"}). It should be of type \code{"polygonal"}. If \code{W} is a rectangle, it is returned without alteration. The simplification algorithm is not yet implemented for binary masks. If \code{W} is a mask, an error is generated. } \value{ Another window (object of class \code{"owin"}) of type \code{"polygonal"}. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{owin}} } \examples{ data(letterR) plot(letterR, col="red") plot(simplify.owin(letterR, 0.3), col="blue", add=TRUE) data(chorley) W <- chorley$window plot(W) WS <- simplify.owin(W, 2) plot(WS, add=TRUE, border="green") points(vertices(WS)) } \keyword{spatial} \keyword{math} spatstat/man/alltypes.Rd0000755000176000001440000002224012237642732015057 0ustar ripleyusers\name{alltypes} \alias{alltypes} \title{Calculate Summary Statistic for All Types in a Multitype Point Pattern} \description{ Given a marked point pattern, this computes the estimates of a selected summary function (\eqn{F},\eqn{G}, \eqn{J}, \eqn{K} etc) of the pattern, for all possible combinations of marks, and returns these functions in an array. } \usage{ alltypes(X, fun="K", \dots, dataname=NULL,verb=FALSE,envelope=FALSE) } \arguments{ \item{X}{The observed point pattern, for which summary function estimates are required. An object of class \code{"ppp"} or \code{"lpp"}. } \item{fun}{The summary function. Either an \R function, or a character string indicating the summary function required. 
Options for strings are \code{"F"}, \code{"G"}, \code{"J"}, \code{"K"}, \code{"L"}, \code{"pcf"}, \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"}, \code{"Lcross"}, \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"}, \code{"Ldot"}. } \item{\dots}{ Arguments passed to the summary function (and to the function \code{\link{envelope}} if appropriate) } \item{dataname}{Character string giving an optional (alternative) name to the point pattern, different from what is given in the call. This name, if supplied, may be used by \code{\link{plot.fasp}()} in forming the title of the plot. If not supplied it defaults to the parsing of the argument supplied as \code{X} in the call. } \item{verb}{ Logical value. If \code{verb} is true then terse ``progress reports'' (just the values of the mark indices) are printed out when the calculations for that combination of marks are completed. } \item{envelope}{ Logical value. If \code{envelope} is true, then simulation envelopes of the summary function will also be computed. See Details. } } \details{ This routine is a convenient way to analyse the dependence between types in a multitype point pattern. It computes the estimates of a selected summary function of the pattern, for all possible combinations of marks. It returns these functions in an array (an object of class \code{"fasp"}) amenable to plotting by \code{\link{plot.fasp}()}. The argument \code{fun} specifies the summary function that will be evaluated for each type of point, or for each pair of types. It may be either an \R function or a character string. Suppose that the points have possible types \eqn{1,2,\ldots,m} and let \eqn{X_i}{X[i]} denote the pattern of points of type \eqn{i} only. If \code{fun="F"} then this routine calculates, for each possible type \eqn{i}, an estimate of the Empty Space Function \eqn{F_i(r)} of \eqn{X_i}{X[i]}. See \code{\link{Fest}} for explanation of the empty space function. 
The estimate is computed by applying \code{\link{Fest}} to \eqn{X_i}{X[i]} with the optional arguments \code{\dots}. If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"}, the routine calculates, for each pair of types \eqn{(i,j)}, an estimate of the ``\code{i}-to\code{j}'' cross-type function \eqn{G_{ij}(r)}{G[i,j](r)}, \eqn{J_{ij}(r)}{J[i,j](r)}, \eqn{K_{ij}(r)}{K[i,j](r)} or \eqn{L_{ij}(r)}{L[i,j](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X_j}{X[j]}. See \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or \code{\link{Lcross}} respectively for explanation of these functions. The estimate is computed by applying the relevant function (\code{\link{Gcross}} etc) to \code{X} using each possible value of the arguments \code{i,j}, together with the optional arguments \code{\dots}. If \code{fun} is \code{"pcf"} the routine calculates the cross-type pair correlation function \code{\link{pcfcross}} between each pair of types. If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the routine calculates, for each type \eqn{i}, an estimate of the ``\code{i}-to-any'' dot-type function \eqn{G_{i\bullet}(r)}{G[i.](r)}, \eqn{J_{i\bullet}(r)}{J[i.](r)} or \eqn{K_{i\bullet}(r)}{K[i.](r)} or \eqn{L_{i\bullet}(r)}{L[i.](r)} respectively describing the dependence between \eqn{X_i}{X[i]} and \eqn{X}{X}. See \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}} respectively for explanation of these functions. The estimate is computed by applying the relevant function (\code{\link{Gdot}} etc) to \code{X} using each possible value of the argument \code{i}, together with the optional arguments \code{\dots}. The letters \code{"G"}, \code{"J"}, \code{"K"} and \code{"L"} are interpreted as abbreviations for \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} and \code{\link{Lcross}} respectively, assuming the point pattern is marked. 
If the point pattern is unmarked, the appropriate function \code{\link{Fest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} is invoked instead. If \code{envelope=TRUE}, then as well as computing the value of the summary function for each combination of types, the algorithm also computes simulation envelopes of the summary function for each combination of types. The arguments \code{\dots} are passed to the function \code{\link{envelope}} to control the number of simulations, the random process generating the simulations, the construction of envelopes, and so on. } \value{ A function array (an object of class \code{"fasp"}, see \code{\link{fasp.object}}). This can be plotted using \code{\link{plot.fasp}}. If the pattern is not marked, the resulting ``array'' has dimensions \eqn{1 \times 1}{1 x 1}. Otherwise the following is true: If \code{fun="F"}, the function array has dimensions \eqn{m \times 1}{m * 1} where \eqn{m} is the number of different marks in the point pattern. The entry at position \code{[i,1]} in this array is the result of applying \code{\link{Fest}} to the points of type \code{i} only. If \code{fun} is \code{"Gdot"}, \code{"Jdot"}, \code{"Kdot"} or \code{"Ldot"}, the function array again has dimensions \eqn{m \times 1}{m * 1}. The entry at position \code{[i,1]} in this array is the result of \code{Gdot(X, i)}, \code{Jdot(X, i)} \code{Kdot(X, i)} or \code{Ldot(X, i)} respectively. If \code{fun} is \code{"Gcross"}, \code{"Jcross"}, \code{"Kcross"} or \code{"Lcross"} (or their abbreviations \code{"G"}, \code{"J"}, \code{"K"} or \code{"L"}), the function array has dimensions \eqn{m \times m}{m * m}. The \code{[i,j]} entry of the function array (for \eqn{i \neq j}{i != j}) is the result of applying the function \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}} or\code{\link{Lcross}} to the pair of types \code{(i,j)}. 
The diagonal \code{[i,i]} entry of the function array is the result of applying the univariate function \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}} or \code{\link{Lest}} to the points of type \code{i} only. If \code{envelope=FALSE}, then each function entry \code{fns[[i]]} retains the format of the output of the relevant estimating routine \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}} ,\code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}} or \code{\link{Ldot}} The default formulae for plotting these functions are \code{cbind(km,theo) ~ r} for F, G, and J functions, and \code{cbind(trans,theo) ~ r} for K and L functions. If \code{envelope=TRUE}, then each function entry \code{fns[[i]]} has the same format as the output of the \code{\link{envelope}} command. } \note{ Sizeable amounts of memory may be needed during the calculation. } \seealso{ \code{\link{plot.fasp}}, \code{\link{fasp.object}}, \code{\link{Fest}}, \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{Gcross}}, \code{\link{Jcross}}, \code{\link{Kcross}}, \code{\link{Lcross}}, \code{\link{Gdot}}, \code{\link{Jdot}}, \code{\link{Kdot}}, \code{\link{envelope}}. } \examples{ # bramblecanes (3 marks). \testonly{ bramblecanes <- bramblecanes[c(seq(1, 744, by=20), seq(745, 823, by=4))] } bF <- alltypes(bramblecanes,"F",verb=TRUE) plot(bF) if(interactive()) { plot(alltypes(bramblecanes,"G")) plot(alltypes(bramblecanes,"Gdot")) } # Swedishpines (unmarked). 
\testonly{ swedishpines <- swedishpines[1:25] } plot(alltypes(swedishpines,"K")) plot(alltypes(amacrine, "pcf"), ylim=c(0,1.3)) # A setting where you might REALLY want to use dataname: \dontrun{ xxx <- alltypes(ppp(Melvin$x,Melvin$y, window=as.owin(c(5,20,15,50)),marks=clyde), fun="F",verb=TRUE,dataname="Melvin") } # envelopes bKE <- alltypes(bramblecanes,"K",envelope=TRUE,nsim=19) \dontrun{ bFE <- alltypes(bramblecanes,"F",envelope=TRUE,nsim=19,global=TRUE) } # extract one entry as.fv(bKE[1,1]) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/persp.im.Rd0000755000176000001440000000475312237642733014771 0ustar ripleyusers\name{persp.im} \alias{persp.im} \title{Perspective Plot of Pixel Image} \description{ Displays a perspective plot of a pixel image. } \usage{ \method{persp}{im}(x, \dots, colmap=NULL) } \arguments{ \item{x}{ The pixel image to be plotted. An object of class \code{"im"} (see \code{\link{im.object}}). } \item{\dots}{ Extra arguments passed to \code{\link{persp.default}} to control the display. } \item{colmap}{ Optional data controlling the colour map. See Details. } } \value{ Returns the 3D transformation matrix returned by \code{\link{persp.default}}. } \details{ This is the \code{persp} method for the class \code{"im"}. The pixel image \code{x} must have real or integer values. These values are treated as heights of a surface, and the surface is displayed as a perspective plot on the current plot device, using equal scales on the \code{x} and \code{y} axes. The optional argument \code{colmap} gives an easy way to display different altitudes in different colours (if this is what you want). \itemize{ \item If \code{colmap} is a colour map (object of class \code{"colourmap"}, created by the function \code{\link{colourmap}}) then this colour map will be used to associate altitudes with colours. 
\item If \code{colmap} is a character vector, then the range of altitudes in the perspective plot will be divided into \code{length(colmap)} intervals, and those parts of the surface which lie in a particular altitude range will be assigned the corresponding colour from \code{colmap}. \item If \code{colmap} is a list with entries \code{breaks} and \code{col}, then \code{colmap$breaks} determines the breakpoints of the altitude intervals, and \code{colmap$col} provides the corresponding colours. } Graphical parameters controlling the perspective plot are passed through the \code{...} arguments directly to the function \code{\link{persp.default}}. See the examples in \code{\link{persp.default}} or in \code{demo(persp)}. } \seealso{ \code{\link{im.object}}, \code{\link{plot.im}}, \code{\link{contour.im}} } \examples{ # an image Z <- setcov(owin()) persp(Z, colmap=terrain.colors(128)) co <- colourmap(range=c(0,1), col=rainbow(128)) persp(Z, colmap=co, axes=FALSE, shade=0.3) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/parres.Rd0000755000176000001440000001650312237642733014524 0ustar ripleyusers\name{parres} \alias{parres} \title{ Partial Residuals for Point Process Model } \description{ Computes the smoothed partial residuals, a diagnostic for transformation of a covariate in a Poisson point process model. } \usage{ parres(model, covariate, ..., smooth.effect=FALSE, subregion=NULL, bw = "nrd0", adjust=1, from = NULL, to = NULL, n = 512, bw.input = c("points", "quad"), bw.restrict=FALSE, covname) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{covariate}{ The covariate of interest. 
Either a character string matching the name of one of the canonical covariates in the model, or one of the names \code{"x"} or \code{"y"} referring to the Cartesian coordinates, or one of the names of the covariates given when \code{model} was fitted, or a pixel image (object of class \code{"im"}) or \code{function(x,y)} supplying the values of a covariate at any location. } \item{smooth.effect}{ Logical. Determines the choice of algorithm. See Details. } \item{subregion}{ Optional. A window (object of class \code{"owin"}) specifying a subset of the spatial domain of the data. The calculation will be confined to the data in this subregion. } \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link[stats]{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link[stats]{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link[stats]{density.default}} to control the number and range of values at which the function will be estimated. } \item{\dots}{ Additional arguments passed to \code{\link[stats]{density.default}}. } \item{bw.input}{ Character string specifying the input data used for automatic bandwidth selection. } \item{bw.restrict}{ Logical value, specifying whether bandwidth selection is performed using data from the entire spatial domain or from the \code{subregion}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } } \details{ This command computes the smoothed partial residual diagnostic (Baddeley, Chang, Song and Turner, 2012) for the transformation of a covariate in a Poisson point process model. The argument \code{model} must be a fitted Poisson point process model. The diagnostic works in two different ways: \describe{ \item{Canonical covariate:}{ The argument \code{covariate} may be a character string which is the name of one of the \emph{canonical covariates} in the model. 
The canonical covariates are the functions \eqn{Z_j}{Z[j]} that appear in the expression for the Poisson point process intensity \deqn{ \lambda(u) = \exp(\beta_1 Z_1(u) + \ldots + \beta_p Z_p(u)) }{ lambda(u) = exp(beta[1] * Z[1](u) + \ldots + \beta[p] * Z[p](u)) } at spatial location \eqn{u}. Type \code{names(coef(model))} to see the names of the canonical covariates in \code{model}. If the selected covariate is \eqn{Z_j}{Z[j]}, then the diagnostic plot concerns the model term \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)}. The plot shows a smooth estimate of a function \eqn{h(z)} that should replace this linear term, that is, \eqn{\beta_j Z_j(u)}{beta[j] * Z[j](u)} should be replaced by \eqn{h(Z_j(u))}{h(Z[j](u))}. The linear function is also plotted as a dotted line. } \item{New covariate:}{ If the argument \code{covariate} is a pixel image (object of class \code{"im"}) or a \code{function(x,y)}, it is assumed to provide the values of a covariate that is not present in the model. Alternatively \code{covariate} can be the name of a covariate that was supplied when the model was fitted (i.e. in the call to \code{\link{ppm}}) but which does not feature in the model formula. In either case we speak of a new covariate \eqn{Z(u)}. If the fitted model intensity is \eqn{\lambda(u)}{lambda(u)} then we consider modifying this to \eqn{\lambda(u) \exp(h(Z(u)))}{lambda(u) * exp(h(Z(u)))} where \eqn{h(z)} is some function. The diagnostic plot shows an estimate of \eqn{h(z)}. \bold{Warning: in this case the diagnostic is not theoretically justified. This option is provided for research purposes.} } } Alternatively \code{covariate} can be one of the character strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. The behaviour here depends on whether the coordinate was one of the canonical covariates in the model. 
If there is more than one canonical covariate in the model that depends on the specified \code{covariate}, then the covariate effect is computed using all these canonical covariates. For example in a log-quadratic model which includes the terms \code{x} and \code{I(x^2)}, the quadratic effect involving both these terms will be computed. There are two choices for the algorithm. If \code{smooth.effect=TRUE}, the fitted covariate effect (according to \code{model}) is added to the point process residuals, then smoothing is applied to these values. If \code{smooth.effect=FALSE}, the point process residuals are smoothed first, and then the fitted covariate effect is added to the result. The smoothing bandwidth is controlled by the arguments \code{bw}, \code{adjust}, \code{bw.input} and \code{bw.restrict}. If \code{bw} is a numeric value, then the bandwidth is taken to be \code{adjust * bw}. If \code{bw} is a string representing a bandwidth selection rule (recognised by \code{\link[stats]{density.default}}) then the bandwidth is selected by this rule. The data used for automatic bandwidth selection are specified by \code{bw.input} and \code{bw.restrict}. If \code{bw.input="points"} (the default) then bandwidth selection is based on the covariate values at the points of the original point pattern dataset to which the model was fitted. If \code{bw.input="quad"} then bandwidth selection is based on the covariate values at every quadrature point used to fit the model. If \code{bw.restrict=TRUE} then the bandwidth selection is performed using only data from inside the \code{subregion}. } \value{ A function value table (object of class \code{"fv"}) containing the values of the smoothed partial residual, the estimated variance, and the fitted effect of the covariate. Also belongs to the class \code{"parres"} which has methods for \code{print} and \code{plot}. } \references{ Baddeley, A. and Chang, Y.-M. and Song, Y. and Turner, R. 
(2012) \emph{Residual diagnostics for covariate effects in spatial point process models}. Submitted for publication. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Rolf Turner \email{r.turner@auckland.ac.nz}, Ya-Mei Chang and Yong Song. } \seealso{ \code{\link{addvar}}, \code{\link{rhohat}}, \code{\link{rho2hat}} } \examples{ X <- rpoispp(function(x,y){exp(3+x+2*x^2)}) model <- ppm(X, ~x+y) tra <- parres(model, "x") plot(tra) plot(parres(model, "x", subregion=square(0.5))) model2 <- ppm(X, ~x+I(x^2)+y) plot(parres(model2, "x")) Z <- setcov(owin()) plot(parres(model2, Z)) } \keyword{spatial} \keyword{models} spatstat/man/as.tess.Rd0000755000176000001440000000515112237642732014604 0ustar ripleyusers\name{as.tess} \alias{as.tess} \alias{as.tess.tess} \alias{as.tess.im} \alias{as.tess.owin} \alias{as.tess.quadratcount} \alias{as.tess.quadrattest} \alias{as.tess.list} \title{Convert Data To Tessellation} \description{ Converts data specifying a tessellation, in any of several formats, into an object of class \code{"tess"}. } \usage{ as.tess(X) \method{as.tess}{tess}(X) \method{as.tess}{im}(X) \method{as.tess}{owin}(X) \method{as.tess}{quadratcount}(X) \method{as.tess}{quadrattest}(X) \method{as.tess}{list}(X) } \arguments{ \item{X}{Data to be converted to a tessellation.} } \value{ An object of class \code{"tess"} specifying a tessellation. } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. This command creates an object of class \code{"tess"} that represents a tessellation. This function converts data in any of several formats into an object of class \code{"tess"} for use by the \pkg{spatstat} package. The argument \code{X} may be \itemize{ \item an object of class \code{"tess"}. The object will be stripped of any extraneous attributes and returned. 
\item a pixel image (object of class \code{"im"}) with pixel values that are logical or factor values. Each level of the factor will determine a tile of the tessellation. \item a window (object of class \code{"owin"}). The result will be a tessellation consisting of a single tile. \item a set of quadrat counts (object of class \code{"quadratcount"}) returned by the command \code{\link{quadratcount}}. The quadrats used to generate the counts will be extracted and returned as a tessellation. \item a quadrat test (object of class \code{"quadrattest"}) returned by the command \code{\link{quadrat.test}}. The quadrats used to perform the test will be extracted and returned as a tessellation. \item a list of windows (objects of class \code{"owin"}) giving the tiles of the tessellation. } The function \code{as.tess} is generic, with methods for various classes, as listed above. } \seealso{ \code{\link{tess}} } \examples{ # pixel image v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin()) levels(v) <- letters[seq(length(levels(v)))] as.tess(v) # quadrat counts data(nztrees) qNZ <- quadratcount(nztrees, nx=4, ny=3) as.tess(qNZ) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/summary.psp.Rd0000755000176000001440000000155712237642734015532 0ustar ripleyusers\name{summary.psp} \alias{summary.psp} \title{Summary of a Line Segment Pattern Dataset} \description{ Prints a useful summary of a line segment pattern dataset. } \usage{ \method{summary}{psp}(object, \dots) } \arguments{ \item{object}{Line segment pattern (object of class \code{"psp"}).} \item{\dots}{Ignored.} } \details{ A useful summary of the line segment pattern \code{object} is printed. This is a method for the generic function \code{\link{summary}}. 
} \seealso{ \code{\link{summary}}, \code{\link{summary.owin}}, \code{\link{print.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) summary(a) # describes it } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/rmh.default.Rd0000755000176000001440000006475712237642734015460 0ustar ripleyusers\name{rmh.default} \alias{rmh.default} \title{Simulate Point Process Models using the Metropolis-Hastings Algorithm.} \description{ Generates a random point pattern, simulated from a chosen point process model, using the Metropolis-Hastings algorithm. } \usage{ \method{rmh}{default}(model, start=NULL, control=default.rmhcontrol(model), \dots, verbose=TRUE, snoop=FALSE) } \arguments{ \item{model}{Data specifying the point process model that is to be simulated. } \item{start}{Data determining the initial state of the algorithm. } \item{control}{Data controlling the iterative behaviour and termination of the algorithm. } \item{\dots}{ Further arguments passed to \code{\link{rmhcontrol}} or to trend functions in \code{model}. } \item{verbose}{Logical flag indicating whether to print progress reports. } \item{snoop}{ Logical. If \code{TRUE}, activate the visual debugger. } } \value{A point pattern (an object of class \code{"ppp"}, see \code{\link{ppp.object}}). The returned value has an attribute \code{info} containing modified versions of the arguments \code{model}, \code{start}, and \code{control} which together specify the exact simulation procedure. The \code{info} attribute can be printed (and is printed automatically by \code{\link{summary.ppp}}). The value of \code{\link[base:Random]{.Random.seed}} at the start of the simulations is also saved and returned as an attribute \code{seed}. 
If the argument \code{track=TRUE} was given (see \code{\link{rmhcontrol}}), the transition history of the algorithm is saved, and returned as an attribute \code{history}. The transition history is a data frame containing a factor \code{proposaltype} identifying the proposal type (Birth, Death or Shift) and a logical vector \code{accepted} indicating whether the proposal was accepted. The data frame also has columns \code{numerator}, \code{denominator} which give the numerator and denominator of the Hastings ratio for the proposal. If the argument \code{nsave} was given (see \code{\link{rmhcontrol}}), the return value has an attribute \code{saved} which is a list of point patterns, containing the intermediate states of the algorithm. } \details{ This function generates simulated realisations from any of a range of spatial point processes, using the Metropolis-Hastings algorithm. It is the default method for the generic function \code{\link{rmh}}. This function executes a Metropolis-Hastings algorithm with birth, death and shift proposals as described in Geyer and Moller (1994). The argument \code{model} specifies the point process model to be simulated. It is either a list, or an object of class \code{"rmhmodel"}, with the following components: \describe{ \item{cif}{A character string specifying the choice of interpoint interaction for the point process. } \item{par}{ Parameter values for the conditional intensity function. } \item{w}{ (Optional) window in which the pattern is to be generated. An object of class \code{"owin"}, or data acceptable to \code{\link{as.owin}}. } \item{trend}{ Data specifying the spatial trend in the model, if it has a trend. This may be a function, a pixel image (of class \code{"im"}), (or a list of functions or images if the model is multitype). If the trend is a function or functions, any auxiliary arguments \code{...} to \code{rmh.default} will be passed to these functions, which should be of the form \code{function(x, y, ...)}. 
} \item{types}{ List of possible types, for a multitype point process. } } For full details of these parameters, see \code{\link{rmhmodel.default}}. The argument \code{start} determines the initial state of the Metropolis-Hastings algorithm. It is either \code{NULL}, or an object of class \code{"rmhstart"}, or a list with the following components: \describe{ \item{n.start}{ Number of points in the initial point pattern. A single integer, or a vector of integers giving the numbers of points of each type in a multitype point pattern. Incompatible with \code{x.start}. } \item{x.start}{ Initial point pattern configuration. Incompatible with \code{n.start}. \code{x.start} may be a point pattern (an object of class \code{"ppp"}), or data which can be coerced to this class by \code{\link{as.ppp}}, or an object with components \code{x} and \code{y}, or a two-column matrix. In the last two cases, the window for the pattern is determined by \code{model$w}. In the first two cases, if \code{model$w} is also present, then the final simulated pattern will be clipped to the window \code{model$w}. } } For full details of these parameters, see \code{\link{rmhstart}}. The third argument \code{control} controls the simulation procedure (including \emph{conditional simulation}), iterative behaviour, and termination of the Metropolis-Hastings algorithm. It is either \code{NULL}, or a list, or an object of class \code{"rmhcontrol"}, with components: \describe{ \item{p}{The probability of proposing a ``shift'' (as opposed to a birth or death) in the Metropolis-Hastings algorithm. } \item{q}{The conditional probability of proposing a death (rather than a birth) given that birth/death has been chosen over shift. } \item{nrep}{The number of repetitions or iterations to be made by the Metropolis-Hastings algorithm. It should be large. } \item{expand}{ Either a numerical expansion factor, or a window (object of class \code{"owin"}). 
Indicates that the process is to be simulated on a larger domain than the original data window \code{w}, then clipped to \code{w} when the algorithm has finished. The default is to expand the simulation window if the model is stationary and non-Poisson (i.e. it has no trend and the interaction is not Poisson) and not to expand in all other cases. If the model has a trend, then in order for expansion to be feasible, the trend must be given either as a function, or an image whose bounding box is large enough to contain the expanded window. } \item{periodic}{A logical scalar; if \code{periodic} is \code{TRUE} we simulate a process on the torus formed by identifying opposite edges of a rectangular window. } \item{ptypes}{A vector of probabilities (summing to 1) to be used in assigning a random type to a new point. } \item{fixall}{A logical scalar specifying whether to condition on the number of points of each type. } \item{nverb}{An integer specifying how often ``progress reports'' (which consist simply of the number of repetitions completed) should be printed out. If nverb is left at 0, the default, the simulation proceeds silently. } \item{x.cond}{If this argument is present, then \emph{conditional simulation} will be performed, and \code{x.cond} specifies the conditioning points and the type of conditioning. } \item{nsave,nburn}{ If these values are specified, then intermediate states of the simulation algorithm will be saved every \code{nsave} iterations, after an initial burn-in period of \code{nburn} iterations. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } } For full details of these parameters, see \code{\link{rmhcontrol}}. The control parameters can also be given in the \code{\dots} arguments. } \section{Conditional Simulation}{ There are several kinds of conditional simulation. \itemize{ \item Simulation \emph{conditional upon the number of points}, that is, holding the number of points fixed. 
To do this, set \code{control$p} (the probability of a shift) equal to 1. The number of points is then determined by the starting state, which may be specified either by setting \code{start$n.start} to be a scalar, or by setting the initial pattern \code{start$x.start}. \item In the case of multitype processes, it is possible to simulate the model \emph{conditionally upon the number of points of each type}, i.e. holding the number of points of each type to be fixed. To do this, set \code{control$p} equal to 1 and \code{control$fixall} to be \code{TRUE}. The number of points is then determined by the starting state, which may be specified either by setting \code{start$n.start} to be an integer vector, or by setting the initial pattern \code{start$x.start}. \item Simulation \emph{conditional on the configuration observed in a sub-window}, that is, requiring that, inside a specified sub-window \eqn{V}, the simulated pattern should agree with a specified point pattern \eqn{y}.To do this, set \code{control$x.cond} to equal the specified point pattern \eqn{y}, making sure that it is an object of class \code{"ppp"} and that the window \code{as.owin(control$x.cond)} is the conditioning window \eqn{V}. \item Simulation \emph{conditional on the presence of specified points}, that is, requiring that the simulated pattern should include a specified set of points. This is simulation from the Palm distribution of the point process given a pattern \eqn{y}. To do this, set \code{control$x.cond} to be a \code{data.frame} containing the coordinates (and marks, if appropriate) of the specified points. } For further information, see \code{\link{rmhcontrol}}. Note that, when we simulate conditionally on the number of points, or conditionally on the number of points of each type, no expansion of the window is possible. } \section{Visual Debugger}{ If \code{snoop = TRUE}, an interactive debugger is activated. 
On the current plot device, the debugger displays the current state of the Metropolis-Hastings algorithm together with the proposed transition to the next state. Clicking on this graphical display (using the left mouse button) will re-centre the display at the clicked location. Surrounding this graphical display is an array of boxes representing different actions. Clicking on one of the action boxes (using the left mouse button) will cause the action to be performed. Debugger actions include: \itemize{ \item Zooming in or out \item Panning (shifting the field of view) left, right, up or down \item Jumping to the next iteration \item Skipping 10, 100, 1000, 10000 or 100000 iterations \item Jumping to the next Birth proposal (etc) \item Changing the fate of the proposal (i.e. changing whether the proposal is accepted or rejected) \item Dumping the current state and proposal to a file \item Printing detailed information at the terminal \item Exiting the debugger (so that the simulation algorithm continues without further interruption). } Right-clicking the mouse will also cause the debugger to exit. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283 -- 322. Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. Geyer, C.J. and Moller, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. 
Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \section{Warnings}{ There is never a guarantee that the Metropolis-Hastings algorithm has converged to its limiting distribution. If \code{start$x.start} is specified then \code{expand} is set equal to 1 and simulation takes place in \code{x.start$window}. Any specified value for \code{expand} is simply ignored. The presence of both a component \code{w} of \code{model} and a non-null value for \code{x.start$window} makes sense ONLY if \code{w} is contained in \code{x.start$window}. For multitype processes make sure that, even if there is to be no trend corresponding to a particular type, there is still a component (a NULL component) for that type, in the list. } \seealso{ \code{\link{rmh}}, \code{\link{rmh.ppm}}, \code{\link{rStrauss}}, \code{\link{ppp}}, \code{\link{ppm}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{MultiHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{PairPiece}}, \code{\link{Poisson}}, \code{\link{Softcore}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Triplets}} } \section{Other models}{ In theory, any finite point process model can be simulated using the Metropolis-Hastings algorithm, provided the conditional intensity is uniformly bounded. In practice, the list of point process models that can be simulated using \code{rmh.default} is limited to those that have been implemented in the package's internal C code. More options will be added in the future. 
Note that the \code{lookup} conditional intensity function permits the simulation (in theory, to any desired degree of approximation) of any pairwise interaction process for which the interaction depends only on the distance between the pair of points. } \section{Reproducible simulations}{ If the user wants the simulation to be exactly reproducible (e.g. for a figure in a journal article, where it is useful to have the figure consistent from draft to draft) then the state of the random number generator should be set before calling \code{rmh.default}. This can be done either by calling \code{\link[base:Random]{set.seed}} or by assigning a value to \code{\link[base:Random]{.Random.seed}}. In the examples below, we use \code{\link[base:Random]{set.seed}}. If a simulation has been performed and the user now wants to repeat it exactly, the random seed should be extracted from the simulated point pattern \code{X} by \code{seed <- attr(x, "seed")}, then assigned to the system random nunber state by \code{.Random.seed <- seed} before calling \code{rmh.default}. } \examples{ if(interactive()) { nr <- 1e5 nv <- 5000 ns <- 200 } else { nr <- 10 nv <- 5 ns <- 20 oldopt <- spatstat.options() spatstat.options(expand=1.1) } set.seed(961018) # Strauss process. 
mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X1.strauss <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X1.strauss) # Strauss process, conditioning on n = 42: X2.strauss <- rmh(model=mod01,start=list(n.start=42), control=list(p=1,nrep=nr,nverb=nv)) # Tracking algorithm progress: X <- rmh(model=mod01,start=list(n.start=ns), control=list(nrep=nr, nsave=nr/5, nburn=nr/2, track=TRUE)) History <- attr(X, "history") Saved <- attr(X, "saved") head(History) plot(Saved) # Hard core process: mod02 <- list(cif="hardcore",par=list(beta=2,hc=0.7),w=c(0,10,0,10)) X3.hardcore <- rmh(model=mod02,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X3.hardcore) # Strauss process equal to pure hardcore: mod02s <- list(cif="strauss",par=list(beta=2,gamma=0,r=0.7),w=c(0,10,0,10)) X3.strauss <- rmh(model=mod02s,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Strauss process in a polygonal window. x <- c(0.55,0.68,0.75,0.58,0.39,0.37,0.19,0.26,0.42) y <- c(0.20,0.27,0.68,0.99,0.80,0.61,0.45,0.28,0.33) mod03 <- list(cif="strauss",par=list(beta=2000,gamma=0.6,r=0.07), w=owin(poly=list(x=x,y=y))) X4.strauss <- rmh(model=mod03,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X4.strauss) # Strauss process in a polygonal window, conditioning on n = 80. X5.strauss <- rmh(model=mod03,start=list(n.start=ns), control=list(p=1,nrep=nr,nverb=nv)) # Strauss process, starting off from X4.strauss, but with the # polygonal window replace by a rectangular one. At the end, # the generated pattern is clipped to the original polygonal window. 
xxx <- X4.strauss xxx$window <- as.owin(c(0,1,0,1)) X6.strauss <- rmh(model=mod03,start=list(x.start=xxx), control=list(nrep=nr,nverb=nv)) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=c(0,10,0,10)) X1.straush <- rmh(model=mod04,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Another Strauss with hardcore (with a perhaps surprising result): mod05 <- list(cif="straush",par=list(beta=80,gamma=0.36,r=45,hc=2.5), w=c(0,250,0,250)) X2.straush <- rmh(model=mod05,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Pure hardcore (identical to X3.strauss). mod06 <- list(cif="straush",par=list(beta=2,gamma=1,r=1,hc=0.7), w=c(0,10,0,10)) X3.straush <- rmh(model=mod06,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Soft core: w <- c(0,10,0,10) mod07 <- list(cif="sftcr",par=list(beta=0.8,sigma=0.1,kappa=0.5), w=c(0,10,0,10)) X.sftcr <- rmh(model=mod07,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.sftcr) # Area-interaction process: mod42 <- rmhmodel(cif="areaint",par=list(beta=2,eta=1.6,r=0.7), w=c(0,10,0,10)) X.area <- rmh(model=mod42,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.area) # Triplets process modtrip <- list(cif="triplets",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) X.triplets <- rmh(model=modtrip, start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.triplets) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm",par=list(beta=beta,gamma=gmma,radii=r), w=c(0,250,0,250)) X1.straussm <- rmh(model=mod08,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) if(interactive()) plot(X1.straussm) # Multitype Strauss conditioning upon the total number # of points being 80: X2.straussm <- rmh(model=mod08,start=list(n.start=ns), control=list(p=1,ptypes=c(0.75,0.25),nrep=nr, nverb=nv)) # 
Conditioning upon the number of points of type 1 being 60 # and the number of points of type 2 being 20: X3.straussm <- rmh(model=mod08,start=list(n.start=c(60,20)), control=list(fixall=TRUE,p=1,ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) # Multitype Strauss hardcore: rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) mod09 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250)) X.straushm <- rmh(model=mod09,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),nrep=nr,nverb=nv)) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) X1.straushm.trend <- rmh(model=mod10,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25), nrep=nr,nverb=nv)) if(interactive()) plot(X1.straushm.trend) # Multitype Strauss hardcore with trends for each type, given as images: bigwin <- square(250) i1 <- as.im(tr3, bigwin) i2 <- as.im(tr4, bigwin) mod11 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=r,hradii=rhc),w=bigwin, trend=list(i1,i2)) X2.straushm.trend <- rmh(model=mod11,start=list(n.start=ns), control=list(ptypes=c(0.75,0.25),expand=1, nrep=nr,nverb=nv)) # Diggle, Gates, and Stibbard: mod12 <- list(cif="dgs",par=list(beta=3600,rho=0.08),w=c(0,1,0,1)) X.dgs <- rmh(model=mod12,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.dgs) # Diggle-Gratton: mod13 <- list(cif="diggra", par=list(beta=1800,kappa=3,delta=0.02,rho=0.04), w=square(1)) X.diggra <- rmh(model=mod13,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.diggra) # Fiksel: modFik <- list(cif="fiksel", par=list(beta=180,r=0.15,hc=0.07,kappa=2,a= -1.0), w=square(1)) X.fiksel <- 
rmh(model=modFik,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.fiksel) # Geyer: mod14 <- list(cif="geyer",par=list(beta=1.25,gamma=1.6,r=0.2,sat=4.5), w=c(0,10,0,10)) X1.geyer <- rmh(model=mod14,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X1.geyer) # Geyer; same as a Strauss process with parameters # (beta=2.25,gamma=0.16,r=0.7): mod15 <- list(cif="geyer",par=list(beta=2.25,gamma=0.4,r=0.7,sat=10000), w=c(0,10,0,10)) X2.geyer <- rmh(model=mod15,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) mod16 <- list(cif="geyer",par=list(beta=8.1,gamma=2.2,r=0.08,sat=3)) data(redwood) X3.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(periodic=TRUE,nrep=nr,nverb=nv)) # Geyer, starting from the redwood data set, simulating # on a torus, and conditioning on n: X4.geyer <- rmh(model=mod16,start=list(x.start=redwood), control=list(p=1,periodic=TRUE,nrep=nr,nverb=nv)) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. 
h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) X.lookup <- rmh(model=mod17,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) if(interactive()) plot(X.lookup) # Strauss with trend tr <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } beta <- 0.3 gmma <- 0.5 r <- 45 modStr <- list(cif="strauss",par=list(beta=beta,gamma=gmma,r=r), w=square(250), trend=tr) X1.strauss.trend <- rmh(model=modStr,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Baddeley-Geyer r <- seq(0,0.2,length=8)[-1] gmma <- c(0.5,0.6,0.7,0.8,0.7,0.6,0.5) mod18 <- list(cif="badgey",par=list(beta=4000, gamma=gmma,r=r,sat=5), w=square(1)) X1.badgey <- rmh(model=mod18,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) mod19 <- list(cif="badgey", par=list(beta=4000, gamma=gmma,r=r,sat=1e4), w=square(1)) set.seed(1329) X2.badgey <- rmh(model=mod18,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Check: h <- ((prod(gmma)/cumprod(c(1,gmma)))[-8])^2 hs <- stepfun(r,c(h,1)) mod20 <- list(cif="lookup",par=list(beta=4000,h=hs),w=square(1)) set.seed(1329) X.check <- rmh(model=mod20,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # X2.badgey and X.check will be identical. mod21 <- list(cif="badgey",par=list(beta=300,gamma=c(1,0.4,1), r=c(0.035,0.07,0.14),sat=5), w=square(1)) X3.badgey <- rmh(model=mod21,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Same result as Geyer model with beta=300, gamma=0.4, r=0.07, # sat = 5 (if seeds and control parameters are the same) # Or more simply: mod22 <- list(cif="badgey", par=list(beta=300,gamma=0.4,r=0.07, sat=5), w=square(1)) X4.badgey <- rmh(model=mod22,start=list(n.start=ns), control=list(nrep=nr,nverb=nv)) # Same again --- i.e. the BadGey model includes the Geyer model. # Illustrating scalability. 
\dontrun{ M1 <- rmhmodel(cif="strauss",par=list(beta=60,gamma=0.5,r=0.04),w=owin()) set.seed(496) X1 <- rmh(model=M1,start=list(n.start=300)) M2 <- rmhmodel(cif="strauss",par=list(beta=0.6,gamma=0.5,r=0.4), w=owin(c(0,10),c(0,10))) set.seed(496) X2 <- rmh(model=M2,start=list(n.start=300)) chk <- affine(X1,mat=diag(c(10,10))) all.equal(chk,X2,check.attribute=FALSE) # Under the default spatstat options the foregoing all.equal() # will yield TRUE. Setting spatstat.options(scalable=FALSE) and # re-running the code will reveal differences between X1 and X2. } if(!interactive()) spatstat.options(oldopt) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/pp3.Rd0000755000176000001440000000231212237642733013723 0ustar ripleyusers\name{pp3} \Rdversion{1.1} \alias{pp3} \title{ Three Dimensional Point Pattern } \description{ Create a three-dimensional point pattern } \usage{ pp3(x, y, z, ...) } \arguments{ \item{x,y,z}{ Numeric vectors of equal length, containing Cartesian coordinates of points in three-dimensional space. } \item{\dots}{ Arguments passed to \code{\link{as.box3}} to determine the three-dimensional box in which the points have been observed. } } \details{ An object of class \code{"pp3"} represents a pattern of points in three-dimensional space. The points are assumed to have been observed by exhaustively inspecting a three-dimensional rectangular box. The boundaries of the box are included as part of the dataset. } \value{ Object of class \code{"pp3"} representing a three dimensional point pattern. Also belongs to class \code{"ppx"}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{box3}}, \code{\link{print.pp3}}, \code{\link{ppx}} } \examples{ X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1))) } \keyword{spatial} \keyword{datagen} spatstat/man/localK.Rd0000755000176000001440000001124512237642732014432 0ustar ripleyusers\name{localK} \alias{localK} \alias{localL} \title{Neighbourhood density function} \description{ Computes the neighbourhood density function, a local version of the \eqn{K}-function or \eqn{L}-function, defined by Getis and Franklin (1987). } \usage{ localK(X, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL) localL(X, ..., correction = "Ripley", verbose = TRUE, rvalue=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{\dots}{Ignored.} \item{correction}{String specifying the edge correction to be applied. Options are \code{"none"}, \code{"translate"}, \code{"translation"}, \code{"Ripley"}, \code{"isotropic"} or \code{"best"}. Only one correction may be specified. } \item{verbose}{Logical flag indicating whether to print progress reports during the calculation. } \item{rvalue}{Optional. A \emph{single} value of the distance argument \eqn{r} at which the function L or K should be computed. } } \details{ The command \code{localL} computes the \emph{neighbourhood density function}, a local version of the \eqn{L}-function (Besag's transformation of Ripley's \eqn{K}-function) that was proposed by Getis and Franklin (1987). The command \code{localK} computes the corresponding local analogue of the K-function. 
Given a spatial point pattern \code{X}, the neighbourhood density function \eqn{L_i(r)}{L[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ L_i(r) = \sqrt{\frac a {(n-1) \pi} \sum_j e_{ij}} }{ L[i](r) = sqrt( (a/((n-1)* pi)) * sum[j] e[i,j]) } where the sum is over all points \eqn{j \neq i}{j != i} that lie within a distance \eqn{r} of the \eqn{i}th point, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{e_{ij}}{e[i,j]} is an edge correction term (as described in \code{\link{Kest}}). The value of \eqn{L_i(r)}{L[i](r)} can also be interpreted as one of the summands that contributes to the global estimate of the L function. By default, the function \eqn{L_i(r)}{L[i](r)} or \eqn{K_i(r)}{K[i](r)} is computed for a range of \eqn{r} values for each point \eqn{i}. The results are stored as a function value table (object of class \code{"fv"}) with a column of the table containing the function estimates for each point of the pattern \code{X}. Alternatively, if the argument \code{rvalue} is given, and it is a single number, then the function will only be computed for this value of \eqn{r}, and the results will be returned as a numeric vector, with one entry of the vector for each point of the pattern \code{X}. Inhomogeneous counterparts of \code{localK} and \code{localL} are computed by \code{localKinhom} and \code{localLinhom}. } \value{ If \code{rvalue} is given, the result is a numeric vector of length equal to the number of points in the point pattern. If \code{rvalue} is absent, the result is an object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the neighbourhood density function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. } \references{ Getis, A. and Franklin, J. (1987) Second-order neighbourhood analysis of mapped point patterns. \emph{Ecology} \bold{68}, 473--477. } \seealso{ \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{localKinhom}}, \code{\link{localLinhom}}. } \examples{ data(ponderosa) X <- ponderosa # compute all the local L functions L <- localL(X) # plot all the local L functions against r plot(L, main="local L functions for ponderosa", legend=FALSE) # plot only the local L function for point number 7 plot(L, iso007 ~ r) # compute the values of L(r) for r = 12 metres L12 <- localL(X, rvalue=12) # Spatially interpolate the values of L12 # Compare Figure 5(b) of Getis and Franklin (1987) X12 <- X \%mark\% L12 Z <- Smooth(X12, sigma=5, dimyx=128) plot(Z, col=topo.colors(128), main="smoothed neighbourhood density") contour(Z, add=TRUE) points(X, pch=16, cex=0.5) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/rVarGamma.Rd0000644000176000001440000001220312251541120015053 0ustar ripleyusers\name{rVarGamma} \alias{rVarGamma} \title{Simulate Neyman-Scott Point Process with Variance Gamma cluster kernel} \description{ Generate a random point pattern, a simulated realisation of the Neyman-Scott process with Variance Gamma (Bessel) cluster kernel. 
} \usage{ rVarGamma(kappa, nu.ker, omega, mu, win = owin(), eps = 0.001, nu.pcf=NULL) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{nu.ker}{ Shape parameter for the cluster kernel. A number greater than -1. } \item{omega}{ Scale parameter for cluster kernel. Determines the size of clusters. A positive number in the same units as the spatial coordinates. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{eps}{ Threshold below which the values of the cluster kernel will be treated as zero for simulation purposes. } \item{nu.pcf}{ Alternative specifier of the shape parameter. See Details. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). Additionally, some intermediate results of the simulation are returned as attributes of this point pattern. See \code{\link{rNeymanScott}}. } \details{ This algorithm generates a realisation of the Neyman-Scott process with Variance Gamma (Bessel) cluster kernel, inside the window \code{win}. The process is constructed by first generating a Poisson point process of ``parent'' points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of points, the number of points in each cluster being random with a Poisson (\code{mu}) distribution, and the points being placed independently and uniformly according to a Variance Gamma kernel. The shape of the kernel is determined by the dimensionless index \code{nu.ker}. This is the parameter \eqn{\nu^\prime = \alpha/2-1}{nu' = alpha/2 - 1} appearing in equation (12) on page 126 of Jalilian et al (2013). 
Instead of specifying \code{nu.ker} the user can specify \code{nu.pcf} which is the parameter \eqn{\nu=\alpha-1}{nu = alpha-1} appearing in equation (13), page 127 of Jalilian et al (2013). These are related by \code{nu.pcf = 2 * nu.ker + 1} and \code{nu.ker = (nu.pcf - 1)/2}. Exactly one of \code{nu.ker} or \code{nu.pcf} must be specified. The scale of the kernel is determined by the argument \code{omega}, which is the parameter \eqn{\eta}{eta} appearing in equations (12) and (13) of Jalilian et al (2013). It is expressed in units of length (the same as the unit of length for the window \code{win}). In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. This model can be fitted to data by the method of minimum contrast, using \code{\link{cauchy.estK}}, \code{\link{cauchy.estpcf}} or \code{\link{kppm}}. It can also be fitted by maximum composite likelihood using \code{\link{kppm}}. The algorithm can also generate spatially inhomogeneous versions of the cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2006). } When the parents are homogeneous (\code{kappa} is a single number) and the offspring are inhomogeneous (\code{mu} is a function or pixel image), the model can be fitted to data using \code{\link{kppm}}, or using \code{\link{cauchy.estK}} or \code{\link{cauchy.estpcf}} applied to the inhomogeneous \eqn{K} function. 
} \seealso{ \code{\link{rpoispp}}, \code{\link{rNeymanScott}}, \code{\link{cauchy.estK}}, \code{\link{cauchy.estpcf}}, \code{\link{kppm}}. } \examples{ # homogeneous X <- rVarGamma(30, 2, 0.02, 5) # inhomogeneous Z <- as.im(function(x,y){ exp(2 - 3 * x) }, W= owin()) Y <- rVarGamma(30, 2, 0.02, Z) } \references{ Jalilian, A., Guan, Y. and Waagepetersen, R. (2013) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics} \bold{40}, 119-137. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Abdollah Jalilian and Rasmus Waagepetersen. Adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{datagen} spatstat/man/round.ppp.Rd0000644000176000001440000000223412237642734015147 0ustar ripleyusers\name{round.ppp} \alias{round.ppp} \alias{round.pp3} \alias{round.ppx} \title{ Apply Numerical Rounding to Spatial Coordinates } \description{ Apply numerical rounding to the spatial coordinates of a point pattern. } \usage{ \method{round}{ppp}(x, digits = 0) \method{round}{pp3}(x, digits = 0) \method{round}{ppx}(x, digits = 0) } \arguments{ \item{x}{ A spatial point pattern in any dimension (object of class \code{"ppp"}, \code{"pp3"} or \code{"ppx"}). } \item{digits}{ integer indicating the number of decimal places. } } \details{ These functions are methods for the generic function \code{\link[base]{round}}. They apply numerical rounding to the spatial coordinates of the point pattern \code{x}. } \value{ A point pattern object, of the same class as \code{x}. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{rounding}} to determine whether numbers have been rounded. \code{\link[base]{round}} in the Base package. 
} \examples{ round(cells, 1) } \keyword{spatial} \keyword{manip} spatstat/man/pixellate.Rd0000755000176000001440000000336512237642733015211 0ustar ripleyusers\name{pixellate} \Rdversion{1.1} \alias{pixellate} \title{ Convert Spatial Object to Pixel Image } \description{ Convert a spatial object to a pixel image by measuring the amount of stuff in each pixel. } \usage{ pixellate(x, ...) } \arguments{ \item{x}{ Spatial object to be converted. A point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}), a line segment pattern (object of class \code{"psp"}), or some other suitable data. } \item{\dots}{ Arguments passed to methods. } } \details{ The function \code{pixellate} converts a geometrical object \code{x} into a pixel image, by measuring the \emph{amount} of \code{x} that is inside each pixel. If \code{x} is a point pattern, \code{pixellate(x)} counts the number of points of \code{x} falling in each pixel. If \code{x} is a window, \code{pixellate(x)} measures the area of intersection of each pixel with the window. The function \code{pixellate} is generic, with methods for point patterns (\code{\link{pixellate.ppp}}), windows (\code{\link{pixellate.owin}}), and line segment patterns (\code{\link{pixellate.psp}}). See the separate documentation for these methods. The related function \code{\link{as.im}} also converts \code{x} into a pixel image, but typically measures only the presence or absence of \code{x} inside each pixel. } \value{ A pixel image (object of class \code{"im"}). 
} \seealso{ \code{\link{pixellate.ppp}}, \code{\link{pixellate.owin}}, \code{\link{pixellate.psp}}, \code{\link{as.im}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/layerplotargs.Rd0000644000176000001440000000225612237642732016114 0ustar ripleyusers\name{layerplotargs} \alias{layerplotargs} \alias{layerplotargs<-} \title{ Extract or Replace the Plot Arguments of a Layered Object } \description{ Extracts or replaces the plot arguments of a layered object. } \usage{ layerplotargs(L) layerplotargs(L) <- value } \arguments{ \item{L}{ An object of class \code{"layered"} created by the function \code{\link{layered}}. } \item{value}{ Replacement value. A list, with the same length as \code{L}, whose elements are lists of plot arguments. } } \details{ These commands extract or replace the \code{plotargs} in a layered object. See \code{\link{layered}}. } \value{ \code{layerplotargs} returns a list of lists of plot arguments. \code{"layerplotargs<-"} returns the updated object of class \code{"layered"}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{layered}}, \code{\link{methods.layered}}, \code{\link{[.layered}}. } \examples{ W <- square(2) L <- layered(W=W, X=cells) layerplotargs(L)$X <- list(pch=16) } \keyword{spatial} \keyword{hplot} spatstat/man/delaunay.Rd0000755000176000001440000000246112237642732015027 0ustar ripleyusers\name{delaunay} \alias{delaunay} \title{Delaunay Triangulation of Point Pattern} \description{ Computes the Delaunay triangulation of a spatial point pattern. } \usage{ delaunay(X) } \arguments{ \item{X}{Spatial point pattern (object of class \code{"ppp"}).} } \details{ The Delaunay triangulation of a spatial point pattern \code{X} is defined as follows. 
First the Dirichlet/Voronoi tessellation of \code{X} is computed; see \code{\link{dirichlet}}. Then two points of \code{X} are defined to be Delaunay neighbours if their Dirichlet/Voronoi tiles share a common boundary. Every pair of Delaunay neighbours is joined by a straight line. The result is a tessellation, consisting of disjoint triangles. The union of these triangles is the convex hull of \code{X}. } \value{ A tessellation (object of class \code{"tess"}). The window of the tessellation is the convex hull of \code{X}, not the original window of \code{X}. } \seealso{ \code{\link{tess}}, \code{\link{dirichlet}}, \code{\link{convexhull.xy}}, \code{\link{ppp}} } \examples{ X <- runifpoint(42) plot(delaunay(X)) plot(X, add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/eroded.areas.Rd0000755000176000001440000000317712237642732015562 0ustar ripleyusers\name{eroded.areas} \alias{eroded.areas} \title{Areas of Morphological Erosions} \description{ Computes the areas of successive morphological erosions of a window. } \usage{ eroded.areas(w, r) } \arguments{ \item{w}{A window.} \item{r}{Numeric vector of radii at which erosions will be performed.} } \value{ Numeric vector, of the same length as \code{r}, giving the areas of the successive erosions. } \details{ This function computes the areas of the erosions of the window \code{w} by each of the radii \code{r[i]}. The morphological erosion of a set \eqn{W} by a distance \eqn{r > 0} is the subset consisting of all points \eqn{x \in W}{x in W} such that the distance from \eqn{x} to the boundary of \eqn{W} is greater than or equal to \eqn{r}. In other words it is the result of trimming a margin of width \eqn{r} off the set \eqn{W}. The argument \code{r} should be a vector of positive numbers. 
The argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. Unless \code{w} is a rectangle, the computation is performed using a pixel raster approximation. To compute the eroded window itself, use \code{\link{erosion}}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{erosion}} } \examples{ w <- owin(c(0,1),c(0,1)) a <- eroded.areas(w, seq(0.01,0.49,by=0.01)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/plot.envelope.Rd0000755000176000001440000000305012237642733016013 0ustar ripleyusers\name{plot.envelope} \alias{plot.envelope} \title{Plot a Simulation Envelope} \description{ Plot method for the class \code{"envelope"}. } \usage{ \method{plot}{envelope}(x, \dots) } \arguments{ \item{x}{ An object of class \code{"envelope"}, containing the variables to be plotted or variables from which the plotting coordinates can be computed. } \item{\dots}{ Extra arguments passed to \code{\link{plot.fv}}. } } \value{ Either \code{NULL}, or a data frame giving the meaning of the different line types and colours. } \details{ This is the \code{plot} method for the class \code{"envelope"} of simulation envelopes. Objects of this class are created by the command \code{\link{envelope}}. This plot method is currently identical to \code{\link{plot.fv}}. Its default behaviour is to shade the region between the upper and lower envelopes in a light grey colour. To suppress the shading and plot the upper and lower envelopes as curves, set \code{shade=NULL}. To change the colour of the shading, use the argument \code{shadecol} which is passed to \code{\link{plot.fv}}. See \code{\link{plot.fv}} for further information on how to control the plot. 
} \examples{ data(cells) E <- envelope(cells, Kest, nsim=19) plot(E) plot(E, sqrt(./pi) ~ r) } \seealso{ \code{\link{envelope}}, \code{\link{plot.fv}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/nncross.pp3.Rd0000644000176000001440000001475312237642733015420 0ustar ripleyusers\name{nncross.pp3} \alias{nncross.pp3} \title{Nearest Neighbours Between Two Patterns in 3D} \description{ Given two point patterns \code{X} and \code{Y} in three dimensions, finds the nearest neighbour in \code{Y} of each point of \code{X}. } \usage{ \method{nncross}{pp3}(X, Y, iX=NULL, iY=NULL, what = c("dist", "which"), \dots, k = 1, sortby=c("range", "var", "x", "y", "z"), is.sorted.X = FALSE, is.sorted.Y = FALSE) } \arguments{ \item{X,Y}{Point patterns in three dimensions (objects of class \code{"pp3"}).} \item{iX, iY}{Optional identifiers, used to determine whether a point in \code{X} is identical to a point in \code{Y}. See Details. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the identifier of the nearest neighbour (\code{"which"}), or both. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{sortby}{ Determines which coordinate to use to sort the point patterns. See Details. } \item{is.sorted.X, is.sorted.Y}{ Logical values attesting whether the point patterns \code{X} and \code{Y} have been sorted. See Details. } \item{\dots}{Ignored.} } \details{ Given two point patterns \code{X} and \code{Y} in three dimensions, this function finds, for each point of \code{X}, the nearest point of \code{Y}. The distance between these points is also computed. If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. 
The return value is a data frame, with rows corresponding to the points of \code{X}. The first column gives the nearest neighbour distances (i.e. the \code{i}th entry is the distance from the \code{i}th point of \code{X} to the nearest element of \code{Y}). The second column gives the indices of the nearest neighbours (i.e.\ the \code{i}th entry is the index of the nearest element in \code{Y}.) If \code{what="dist"} then only the vector of distances is returned. If \code{what="which"} then only the vector of indices is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. The result is a data frame. Note that this function is not symmetric in \code{X} and \code{Y}. To find the nearest neighbour in \code{X} of each point in \code{Y}, use \code{nncross(Y,X)}. The arguments \code{iX} and \code{iY} are used when the two point patterns \code{X} and \code{Y} have some points in common. In this situation \code{nncross(X, Y)} would return some zero distances. To avoid this, attach a unique integer identifier to each point, such that two points are identical if their identifying numbers are equal. Let \code{iX} be the vector of identifier values for the points in \code{X}, and \code{iY} the vector of identifiers for points in \code{Y}. Then the code will only compare two points if they have different values of the identifier. See the Examples. } \section{Sorting data and pre-sorted data}{ Read this section if you care about the speed of computation. 
For efficiency, the algorithm sorts both the point patterns \code{X} and \code{Y} into increasing order of the \eqn{x} coordinate, or both into increasing order of the \eqn{y} coordinate, or both into increasing order of the \eqn{z} coordinate. Sorting is only an intermediate step; it does not affect the output, which is always given in the same order as the original data. By default (if \code{sortby="range"}), the sorting will occur on the coordinate that has the largest range of values (according to the frame of the enclosing window of \code{Y}). If \code{sortby = "var"}), sorting will occur on the coordinate that has the greater variance (in the pattern \code{Y}). Setting \code{sortby="x"} or \code{sortby = "y"} or \code{sortby = "z"} will specify that sorting should occur on the \eqn{x}, \eqn{y} or \eqn{z} coordinate, respectively. If the point pattern \code{X} is already sorted, then the corresponding argument \code{is.sorted.X} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate is sorted. Similarly if \code{Y} is already sorted, then \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate is sorted. If both \code{X} and \code{Y} are sorted \emph{on the same coordinate axis} then both \code{is.sorted.X} and \code{is.sorted.Y} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"}, \code{"y"} or \code{"z"} to indicate which coordinate is sorted. } \value{ A data frame, or a vector if the data frame would contain only one column. By default (if \code{what=c("dist", "which")} and \code{k=1}) a data frame with two columns: \item{dist}{Nearest neighbour distance} \item{which}{Nearest neighbour index in \code{Y}} If \code{what="dist"} and \code{k=1}, a vector of nearest neighbour distances. 
If \code{what="which"} and \code{k=1}, a vector of nearest neighbour indices. If \code{k} is specified, the result is a data frame with columns containing the \code{k}-th nearest neighbour distances and/or nearest neighbour indices. } \seealso{ \code{\link{nndist}} for nearest neighbour distances in a single point pattern. } \examples{ # two different point patterns X <- pp3(runif(10), runif(10), runif(10), box3(c(0,1))) Y <- pp3(runif(20), runif(20), runif(20), box3(c(0,1))) N <- nncross(X,Y)$which N <- nncross(X,Y, what="which") #faster # note that length(N) = 10 # k-nearest neighbours N3 <- nncross(X, Y, k=1:3) # two patterns with some points in common Z <- pp3(runif(20), runif(20), runif(20), box3(c(0,1))) X <- Z[1:15] Y <- Z[10:20] iX <- 1:15 iY <- 10:20 N <- nncross(X,Y, iX, iY, what="which") } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Rolf Turner \email{r.turner@auckland.ac.nz}, and Jens Oehlschlaegel } \keyword{spatial} \keyword{math} spatstat/man/diameter.boxx.Rd0000755000176000001440000000367512237642732016006 0ustar ripleyusers\name{diameter.boxx} %DontDeclareMethods \Rdversion{1.1} \alias{diameter.boxx} \alias{volume.boxx} \alias{shortside.boxx} \alias{sidelengths.boxx} \alias{eroded.volumes.boxx} \title{ Geometrical Calculations for Multi-Dimensional Box } \description{ Calculates the volume, diameter, shortest side, side lengths, or eroded volume of a multi-dimensional box. } \usage{ \method{diameter}{boxx}(x) \method{volume}{boxx}(x) \method{shortside}{boxx}(x) \method{sidelengths}{boxx}(x) \method{eroded.volumes}{boxx}(x, r) } \arguments{ \item{x}{ Multi-dimensional box (object of class \code{"boxx"}). } \item{r}{ Numeric value or vector of numeric values for which eroded volumes should be calculated. } } \details{ \code{diameter.boxx}, \code{volume.boxx} and \code{shortside.boxx} compute the diameter, volume and shortest side length of the box. 
\code{sidelengths.boxx} returns the lengths of each side of the box. \code{eroded.volumes.boxx} computes, for each entry \code{r[i]}, the volume of the smaller box obtained by removing a slab of thickness \code{r[i]} from each face of the box. This smaller box is the subset consisting of points that lie at least \code{r[i]} units away from the boundary of the box. } \value{ For \code{diameter.boxx}, \code{shortside.boxx} and \code{volume.boxx}, a single numeric value. For \code{sidelengths.boxx}, a numeric vector of length equal to the number of spatial dimensions. For \code{eroded.volumes.boxx}, a numeric vector of the same length as \code{r}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{boxx}} } \examples{ X <- boxx(c(0,10),c(0,10),c(0,5),c(0,2)) diameter(X) volume(X) shortside(X) sidelengths(X) hd <- shortside(X)/2 eroded.volumes(X, seq(0,hd, length=10)) } \keyword{spatial} \keyword{math} spatstat/man/split.ppx.Rd0000644000176000001440000000753212237642734015171 0ustar ripleyusers\name{split.ppx} \alias{split.ppx} \title{Divide Multidimensional Point Pattern into Sub-patterns} \description{ Divides a multidimensional point pattern into several sub-patterns, according to their marks, or according to any user-specified grouping. } \usage{ \method{split}{ppx}(x, f = marks(x), drop=FALSE, un=NULL, \dots) } \arguments{ \item{x}{ A multi-dimensional point pattern. An object of class \code{"ppx"}. } \item{f}{ Data determining the grouping. Either a factor, or the name of one of the columns of marks. } \item{drop}{ Logical. Determines whether empty groups will be deleted. } \item{un}{ Logical. Determines whether the resulting subpatterns will be unmarked (i.e. whether marks will be removed from the points in each subpattern). } \item{\dots}{ Other arguments are ignored. } } \value{ A list of point patterns. 
The components of the list are named by the levels of \code{f}. The list also has the class \code{"splitppx"} and \code{"listof"}. } \details{ The generic command \code{\link[base]{split}} allows a dataset to be separated into subsets according to the value of a grouping variable. The function \code{split.ppx} is a method for the generic \code{\link[base]{split}} for the class \code{"ppx"} of multidimensional point patterns. It divides up the points of the point pattern \code{x} into several sub-patterns according to the values of \code{f}. The result is a list of point patterns. The argument \code{f} may be \itemize{ \item a factor, of length equal to the number of points in \code{x}. The levels of \code{f} determine the destination of each point in \code{x}. The \code{i}th point of \code{x} will be placed in the sub-pattern \code{split.ppx(x)$l} where \code{l = f[i]}. \item a character string, matching the name of one of the columns of marks, if \code{marks(x)} is a data frame. This column should be a factor. } If \code{f} is missing, then it will be determined by the marks of the point pattern. The pattern \code{x} can be either \itemize{ \item a multitype point pattern (a marked point pattern whose marks vector is a factor). Then \code{f} is taken to be the marks vector. The effect is that the points of each type are separated into different point patterns. \item a marked point pattern with a data frame or hyperframe of marks, containing at least one column that is a factor. The first such column will be used to determine the splitting factor \code{f}. } Some of the sub-patterns created by the split may be empty. If \code{drop=TRUE}, then empty sub-patterns will be deleted from the list. If \code{drop=FALSE} then they are retained. The argument \code{un} determines how to handle marks in the case where \code{x} is a marked point pattern. 
If \code{un=TRUE} then the marks of the points will be discarded when they are split into groups, while if \code{un=FALSE} then the marks will be retained. If \code{f} and \code{un} are both missing, then the default is \code{un=TRUE} for multitype point patterns and \code{un=FALSE} for marked point patterns with a data frame of marks. The result of \code{split.ppx} has class \code{"splitppx"} and \code{"listof"}. There are methods for \code{print}, \code{summary} and \code{plot}. } \seealso{ \code{\link{ppx}}, \code{\link{plot.listof}} } \examples{ df <- data.frame(x=runif(4),y=runif(4),t=runif(4), age=rep(c("old", "new"), 2), size=runif(4)) X <- ppx(data=df, coord.type=c("s","s","t","m","m")) X split(X) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/methods.units.Rd0000644000176000001440000000410612237642733016025 0ustar ripleyusers\name{methods.units} \Rdversion{1.1} \alias{methods.units} %DoNotExport \alias{print.units} \alias{summary.units} \alias{rescale.units} \alias{compatible.units} \title{ Methods for Units } \description{ Methods for class \code{"units"}. } \usage{ \method{print}{units}(x, ...) \method{summary}{units}(object, ...) \method{rescale}{units}(X,s) \method{compatible}{units}(A,B, ..., coerce=TRUE) } \arguments{ \item{x,X,A,B,object}{ Objects of class \code{"units"} representing units of length. } \item{s}{Conversion factor: the new units are \code{s} times the old units.} \item{\dots}{ Other arguments. For \code{print.units} these arguments are passed to \code{print.default}. For \code{summary.units} they are ignored. For \code{compatible.units} these arguments are other objects of class \code{"units"}. } \item{coerce}{ Logical. If \code{TRUE}, a null unit of length is compatible with any non-null unit. 
} } \details{ These are methods for the generic functions \code{\link{print}}, \code{\link{summary}}, \code{\link{rescale}} and \code{\link{compatible}} for the class \code{"units"}. An object of class \code{"units"} represents a unit of length. The \code{print} method prints a description of the unit of length, and the \code{summary} method gives a more detailed description. The \code{rescale} method changes the unit of length by rescaling it. The \code{compatible} method tests whether two or more units of length are compatible. } \value{ For \code{print.units} the value is \code{NULL}. For \code{summary.units} the value is an object of class \code{summary.units} (with its own print method). For \code{rescale.units} the value is another object of class \code{"units"}. For \code{compatible.units} the result is logical. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{box3}}, \code{\link{print}}, \code{\link{unitname}} } \keyword{spatial} \keyword{methods} spatstat/man/rpoint.Rd0000755000176000001440000000751312237642734014545 0ustar ripleyusers\name{rpoint} \alias{rpoint} \title{Generate N Random Points} \description{ Generate a random point pattern containing \eqn{n} independent, identically distributed random points with any specified distribution. } \usage{ rpoint(n, f, fmax=NULL, win=unit.square(), \dots, giveup=1000, verbose=FALSE) } \arguments{ \item{n}{ Number of points to generate. } \item{f}{ The probability density of the points, possibly un-normalised. Either a constant, a function \code{f(x,y,...)}, or a pixel image object. } \item{fmax}{ An upper bound on the values of \code{f}. If missing, this number will be estimated. } \item{win}{ Window in which to simulate the pattern. Ignored if \code{f} is a pixel image. } \item{\dots}{ Arguments passed to the function \code{f}. 
} \item{giveup}{ Number of attempts in the rejection method after which the algorithm should stop trying to generate new points. } \item{verbose}{ Flag indicating whether to report details of performance of the simulation algorithm. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). } \details{ This function generates \code{n} independent, identically distributed random points with common probability density proportional to \code{f}. The argument \code{f} may be \describe{ \item{a numerical constant:}{ uniformly distributed random points will be generated. } \item{a function:}{random points will be generated in the window \code{win} with probability density proportional to \code{f(x,y,...)} where \code{x} and \code{y} are the cartesian coordinates. The function \code{f} must accept two \emph{vectors} of coordinates \code{x,y} and return the corresponding vector of function values. Additional arguments \code{...} of any kind may be passed to the function. } \item{a pixel image:}{if \code{f} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}) then random points will be generated in the window of this pixel image, with probability density proportional to the pixel values of \code{f}. } } The algorithm is as follows: \itemize{ \item If \code{f} is a constant, we invoke \code{\link{runifpoint}}. \item If \code{f} is a function, then we use the rejection method. Proposal points are generated from the uniform distribution. A proposal point \eqn{(x,y)} is accepted with probability \code{f(x,y,...)/fmax} and otherwise rejected. The algorithm continues until \code{n} points have been accepted. It gives up after \code{giveup * n} proposals if there are still fewer than \code{n} points. \item If \code{f} is a pixel image, then a random sequence of pixels is selected (using \code{\link{sample}}) with probabilities proportional to the pixel values of \code{f}. 
Then for each pixel in the sequence we generate a uniformly distributed random point in that pixel. } The algorithm for pixel images is more efficient than that for functions. } \seealso{ \code{\link{ppp.object}}, \code{\link{owin.object}}, \code{\link{runifpoint}} } \examples{ # 100 uniform random points in the unit square X <- rpoint(100) # 100 random points with probability density proportional to x^2 + y^2 X <- rpoint(100, function(x,y) { x^2 + y^2}, 1) # `fmax' may be omitted X <- rpoint(100, function(x,y) { x^2 + y^2}) # irregular window data(letterR) X <- rpoint(100, function(x,y) { x^2 + y^2}, win=letterR) # make a pixel image Z <- setcov(letterR) # 100 points with density proportional to pixel values X <- rpoint(100, Z) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/is.rectangle.Rd0000644000176000001440000000171212237642732015576 0ustar ripleyusers\name{is.rectangle} \alias{is.rectangle} \alias{is.polygonal} \alias{is.mask} \title{Determine Type of Window} \description{ Determine whether a window is a rectangle, a polygonal region, or a binary mask. } \usage{ is.rectangle(w) is.polygonal(w) is.mask(w) } \arguments{ \item{w}{ Window to be inspected. An object of class \code{"owin"}. } } \value{ Logical value, equal to \code{TRUE} if \code{w} is a window of the specified type. } \details{ These simple functions determine whether a window \code{w} (object of class \code{"owin"}) is a rectangle (\code{is.rectangle(w) = TRUE}), a domain with polygonal boundary (\code{is.polygonal(w) = TRUE}), or a binary pixel mask (\code{is.mask(w) = TRUE}). 
} \seealso{ \code{\link{owin}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/with.hyperframe.Rd0000755000176000001440000000524012237642735016342 0ustar ripleyusers\name{with.hyperframe} \alias{with.hyperframe} \title{Evaluate an Expression in Each Row of a Hyperframe} \description{ An expression, involving the names of columns in a hyperframe, is evaluated separately for each row of the hyperframe. } \usage{ \method{with}{hyperframe}(data, expr, ..., simplify = TRUE, ee = NULL, enclos=NULL) } \arguments{ \item{data}{A hyperframe (object of class \code{"hyperframe"}) containing data. } \item{expr}{An \R language expression to be evaluated.} \item{\dots}{Ignored.} \item{simplify}{ Logical. If \code{TRUE}, the return value will be simplified to a vector whenever possible. } \item{ee}{ Alternative form of \code{expr}, as an object of class \code{"expression"}. } \item{enclos}{ An environment in which to search for objects that are not found in the hyperframe. Defaults to \code{\link{parent.frame}()}. } } \details{ This function evaluates the expression \code{expr} in each row of the hyperframe \code{data}. It is a method for the generic function \code{\link{with}}. The argument \code{expr} should be an \R language expression in which each variable name is either the name of a column in the hyperframe \code{data}, or the name of an object in the parent frame (the environment in which \code{with} was called.) The argument \code{ee} can be used as an alternative to \code{expr} and should be an expression object (of class \code{"expression"}). For each row of \code{data}, the expression will be evaluated so that variables which are column names of \code{data} are interpreted as the entries for those columns in the current row. 
For example, if a hyperframe \code{h} has columns called \code{A} and \code{B}, then \code{with(h, A != B)} inspects each row of \code{data} in turn, tests whether the entries in columns \code{A} and \code{B} are unequal, and returns the \eqn{n} logical values. } \value{ Normally a list of length \eqn{n} (where \eqn{n} is the number of rows) containing the results of evaluating the expression for each row. If \code{simplify=TRUE} and each result is a single atomic value, then the result is a vector or factor containing the same values. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{hyperframe}}, \code{\link{plot.hyperframe}} } \examples{ # generate Poisson point patterns with intensities 10 to 100 H <- hyperframe(L=seq(10,100, by=10)) X <- with(H, rpoispp(L)) } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/fvnames.Rd0000644000176000001440000000447012241444560014655 0ustar ripleyusers\name{fvnames} \alias{fvnames} \alias{fvnames<-} \title{ Abbreviations for Groups of Columns in Function Value Table } \description{ Groups of columns in a function value table (object of class \code{"fv"}) identified by standard abbreviations. } \usage{ fvnames(X, a = ".") fvnames(X, a = ".") <- value } \arguments{ \item{X}{ Function value table (object of class \code{"fv"}). See \code{\link{fv.object}}. } \item{a}{ One of the standard abbreviations listed below. } \item{value}{ Character vector containing names of columns of \code{X}. } } \details{ An object of class \code{"fv"} represents a table of values of a function, usually a summary function for spatial data such as the \eqn{K}-function, for which several different statistical estimators may be available. The different estimates are stored as columns of the table. 
Auxiliary information carried in the object \code{X} specifies some columns or groups of columns of this table that should be used for particular purposes. For convenience these groups can be referred to by standard abbreviations which are recognised by various functions in the \pkg{spatstat} package, such as \code{\link{plot.fv}}. These abbreviations are: \tabular{ll}{ \code{".x"} \tab the function argument \cr \code{".y"} \tab the recommended value of the function \cr \code{".s"} \tab the upper and lower limits of shading \cr \tab (for envelopes and confidence intervals)\cr \code{"*"} \tab all columns except the function argument\cr \code{"."} \tab all columns plotted by default.\cr } The command \code{fvnames(X, a)} expands the abbreviation \code{a} and returns a character vector containing the names of the columns. The assignment \code{fvnames(X, a) <- value} changes the definition of the abbreviation \code{a} to the character vector \code{value}. } \value{ For \code{fvnames}, a character vector. For \code{fvnames<-}, the updated object. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{fv.object}}, \code{\link{plot.fv}} } \examples{ K <- Kest(cells) fvnames(K, ".y") fvnames(K, ".y") <- "trans" } \keyword{spatial} \keyword{manip} spatstat/man/Extract.linnet.Rd0000644000176000001440000000215212237642732016121 0ustar ripleyusers\name{Extract.linnet} \alias{[.linnet} \title{Extract Subset of Linear Network} \description{ Extract a subset of a linear network. } \usage{ \method{[}{linnet}(x, i, \dots) } \arguments{ \item{x}{ A linear network (object of class \code{"linnet"}). } \item{i}{ Spatial window defining the subregion. An object of class \code{"owin"}. } \item{\dots}{Ignored.} } \value{ Another linear network (object of class \code{"linnet"}). 
} \details{ This function computes the intersection between the linear network \code{x} and the domain specified by \code{i}. This function is a method for the subset operator \code{"["} for linear networks (objects of class \code{"linnet"}). It is provided mainly for completeness. The index \code{i} should be a window. } \examples{ plot(simplenet) B <- owin(c(0,1),c(0.2,0.5)) plot(simplenet[B], add=TRUE, col="red") plot(B, add=TRUE, border="green") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/Gcross.Rd0000755000176000001440000002226712237642731014472 0ustar ripleyusers\name{Gcross} \alias{Gcross} \title{ Multitype Nearest Neighbour Distance Function (i-to-j) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest point of type \eqn{j}. } \usage{ Gcross(X, i, j, r=NULL, breaks=NULL, \dots, correction=c("rs", "km", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type distance distribution function \eqn{G_{ij}(r)}{Gij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. There is a sensible default. 
First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{\dots}{ Ignored. } \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{ij}(r)}{Gij(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{ij}(r)}{Gij(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{ij}(r)}{Gij(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{ij}(r)}{Gij(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest point of type \eqn{j} } \item{theo}{the theoretical value of \eqn{G_{ij}(r)}{Gij(r)} for a marked Poisson process with the same estimated intensity (see below). } } \details{ This function \code{Gcross} and its companions \code{\link{Gdot}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. 
The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). The ``cross-type'' (type \eqn{i} to type \eqn{j}) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{ij}(r)}{Gij(r)} of the distance from a typical random point of the process with type \eqn{i} to the nearest point of type \eqn{j}. An estimate of \eqn{G_{ij}(r)}{Gij(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{G_{ij}(r)}{Gij(r)} would equal \eqn{F_j(r)}{Fj(r)}, the empty space function of the type \eqn{j} points. For a multitype Poisson point process where the type \eqn{i} points have intensity \eqn{\lambda_i}{lambda[i]}, we have \deqn{G_{ij}(r) = 1 - e^{ - \lambda_j \pi r^2} }{% Gij(r) = 1 - exp( - lambda[j] * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{ij}}{Gij} curves may suggest dependence between the points of types \eqn{i} and \eqn{j}. This algorithm estimates the distribution function \eqn{G_{ij}(r)}{Gij(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. 
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{ij}(r)}{Gij(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{ij}(r)}{Gij(r)}. This estimate should be used with caution as \eqn{G_{ij}(r)}{Gij(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{ij}}{Gij}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{ij}}{Gij} as if it were an unbiased estimator of \eqn{G_{ij}}{Gij}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. 
\emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{ij}}{Gij} does not necessarily have a density. The reduced sample estimator of \eqn{G_{ij}}{Gij} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{ij}}{Gij} is always nondecreasing but its maximum value may be less than \eqn{1}. 
} \seealso{ \code{\link{Gdot}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G01 <- Gcross(amacrine) # equivalent to: \dontrun{ G01 <- Gcross(amacrine, "off", "on") } plot(G01) # empty space function of `on' points \dontrun{ F1 <- Fest(split(amacrine)$on, r = G01$r) lines(F1$r, F1$km, lty=3) } # synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, pp$n, replace=TRUE)) G <- Gcross(pp, "0", "1") # note: "0" not 0 } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/rLGCP.Rd0000755000176000001440000001002612237642734014132 0ustar ripleyusers\name{rLGCP} \alias{rLGCP} \title{Simulate Log-Gaussian Cox Process} \description{ Generate a random point pattern, a realisation of the log-Gaussian Cox process. } \usage{ rLGCP(model="exponential", mu = 0, param = NULL, ..., win=NULL) } \arguments{ \item{model}{ character string: the name of a covariance model for the Gaussian random field, as recognised by the function \code{\link[RandomFields]{GaussRF}} in the \pkg{RandomFields} package. } \item{mu}{ mean function of the Gaussian random field. Either a single number, a \code{function(x,y, ...)} or a pixel image (object of class \code{"im"}). } \item{param}{ Numeric vector of parameters for the covariance, as understood by the function \code{\link[RandomFields]{GaussRF}} in the \pkg{RandomFields} package. } \item{\ldots}{ Further arguments passed to the function \code{\link[RandomFields]{GaussRF}} in the \pkg{RandomFields} package. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"}. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). Additionally, the simulated intensity function is returned as an attribute \code{"Lambda"}. } \details{ This function generates a realisation of a log-Gaussian Cox process (LGCP). 
This is a Cox point process in which the logarithm of the random intensity is a Gaussian random field with mean function \eqn{\mu} and covariance function \eqn{c(r)}. Conditional on the random intensity, the point process is a Poisson process with this intensity. The arguments \code{model} and \code{param} specify the covariance function of the Gaussian random field, in the format expected by the \pkg{RandomFields} package. See \code{\link[RandomFields]{GaussRF}} or \code{\link[RandomFields:CovarianceFct]{Covariance}} for information about this format. A list of all implemented models is available by typing \code{PrintModelList()}. This algorithm uses the function \code{\link[RandomFields]{GaussRF}} in the \pkg{RandomFields} package to generate values of a Gaussian random field, with the specified mean function \code{mu} and the covariance specified by the arguments \code{model} and \code{param}, on the points of a regular grid. The exponential of this random field is taken as the intensity of a Poisson point process, and a realisation of the Poisson process is then generated by the function \code{\link{rpoispp}} in the \pkg{spatstat} package. If the simulation window \code{win} is missing or \code{NULL}, then it defaults to \code{as.owin(mu)} if \code{mu} is a pixel image, and it defaults to the unit square otherwise. The LGCP model can be fitted to data using \code{\link{kppm}}. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}}, \code{\link{rGaussPoisson}}, \code{\link{rNeymanScott}}, \code{\link{lgcp.estK}}, \code{\link{kppm}} } \references{ Moller, J., Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. 
} \examples{ if(require(RandomFields)) { # homogeneous LGCP with exponential covariance function X <- rLGCP("exp", 3, c(0, variance=0.2, nugget=0, scale=.1 )) # inhomogeneous LGCP with Gaussian covariance function m <- as.im(function(x, y){5 - 1.5 * (x - 0.5)^2 + 2 * (y - 0.5)^2}, W=owin()) X <- rLGCP("gauss", m, c(0, variance=0.15, nugget = 0, scale =0.5)) plot(attr(X, "Lambda")) points(X) # inhomogeneous LGCP with Matern covariance function X <- rLGCP("matern", function(x, y){ 1 - 0.4 * x}, c(0, variance=2, nugget=0, scale=0.7, a = 0.5), win = owin(c(0, 10), c(0, 10))) plot(X) } else message("Simulation requires the RandomFields package") } \author{Abdollah Jalilian and Rasmus Waagepetersen. Modified by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/hybrid.family.Rd0000755000176000001440000000206212237642732015763 0ustar ripleyusers\name{hybrid.family} \alias{hybrid.family} \title{ Hybrid Interaction Family } \description{ An object describing the family of all hybrid interactions. } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of all hybrid point process models. If you need to create a specific hybrid interaction model for use in modelling, use the function \code{\link{Hybrid}}. Anyway, \code{hybrid.family} is an object of class \code{"isf"} containing a function \code{hybrid.family$eval} for evaluating the sufficient statistics of any hybrid interaction point process model. } \seealso{ Use \code{\link{Hybrid}} to make hybrid interactions. Other families: \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{ord.family}}, \code{\link{inforder.family}}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/rmhmodel.list.Rd0000755000176000001440000001146712237642734016016 0ustar ripleyusers\name{rmhmodel.list} \alias{rmhmodel.list} \title{Define Point Process Model for Metropolis-Hastings Simulation.} \description{ Given a list of parameters, builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ \method{rmhmodel}{list}(model, ...) } \arguments{ \item{model}{A list of parameters. See Details.} \item{\dots}{ Optional list of additional named parameters. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a validated list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ The generic function \code{\link{rmhmodel}} takes a description of a point process model in some format, and converts it into an object of class \code{"rmhmodel"} so that simulations of the model can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. This function \code{rmhmodel.list} is the method for lists. The argument \code{model} should be a named list of parameters of the form \code{list(cif, par, w, trend, types)} where \code{cif} and \code{par} are required and the others are optional. For details about these components, see \code{\link{rmhmodel.default}}. The subsequent arguments \code{\dots} (if any) may also have these names, and they will take precedence over elements of the list \code{model}. } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. 
\emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763 -- 770. Geyer, C.J. and Moller, J. (1994) Simulation procedures and likelihood inference for spatial point processes. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{rmhmodel}}, \code{\link{rmhmodel.default}}, \code{\link{rmhmodel.ppm}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{Strauss}}, \code{\link{Softcore}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{DiggleGratton}}, \code{\link{PairPiece}} } \examples{ # Strauss process: mod01 <- list(cif="strauss",par=list(beta=2,gamma=0.2,r=0.7), w=c(0,10,0,10)) mod01 <- rmhmodel(mod01) # Strauss with hardcore: mod04 <- list(cif="straush",par=list(beta=2,gamma=0.2,r=0.7,hc=0.3), w=owin(c(0,10),c(0,5))) mod04 <- rmhmodel(mod04) # Soft core: w <- square(10) mod07 <- list(cif="sftcr", par=list(beta=0.8,sigma=0.1,kappa=0.5), w=w) mod07 <- rmhmodel(mod07) # Multitype Strauss: beta <- c(0.027,0.008) gmma <- matrix(c(0.43,0.98,0.98,0.36),2,2) r <- matrix(c(45,45,45,45),2,2) mod08 <- list(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250)) mod08 <- rmhmodel(mod08) # specify types mod09 <- rmhmodel(list(cif="straussm", par=list(beta=beta,gamma=gmma,radii=r), w=square(250), types=c("A", "B"))) # Multitype Strauss hardcore with trends for each type: beta <- c(0.27,0.08) ri <- matrix(c(45,45,45,45),2,2) rhc <- matrix(c(9.1,5.0,5.0,2.5),2,2) tr3 <- function(x,y){x <- x/250; y <- y/250; exp((6*x + 5*y - 18*x^2 + 12*x*y - 9*y^2)/6) } # log quadratic trend 
tr4 <- function(x,y){x <- x/250; y <- y/250; exp(-0.6*x+0.5*y)} # log linear trend mod10 <- list(cif="straushm",par=list(beta=beta,gamma=gmma, iradii=ri,hradii=rhc),w=c(0,250,0,250), trend=list(tr3,tr4)) mod10 <- rmhmodel(mod10) # Lookup (interaction function h_2 from page 76, Diggle (2003)): r <- seq(from=0,to=0.2,length=101)[-1] # Drop 0. h <- 20*(r-0.05) h[r<0.05] <- 0 h[r>0.10] <- 1 mod17 <- list(cif="lookup",par=list(beta=4000,h=h,r=r),w=c(0,1,0,1)) mod17 <- rmhmodel(mod17) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/intersect.tess.Rd0000755000176000001440000000364312237642732016205 0ustar ripleyusers\name{intersect.tess} \alias{intersect.tess} \title{Intersection of Two Tessellations} \description{ Yields the intersection of two tessellations, or the intersection of a tessellation with a window. } \usage{ intersect.tess(X, Y, \dots) } \arguments{ \item{X,Y}{Two tessellations (objects of class \code{"tess"}), or windows (objects of class \code{"owin"}), or other data that can be converted to tessellations by \code{\link{as.tess}}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to control the discretisation, if required. } } \value{ A tessellation (object of class \code{"tess"}). } \details{ A tessellation is a collection of disjoint spatial regions (called \emph{tiles}) that fit together to form a larger spatial region. See \code{\link{tess}}. If \code{X} and \code{Y} are not tessellations, they are first converted into tessellations by \code{\link{as.tess}}. The function \code{intersect.tess} then computes the intersection between the two tessellations. This is another tessellation, each of whose tiles is the intersection of a tile from \code{X} and a tile from \code{Y}. One possible use of this function is to slice a window \code{W} into subwindows determined by a tessellation. 
See the Examples. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{tess}}, \code{\link{as.tess}}, \code{\link{intersect.owin}} } \examples{ opa <- par(mfrow=c(1,3)) # polygon data(letterR) plot(letterR) # tessellation of rectangles X <- tess(xgrid=seq(2, 4, length=10), ygrid=seq(0, 3.5, length=8)) plot(X) plot(intersect.tess(X, letterR)) A <- runifpoint(10) B <- runifpoint(10) plot(DA <- dirichlet(A)) plot(DB <- dirichlet(B)) plot(intersect.tess(DA, DB)) par(opa) } \keyword{spatial} \keyword{math} spatstat/man/areaGain.Rd0000755000176000001440000000462412237642732014737 0ustar ripleyusers\name{areaGain} \alias{areaGain} \title{Difference of Disc Areas} \description{ Computes the area of that part of a disc that is not covered by other discs. } \usage{ areaGain(u, X, r, ..., W=as.owin(X), exact=FALSE, ngrid=spatstat.options("ngrid.disc")) } \arguments{ \item{u}{ Coordinates of the centre of the disc of interest. A vector of length 2. Alternatively, a point pattern (object of class \code{"ppp"}). } \item{X}{ Locations of the centres of other discs. A point pattern (object of class \code{"ppp"}). } \item{r}{ Disc radius, or vector of disc radii. } \item{\dots}{Ignored.} \item{W}{ Window (object of class \code{"owin"}) in which the area should be computed. } \item{exact}{ Choice of algorithm. If \code{exact=TRUE}, areas are computed exactly using analytic geometry. If \code{exact=FALSE} then a faster algorithm is used to compute a discrete approximation to the areas. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the discrete approximation, when \code{exact=FALSE}. } } \value{ A matrix with one row for each point in \code{u} and one column for each value in \code{r}. 
} \details{ This function computes the area of that part of the disc of radius \code{r} centred at the location \code{u} that is \emph{not} covered by any of the discs of radius \code{r} centred at the points of the pattern \code{X}. This area is important in some calculations related to the area-interaction model \code{\link{AreaInter}}. If \code{u} is a point pattern and \code{r} is a vector, the result is a matrix, with one row for each point in \code{u} and one column for each entry of \code{r}. The \code{[i,j]} entry in the matrix is the area of that part of the disc of radius \code{r[j]} centred at the location \code{u[i]} that is \emph{not} covered by any of the discs of radius \code{r[j]} centred at the points of the pattern \code{X}. If \code{W} is not \code{NULL}, then the areas are computed only inside the window \code{W}. } \seealso{ \code{\link{AreaInter}}, \code{\link{areaLoss}} } \examples{ data(cells) u <- c(0.5,0.5) areaGain(u, cells, 0.1) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/model.images.Rd0000755000176000001440000000773212237642733015600 0ustar ripleyusers\name{model.images} %DontDeclareMethods \alias{model.images} \alias{model.images.ppm} \alias{model.images.kppm} \alias{model.images.lppm} \alias{model.images.slrm} \title{Compute Images of Constructed Covariates} \description{ For a point process model fitted to spatial point pattern data, this function computes pixel images of the covariates in the design matrix. } \usage{ model.images(object, ...) \method{model.images}{ppm}(object, W = as.owin(object), ...) \method{model.images}{kppm}(object, W = as.owin(object), ...) \method{model.images}{lppm}(object, L = as.linnet(object), ...) \method{model.images}{slrm}(object, ...) } \arguments{ \item{object}{ The fitted point process model. 
An object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"} or \code{"slrm"}. } \item{W}{ A window (object of class \code{"owin"}) in which the images should be computed. Defaults to the window in which the model was fitted. } \item{L}{ A linear network (object of class \code{"linnet"}) in which the images should be computed. Defaults to the network in which the model was fitted. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } } \details{ This command is similar to \code{\link{model.matrix.ppm}} except that it computes pixel images of the covariates, instead of computing the covariate values at certain points only. The \code{object} must be a fitted spatial point process model object of class \code{"ppm"} (produced by the model-fitting function \code{\link{ppm}}) or class \code{"kppm"} (produced by the fitting function \code{\link{kppm}}) or class \code{"lppm"} (produced by \code{\link{lppm}}) or class \code{"slrm"} (produced by \code{\link{slrm}}). The spatial covariates required by the model-fitting procedure are computed at every pixel location in the window \code{W}. For \code{lppm} objects, the covariates are computed at every location on the network \code{L}. For \code{slrm} objects, the covariates are computed on the pixels that were used to fit the model. Note that the spatial covariates computed here are not the original covariates that were supplied when fitting the model. Rather, they are the covariates that actually appear in the loglinear representation of the (conditional) intensity and in the columns of the design matrix. For example, they might include dummy or indicator variables for different levels of a factor, depending on the contrasts that are in force. The pixel resolution is determined by \code{W} if \code{W} is a mask (that is \code{W$type = "mask"}). Otherwise, the pixel resolution is determined by \code{\link{spatstat.options}}. 
The result is a named list of pixel images (objects of class \code{"im"}) containing the values of the spatial covariates. The names of the list elements are the names of the covariates determined by \code{\link{model.matrix.lm}}. The result is also of class \code{"listof"} so that it can be plotted immediately. } \value{ An object of class \code{"listof"} consisting of a named list of pixel images (objects of class \code{"im"}). This list can be plotted immediately using \code{\link{plot.listof}}. For \code{model.images.lppm}, the images are also of class \code{"linim"}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{model.matrix.ppm}}, \code{\link[stats]{model.matrix}}, \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{lppm}}, \code{\link{kppm}}, \code{\link{slrm}}, \code{\link{im}}, \code{\link{im.object}}, \code{\link{plot.listof}}, \code{\link{spatstat.options}} } \examples{ fit <- ppm(cells, ~x) model.images(fit) fit2 <- ppm(cells, ~cut(x,3)) model.images(fit2) fit3 <- slrm(japanesepines ~ x) model.images(fit3) } \keyword{spatial} \keyword{models} spatstat/man/plot.bermantest.Rd0000755000176000001440000000551112237642733016346 0ustar ripleyusers\name{plot.bermantest} \alias{plot.bermantest} \title{Plot Result of Berman Test} \description{ Plot the result of Berman's test of goodness-of-fit } \usage{ \method{plot}{bermantest}(x, ..., lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=col, lty0=lty) } \arguments{ \item{x}{ Object to be plotted. An object of class \code{"bermantest"} produced by \code{\link{bermantest}}. } \item{\dots}{ extra arguments that will be passed to the plotting function \code{\link{plot.ecdf}}. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical distribution. 
} \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the predicted distribution. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"bermantest"}. An object of this class represents the outcome of Berman's test of goodness-of-fit of a spatial Poisson point process model, computed by \code{\link{bermantest}}. For the \emph{Z1} test (i.e. if \code{x} was computed using \code{bermantest( ,which="Z1")}), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, \eqn{\hat F}{Fhat}, and the predicted cumulative distribution function of the covariate under the model, \eqn{F_0}{F0}, both plotted against the value of the covariate. Two vertical lines show the mean values of these two distributions. If the model is correct, the two curves should be close; the test is based on comparing the two vertical lines. For the \emph{Z2} test (i.e. if \code{x} was computed using \code{bermantest( ,which="Z2")}), the plot displays the empirical cumulative distribution function of the values \eqn{U_i = F_0(Y_i)}{U[i] = F0(Y[i])} where \eqn{Y_i}{Y[i]} is the value of the covariate at the \eqn{i}-th data point. The diagonal line with equation \eqn{y=x} is also shown. Two vertical lines show the mean of the values \eqn{U_i}{U[i]} and the value \eqn{1/2}. If the model is correct, the two curves should be close. The test is based on comparing the two vertical lines. 
} \seealso{ \code{\link{bermantest}} } \examples{ # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(-x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X, ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- bermantest(fit0, xcoord, "Z1") # plot result of test plot(k, col="red", col0="green") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/rMosaicSet.Rd0000755000176000001440000000300412237642733015271 0ustar ripleyusers\name{rMosaicSet} \alias{rMosaicSet} \title{Mosaic Random Set} \description{ Generate a random set by taking a random selection of tiles of a given tessellation. } \usage{ rMosaicSet(X, p=0.5) } \arguments{ \item{X}{ A tessellation (object of class \code{"tess"}). } \item{p}{ Probability of including a given tile. A number strictly between 0 and 1. } } \details{ Given a tessellation \code{X}, this function randomly selects some of the tiles of \code{X}, including each tile with probability \eqn{p} independently of the other tiles. The selected tiles are then combined to form a set in the plane. One application of this is Switzer's (1965) example of a random set which has a Markov property. It is constructed by generating \code{X} according to a Poisson line tessellation (see \code{\link{rpoislinetess}}). } \value{ A window (object of class \code{"owin"}). } \references{ Switzer, P. A random set process in the plane with a Markovian property. \emph{Annals of Mathematical Statistics} \bold{36} (1965) 1859--1863. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{rpoislinetess}}, \code{\link{rMosaicField}} } \examples{ # Switzer's random set X <- rpoislinetess(3) plot(rMosaicSet(X, 0.5), col="green", border=NA) # another example plot(rMosaicSet(dirichlet(runifpoint(30)), 0.4)) } \keyword{spatial} \keyword{datagen} spatstat/man/Extract.tess.Rd0000755000176000001440000000410212237642732015606 0ustar ripleyusers\name{Extract.tess} \alias{[.tess} \alias{[<-.tess} \title{Extract or Replace Subset of Tessellation} \description{ Extract, change or delete a subset of the tiles of a tessellation, to make a new tessellation. } \usage{ \method{[}{tess}(x, ...) \method{[}{tess}(x, ...) <- value } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} \item{\dots}{ One argument that specifies the subset to be extracted or changed. Any valid format for the subset index in a list. } \item{value}{ Replacement value for the selected tiles of the tessellation. A list of windows (objects of class \code{"owin"}) or \code{NULL}. } } \details{ A tessellation (object of class \code{"tess"}, see \code{\link{tess}}) is effectively a list of tiles (spatial regions) that cover a spatial region. The subset operator \code{[.tess} extracts some of these tiles and forms a new tessellation, which of course covers a smaller region than the original. The replacement operator changes the selected tiles. The replacement \code{value} may be either \code{NULL} (which causes the selected tiles to be removed from \code{x}) or a list of the same length as the selected subset. The entries of \code{value} may be windows (objects of class \code{"owin"}) or \code{NULL} to indicate that the corresponding tile should be deleted. Generally it does not make sense to replace a tile in a tessellation with a completely different tile, because the tiles are expected to fit together. 
However this facility is sometimes useful for making small adjustments to polygonal tiles. } \value{ A tessellation (object of class \code{"tess"}). } \seealso{ \code{\link{tess}}, } \examples{ \testonly{op <- spatstat.options(npixel=10)} A <- tess(xgrid=0:4, ygrid=0:3) B <- A[c(1, 3, 7)] E <- A[-1] A[c(2, 5, 11)] <- NULL \testonly{spatstat.options(op)} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/as.mask.psp.Rd0000755000176000001440000000320512237642732015360 0ustar ripleyusers\name{as.mask.psp} \alias{as.mask.psp} \title{ Convert Line Segment Pattern to Binary Pixel Mask } \description{ Converts a line segment pattern to a binary pixel mask by determining which pixels intersect the lines. } \usage{ as.mask.psp(x, W=NULL, ...) } \arguments{ \item{x}{ Line segment pattern (object of class \code{"psp"}). } \item{W}{ Optional window (object of class \code{"owin"}) determining the pixel raster. } \item{\dots}{ Optional extra arguments passed to \code{\link{as.mask}} to determine the pixel resolution. } } \details{ This function converts a line segment pattern to a binary pixel mask by determining which pixels intersect the lines. The pixel raster is determined by \code{W} and the optional arguments \code{\dots}. If \code{W} is missing or \code{NULL}, it defaults to the window containing \code{x}. Then \code{W} is converted to a binary pixel mask using \code{\link{as.mask}}. The arguments \code{\dots} are passed to \code{\link{as.mask}} to control the pixel resolution. } \value{ A window (object of class \code{"owin"}) which is a binary pixel mask (type \code{"mask"}). } \seealso{ \code{\link{pixellate.psp}}, \code{\link{as.mask}}. Use \code{\link{pixellate.psp}} if you want to measure the length of line in each pixel. 
} \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(as.mask.psp(X)) plot(X, add=TRUE, col="red") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/rMosaicField.Rd0000755000176000001440000000301112237642733015557 0ustar ripleyusers\name{rMosaicField} \alias{rMosaicField} \title{Mosaic Random Field} \description{ Generate a realisation of a random field which is piecewise constant on the tiles of a given tessellation. } \usage{ rMosaicField(X, rgen = function(n) { sample(0:1, n, replace = TRUE)}, ..., rgenargs=NULL) } \arguments{ \item{X}{ A tessellation (object of class \code{"tess"}). } \item{\dots}{ Arguments passed to \code{\link{as.mask}} determining the pixel resolution. } \item{rgen}{ Function that generates random values for the tiles of the tessellation. } \item{rgenargs}{ List containing extra arguments that should be passed to \code{rgen} (typically specifying parameters of the distribution of the values). } } \details{ This function generates a realisation of a random field which is piecewise constant on the tiles of the given tessellation \code{X}. The values in each tile are independent and identically distributed. } \value{ A pixel image (object of class \code{"im"}). 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{rpoislinetess}}, \code{\link{rMosaicSet}} } \examples{ X <- rpoislinetess(3) plot(rMosaicField(X, runif)) plot(rMosaicField(X, runif, dimyx=256)) plot(rMosaicField(X, rnorm, rgenargs=list(mean=10, sd=2))) plot(rMosaicField(dirichlet(runifpoint(30)), rnorm)) } \keyword{spatial} \keyword{datagen} spatstat/man/leverage.ppm.Rd0000755000176000001440000000670612237642732015620 0ustar ripleyusers\name{leverage.ppm} \alias{leverage} \alias{leverage.ppm} \title{ Leverage Measure for Spatial Point Process Model } \description{ Computes the leverage measure for a fitted spatial point process model. } \usage{ leverage(model, ...) \method{leverage}{ppm}(model, ..., drop = FALSE, iScore=NULL, iHessian=NULL, iArgs=list()) } \arguments{ \item{model}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{ Ignored. } \item{drop}{ Logical. Whether to include (\code{drop=FALSE}) or exclude (\code{drop=TRUE}) contributions from quadrature points that were not used to fit the model. } \item{iScore,iHessian}{ Components of the score vector and Hessian matrix for the irregular parameters, if required. See Details. } \item{iArgs}{ List of extra arguments for the functions \code{iScore}, \code{iHessian} if required. } } \details{ The function \code{leverage} is generic, and \code{leverage.ppm} is the method for objects of class \code{"ppm"}. Given a fitted spatial point process model \code{model}, the function \code{leverage.ppm} computes the leverage of the model, described in Baddeley, Chang and Song (2013). The leverage of a spatial point process model is a function of spatial location, and is typically displayed as a colour pixel image. 
The leverage value \eqn{h(u)} at a spatial location \eqn{u} represents the change in the fitted trend of the fitted point process model that would have occurred if a data point were to have occurred at the location \eqn{u}. A relatively large value of \eqn{h()} indicates a part of the space where the data have a \emph{potentially} strong effect on the fitted model (specifically, a strong effect on the intensity or trend of the fitted model) due to the values of the covariates. If the point process model trend has irregular parameters that were fitted (using \code{\link{ippm}}) then the leverage calculation requires the first and second derivatives of the log trend with respect to the irregular parameters. The argument \code{iScore} should be a list, with one entry for each irregular parameter, of \R functions that compute the partial derivatives of the log trend (i.e. log intensity or log conditional intensity) with respect to each irregular parameter. The argument \code{iHessian} should be a list, with \eqn{p^2} entries where \eqn{p} is the number of irregular parameters, of \R functions that compute the second order partial derivatives of the log trend with respect to each pair of irregular parameters. The result of \code{leverage.ppm} is an object of class \code{"leverage.ppm"}. It can be plotted (by \code{\link{plot.leverage.ppm}}) or converted to a pixel image by \code{as.im} (see \code{\link{as.im.leverage.ppm}}). } \value{ An object of class \code{"leverage.ppm"} that can be plotted by \code{\link{plot.leverage.ppm}}. } \references{ Baddeley, A., Chang, Y.M. and Song, Y. (2013) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics} \bold{40}, 86--104. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{influence.ppm}}, \code{\link{dfbetas.ppm}}, \code{\link{plot.leverage.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) plot(leverage(fit)) } \keyword{spatial} \keyword{models} spatstat/man/Strauss.Rd0000755000176000001440000000617112237642731014672 0ustar ripleyusers\name{Strauss} \alias{Strauss} \title{The Strauss Point Process Model} \description{ Creates an instance of the Strauss point process model which can then be fitted to point pattern data. } \usage{ Strauss(r) } \arguments{ \item{r}{The interaction radius of the Strauss process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the Strauss process with interaction radius \eqn{r}. } \details{ The (stationary) Strauss process with interaction radius \eqn{r} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the pairwise interaction point process in which each point contributes a factor \eqn{\beta}{beta} to the probability density of the point pattern, and each pair of points closer than \eqn{r} units apart contributes a factor \eqn{\gamma}{gamma} to the density. Thus the probability density is \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{r} units apart, and \eqn{\alpha}{alpha} is the normalising constant. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} so that this model describes an ``ordered'' or ``inhibitive'' pattern. 
The nonstationary Strauss process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the Strauss process pairwise interaction is yielded by the function \code{Strauss()}. See the examples below. Note the only argument is the interaction radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Strauss()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \references{ Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \examples{ Strauss(r=0.1) # prints a sensible description of itself data(cells) \dontrun{ ppm(cells, ~1, Strauss(r=0.07)) # fit the stationary Strauss process to `cells' } ppm(cells, ~polynom(x,y,3), Strauss(r=0.07)) # fit a nonstationary Strauss process with log-cubic polynomial trend } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/Hest.Rd0000755000176000001440000001226412237642731014131 0ustar ripleyusers\name{Hest} \alias{Hest} \title{Spherical Contact Distribution Function} \description{ Estimates the spherical contact distribution function of a random set. } \usage{ Hest(X, r=NULL, breaks=NULL, ..., correction=c("km", "rs", "han"), conditional=TRUE) } \arguments{ \item{X}{The observed random set. 
An object of class \code{"ppp"}, \code{"psp"} or \code{"owin"}. } \item{r}{ Optional. Vector of values for the argument \eqn{r} at which \eqn{H(r)} should be evaluated. Users are advised \emph{not} to specify this argument; there is a sensible default. } \item{breaks}{ Optional. An alternative to the argument \code{r}. Not normally invoked by the user. } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control the discretisation. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{H(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"han"} and \code{"best"}. } \item{conditional}{ Logical value indicating whether to compute the conditional or unconditional distribution. See Details. } } \details{ The spherical contact distribution function of a stationary random set \eqn{X} is the cumulative distribution function \eqn{H} of the distance from a fixed point in space to the nearest point of \eqn{X}, given that the point lies outside \eqn{X}. That is, \eqn{H(r)} equals the probability that \code{X} lies closer than \eqn{r} units away from the fixed point \eqn{x}, given that \code{X} does not cover \eqn{x}. Let \eqn{D = d(x,X)} be the shortest distance from an arbitrary point \eqn{x} to the set \code{X}. Then the spherical contact distribution function is \deqn{H(r) = P(D \le r \mid D > 0)}{H(r) = P(D <= r | D > 0)} For a point process, the spherical contact distribution function is the same as the empty space function \eqn{F} discussed in \code{\link{Fest}}. The argument \code{X} may be a point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}) or a window (object of class \code{"owin"}). It is assumed to be a realisation of a stationary random set. 
The algorithm first calls \code{\link{distmap}} to compute the distance transform of \code{X}, then computes the Kaplan-Meier and reduced-sample estimates of the cumulative distribution following Hansen et al (1999). If \code{conditional=TRUE} (the default) the algorithm returns an estimate of the spherical contact function \eqn{H(r)} as defined above. If \code{conditional=FALSE}, it instead returns an estimate of the cumulative distribution function \eqn{H^\ast(r) = P(D \le r)}{H*(r) = P(D <= r)} which includes a jump at \eqn{r=0} if \code{X} has nonzero area. Accuracy depends on the pixel resolution, which is controlled by the arguments \code{eps}, \code{dimyx} and \code{xy} passed to \code{\link{as.mask}}. For example, use \code{eps=0.1} to specify square pixels of side 0.1 units, and \code{dimyx=256} to specify a 256 by 256 grid of pixels. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing up to six columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{H(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{H(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{H(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{H(r)} by the spatial Kaplan-Meier method } \item{han}{the spatial Hanisch-Chiu-Stoyan estimator of \eqn{H(r)} } \item{raw}{the uncorrected estimate of \eqn{H(r)}, i.e. the empirical distribution of the distance from a fixed point in the window to the nearest point of \code{X} } } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. 
Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Hansen, M.B., Baddeley, A.J. and Gill, R.D. First contact distributions for spatial patterns: regularity and estimation. \emph{Advances in Applied Probability} \bold{31} (1999) 15-33. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \seealso{\code{\link{Fest}}} \examples{ X <- runifpoint(42) H <- Hest(X) Y <- rpoisline(10) H <- Hest(Y) H <- Hest(Y, dimyx=256) data(heather) H <- Hest(heather$coarse) H <- Hest(heather$coarse, conditional=FALSE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/dclf.progress.Rd0000644000176000001440000001122312237642732015771 0ustar ripleyusers\name{dclf.progress} \alias{dclf.progress} \alias{mad.progress} \alias{mctest.progress} \title{ Progress Plot of Test of Spatial Pattern } \description{ Generates a progress plot (envelope representation) of the Diggle-Cressie-Loosmore-Ford test or the Maximum Absolute Deviation test for a spatial point pattern. } \usage{ dclf.progress(X, ..., nrank = 1) mad.progress(X, ..., nrank = 1) mctest.progress(X, fun = Lest, \dots, expo = 1, nrank = 1) } \arguments{ \item{X}{ Either a point pattern (object of class \code{"ppp"}, \code{"lpp"} or other class), a fitted point process model (object of class \code{"ppm"}, \code{"kppm"} or other class) or an envelope object (class \code{"envelope"}). } \item{\dots}{ Arguments passed to \code{\link[spatstat]{envelope}}. Useful arguments include \code{fun} to determine the summary function, \code{nsim} to specify the number of Monte Carlo simulations, and \code{verbose=FALSE} to turn off the messages. } \item{nrank}{ Integer. 
The rank of the critical value of the Monte Carlo test, amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will become the critical values for the test. } \item{fun}{ Function that computes the desired summary statistic for a point pattern. } \item{expo}{ Positive number. The exponent of the \eqn{L^p} distance. See Details. } } \details{ The Diggle-Cressie-Loosmore-Ford test and the Maximum Absolute Deviation test for a spatial point pattern are described in \code{\link[spatstat]{dclf.test}}. These tests depend on the choice of an interval of distance values (the argument \code{rinterval}). A \emph{progress plot} or \emph{envelope representation} of the test (Baddeley et al, 2013) is a plot of the test statistic (and the corresponding critical value) against the length of the interval \code{rinterval}. The command \code{dclf.progress} performs \code{\link[spatstat]{dclf.test}} on \code{X} using all possible intervals of the form \eqn{c(0,r)}, and returns the resulting values of the test statistic, and the corresponding critical values of the test, as a function of \eqn{r}. Similarly \code{mad.progress} performs \code{\link[spatstat]{mad.test}} using all possible intervals and returns the test statistic and critical value. More generally, \code{mctest.progress} performs a test based on the \eqn{L^p} discrepancy between the curves. The deviation between two curves is measured by the \eqn{p}th root of the integral of the \eqn{p}th power of the absolute value of the difference between the two curves. The exponent \eqn{p} is given by the argument \code{expo}. The case \code{expo=2} is the Cressie-Loosmore-Ford test, while \code{expo=Inf} is the MAD test. The result of each command is an object of class \code{"fv"} that can be plotted to obtain the progress plot. The display shows the test statistic (solid black line) and the Monte Carlo acceptance region (grey shading). 
The significance level for the Monte Carlo test is \code{nrank/(nsim+1)}. Note that \code{nsim} defaults to 99, so if the values of \code{nrank} and \code{nsim} are not given, the default is a test with significance level 0.01. If \code{X} is an envelope object, then some of the data stored in \code{X} may be re-used: \itemize{ \item If \code{X} is an envelope object containing simulated functions, and \code{fun=NULL}, then the code will re-use the simulated functions stored in \code{X}. \item If \code{X} is an envelope object containing simulated point patterns, then \code{fun} will be applied to the stored point patterns to obtain the simulated functions. If \code{fun} is not specified, it defaults to \code{\link{Lest}}. \item Otherwise, new simulations will be performed, and \code{fun} defaults to \code{\link{Lest}}. } } \value{ An object of class \code{"fv"} that can be plotted to obtain the progress plot. } \references{ Baddeley, A., Diggle, P., Hardegen, A., Lawrence, T., Milne, R. and Nair, G. (2013) \emph{On tests of spatial pattern based on simulation envelopes}. Submitted for publication. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Andrew Hardegen, Tom Lawrence, Gopal Nair and Robin Milne. } \seealso{ \code{\link[spatstat]{dclf.test}} and \code{\link[spatstat]{mad.test}} for the tests. See \code{\link[spatstat]{plot.fv}} for information on plotting objects of class \code{"fv"}. } \examples{ plot(dclf.progress(cells, nsim=19)) } \keyword{spatial} \keyword{htest} spatstat/man/envelope.lpp.Rd0000755000176000001440000001535512237642732015642 0ustar ripleyusers\name{envelope.lpp} \alias{envelope.lpp} \alias{envelope.lppm} \title{ Envelope for Point Patterns on Linear Network } \description{ Enables envelopes to be computed for point patterns on a linear network. 
} \usage{ \method{envelope}{lpp}(Y, fun=linearK, nsim=99, nrank=1, \dots, simulate=NULL, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) \method{envelope}{lppm}(Y, fun=linearK, nsim=99, nrank=1, \dots, simulate=NULL, verbose=TRUE, transform=NULL,global=FALSE,ginterval=NULL, savefuns=FALSE, savepatterns=FALSE, nsim2=nsim, VARIANCE=FALSE, nSD=2, Yname=NULL, do.pwrong=FALSE, envir.simul=NULL) } \arguments{ \item{Y}{ A point pattern on a linear network (object of class \code{"lpp"}) or a fitted point process model on a linear network (object of class \code{"lppm"}). } \item{fun}{ Function that is to be computed for each simulated pattern. } \item{nsim}{ Number of simulations to perform. } \item{nrank}{ Integer. Rank of the envelope value amongst the \code{nsim} simulated values. A rank of 1 means that the minimum and maximum simulated values will be used. } \item{\dots}{ Extra arguments passed to \code{fun}. } \item{simulate}{ Optional. Specifies how to generate the simulated point patterns. If \code{simulate} is an expression in the R language, then this expression will be evaluated \code{nsim} times, to obtain \code{nsim} point patterns which are taken as the simulated patterns from which the envelopes are computed. If \code{simulate} is a list of point patterns, then the entries in this list will be treated as the simulated patterns from which the envelopes are computed. Alternatively \code{simulate} may be an object produced by the \code{envelope} command: see Details. } \item{verbose}{ Logical flag indicating whether to print progress reports during the simulations. } \item{transform}{ Optional. A transformation to be applied to the function values, before the envelopes are computed. An expression object (see Details). 
} \item{global}{ Logical flag indicating whether envelopes should be pointwise (\code{global=FALSE}) or simultaneous (\code{global=TRUE}). } \item{ginterval}{ Optional. A vector of length 2 specifying the interval of \eqn{r} values for the simultaneous critical envelopes. Only relevant if \code{global=TRUE}. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } \item{nsim2}{ Number of extra simulated point patterns to be generated if it is necessary to use simulation to estimate the theoretical mean of the summary function. Only relevant when \code{global=TRUE} and the simulations are not based on CSR. } \item{VARIANCE}{ Logical. If \code{TRUE}, critical envelopes will be calculated as sample mean plus or minus \code{nSD} times sample standard deviation. } \item{nSD}{ Number of estimated standard deviations used to determine the critical envelopes, if \code{VARIANCE=TRUE}. } \item{Yname}{ Character string that should be used as the name of the data point pattern \code{Y} when printing or plotting the results. } \item{do.pwrong}{ Logical. If \code{TRUE}, the algorithm will also estimate the true significance level of the \dQuote{wrong} test (the test that declares the summary function for the data to be significant if it lies outside the \emph{pointwise} critical boundary at any point). This estimate is printed when the result is printed. } \item{envir.simul}{ Environment in which to evaluate the expression \code{simulate}, if not the current environment. } } \details{ This is a method for the generic function \code{\link[spatstat]{envelope}} applicable to point patterns on a linear network. The argument \code{Y} can be either a point pattern on a linear network, or a fitted point process model on a linear network. 
The function \code{fun} will be evaluated for the data and also for \code{nsim} simulated point patterns on the same linear network. The upper and lower envelopes of these evaluated functions will be computed as described in \code{\link[spatstat]{envelope}}. The type of simulation is determined as follows. \itemize{ \item if \code{Y} is a point pattern (object of class \code{"lpp"}) and \code{simulate} is missing or \code{NULL}, then random point patterns will be generated according to a Poisson point process on the linear network on which \code{Y} is defined, with intensity estimated from \code{Y}. \item if \code{Y} is a fitted point process model (object of class \code{"lppm"}) and \code{simulate} is missing or \code{NULL}, then random point patterns will be generated by simulating from the fitted model. \item If \code{simulate} is present, it should be an expression that can be evaluated to yield random point patterns on the same linear network as \code{Y}. } The function \code{fun} should accept as its first argument a point pattern on a linear network (object of class \code{"lpp"}) and should have another argument called \code{r} or a \code{\dots} argument. } \value{ Function value table (object of class \code{"fv"}) with additional information, as described in \code{\link[spatstat]{envelope}}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link[spatstat]{envelope}}, \code{\link{linearK}} } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. Okabe, A. and Yamada, I. 
(2001) The K-function method on a network and its computational implementation. \emph{Geographical Analysis} \bold{33}, 271-290. } \examples{ if(interactive()) { ns <- 39 np <- 40 } else { ns <- np <- 3 } X <- runiflpp(np, simplenet) # uniform Poisson envelope(X, nsim=ns) # nonuniform Poisson fit <- lppm(X, ~x) envelope(fit, nsim=ns) #multitype marks(X) <- sample(letters[1:2], np, replace=TRUE) envelope(X, nsim=ns) } \keyword{spatial} spatstat/man/redwood.Rd0000755000176000001440000000321512237642733014667 0ustar ripleyusers\name{redwood} \alias{redwood} \docType{data} \title{ California Redwoods Point Pattern (Ripley's Subset) } \description{ Locations of 62 seedlings and saplings of California redwood trees. The data represent the locations of 62 seedlings and saplings of California redwood trees in a square sampling region. They originate from Strauss (1975); the present data are a subset extracted by Ripley (1977) in a subregion that has been rescaled to a unit square. The coordinates are rounded to the nearest 0.01 units, except for one point which has an \eqn{x} coordinate of 0.999, presumably to ensure that it is properly inside the window. There are many further analyses of this dataset. It is often used as a canonical example of a clustered point pattern (see e.g. Diggle, 1983). The original, full redwood dataset is supplied in the \code{spatstat} library as \code{redwoodfull}. } \format{ An object of class \code{"ppp"} representing the point pattern of tree locations. The window has been rescaled to the unit square. See \code{\link{ppp.object}} for details of the format of a point pattern object. } \usage{data(redwood)} \source{Strauss (1975), subset extracted by Ripley (1977)} \seealso{ \code{\link{redwoodfull}} } \references{ Diggle, P.J. (1983) \emph{Statistical analysis of spatial point patterns}. Academic Press. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). 
\emph{Journal of the Royal Statistical Society, Series B} \bold{39}, 172--212. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{62}, 467--475. } \keyword{datasets} \keyword{spatial} spatstat/man/marks.psp.Rd0000755000176000001440000000447612237642733015144 0ustar ripleyusers\name{marks.psp} \alias{marks.psp} \alias{marks<-.psp} \title{Marks of a Line Segment Pattern} \description{ Extract or change the marks attached to a line segment pattern. } \usage{ \method{marks}{psp}(x, \dots, dfok=TRUE) \method{marks}{psp}(x, \dots) <- value } \arguments{ \item{x}{ Line segment pattern dataset (object of class \code{"psp"}). } \item{\dots}{ Ignored. } \item{dfok}{ Logical. If \code{FALSE}, data frames of marks are not permitted and will generate an error. } \item{value}{ Vector or data frame of mark values, or \code{NULL}. } } \value{ For \code{marks(x)}, the result is a vector, factor or data frame, containing the mark values attached to the line segments of \code{x}. If there are no marks, the result is \code{NULL}. For \code{marks(x) <- value}, the result is the updated line segment pattern \code{x} (with the side-effect that the dataset \code{x} is updated in the current environment). } \details{ These functions extract or change the marks attached to each of the line segments in the pattern \code{x}. They are methods for the generic functions \code{\link{marks}} and \code{\link{marks<-}} for the class \code{"psp"} of line segment patterns. The expression \code{marks(x)} extracts the marks of \code{x}. The assignment \code{marks(x) <- value} assigns new marks to the dataset \code{x}, and updates the dataset \code{x} in the current environment. The marks can be a vector, a factor, or a data frame. For the assignment \code{marks(x) <- value}, the \code{value} should be a vector or factor of length equal to the number of segments in \code{x}, or a data frame with as many rows as there are segments in \code{x}. 
If \code{value} is a single value, or a data frame with one row, then it will be replicated so that the same marks will be attached to each segment. To remove marks, use \code{marks(x) <- NULL} or \code{unmark(x)}. } \seealso{ \code{\link{psp.object}}, \code{\link{marks}}, \code{\link{marks<-}} } \examples{ example(psp) marks(X) marks(X)[,2] marks(X) <- 42 marks(X) <- NULL } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/runifpointx.Rd0000755000176000001440000000204612237642734015613 0ustar ripleyusers\name{runifpointx} \alias{runifpointx} \title{ Generate N Uniform Random Points in Any Dimensions } \description{ Generate a random point pattern containing \code{n} independent, uniform random points in any number of spatial dimensions. } \usage{ runifpointx(n, domain) } \arguments{ \item{n}{ Number of points to be generated. } \item{domain}{ Multi-dimensional box in which the process should be generated. An object of class \code{"boxx"}. } } \value{ The simulated point pattern (an object of class \code{"ppx"}). } \details{ This function generates \code{n} independent random points, uniformly distributed in the multi-dimensional box \code{domain}. 
} \seealso{ \code{\link{rpoisppx}}, \code{\link{ppx}}, \code{\link{boxx}} } \examples{ w <- boxx(x=c(0,1), y=c(0,1), z=c(0,1), t=c(0,3)) X <- runifpointx(50, w) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/funxy.Rd0000644000176000001440000000305412237642732014372 0ustar ripleyusers\name{funxy} \Rdversion{1.1} \alias{funxy} \title{ Spatial Function Class } \description{ A simple class of functions of spatial location } \usage{ funxy(f, W) } \arguments{ \item{f}{ A \code{function} in the \R language with arguments \code{x,y} (at least) } \item{W}{ Window (object of class \code{"owin"}) inside which the function is well-defined. } } \details{ This creates an object of class \code{"funxy"}. This is a simple mechanism for handling a function of spatial location \eqn{f(x,y)} to make it easier to display and manipulate. \code{f} should be a \code{function} in the \R language. The first two arguments of \code{f} must be named \code{x} and \code{y} respectively. \code{W} should be a window (object of class \code{"owin"}) inside which the function \code{f} is well-defined. The function \code{f} should be vectorised: that is, if \code{x} and \code{y} are numeric vectors of the same length \code{n}, then \code{v <- f(x,y)} should be a vector of length \code{n}. } \value{ A \code{function}, in fact a copy of \code{f}, which also belongs to the class \code{"funxy"}. This class has methods for \code{print}, \code{plot}, \code{contour} and \code{persp}. 
} \seealso{ \code{\link{plot.funxy}} } \examples{ f <- function(x,y) { x^2 + y^2 - 1} g <- funxy(f, square(2)) g(0.2, 0.3) g } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/Fest.Rd0000755000176000001440000002451212237642731014126 0ustar ripleyusers\name{Fest} \alias{Fest} \alias{empty.space} \title{Estimate the empty space function F} \description{ Estimates the empty space function \eqn{F(r)} from a point pattern in a window of arbitrary shape. } \usage{ Fest(X, ..., eps, r=NULL, breaks=NULL, correction=c("rs", "km", "cs")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{F(r)} will be computed. An object of class \code{ppp}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{Optional. A positive number. The resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which \eqn{F(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{correction}{ Optional. The edge correction(s) to be used to estimate \eqn{F(r)}. A vector of character strings selected from \code{"none"}, \code{"rs"}, \code{"km"}, \code{"cs"} and \code{"best"}. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing up to seven columns: \item{r}{the values of the argument \eqn{r} at which the function \eqn{F(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{F(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{F(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{F(r)} by the spatial Kaplan-Meier method } \item{cs}{the Chiu-Stoyan estimator of \eqn{F(r)} } \item{raw}{the uncorrected estimate of \eqn{F(r)}, i.e. the empirical distribution of the distance from a random point in the window to the nearest point of the data pattern \code{X} } \item{theo}{the theoretical value of \eqn{F(r)} for a stationary Poisson process of the same estimated intensity. } } \details{ The empty space function (also called the ``\emph{spherical contact distribution}'' or the ``\emph{point-to-nearest-event}'' distribution) of a stationary point process \eqn{X} is the cumulative distribution function \eqn{F} of the distance from a fixed point in space to the nearest point of \eqn{X}. An estimate of \eqn{F} derived from a spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern (Cressie, 1991; Diggle, 1983; Ripley, 1988). In exploratory analyses, the estimate of \eqn{F} is a useful statistic summarising the sizes of gaps in the pattern. For inferential purposes, the estimate of \eqn{F} is usually compared to the true value of \eqn{F} for a completely random (Poisson) point process, which is \deqn{F(r) = 1 - e^{ - \lambda \pi r^2}}{% F(r) = 1 - exp( - lambda * pi * r^2) % } where \eqn{\lambda}{lambda} is the intensity (expected number of points per unit area). Deviations between the empirical and theoretical \eqn{F} curves may suggest spatial clustering or spatial regularity. This algorithm estimates the empty space function \eqn{F} from the point pattern \code{X}. 
It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X}) may have arbitrary shape. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}}. The algorithm uses two discrete approximations which are controlled by the parameter \code{eps} and by the spacing of values of \code{r} respectively. (See below for details.) First-time users are strongly advised not to specify these arguments. The estimation of \eqn{F} is hampered by edge effects arising from the unobservability of points of the random pattern outside the window. An edge correction is needed to reduce bias (Baddeley, 1998; Ripley, 1988). The edge corrections implemented here are the border method or "\emph{reduced sample}" estimator, the spatial Kaplan-Meier estimator (Baddeley and Gill, 1997) and the Chiu-Stoyan estimator (Chiu and Stoyan, 1998). Our implementation makes essential use of the distance transform algorithm of image processing (Borgefors, 1986). A fine grid of pixels is created in the observation window. The Euclidean distance between two pixels is approximated by the length of the shortest path joining them in the grid, where a path is a sequence of steps between adjacent pixels, and horizontal, vertical and diagonal steps have length \eqn{1}, \eqn{1} and \eqn{\sqrt 2}{sqrt(2)} respectively in pixel units. If the pixel grid is sufficiently fine then this is an accurate approximation. The parameter \code{eps} is the pixel width of the rectangular raster used to compute the distance transform (see below). It must not be too large: the absolute error in distance values due to discretisation is bounded by \code{eps}. 
If \code{eps} is not specified, the function checks whether the window \code{X$window} contains pixel raster information. If so, then \code{eps} is set equal to the pixel width of the raster; otherwise, \code{eps} defaults to 1/100 of the width of the observation window. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{F(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}} for the computation of histograms of distances. The estimators are computed from histogram counts. This introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the spacing of successive \code{r} values must be very fine (ideally not greater than \code{eps/4}). The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{F(r)}. The hazard rate is defined by \deqn{\lambda(r) = - \frac{d}{dr} \log(1 - F(r))}{% lambda(r) = - (d/dr) log(1 - F(r)) % } The hazard rate of \eqn{F} has been proposed as a useful exploratory statistic (Baddeley and Gill, 1994). The estimate of \eqn{\lambda(r)}{lambda(r)} given here is a discrete approximation to the hazard rate of the Kaplan-Meier estimator of \eqn{F}. Note that \eqn{F} is absolutely continuous (for any stationary point process \eqn{X}), so the hazard function always exists (Baddeley and Gill, 1997). The naive empirical distribution of distances from each location in the window to the nearest point of the data pattern, is a biased estimate of \eqn{F}. However this is also returned by the algorithm (if \code{correction="none"}), as it is sometimes useful in other contexts. 
Care should be taken not to use the uncorrected empirical \eqn{F} as if it were an unbiased estimator of \eqn{F}. } \note{ Sizeable amounts of memory may be needed during the calculation. } \references{ Baddeley, A.J. Spatial sampling and censoring. In O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}. Chapman and Hall, 1998. Chapter 2, pages 37-78. Baddeley, A.J. and Gill, R.D. The empty space hazard of a spatial pattern. Research Report 1994/3, Department of Mathematics, University of Western Australia, May 1994. Baddeley, A.J. and Gill, R.D. Kaplan-Meier estimators of interpoint distance distributions for spatial point processes. \emph{Annals of Statistics} \bold{25} (1997) 263-292. Borgefors, G. Distance transformations in digital images. \emph{Computer Vision, Graphics and Image Processing} \bold{34} (1986) 344-371. Chiu, S.N. and Stoyan, D. (1998) Estimators of distance distributions for spatial patterns. \emph{Statistica Neerlandica} \bold{52}, 239--246. Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. } \section{Warnings}{ The reduced sample (border method) estimator of \eqn{F} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{F} is always nondecreasing but its maximum value may be less than \eqn{1}. 
The estimate of \eqn{\lambda(r)}{lambda(r)} returned by the algorithm is an approximately unbiased estimate for the integral of \eqn{\lambda()}{lambda()} over the corresponding histogram cell. It may exhibit oscillations due to discretisation effects. We recommend modest smoothing, such as kernel smoothing with kernel width equal to the width of a histogram cell. } \seealso{ \code{\link{Gest}}, \code{\link{Jest}}, \code{\link{Kest}}, \code{\link{km.rs}}, \code{\link{reduced.sample}}, \code{\link{kaplan.meier}} } \examples{ data(cells) Fc <- Fest(cells, 0.01) # Tip: don't use F for the left hand side! # That's an abbreviation for FALSE plot(Fc) # P-P style plot plot(Fc, cbind(km, theo) ~ theo) # The empirical F is above the Poisson F # indicating an inhibited pattern \dontrun{ plot(Fc, . ~ theo) plot(Fc, asin(sqrt(.)) ~ asin(sqrt(theo))) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/as.owin.Rd0000755000176000001440000001566212237642732014612 0ustar ripleyusers\name{as.owin} \alias{as.owin} \alias{as.owin.owin} \alias{as.owin.ppp} \alias{as.owin.ppm} \alias{as.owin.kppm} \alias{as.owin.lpp} \alias{as.owin.lppm} \alias{as.owin.psp} \alias{as.owin.quad} \alias{as.owin.tess} \alias{as.owin.im} \alias{as.owin.layered} \alias{as.owin.data.frame} \alias{as.owin.distfun} \alias{as.owin.nnfun} \alias{as.owin.funxy} \alias{as.owin.rmhmodel} \alias{as.owin.default} \title{Convert Data To Class owin} \description{ Converts data specifying an observation window in any of several formats, into an object of class \code{"owin"}. 
} \usage{ as.owin(W, \dots, fatal=TRUE) \method{as.owin}{owin}(W, \dots, fatal=TRUE) \method{as.owin}{ppp}(W, \dots, fatal=TRUE) \method{as.owin}{ppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{kppm}(W, \dots, from=c("points", "covariates"), fatal=TRUE) \method{as.owin}{lpp}(W, \dots, fatal=TRUE) \method{as.owin}{lppm}(W, \dots, fatal=TRUE) \method{as.owin}{psp}(W, \dots, fatal=TRUE) \method{as.owin}{quad}(W, \dots, fatal=TRUE) \method{as.owin}{tess}(W, \dots, fatal=TRUE) \method{as.owin}{im}(W, \dots, fatal=TRUE) \method{as.owin}{layered}(W, \dots, fatal=TRUE) \method{as.owin}{data.frame}(W, \dots, fatal=TRUE) \method{as.owin}{distfun}(W, \dots, fatal=TRUE) \method{as.owin}{nnfun}(W, \dots, fatal=TRUE) \method{as.owin}{funxy}(W, \dots, fatal=TRUE) \method{as.owin}{rmhmodel}(W, \dots, fatal=FALSE) \method{as.owin}{default}(W, \dots, fatal=TRUE) } \arguments{ \item{W}{Data specifying an observation window, in any of several formats described under \emph{Details} below.} \item{fatal}{Logical flag determining what to do if the data cannot be converted to an observation window. See Details. } \item{\dots}{Ignored.} \item{from}{Character string. See Details.} } \value{ An object of class \code{"owin"} (see \code{\link{owin.object}}) specifying an observation window. } \details{ The class \code{"owin"} is a way of specifying the observation window for a point pattern. See \code{\link{owin.object}} for an overview. This function converts data in any of several formats into an object of class \code{"owin"} for use by the \pkg{spatstat} package. The function \code{as.owin} is generic, with methods for different classes of objects, and a default method. 
The argument \code{W} may be \itemize{ \item an object of class \code{"owin"} \item a structure with entries \code{xrange}, \code{yrange} specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a four-element vector (interpreted as \code{(xmin, xmax, ymin, ymax)}) specifying the \eqn{x} and \eqn{y} dimensions of a rectangle \item a structure with entries \code{xl}, \code{xu}, \code{yl}, \code{yu} specifying the \eqn{x} and \eqn{y} dimensions of a rectangle as \code{(xmin, xmax) = (xl, xu)} and \code{(ymin, ymax) = (yl, yu)}. This will accept objects of class \code{spp} used in the Venables and Ripley \pkg{spatial} library. \item an object of class \code{"ppp"} representing a point pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"psp"} representing a line segment pattern. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"tess"} representing a tessellation. In this case, the object's \code{window} structure will be extracted. \item an object of class \code{"quad"} representing a quadrature scheme. In this case, the window of the \code{data} component will be extracted. \item an object of class \code{"im"} representing a pixel image. In this case, a window of type \code{"mask"} will be returned, with the same pixel raster coordinates as the image. An image pixel value of \code{NA}, signifying that the pixel lies outside the window, is transformed into the logical value \code{FALSE}, which is the corresponding convention for window masks. \item an object of class \code{"ppm"} or \code{"kppm"} representing a fitted point process model. In this case, if \code{from="data"} (the default), \code{as.owin} extracts the original point pattern data to which the model was fitted, and returns the observation window of this point pattern. 
If \code{from="covariates"} then \code{as.owin} extracts the covariate images to which the model was fitted, and returns a binary mask window that specifies the pixel locations. \item an object of class \code{"lpp"} representing a point pattern on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item an object of class \code{"lppm"} representing a fitted point process model on a linear network. In this case, \code{as.owin} extracts the linear network and returns a window containing this network. \item A \code{data.frame} with exactly three columns. Each row of the data frame corresponds to one pixel. Each row contains the \eqn{x} and \eqn{y} coordinates of a pixel, and a logical value indicating whether the pixel lies inside the window. \item an object of class \code{"distfun"}, \code{"nnfun"} or \code{"funxy"} representing a function of spatial location, defined on a spatial domain. The spatial domain of the function will be extracted. \item an object of class \code{"rmhmodel"} representing a point process model that can be simulated using \code{\link{rmh}}. The window (spatial domain) of the model will be extracted. The window may be \code{NULL} in some circumstances (indicating that the simulation window has not yet been determined). This is not treated as an error, because the argument \code{fatal} defaults to \code{FALSE} for this method. \item an object of class \code{"layered"} representing a list of spatial objects. See \code{\link{layered}}. In this case, \code{as.owin} will be applied to each of the objects in the list, and the union of these windows will be returned. } If the argument \code{W} is not in one of these formats and cannot be converted to a window, then an error will be generated (if \code{fatal=TRUE}) or a value of \code{NULL} will be returned (if \code{fatal=FALSE}). 
} \seealso{ \code{\link{owin.object}}, \code{\link{owin}} } \examples{ w <- as.owin(c(0,1,0,1)) w <- as.owin(list(xrange=c(0,5),yrange=c(0,10))) # point pattern data(demopat) w <- as.owin(demopat) # image Z <- as.im(function(x,y) { x + 3}, unit.square()) w <- as.owin(Z) # Venables & Ripley 'spatial' package require(spatial) towns <- ppinit("towns.dat") w <- as.owin(towns) detach(package:spatial) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/matclust.estK.Rd0000755000176000001440000001406612237642733015773 0ustar ripleyusers\name{matclust.estK} \alias{matclust.estK} \title{Fit the Matern Cluster Point Process by Minimum Contrast} \description{ Fits the Matern Cluster point process to a point pattern dataset by the Method of Minimum Contrast. } \usage{ matclust.estK(X, startpar=c(kappa=1,R=1), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the Matern Cluster model will be fitted. Either a point pattern or a summary statistic. See Details. } \item{startpar}{ Vector of starting values for the parameters of the Matern Cluster process. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits the Matern Cluster point process model to a point pattern dataset by the Method of Minimum Contrast, using the \eqn{K} function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. 
The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits the Matern Cluster point process to \code{X}, by finding the parameters of the Matern Cluster model which give the closest match between the theoretical \eqn{K} function of the Matern Cluster process and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The Matern Cluster point process is described in Moller and Waagepetersen (2003, p. 62). It is a cluster process formed by taking a pattern of parent points, generated according to a Poisson process with intensity \eqn{\kappa}{kappa}, and around each parent point, generating a random number of offspring points, such that the number of offspring of each parent is a Poisson random variable with mean \eqn{\mu}{mu}, and the locations of the offspring points of one parent are independent and uniformly distributed inside a circle of radius \eqn{R} centred on the parent point. The theoretical \eqn{K}-function of the Matern Cluster process is \deqn{ K(r) = \pi r^2 + \frac 1 \kappa h(\frac{r}{2R}) }{ K(r) = pi r^2 + h(r/(2*R))/kappa } where \deqn{ h(z) = 2 + \frac 1 \pi [ ( 8 z^2 - 4 ) \mbox{arccos}(z) - 2 \mbox{arcsin}(z) + 4 z \sqrt{(1 - z^2)^3} - 6 z \sqrt{1 - z^2} ] }{ h(z) = 2 + (1/pi) * ((8 * z^2 - 4) * arccos(z) - 2 * arcsin(z) + 4 * z * sqrt((1 - z^2)^3) - 6 * z * sqrt(1 - z^2)) } for \eqn{z <= 1}, and \eqn{h(z) = 1} for \eqn{z > 1}. The theoretical intensity of the Matern Cluster process is \eqn{\lambda = \kappa \mu}{lambda=kappa* mu}. 
In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\kappa}{kappa} and \eqn{R}{R}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The Matern Cluster process can be simulated, using \code{\link{rMatClust}}. Homogeneous or inhomogeneous Matern Cluster models can also be fitted using the function \code{\link{kppm}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \references{ Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. 
Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Rasmus Waagepetersen \email{rw@math.auc.dk} Adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{kppm}}, \code{\link{lgcp.estK}}, \code{\link{thomas.estK}}, \code{\link{mincontrast}}, \code{\link{Kest}}, \code{\link{rMatClust}} to simulate the fitted model. } \examples{ data(redwood) u <- matclust.estK(redwood, c(kappa=10, R=0.1)) u plot(u) } \keyword{spatial} \keyword{models} spatstat/man/rpoisline.Rd0000755000176000001440000000264312237642734015235 0ustar ripleyusers\name{rpoisline} \alias{rpoisline} \title{Generate Poisson Random Line Process} \description{ Generate a random pattern of line segments obtained from the Poisson line process. } \usage{ rpoisline(lambda, win=owin()) } \arguments{ \item{lambda}{ Intensity of the Poisson line process. A positive number. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Currently, the window must be a rectangle. } } \value{ A line segment pattern (an object of class \code{"psp"}). } \details{ This algorithm generates a realisation of the uniform Poisson line process, and clips it to the window \code{win}. The argument \code{lambda} must be a positive number. It controls the intensity of the process. The expected number of lines intersecting a convex region of the plane is equal to \code{lambda} times the perimeter length of the region. The expected total length of the lines crossing a region of the plane is equal to \code{lambda * pi} times the area of the region. 
} \seealso{ \code{\link{psp}} } \examples{ # uniform Poisson line process with intensity 10, # clipped to the unit square rpoisline(10) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/commonGrid.Rd0000644000176000001440000000361612237642732015323 0ustar ripleyusers\name{commonGrid} \alias{commonGrid} \title{Determine A Common Spatial Domain And Pixel Resolution} \description{ Determine a common spatial domain and pixel resolution for several spatial objects such as images, masks, windows and point patterns. } \usage{ commonGrid(\dots) } \arguments{ \item{\dots}{ Any number of pixel images (objects of class \code{"im"}), binary masks (objects of class \code{"owin"} of type \code{"mask"}) or data which can be converted to binary masks by \code{\link{as.mask}}. } } \details{ This function determines a common spatial resolution and spatial domain for several spatial objects. The arguments \code{\dots} may be pixel images, binary masks, or other spatial objects acceptable to \code{\link{as.mask}}. The common pixel grid is determined by inspecting all the pixel images and binary masks in the argument list, finding the pixel grid with the highest spatial resolution, and extending this pixel grid to cover the bounding box of all the spatial objects. The return value is a binary mask \code{M}, representing the bounding box at the chosen pixel resolution. Use \code{\link{as.im}(X, W=M)} to convert a pixel image \code{X} to this new pixel resolution. Use \code{\link{as.mask}(W, xy=M)} to convert a window \code{W} to a binary mask at this new pixel resolution. See the Examples. } \value{ A binary mask (object of class \code{"owin"} and type \code{"mask"}). 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ A <- setcov(square(1)) G <- density(runifpoint(42), dimyx=16) H <- commonGrid(A, letterR, G) newR <- as.mask(letterR, xy=H) newG <- as.im(G, W=H) } \seealso{ \code{\link{harmonise.im}}, \code{\link{compatible.im}}, \code{\link{as.im}} } \keyword{spatial} \keyword{manip} spatstat/man/rescale.owin.Rd0000755000176000001440000000365712237642734015630 0ustar ripleyusers\name{rescale.owin} \alias{rescale.owin} \title{Convert Window to Another Unit of Length} \description{ Converts a window to another unit of length. } \usage{ \method{rescale}{owin}(X, s) } \arguments{ \item{X}{Window (object of class \code{"owin"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} } \value{ Another window object (of class \code{"owin"}) representing the same window, but expressed in the new units. } \details{ This is a method for the generic function \code{\link{rescale}}. The spatial coordinates in the window \code{X} (and its window) will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. (Thus, the coordinate values are \emph{divided} by \code{s}, while the unit value is multiplied by \code{s}). The result is a window representing the \emph{same} region of space, but re-expressed in a different unit. If \code{s} is missing, then the coordinates will be re-expressed in \sQuote{native} units; for example if the current unit is equal to 0.1 metres, then the coordinates will be re-expressed in metres. } \section{Note}{ The result of this operation is equivalent to the original window. If you want to actually change the coordinates by a linear transformation, producing a window that is larger or smaller than the original one, use \code{\link{affine}}. 
} \seealso{ \code{\link{unitname}}, \code{\link{rescale}}, \code{\link{rescale.owin}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(swedishpines) W <- swedishpines$window W # coordinates are in decimetres (0.1 metre) # convert to metres: rescale(W, 10) # or equivalently rescale(W) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/bw.ppl.Rd0000644000176000001440000000527212237642732014427 0ustar ripleyusers\name{bw.ppl} \alias{bw.ppl} \title{ Likelihood Cross Validation Bandwidth Selection for Kernel Density } \description{ Uses likelihood cross-validation to select a smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.ppl(X, ..., srange=NULL, ns=32) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{srange}{ Optional numeric vector of length 2 giving the range of values of bandwidth to be searched. } \item{ns}{ Optional integer giving the number of values of bandwidth to search. } } \details{ This function selects an appropriate bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. 
The bandwidth \eqn{\sigma}{sigma} is chosen to maximise the point process likelihood cross-validation criterion \deqn{ \mbox{LCV}(\sigma) = \sum_i \log\hat\lambda_{-i}(x_i) - \int_W \hat\lambda(u) \, {\rm d}u }{ LCV(sigma) = sum[i] log(lambda[-i](x[i])) - integral[W] lambda(u) du } where the sum is taken over all the data points \eqn{x_i}{x[i]}, where \eqn{\hat\lambda_{-i}(x_i)}{lambda[-i](x_i)} is the leave-one-out kernel-smoothing estimate of the intensity at \eqn{x_i}{x[i]} with smoothing bandwidth \eqn{\sigma}{sigma}, and \eqn{\hat\lambda(u)}{lambda(u)} is the kernel-smoothing estimate of the intensity at a spatial location \eqn{u} with smoothing bandwidth \eqn{\sigma}{sigma}. See Loader(1999, Section 5.3). The value of \eqn{\mbox{LCV}(\sigma)}{LCV(sigma)} is computed directly, using \code{\link{density.ppp}}, for \code{ns} different values of \eqn{\sigma}{sigma} between \code{srange[1]} and \code{srange[2]}. The result is a numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted to show the (rescaled) mean-square error as a function of \code{sigma}. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.scott}} } \examples{ \donttest{ b <- bw.ppl(redwood) plot(b, main="Likelihood cross validation for redwoods") plot(density(redwood, b)) } \testonly{ b <- bw.ppl(redwood, srange=c(0.03, 0.07), ns=2) } } \references{ Loader, C. (1999) \emph{Local Regression and Likelihood}. Springer, New York. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/dilation.Rd0000755000176000001440000000541512243333130015014 0ustar ripleyusers\name{dilation} %DontDeclareMethods \alias{dilation} \alias{dilation.owin} \alias{dilation.ppp} \alias{dilation.psp} \title{Morphological Dilation} \description{ Perform morphological dilation of a window, a line segment pattern or a point pattern } \usage{ dilation(w, r, \dots) \method{dilation}{owin}(w, r, \dots, polygonal=NULL, tight=TRUE) \method{dilation}{ppp}(w, r, \dots, polygonal=TRUE, tight=TRUE) \method{dilation}{psp}(w, r, \dots, polygonal=TRUE, tight=TRUE) } \arguments{ \item{w}{ A window (object of class \code{"owin"} or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of dilation.} \item{\dots}{extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution, if the pixel approximation is used. } \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the dilation (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } \item{tight}{ Logical flag indicating whether the bounding frame of the window should be taken as the smallest rectangle enclosing the dilated region (\code{tight=TRUE}), or should be the dilation of the bounding frame of \code{w} (\code{tight=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the dilated region. If \code{r=0}, the result is identical to \code{w}. } \details{ The morphological dilation of a set \eqn{W} by a distance \eqn{r > 0} is the set consisting of all points lying at most \eqn{r} units away from \eqn{W}. Effectively, dilation adds a margin of width \eqn{r} onto the set \eqn{W}. 
If \code{polygonal=TRUE} then a polygonal approximation to the dilation is computed. If \code{polygonal=FALSE} then a pixel approximation to the dilation is computed from the distance map of \code{w}. The arguments \code{"\dots"} are passed to \code{\link{as.mask}} to control the pixel resolution. When \code{w} is a window, the default (when \code{polygonal=NULL}) is to compute a polygonal approximation if \code{w} is a rectangle or polygonal window, and to compute a pixel approximation if \code{w} is a window of type \code{"mask"}. } \seealso{ \code{\link{erosion}} for the opposite operation. \code{\link{owin}}, \code{\link{as.owin}} } \examples{ plot(dilation(letterR, 0.2)) plot(letterR, add=TRUE, lwd=2, border="red") X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(dilation(X, 0.1)) plot(X, add=TRUE, col="red") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/ripras.Rd0000755000176000001440000000631512237642734014531 0ustar ripleyusers\name{ripras} \alias{ripras} \title{Estimate window from points alone} \description{ Given an observed pattern of points, computes the Ripley-Rasson estimate of the spatial domain from which they came. } \usage{ ripras(x, y=NULL, shape="convex", f) } \arguments{ \item{x}{ vector of \code{x} coordinates of observed points, or a 2-column matrix giving \code{x,y} coordinates, or a list with components \code{x,y} giving coordinates (such as a point pattern object of class \code{"ppp"}.) } \item{y}{(optional) vector of \code{y} coordinates of observed points, if \code{x} is a vector.} \item{shape}{String indicating the type of window to be estimated: either \code{"convex"} or \code{"rectangle"}. } \item{f}{ (optional) scaling factor. See Details. } } \value{ A window (an object of class \code{"owin"}). 
} \details{ Given an observed pattern of points with coordinates given by \code{x} and \code{y}, this function computes an estimate due to Ripley and Rasson (1977) of the spatial domain from which the points came. The points are assumed to have been generated independently and uniformly distributed inside an unknown domain \eqn{D}. If \code{shape="convex"} (the default), the domain \eqn{D} is assumed to be a convex set. The maximum likelihood estimate of \eqn{D} is the convex hull of the points (computed by \code{\link{convexhull.xy}}). Analogously to the problems of estimating the endpoint of a uniform distribution, the MLE is not optimal. Ripley and Rasson's estimator is a rescaled copy of the convex hull, centred at the centroid of the convex hull. The scaling factor is \eqn{1/\sqrt{1 - \frac m n}}{1/sqrt(1 - m/n)} where \eqn{n} is the number of data points and \eqn{m} the number of vertices of the convex hull. The scaling factor may be overridden using the argument \code{f}. If \code{shape="rectangle"}, the domain \eqn{D} is assumed to be a rectangle with sides parallel to the coordinate axes. The maximum likelihood estimate of \eqn{D} is the bounding box of the points (computed by \code{\link{bounding.box.xy}}). The Ripley-Rasson estimator is a rescaled copy of the bounding box, with scaling factor \eqn{(n+1)/(n-1)} where \eqn{n} is the number of data points, centred at the centroid of the bounding box. The scaling factor may be overridden using the argument \code{f}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{bounding.box.xy}}, \code{\link{convexhull.xy}} } \examples{ x <- runif(30) y <- runif(30) w <- ripras(x,y) plot(owin(), main="ripras(x,y)") plot(w, add=TRUE) points(x,y) X <- rpoispp(15) plot(X, main="ripras(X)") plot(ripras(X), add=TRUE) # two points insufficient ripras(c(0,1),c(0,0)) # triangle ripras(c(0,1,0.5), c(0,0,1)) # three collinear points ripras(c(0,0,0), c(0,1,2)) } \references{ Ripley, B.D. and Rasson, J.-P. 
(1977) Finding the edge of a Poisson forest. \emph{Journal of Applied Probability}, \bold{14}, 483 -- 491. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{utilities} spatstat/man/print.ppm.Rd0000755000176000001440000000322112237642733015150 0ustar ripleyusers\name{print.ppm} \alias{print.ppm} \title{Print a Fitted Point Process Model} \description{ Default \code{print} method for a fitted point process model. } \usage{ \method{print}{ppm}(x,\dots, what=c("all", "model", "trend", "interaction", "se", "errors")) } \arguments{ \item{x}{ A fitted point process model, typically obtained from the model-fitting algorithm \code{\link{ppm}}. An object of class \code{"ppm"}. } \item{what}{ Character vector (partially-matched) indicating what information should be printed. } \item{\dots}{Ignored.} } \value{ none. } \details{ This is the \code{print} method for the class \code{"ppm"}. It prints information about the fitted model in a sensible format. The argument \code{what} makes it possible to print only some of the information. If \code{what} is missing, then by default, standard errors for the estimated coefficients of the model will be printed only if the model is a Poisson point process. To print the standard errors for a non-Poisson model, call \code{print.ppm} with the argument \code{what} given explicitly, or reset the default rule by typing \code{spatstat.options(print.ppm.SE="always")}. } \seealso{ \code{\link{ppm.object}} for details of the class \code{"ppm"}. \code{\link{ppm}} for generating these objects. 
\code{\link{plot.ppm}}, \code{\link{predict.ppm}} } \examples{ \dontrun{ m <- ppm(cells, ~1, Strauss(0.05)) m } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{print} \keyword{models} spatstat/man/nnwhich.lpp.Rd0000755000176000001440000000270212237642733015454 0ustar ripleyusers\name{nnwhich.lpp} \alias{nnwhich.lpp} \title{ Identify Nearest Neighbours on a Linear Network } \description{ Given a pattern of points on a linear network, identify the nearest neighbour for each point, measured by the shortest path in the network. } \usage{ \method{nnwhich}{lpp}(X, ..., method="C") } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{method}{ Optional string determining the method of calculation. Either \code{"interpreted"} or \code{"C"}. } \item{\dots}{ Ignored. } } \details{ Given a pattern of points on a linear network, this function finds the nearest neighbour of each point (i.e. for each point it identifies the nearest other point) measuring distance by the shortest path in the network. If \code{method="C"} the task is performed using code in the C language. If \code{method="interpreted"} then the computation is performed using interpreted \R code. The \R code is much slower, but is provided for checking purposes. } \value{ An integer vector, of length equal to the number of points in \code{X}, identifying the nearest neighbour of each point. If \code{nnwhich(X)[2] = 4} then the nearest neighbour of point 2 is point 4. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{lpp}} } \examples{ example(lpp) nnwhich(X) } \keyword{spatial} spatstat/man/methods.kppm.Rd0000755000176000001440000000322412237642733015635 0ustar ripleyusers\name{methods.kppm} \alias{methods.kppm} %DoNotExport \alias{coef.kppm} \alias{formula.kppm} \alias{print.kppm} \alias{terms.kppm} \alias{labels.kppm} \title{ Methods for Cluster Point Process Models } \description{ These are methods for the class \code{"kppm"}. } \usage{ \method{coef}{kppm}(object, \dots) \method{formula}{kppm}(x, \dots) \method{print}{kppm}(x, ...) \method{terms}{kppm}(x, \dots) \method{labels}{kppm}(object, \dots) } \arguments{ \item{x,object}{ An object of class \code{"kppm"}, representing a fitted cluster point process model. } \item{\dots}{ Arguments passed to other methods. } } \details{ These functions are methods for the generic commands \code{\link{coef}}, \code{\link{formula}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"kppm"}. An object of class \code{"kppm"} represents a fitted cluster point process model. It is obtained from \code{\link{kppm}}. The method \code{coef.kppm} returns the vector of \emph{regression coefficients} of the fitted model. It does not return the clustering parameters. } \value{ See the help files for the corresponding generic functions. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{kppm}}, \code{\link{plot.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link{as.ppm.kppm}}. 
} \examples{ data(redwood) fit <- kppm(redwood, ~x, "MatClust") coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat/man/chicago.Rd0000755000176000001440000000375712237642732014633 0ustar ripleyusers\name{chicago} \alias{chicago} \docType{data} \title{ Chicago Street Crime Data } \description{ This dataset is a record of street crimes reported in the period 25 April to 8 May 2002, in an area of Chicago (Illinois, USA) close to the University of Chicago. The original street crime map was published in the Chicago Weekly News in 2002. The data give the spatial location of each crime, and the type of crime. The type labels are interpreted as follows: \tabular{ll}{ \code{assault} & battery/assault \cr \code{burglary} & burglary \cr \code{cartheft} & motor vehicle theft \cr \code{damage} & criminal damage \cr \code{robbery} & robbery \cr \code{theft} & theft \cr \code{trespass} & criminal trespass } All crimes occurred on or near a street. The data give the coordinates of all streets in the survey area, and their connectivity. The dataset \code{chicago} is an object of class \code{"lpp"} representing a point pattern on a linear network. See \code{\link{lpp}} for further information on the format. These data were published and analysed in Ang, Baddeley and Nair (2012). } \format{ Object of class \code{"lpp"}. See \code{\link{lpp}}. } \usage{data(chicago)} \examples{ data(chicago) plot(chicago) plot(as.linnet(chicago), main="Chicago Street Crimes",col="green") plot(as.ppp(chicago), add=TRUE, col="red", chars=c(16,2,22,17,24,15,6)) } \source{ Chicago Weekly News, 2002. Manually digitised by Adrian Baddeley. } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. 
(2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. Chicago Weekly News website: \url{www.chicagoweeklynews.com} } \keyword{datasets} \keyword{spatial} spatstat/man/pool.envelope.Rd0000755000176000001440000000565412237642733016022 0ustar ripleyusers\name{pool.envelope} \alias{pool.envelope} \title{ Pool Data from Several Envelopes } \description{ Pool the simulation data from several simulation envelopes (objects of class \code{"envelope"}) and compute a new envelope. } \usage{ \method{pool}{envelope}(..., savefuns=FALSE, savepatterns=FALSE) } \arguments{ \item{\dots}{ Objects of class \code{"envelope"}. } \item{savefuns}{ Logical flag indicating whether to save all the simulated function values. } \item{savepatterns}{ Logical flag indicating whether to save all the simulated point patterns. } } \details{ The function \code{\link{pool}} is generic. This is the method for the class \code{"envelope"} of simulation envelopes. It is used to combine the simulation data from several simulation envelopes and to compute an envelope based on the combined data. Each of the arguments \code{\dots} must be an object of class \code{"envelope"}. These envelopes must be compatible, in that they are envelopes for the same function, and were computed using the same options. \itemize{ \item In normal use, each envelope object will have been created by running the command \code{\link{envelope}} with the argument \code{savefuns=TRUE}. This ensures that each object contains the simulated data (summary function values for the simulated point patterns) that were used to construct the envelope. The simulated data are extracted from each object and combined. A new envelope is computed from the combined set of simulations. 
\item Alternatively, if each envelope object was created by running \code{\link{envelope}} with \code{VARIANCE=TRUE}, then the saved functions are not required. The sample means and sample variances from each envelope will be pooled. A new envelope is computed from the pooled mean and variance. } Warnings or errors will be issued if the envelope objects \code{\dots} appear to be incompatible. Apart from these basic checks, the code is not smart enough to decide whether it is sensible to pool the data. To modify the envelope parameters or the type of envelope that is computed, first pool the envelope data using \code{pool.envelope}, then use \code{\link{envelope.envelope}} to modify the envelope parameters. } \value{ An object of class \code{"envelope"}. } \seealso{ \code{\link{envelope}}, \code{\link{envelope.envelope}}, \code{\link{pool}}, \code{\link{pool.fasp}} } \examples{ data(cells) E1 <- envelope(cells, Kest, nsim=10, savefuns=TRUE) E2 <- envelope(cells, Kest, nsim=20, savefuns=TRUE) pool(E1, E2) V1 <- envelope(E1, VARIANCE=TRUE) V2 <- envelope(E2, VARIANCE=TRUE) pool(V1, V2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/rescale.Rd0000755000176000001440000000343312237642734014645 0ustar ripleyusers\name{rescale} \alias{rescale} \title{Convert dataset to another unit of length} \description{ Converts between different units of length in a spatial dataset, such as a point pattern or a window. 
} \usage{ rescale(X, s) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} \item{s}{Conversion factor: the new units are \code{s} times the old units.} } \value{ Another object of the same type, representing the same data, but expressed in the new units. } \details{ This is generic. Methods are provided for point patterns (\code{\link{rescale.ppp}}) and windows (\code{\link{rescale.owin}}). The spatial coordinates in the dataset \code{X} will be re-expressed in terms of a new unit of length that is \code{s} times the current unit of length given in \code{X}. For example if \code{X} is a dataset giving coordinates in metres, then \code{rescale(X,1000)} will divide the coordinate values by 1000 to obtain coordinates in kilometres, and the unit name will be changed from \code{"metres"} to \code{"1000 metres"}. } \section{Note}{ The result of this operation is equivalent to the original dataset. If you want to actually change the coordinates by a linear transformation, producing a dataset that is not equivalent to the original one, use \code{\link{affine}}. } \seealso{ \code{\link{unitname}}, \code{\link{rescale.ppp}}, \code{\link{rescale.owin}}, \code{\link{affine}}, \code{\link{rotate}}, \code{\link{shift}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/coef.mppm.Rd0000644000176000001440000000446212241443111015072 0ustar ripleyusers\name{coef.mppm} \alias{coef.mppm} \title{ Coefficients of Point Process Model Fitted to Multiple Point Patterns } \description{ Given a point process model fitted to a list of point patterns, extract the coefficients of the fitted model. A method for \code{coef}. 
} \usage{ \method{coef}{mppm}(object, \dots) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"mppm"}) } \item{\dots}{ Ignored. } } \value{ A vector containing the fitted coefficients. } \details{ This function is a method for the generic function \code{\link{coef}}. The argument \code{object} must be a fitted point process model (object of class \code{"mppm"}) produced by the fitting algorithm \code{\link{mppm}}). This represents a point process model that has been fitted to a list of several point pattern datasets. See \code{\link{mppm}} for information. This function extracts the vector of coefficients of the fitted model. This is the estimate of the parameter vector \eqn{\theta}{theta} such that the conditional intensity of the model is of the form \deqn{ \lambda(u,x) = \exp(\theta S(u,x)) }{ lambda(u,x) = exp(theta . S(u,x)) } where \eqn{S(u,x)} is a (vector-valued) statistic. For example, if the model \code{object} is the uniform Poisson process, then \code{coef(object)} will yield a single value (named \code{"(Intercept)"}) which is the logarithm of the fitted intensity of the Poisson process. Use \code{\link{print.mppm}} to print a more useful description of the fitted model. 
} \seealso{ \code{\link{print.mppm}}, \code{\link{mppm}} } \examples{ data(waterstriders) H <- hyperframe(X=waterstriders) fit.Poisson <- mppm(X ~ 1, H) coef(fit.Poisson) # The single entry "(Intercept)" # is the log of the fitted intensity of the Poisson process fit.Strauss <- mppm(X~1, H, Strauss(7)) coef(fit.Strauss) # The two entries "(Intercept)" and "Interaction" # are respectively log(beta) and log(gamma) # in the usual notation for Strauss(beta, gamma, r) } \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/fitted.mppm.Rd0000644000176000001440000000516512241443111015436 0ustar ripleyusers\name{fitted.mppm} \alias{fitted.mppm} \title{Fitted Conditional Intensity for Multiple Point Process Model} \description{ Given a point process model fitted to multiple point patterns, compute the fitted conditional intensity of the model at the points of each data pattern, or at the points of the quadrature schemes used to fit the model. } \usage{ \method{fitted}{mppm}(object, ..., type = "lambda", dataonly = FALSE) } \arguments{ \item{object}{ The fitted model. An object of class \code{"mppm"} obtained from \code{\link{mppm}}. } \item{\dots}{Ignored.} \item{type}{ Type of fitted values: either \code{"trend"} for the spatial trend, or \code{"lambda"} or \code{"cif"} for the conditional intensity. } \item{dataonly}{ If \code{TRUE}, fitted values are computed only for the points of the data point patterns. If \code{FALSE}, fitted values are computed for the points of the quadrature schemes used to fit the model. 
} } \details{ This function evaluates the conditional intensity \eqn{\hat\lambda(u,x)}{lambdahat(u,x)} or spatial trend \eqn{\hat{b(u)}}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, for each of the original point patterns \eqn{x} to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated, are the points of the quadrature schemes used to fit the model in \code{\link{mppm}}. They include the data points (the points of the original point pattern datasets) and other ``dummy'' points in the window of observation. Use \code{\link{predict.mppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \value{ A list of vectors (one for each row of the original hyperframe, i.e. one vector for each of the original point patterns) containing the values of the fitted conditional intensity or (if \code{type="trend"}) the fitted spatial trend. Entries in these vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{\link{quad.mppm}(object)}. } \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \examples{ data(waterstriders) model <- mppm(Bugs ~ x, data=hyperframe(Bugs=waterstriders), interaction=Strauss(7)) cifs <- fitted(model) } \seealso{ \code{\link{mppm}}, \code{\link{predict.mppm}} } \keyword{spatial} \keyword{models} spatstat/man/runifpoint3.Rd0000755000176000001440000000175112237642734015510 0ustar ripleyusers\name{runifpoint3} \alias{runifpoint3} \title{ Generate N Uniform Random Points in Three Dimensions } \description{ Generate a random point pattern containing \code{n} independent, uniform random points in three dimensions. 
} \usage{ runifpoint3(n, domain = box3()) } \arguments{ \item{n}{ Number of points to be generated. } \item{domain}{ Three-dimensional box in which the process should be generated. An object of class \code{"box3"}. } } \value{ The simulated point pattern (an object of class \code{"pp3"}). } \details{ This function generates \code{n} independent random points, uniformly distributed in the three-dimensional box \code{domain}. } \seealso{ \code{\link{rpoispp3}}, \code{\link{pp3}}, \code{\link{box3}} } \examples{ X <- runifpoint3(50) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/linearKcross.Rd0000644000176000001440000000600312237642732015655 0ustar ripleyusers\name{linearKcross} \alias{linearKcross} \title{ Multitype K Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ linearKcross(X, i, j, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. 
There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link[spatstat]{Kcross}} for a point pattern on a linear network (object of class \code{"lpp"}). The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link[spatstat]{linearK}}. 
} \examples{ data(chicago) K <- linearKcross(chicago, "assault", "robbery") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/rStrauss.Rd0000755000176000001440000001040612237642734015053 0ustar ripleyusers\name{rStrauss} \alias{rStrauss} \title{Perfect Simulation of the Strauss Process} \description{ Generate a random pattern of points, a simulated realisation of the Strauss process, using a perfect simulation algorithm. } \usage{ rStrauss(beta, gamma = 1, R = 0, W = owin()) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ interaction parameter (a number between 0 and 1, inclusive). } \item{R}{ interaction radius (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } } \details{ This function generates a realisation of the Strauss point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Strauss process (Strauss, 1975; Kelly and Ripley, 1976) is a model for spatial inhibition, ranging from a strong `hard core' inhibition to a completely random pattern according to the value of \code{gamma}. The Strauss process with interaction radius \eqn{R} and parameters \eqn{\beta}{beta} and \eqn{\gamma}{gamma} is the pairwise interaction point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{s(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^s(x) } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, \eqn{s(x)} is the number of distinct unordered pairs of points that are closer than \eqn{R} units apart, and \eqn{\alpha}{alpha} is the normalising constant. 
Intuitively, each point of the pattern contributes a factor \eqn{\beta}{beta} to the probability density, and each pair of points closer than \eqn{R} units apart contributes a factor \eqn{\gamma}{gamma} to the density. The interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1} in order that the process be well-defined (Kelly and Ripley, 1976). This model describes an ``ordered'' or ``inhibitive'' pattern. If \eqn{\gamma=1}{gamma=1} it reduces to a Poisson process (complete spatial randomness) with intensity \eqn{\beta}{beta}. If \eqn{\gamma=0}{gamma=0} it is called a ``hard core process'' with hard core radius \eqn{R/2}, since no pair of points is permitted to lie closer than \eqn{R} units apart. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ A point pattern (object of class \code{"ppp"}). } \references{ Berthelsen, K.K. and Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Moller, J. and Waagepetersen, R. (2003). 
\emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. Strauss, D.J. (1975) A model for clustering. \emph{Biometrika} \bold{63}, 467--475. } \author{ Kasper Klitgaard Berthelsen, adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \examples{ X <- rStrauss(0.05,0.2,1.5,square(141.4)) Z <- rStrauss(100,0.7,0.05) } \seealso{ \code{\link{rmh}}, \code{\link{Strauss}}, \code{\link{rHardcore}}, \code{\link{rStraussHard}}, \code{\link{rDiggleGratton}}, \code{\link{rDGS}}. } \keyword{spatial} \keyword{datagen} spatstat/man/is.owin.Rd0000755000176000001440000000146212237642732014613 0ustar ripleyusers\name{is.owin} \alias{is.owin} \title{Test Whether An Object Is A Window} \description{ Checks whether its argument is a window (object of class \code{"owin"}). } \usage{ is.owin(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the object \code{x} is a window object of class \code{"owin"}. See \code{\link{owin.object}} for details of this class. The result is determined to be \code{TRUE} if \code{x} inherits from \code{"owin"}, i.e. if \code{x} has \code{"owin"} amongst its classes. } \value{ \code{TRUE} if \code{x} is a point pattern, otherwise \code{FALSE}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/dummy.ppm.Rd0000755000176000001440000000447512237642732015162 0ustar ripleyusers\name{dummy.ppm} \alias{dummy.ppm} \title{Extract Dummy Points Used to Fit a Point Process Model} \description{ Given a fitted point process model, this function extracts the `dummy points' of the quadrature scheme used to fit the model. } \usage{ dummy.ppm(object, drop=FALSE) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). 
} \item{drop}{ Logical value determining whether to delete dummy points that were not used to fit the model. } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The maximum pseudolikelihood algorithm in \code{\link{ppm}} approximates the pseudolikelihood integral by a sum over a finite set of quadrature points, which is constructed by augmenting the original data point pattern by a set of ``dummy'' points. The fitted model object returned by \code{\link{ppm}} contains complete information about this quadrature scheme. See \code{\link{ppm}} or \code{\link{ppm.object}} for further information. This function \code{dummy.ppm} extracts the dummy points of the quadrature scheme. A typical use of this function would be to count the number of dummy points, to gauge the accuracy of the approximation to the exact pseudolikelihood. It may happen that some dummy points are not actually used in fitting the model (typically because the value of a covariate is \code{NA} at these points). The argument \code{drop} specifies whether these unused dummy points shall be deleted (\code{drop=TRUE}) or retained (\code{drop=FALSE}) in the return value. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. 
} \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- dummy.ppm(fit) X$n # this is the number of dummy points in the quadrature scheme } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{utilities} \keyword{models} spatstat/man/rpoisppOnLines.Rd0000755000176000001440000000630112237642734016210 0ustar ripleyusers\name{rpoisppOnLines} \alias{rpoisppOnLines} \title{Generate Poisson Point Pattern on Line Segments} \description{ Given a line segment pattern, generate a Poisson random point pattern on the line segments. } \usage{ rpoisppOnLines(lambda, L, lmax = NULL, ...) } \arguments{ \item{lambda}{Intensity of the Poisson process. A single number, a \code{function(x,y)}, a pixel image (object of class \code{"im"}), or a vector of numbers, a list of functions, or a list of images. } \item{L}{Line segment pattern (object of class \code{"psp"}) on which the points should be generated. } \item{lmax}{Maximum possible value of \code{lambda} if it is a function or a pixel image. } \item{\dots}{Additional arguments passed to \code{lambda} if it is a function. } } \details{ This command generates a Poisson point process on the one-dimensional system of line segments in \code{L}. The result is a point pattern consisting of points lying on the line segments in \code{L}. The number of random points falling on any given line segment follows a Poisson distribution. The patterns of points on different segments are independent. The intensity \code{lambda} is the expected number of points per unit \bold{length} of line segment. It may be constant, or it may depend on spatial location. In order to generate an unmarked Poisson process, the argument \code{lambda} may be a single number, or a \code{function(x,y)}, or a pixel image (object of class \code{"im"}). 
In order to generate a \emph{marked} Poisson process, \code{lambda} may be a numeric vector, a list of functions, or a list of images, each entry giving the intensity for a different mark value. If \code{lambda} is not numeric, then the (Lewis-Shedler) rejection method is used. The rejection method requires knowledge of \code{lmax}, the maximum possible value of \code{lambda}. This should be either a single number, or a numeric vector of the same length as \code{lambda}. If \code{lmax} is not given, it will be computed approximately, by sampling many values of \code{lambda}. } \value{ A point pattern (object of class \code{"ppp"}) in the same window as \code{L}. } \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{runifpointOnLines}}, \code{\link{rpoispp}} } \examples{ live <- interactive() L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) if(live) plot(L, main="") # uniform intensity Y <- rpoisppOnLines(4, L) if(live) plot(Y, add=TRUE, pch="+") # uniform MARKED process with types 'a' and 'b' Y <- rpoisppOnLines(c(a=4, b=5), L) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } # intensity is a function Y <- rpoisppOnLines(function(x,y){ 10 * x^2}, L, 10) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } # intensity is an image Z <- as.im(function(x,y){10 * sqrt(x+y)}, unit.square()) Y <- rpoisppOnLines(Z, L, 10) if(live) { plot(L, main="") plot(Y, add=TRUE, pch="+") } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/tweak.colourmap.Rd0000644000176000001440000000313312237642734016334 0ustar ripleyusers\name{tweak.colourmap} \alias{tweak.colourmap} \title{ Change Colour Values in a Colour Map } \description{ Assign new colour values to some of the entries in a colour map. 
} \usage{ tweak.colourmap(m, col, ..., inputs=NULL, range=NULL) } \arguments{ \item{m}{ A colour map (object of class \code{"colourmap"}). } \item{inputs}{ Input values to the colour map, to be assigned new colours. Incompatible with \code{range}. } \item{range}{ Numeric vector of length 2 specifying a range of numerical values which should be assigned a new colour. Incompatible with \code{inputs}. } \item{col}{ Replacement colours for the specified \code{inputs} or the specified \code{range} of values. } \item{\dots}{Other arguments are ignored.} } \details{ This function changes the colour map \code{m} by assigning new colours to each of the input values specified by \code{inputs}, or by assigning a single new colour to the range of input values specified by \code{range}. The modified colour map is returned. } \value{ Another colour map (object of class \code{"colourmap"}). } \seealso{ \code{\link{colourmap}}, \code{\link{interp.colourmap}}, \code{\link[spatstat:colourtools]{colourtools}}. } \examples{ co <- colourmap(rainbow(32), range=c(0,1)) plot(tweak.colourmap(co, inputs=c(0.5, 0.6), "white")) plot(tweak.colourmap(co, range=c(0.5,0.6), "white")) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{color} spatstat/man/diameter.Rd0000755000176000001440000000204712237642732015017 0ustar ripleyusers\name{diameter} \alias{diameter} \title{Diameter of an Object} \description{ Computes the diameter of an object such as a two-dimensional window or three-dimensional box. } \usage{ diameter(x) } \arguments{ \item{x}{ A window or other object whose diameter will be computed. } } \value{ The numerical value of the diameter of the object. } \details{ This function computes the diameter of an object such as a two-dimensional window or a three-dimensional box. The diameter is the maximum distance between any two points in the object. 
The function \code{diameter} is generic, with methods for the class \code{"owin"} (two-dimensional windows), \code{"box3"} (three-dimensional boxes) and \code{"boxx"} (multi-dimensional boxes). } \seealso{ \code{\link{diameter.owin}}, \code{\link{diameter.box3}}, \code{\link{diameter.boxx}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/plot.psp.Rd0000755000176000001440000000612212237642733015003 0ustar ripleyusers\name{plot.psp} \alias{plot.psp} \title{plot a Spatial Line Segment Pattern} \description{ Plot a two-dimensional line segment pattern } \usage{ \method{plot}{psp}(x, \dots, add=FALSE, which.marks=1, ribbon=TRUE, ribsep=0.15, ribwid=0.05, ribn=1024) } \arguments{ \item{x}{ The line segment pattern to be plotted. An object of class \code{"psp"}, or data which can be converted into this format by \code{\link{as.psp}()}. } \item{\dots}{ extra arguments that will be passed to the plotting functions \code{\link{segments}} (to plot the segments) and \code{\link{plot.owin}} (to plot the observation window). } \item{add}{ Logical. If \code{TRUE}, the current plot is not erased; the segments are plotted on top of the current plot, and the window is not plotted. } \item{which.marks}{ Index determining which column of marks to use, if the marks of \code{x} are a data frame. A character string or an integer. Defaults to \code{1} indicating the first column of marks. } \item{ribbon}{ Logical flag indicating whether to display a ribbon showing the colour map (in which mark values are associated with colours). } \item{ribsep}{ Factor controlling the space between the ribbon and the image. } \item{ribwid}{ Factor controlling the width of the ribbon. } \item{ribn}{ Number of different values to display in the ribbon. 
} } \value{ \code{NULL} } \details{ This is the \code{plot} method for line segment pattern datasets (of class \code{"psp"}, see \code{\link{psp.object}}). It plots both the observation window \code{x$window} and the line segments themselves. Plotting of the window \code{x$window} is performed by \code{\link{plot.owin}}. This plot may be modified through the \code{...} arguments. Plotting of the segments themselves is performed by the standard R function \code{\link{segments}}. Its plotting behaviour may also be modified through the \code{...} arguments. For a \emph{marked} line segment pattern (i.e. if \code{marks(x)} is not \code{NULL}) the line segments are plotted in colours determined by the mark values. If \code{marks(x)} is a data frame, the default is to use the first column of \code{marks(x)} to determine the colours. To specify another column, use the argument \code{which.marks}. The colour map (associating mark values with colours) will be displayed as a vertical colour ribbon to the right of the plot, if \code{ribbon=TRUE}. } \seealso{ \code{\link{psp.object}}, \code{\link{plot}}, \code{\link{par}}, \code{\link{plot.owin}}, \code{\link{symbols}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) plot(X) plot(X, lwd=3) lettuce <- sample(letters[1:4], 20, replace=TRUE) marks(X) <- data.frame(A=1:20, B=factor(lettuce)) plot(X) plot(X, which.marks="B") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/Kinhom.Rd0000755000176000001440000003225612237642731014456 0ustar ripleyusers\name{Kinhom} \alias{Kinhom} \title{Inhomogeneous K-function} \description{ Estimates the inhomogeneous \eqn{K} function of a non-stationary point pattern. 
} \usage{ Kinhom(X, lambda=NULL, \dots, r = NULL, breaks = NULL, correction=c("border", "bord.modif", "isotropic", "translate"), renormalise=TRUE, normpower=1, nlarge = 1000, lambda2=NULL, reciplambda=NULL, reciplambda2=NULL, sigma=NULL, varcov=NULL) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{K} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{ Extra arguments. Ignored if \code{lambda} is present. Passed to \code{\link{density.ppp}} if \code{lambda} is omitted. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ An alternative to the argument \code{r}. Not normally invoked by the user. See Details. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. } \item{renormalise}{ Logical. Whether to renormalise the estimate. See Details. } \item{normpower}{ Integer (usually either 1 or 2). Normalisation power. See Details. } \item{nlarge}{ Optional. Efficiency threshold. If the number of points exceeds \code{nlarge}, then only the border correction will be computed, using a fast algorithm. } \item{lambda2}{ Advanced use only. 
Matrix containing estimates of the products \eqn{\lambda(x_i)\lambda(x_j)}{lambda(x[i]) * lambda(x[j])} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{reciplambda}{ Alternative to \code{lambda}. Values of the estimated \emph{reciprocal} \eqn{1/\lambda}{1/lambda} of the intensity function. Either a vector giving the reciprocal intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the reciprocal intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the reciprocal intensity value at any location. } \item{reciplambda2}{ Advanced use only. Alternative to \code{lambda2}. A matrix giving values of the estimated \emph{reciprocal products} \eqn{1/\lambda(x_i)\lambda(x_j)}{1/(lambda(x[i]) * lambda(x[j]))} of the intensities at each pair of data points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing at least the following columns, \item{r}{the vector of values of the argument \eqn{r} at which \eqn{K_{\rm inhom}(r)}{Kinhom(r)} has been estimated } \item{theo}{vector of values of \eqn{\pi r^2}{pi * r^2}, the theoretical value of \eqn{K_{\rm inhom}(r)}{Kinhom(r)} for an inhomogeneous Poisson process } and containing additional columns according to the choice specified in the \code{correction} argument. The additional columns are named \code{border}, \code{trans} and \code{iso} and give the estimated values of \eqn{K_{\rm inhom}(r)}{Kinhom(r)} using the border correction, translation correction, and Ripley isotropic correction, respectively. 
} \details{ This computes a generalisation of the \eqn{K} function for inhomogeneous point patterns, proposed by Baddeley, Moller and Waagepetersen (2000). The ``ordinary'' \eqn{K} function (variously known as the reduced second order moment function and Ripley's \eqn{K} function), is described under \code{\link{Kest}}. It is defined only for stationary point processes. The inhomogeneous \eqn{K} function \eqn{K_{\rm inhom}(r)}{Kinhom(r)} is a direct generalisation to nonstationary point processes. Suppose \eqn{x} is a point process with non-constant intensity \eqn{\lambda(u)}{lambda(u)} at each location \eqn{u}. Define \eqn{K_{\rm inhom}(r)}{Kinhom(r)} to be the expected value, given that \eqn{u} is a point of \eqn{x}, of the sum of all terms \eqn{1/\lambda(x_j)}{1/lambda(x[j])} over all points \eqn{x_j}{x[j]} in the process separated from \eqn{u} by a distance less than \eqn{r}. This reduces to the ordinary \eqn{K} function if \eqn{\lambda()}{lambda()} is constant. If \eqn{x} is an inhomogeneous Poisson process with intensity function \eqn{\lambda(u)}{lambda(u)}, then \eqn{K_{\rm inhom}(r) = \pi r^2}{Kinhom(r) = pi * r^2}. Given a point pattern dataset, the inhomogeneous \eqn{K} function can be estimated essentially by summing the values \eqn{1/(\lambda(x_i)\lambda(x_j))}{1/(lambda(x[i]) * lambda(x[j]))} for all pairs of points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance less than \eqn{r}. This allows us to inspect a point pattern for evidence of interpoint interactions after allowing for spatial inhomogeneity of the pattern. Values \eqn{K_{\rm inhom}(r) > \pi r^2}{Kinhom(r) > pi * r^2} are suggestive of clustering. The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda}. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. 
} \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"}) whose fitted \emph{trend} can be used as the fitted intensity. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. 
The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Edge corrections are used to correct bias in the estimation of \eqn{K_{\rm inhom}}{Kinhom}. Each edge-corrected estimate of \eqn{K_{\rm inhom}(r)}{Kinhom(r)} is of the form \deqn{\widehat K_{\rm inhom}(r) = \sum_i \sum_j \frac{1\{d_{ij} \le r\} e(x_i,x_j,r)}{\lambda(x_i)\lambda(x_j)} }{K^inhom(r)= sum[i] sum[j] 1(d[i,j] <= r) * e(x[i],x[j],r)/(lambda(x[i]) * lambda(x[j])) } where \eqn{d_{ij}}{d[i,j]} is the distance between points \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r)}{e(x[i],x[j],r)} is an edge correction factor. For the `border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\sum_j 1(b_j > r)/\lambda(x_j)} }{ 1(b[i] > r)/(sum[j] 1(b[j] > r)/lambda(x[j])) } where \eqn{b_i}{b[i]} is the distance from \eqn{x_i}{x[i]} to the boundary of the window. For the `modified border' correction, \deqn{ e(x_i,x_j,r) = \frac{1(b_i > r)}{\mbox{area}(W \ominus r)} }{ 1(b[i] > r)/area(W [-] r) } where \eqn{W \ominus r}{W [-] r} is the eroded window obtained by trimming a margin of width \eqn{r} from the border of the original window. For the `translation' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W \cap (W + (x_j - x_i)))} }{ 1/area(W intersect (W + x[j]-x[i])) } and for the `isotropic' correction, \deqn{ e(x_i,x_j,r) = \frac 1 {\mbox{area}(W) g(x_i,x_j)} }{ 1/(area(W) g(x[i],x[j])) } where \eqn{g(x_i,x_j)}{g(x[i],x[j])} is the fraction of the circumference of the circle with centre \eqn{x_i}{x[i]} and radius \eqn{||x_i - x_j||}{||x[i]-x[j]||} which lies inside the window. If \code{renormalise=TRUE} (the default), then the estimates are multiplied by \eqn{c^{\mbox{normpower}}}{c^normpower} where \eqn{ c = \mbox{area}(W)/\sum (1/\lambda(x_i)). }{ c = area(W)/sum[i] (1/lambda(x[i])). 
} This rescaling reduces the variability and bias of the estimate in small samples and in cases of very strong inhomogeneity. The default value of \code{normpower} is 1 (for consistency with previous versions of \pkg{spatstat}) but the most sensible value is 2, which would correspond to rescaling the \code{lambda} values so that \eqn{ \sum (1/\lambda(x_i)) = \mbox{area}(W). }{ sum[i] (1/lambda(x[i])) = area(W). } If the point pattern \code{X} contains more than about 1000 points, the isotropic and translation edge corrections can be computationally prohibitive. The computations for the border method are much faster, and are statistically efficient when there are large numbers of points. Accordingly, if the number of points in \code{X} exceeds the threshold \code{nlarge}, then only the border correction will be computed. Setting \code{nlarge=Inf} or \code{correction="best"} will prevent this from happening. Setting \code{nlarge=0} is equivalent to selecting only the border correction with \code{correction="border"}. The pair correlation function can also be applied to the result of \code{Kinhom}; see \code{\link{pcf}}. } \references{ Baddeley, A., Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. 
} \seealso{ \code{\link{Kest}}, \code{\link{pcf}} } \examples{ data(lansing) # inhomogeneous pattern of maples X <- unmark(split(lansing)$maple) \testonly{ sub <- sample(c(TRUE,FALSE), X$n, replace=TRUE, prob=c(0.1,0.9)) X <- X[sub] } # (1) intensity function estimated by model-fitting # Fit spatial trend: polynomial in x and y coordinates fit <- ppm(X, ~ polynom(x,y,2), Poisson()) # (a) predict intensity values at points themselves, # obtaining a vector of lambda values lambda <- predict(fit, locations=X, type="trend") # inhomogeneous K function Ki <- Kinhom(X, lambda) plot(Ki) # (b) predict intensity at all locations, # obtaining a pixel image lambda <- predict(fit, type="trend") Ki <- Kinhom(X, lambda) plot(Ki) # (2) intensity function estimated by heavy smoothing Ki <- Kinhom(X, sigma=0.1) plot(Ki) # (3) simulated data: known intensity function lamfun <- function(x,y) { 50 + 100 * x } # inhomogeneous Poisson process Y <- rpoispp(lamfun, 150, owin()) # inhomogeneous K function Ki <- Kinhom(Y, lamfun) plot(Ki) # How to make simulation envelopes: # Example shows method (2) \dontrun{ smo <- density.ppp(X, sigma=0.1) Ken <- envelope(X, Kinhom, nsim=99, simulate=expression(rpoispp(smo)), sigma=0.1, correction="trans") plot(Ken) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/nndensity.Rd0000644000176000001440000000567212247336365015247 0ustar ripleyusers\name{nndensity.ppp} \alias{nndensity} \alias{nndensity.ppp} \title{ Estimate Intensity of Point Pattern Using Nearest Neighbour Distances } \description{ Estimates the intensity of a point pattern using the distance from each spatial location to the \code{k}th nearest data point. } \usage{ nndensity(x, ...) \method{nndensity}{ppp}(x, k, ..., verbose = TRUE) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}) or some other spatial object. 
} \item{k}{ Integer. The distance to the \code{k}th nearest data point will be computed. There is a sensible default. } \item{\dots}{ Arguments passed to \code{\link{nnmap}} and \code{\link{as.mask}} controlling the pixel resolution. } \item{verbose}{ Logical. If \code{TRUE}, print the value of \code{k} when it is automatically selected. If \code{FALSE}, remain silent. } } \details{ This function computes a quick estimate of the intensity of the point process that generated the point pattern \code{x}. For each spatial location \eqn{s}, let \eqn{d(s)} be the distance from \eqn{s} to the \eqn{k}-th nearest point in the dataset \code{x}. If the data came from a homogeneous Poisson process with intensity \eqn{\lambda}{lambda}, then \eqn{\pi d(s)^2}{pi * d(s)^2} would follow a negative exponential distribution with mean \eqn{1/\lambda}{1/lambda}, and the maximum likelihood estimate of \eqn{\lambda}{lambda} would be \eqn{1/(\pi d(s)^2)}{1/(pi * d(s)^2)}. This is the estimate computed by \code{nndensity}, apart from an edge effect correction. This estimator of intensity is relatively fast to compute, and is spatially adaptive (so that it can handle wide variation in the intensity function). However, it implicitly assumes the points are independent, so it does not perform well if the pattern is strongly clustered or strongly inhibited. The value of \code{k} should be greater than 1 in order to avoid infinite peaks in the intensity estimate around each data point. The default value of \code{k} is the square root of the number of points in \code{x}, which seems to work well in many cases. The window of \code{x} is digitised using \code{\link{as.mask}} and the values \eqn{d(s)} are computed using \code{\link{nnmap}}. To control the pixel resolution, see \code{\link{as.mask}}. } \value{ A pixel image (object of class \code{"im"}) giving the estimated intensity of the point process at each spatial location. Pixel values are intensities (number of points per unit area). 
} \references{ NEED REFERENCES. TRY CRESSIE } \seealso{ \code{\link{density.ppp}}, \code{\link{intensity}} for alternative estimates of point process intensity. } \examples{ plot(nndensity(swedishpines)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/chop.tess.Rd0000755000176000001440000000323612237642732015134 0ustar ripleyusers\name{chop.tess} \alias{chop.tess} \title{Subdivide a Window or Tessellation using a Set of Lines} \description{ Divide a given window into tiles delineated by a set of infinite straight lines, obtaining a tessellation of the window. Alternatively, given a tessellation, divide each tile of the tessellation into sub-tiles delineated by the lines. } \usage{ chop.tess(X, L) } \arguments{ \item{X}{ A window (object of class \code{"owin"}) or tessellation (object of class \code{"tess"}) to be subdivided by lines. } \item{L}{ A set of infinite straight lines (object of class \code{"infline"}) } } \details{ The argument \code{L} should be a set of infinite straight lines in the plane (stored in an object \code{L} of class \code{"infline"} created by the function \code{\link{infline}}). If \code{X} is a window, then it is divided into tiles delineated by the lines in \code{L}. If \code{X} is a tessellation, then each tile of \code{X} is subdivided into sub-tiles delineated by the lines in \code{L}. The result is a tessellation. } \section{Warning}{ If \code{X} is a non-convex window, or a tessellation containing non-convex tiles, then \code{chop.tess(X,L)} may contain a tile which consists of several unconnected pieces. } \value{ A tessellation (object of class \code{"tess"}). 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{infline}}, \code{\link{clip.infline}} } \examples{ L <- infline(p=1:3, theta=pi/4) W <- square(4) chop.tess(W, L) } \keyword{spatial} \keyword{math} spatstat/man/letterR.Rd0000755000176000001440000000070012237642732014640 0ustar ripleyusers\name{letterR} \alias{letterR} \docType{data} \title{Window in Shape of Letter R} \description{ A window in the shape of the capital letter R, for use in demonstrations. } \format{ An object of class \code{"owin"} representing the capital letter R, in the same font as the R package logo. See \code{\link{owin.object}} for details of the format. } \usage{ data(letterR) } \source{Adrian Baddeley} \keyword{datasets} \keyword{spatial} spatstat/man/nnfun.Rd0000644000176000001440000000531512252030012014322 0ustar ripleyusers\name{nnfun} \Rdversion{1.1} \alias{nnfun} \alias{nnfun.ppp} \alias{nnfun.psp} \title{ Nearest Neighbour Index Map as a Function } \description{ Compute the nearest neighbour index map of an object, and return it as a function. } \usage{ nnfun(X, ...) \method{nnfun}{ppp}(X, ..., k=1) \method{nnfun}{psp}(X, ...) } \arguments{ \item{X}{ Any suitable dataset representing a two-dimensional collection of objects, such as a point pattern (object of class \code{"ppp"}) or a line segment pattern (object of class \code{"psp"}). } \item{k}{ A single integer. The \code{k}th nearest neighbour will be found. } \item{\dots}{ Extra arguments are ignored. } } \details{ For a collection \eqn{X} of two dimensional objects (such as a point pattern or a line segment pattern), the \dQuote{nearest neighbour index function} of \eqn{X} is the mathematical function \eqn{f} such that, for any two-dimensional spatial location \eqn{(x,y)}, the function value \code{f(x,y)} is the index \eqn{i} identifying the closest member of \eqn{X}. 
That is, if \eqn{i = f(x,y)} then \eqn{X[i]} is the closest member of the collection \eqn{X} to the location \eqn{(x,y)}. The command \code{f <- nnfun(X)} returns a \emph{function} in the \R language, with arguments \code{x,y}, that represents the nearest neighbour index function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the indices of the nearest neighbours to these locations. If the argument \code{k} is specified then the \code{k}-th nearest neighbour will be found. The result of \code{f <- nnfun(X)} also belongs to the class \code{"funxy"} and to the special class \code{"nnfun"}. It can be printed and plotted immediately as shown in the Examples. A \code{nnfun} object can be converted to a pixel image using \code{\link{as.im}}. } \value{ A \code{function} with arguments \code{x,y}. The function also belongs to the class \code{"nnfun"} which has a method for \code{print}. It also belongs to the class \code{"funxy"} which has methods for \code{plot}, \code{contour} and \code{persp}. } \seealso{ \code{\link{distfun}}, \code{\link{plot.funxy}} } \examples{ f <- nnfun(cells) f plot(f) f(0.2, 0.3) g <- nnfun(cells, k=2) g(0.2, 0.3) L <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) h <- nnfun(L) h(0.2, 0.3) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/shift.psp.Rd0000755000176000001440000000401712237642734015144 0ustar ripleyusers\name{shift.psp} \alias{shift.psp} \title{Apply Vector Translation To Line Segment Pattern} \description{ Applies a vector shift to a line segment pattern. 
} \usage{ \method{shift}{psp}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Line Segment pattern (object of class \code{"psp"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \value{ Another line segment pattern (of class \code{"psp"}) representing the result of applying the vector shift. } \details{ The line segment pattern, and its window, are translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, then it should be one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}. The argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the origin. If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin. 
} \seealso{ \code{\link{shift}}, \code{\link{shift.owin}}, \code{\link{shift.ppp}}, \code{\link{periodify}}, \code{\link{rotate}}, \code{\link{affine}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(X, col="red") Y <- shift(X, c(0.05,0.05)) plot(Y, add=TRUE, col="blue") shift(Y, origin="mid") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/linearKdot.Rd0000644000176000001440000000543012237642732015315 0ustar ripleyusers\name{linearKdot} \alias{linearKdot} \title{ Multitype K Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype \eqn{K} function which counts the expected number of points (of any type) within a given distance of a point of type \eqn{i}. } \usage{ linearKdot(X, i, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the dot type \eqn{K} function \eqn{K_{i\bullet}(r)}{K[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{i\bullet}(r)}{K[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{Ignored.} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
} \details{ This is a counterpart of the function \code{\link[spatstat]{Kdot}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{i\bullet}(r)}{Ki.(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link[spatstat]{Kdot}}, \code{\link{linearKcross}}, \code{\link[spatstat]{linearK}}. } \examples{ data(chicago) K <- linearKdot(chicago, "assault") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/is.marked.Rd0000755000176000001440000000264512237642732015106 0ustar ripleyusers\name{is.marked} \alias{is.marked} \title{Test Whether Marks Are Present} \description{ Generic function to test whether a given object (usually a point pattern or something related to a point pattern) has ``marks'' attached to the points. } \usage{ is.marked(X, \dots) } \arguments{ \item{X}{ Object to be inspected } \item{\dots}{ Other arguments. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is marked. } \details{ ``Marks'' are observations attached to each point of a point pattern. 
For example the \code{\link{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. Other objects related to point patterns, such as point process models, may involve marked points. This function tests whether the object \code{X} contains or involves marked points. It is generic; methods are provided for point patterns (objects of class \code{"ppp"}) and point process models (objects of class \code{"ppm"}). } \seealso{ \code{\link{is.marked.ppp}}, \code{\link{is.marked.ppm}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/print.im.Rd0000755000176000001440000000137612237642733014772 0ustar ripleyusers\name{print.im} \alias{print.im} \title{Print Brief Details of an Image} \description{ Prints a very brief description of a pixel image object. } \usage{ \method{print}{im}(x, \dots) } \arguments{ \item{x}{Pixel image (object of class \code{"im"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the pixel image \code{x} is printed. This is a method for the generic function \code{\link{print}}. 
} \seealso{ \code{\link{print}}, \code{\link{im.object}}, \code{\link{summary.im}} } \examples{ data(letterR) U <- as.im(letterR) U } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{print} spatstat/man/murchison.Rd0000755000176000001440000001041712237642733015235 0ustar ripleyusers\name{murchison} \alias{murchison} \docType{data} \title{ Murchison gold deposits } \description{ Data recording the spatial locations of gold deposits and associated geological features in the Murchison area of Western Australia. Extracted from a large scale (1:500,000) study of the Murchison area by the Geological Survey of Western Australia (Watkins and Hickman, 1990). The features recorded are \itemize{ \item the locations of gold deposits; \item the locations of geological faults; \item the region that contains greenstone bedrock. } The study region is contained in a \eqn{330\times 400}{330 * 400} kilometre rectangle. At this scale, gold deposits are points, i.e. their spatial extent is negligible. Gold deposits in this region occur only in greenstone bedrock. Geological faults can be observed reliably only within the same region. However, some faults have been extrapolated (by geological ``interpretation'') outside the greenstone boundary from information observed in the greenstone region. These data were analysed by Foxall and Baddeley (2002) and Brown et al (2002); see also Groves et al (2000), Knox-Robinson and Groves (1997). The main aim is to predict the intensity of the point pattern of gold deposits from the more easily observable fault pattern. } \format{ \code{murchison} is a list with the following entries: \describe{ \item{gold}{a point pattern (object of class \code{"ppp"}) representing the point pattern of gold deposits. See \code{\link{ppp.object}} for details of the format. 
} \item{faults}{a line segment pattern (object of class \code{"psp"}) representing the geological faults. See \code{\link{psp.object}} for details of the format. } \item{greenstone}{the greenstone bedrock region. An object of class \code{"owin"}. Consists of multiple irregular polygons with holes. } } All coordinates are given in \bold{metres}. } \usage{ data(murchison) } \examples{ if(interactive()) { data(murchison) plot(murchison$greenstone, main="Murchison data", col="lightgreen") plot(murchison$gold, add=TRUE, pch="+",col="blue") plot(murchison$faults, add=TRUE, col="red") } } \source{ Data were kindly provided by Dr Carl Knox-Robinson of the Department of Geology and Geophysics, University of Western Australia. Permission to use the data is granted by Dr Tim Griffin, Geological Survey of Western Australia and by Dr Knox-Robinson. \emph{Please make appropriate acknowledgement} to Watkins and Hickman (1990) and the Geological Survey of Western Australia. } \references{ Brown, W.M., Gedeon, T.D., Baddeley, A.J. and Groves, D.I. (2002) Bivariate J-function and other graphical statistical methods help select the best predictor variables as inputs for a neural network method of mineral prospectivity mapping. In U. Bayer, H. Burger and W. Skala (eds.) \emph{IAMG 2002: 8th Annual Conference of the International Association for Mathematical Geology}, Volume 1, 2002. International Association of Mathematical Geology. Pages 257--268. Foxall, R. and Baddeley, A. (2002) Nonparametric measures of association between a spatial point process and a random set, with geological applications. \emph{Applied Statistics} \bold{51}, 165--182. Groves, D.I., Goldfarb, R.J., Knox-Robinson, C.M., Ojala, J., Gardoll, S, Yun, G.Y. and Holyland, P. (2000) Late-kinematic timing of orogenic gold deposits and significance for computer-based exploration techniques with emphasis on the Yilgarn Block, Western Australia. \emph{Ore Geology Reviews}, \bold{17}, 1--38. Knox-Robinson, C.M. 
and Groves, D.I. (1997) Gold prospectivity mapping using a geographic information system (GIS), with examples from the Yilgarn Block of Western Australia. \emph{Chronique de la Recherche Miniere} \bold{529}, 127--138. Watkins, K.P. and Hickman, A.H. (1990) \emph{Geological evolution and mineralization of the Murchison Province, Western Australia}. Bulletin 137, Geological Survey of Western Australia. 267 pages. Published by Department of Mines, Western Australia, 1990. Available online from Department of Industry and Resources, State Government of Western Australia, \code{www.doir.wa.gov.au} } \keyword{datasets} \keyword{spatial} spatstat/man/plot.slrm.Rd0000755000176000001440000000251312237642733015156 0ustar ripleyusers\name{plot.slrm} \Rdversion{1.1} \alias{plot.slrm} \title{ Plot a Fitted Spatial Logistic Regression } \description{ Plots a fitted Spatial Logistic Regression model. } \usage{ \method{plot}{slrm}(x, ..., type = "intensity") } \arguments{ \item{x}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Extra arguments passed to \code{\link[spatstat]{plot.im}} to control the appearance of the plot. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. } } \details{ This is a method for \code{\link{plot}} for fitted spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). This function plots the result of \code{\link{predict.slrm}}. } \value{ None. 
} \seealso{ \code{\link{slrm}}, \code{\link{predict.slrm}}, \code{\link[spatstat]{plot.im}} } \examples{ data(copper) X <- copper$SouthPoints Y <- copper$SouthLines Z <- distmap(Y) fit <- slrm(X ~ Z) plot(fit) plot(fit, type="link") } \author{Adrian Baddeley \email{adrian@maths.uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/predict.lppm.Rd0000755000176000001440000000627112237642733015630 0ustar ripleyusers\name{predict.lppm} \alias{predict.lppm} \title{ Predict Point Process Model on Linear Network } \description{ Given a fitted point process model on a linear network, compute the fitted intensity or conditional intensity of the model. } \usage{ \method{predict}{lppm}(object, ..., type = "trend", locations = NULL) } \arguments{ \item{object}{ The fitted model. An object of class \code{"lppm"}, see \code{\link{lppm}}. } \item{type}{ Type of values to be computed. Either \code{"trend"}, \code{"cif"} or \code{"se"}. } \item{locations}{ Optional. Locations at which predictions should be computed. Either a data frame with two columns of coordinates, or a binary image mask. } \item{\dots}{ Optional arguments passed to \code{\link[spatstat]{as.mask}} to determine the pixel resolution (if \code{locations} is missing). } } \details{ This function computes the fitted point process intensity, fitted conditional intensity, or standard error of the fitted intensity, for a point process model on a linear network. It is a method for the generic \code{\link[stats]{predict}} for the class \code{"lppm"}. The argument \code{object} should be an object of class \code{"lppm"} (produced by \code{\link{lppm}}) representing a point process model on a linear network. Predicted values are computed at the locations given by the argument \code{locations}. If this argument is missing, then predicted values are computed at a fine grid of points on the linear network. 
\itemize{ \item If \code{locations} is missing or \code{NULL} (the default), the return value is a pixel image (object of class \code{"linim"} which inherits class \code{"im"}) corresponding to a discretisation of the linear network, with numeric pixel values giving the predicted values at each location on the linear network. \item If \code{locations} is a data frame, the result is a numeric vector of predicted values at the locations specified by the data frame. \item If \code{locations} is a binary mask, the result is a pixel image with predicted values computed at the pixels of the mask. } } \value{ A pixel image (object of class \code{"linim"} which inherits class \code{"im"}) or a numeric vector, depending on the argument \code{locations}. See Details. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{lpp}}, \code{\link{linim}} } \examples{ example(lpp) fit <- lppm(X, ~x) v <- predict(fit, type="trend") plot(v) } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} \keyword{models} spatstat/man/simulate.kppm.Rd0000755000176000001440000000506112237642734016017 0ustar ripleyusers\name{simulate.kppm} \alias{simulate.kppm} \title{Simulate a Fitted Cluster Point Process Model} \description{ Generates simulated realisations from a fitted cluster point process model. 
} \usage{ \method{simulate}{kppm}(object, nsim = 1, seed=NULL, ..., window=NULL, covariates=NULL, verbose=TRUE, retry=10) } \arguments{ \item{object}{ Fitted cluster point process model. An object of class \code{"kppm"}. } \item{nsim}{ Number of simulated realisations. } \item{seed}{ an object specifying whether and how to initialise the random number generator. Either \code{NULL} or an integer that will be used in a call to \code{\link[base:Random]{set.seed}} before simulating the point patterns. } \item{\dots}{Ignored.} \item{window}{ Optional. Window (object of class \code{"owin"}) in which the model should be simulated. } \item{covariates}{ Optional. A named list containing new values for the covariates in the model. } \item{verbose}{ Logical. Whether to print progress reports (when \code{nsim > 1}). } \item{retry}{ Number of times to repeat the simulation if it fails (e.g. because of insufficient memory). } } \details{ This function is a method for the generic function \code{\link[stats]{simulate}} for the class \code{"kppm"} of fitted cluster point process models. Simulations are performed by \code{\link{rThomas}}, \code{\link{rMatClust}} or \code{\link{rLGCP}} depending on the model. The return value is a list of point patterns. It also carries an attribute \code{"seed"} that captures the initial state of the random number generator. This follows the convention used in \code{simulate.lm} (see \code{\link[stats]{simulate}}). It can be used to force a sequence of simulations to be repeated exactly, as shown in the examples for \code{\link[stats]{simulate}}. } \value{ A list of length \code{nsim} containing simulated point patterns (objects of class \code{"ppp"}). The return value also carries an attribute \code{"seed"} that captures the initial state of the random number generator. See Details. 
} \examples{ data(redwood) fit <- kppm(redwood, ~1, "Thomas") simulate(fit, 2) } \seealso{ \code{\link{kppm}}, \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rLGCP}}, \code{\link{simulate.ppm}}, \code{\link[stats]{simulate}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/plot.linnet.Rd0000755000176000001440000000216212237642733015472 0ustar ripleyusers\name{plot.linnet} \alias{plot.linnet} \title{ Plot a linear network } \description{ Plots a linear network } \usage{ \method{plot}{linnet}(x, ..., main=NULL, add=FALSE, vertices=FALSE, window=FALSE) } \arguments{ \item{x}{ Linear network (object of class \code{"linnet"}). } \item{\dots}{ Arguments passed to \code{\link{plot.psp}} controlling the plot. } \item{main}{ Main title for plot. Use \code{main=""} to suppress it. } \item{add}{ Logical. If \code{TRUE}, superimpose the graphics over the current plot. If \code{FALSE}, generate a new plot. } \item{vertices}{ Logical. Whether to plot the vertices as well. } \item{window}{ Logical. Whether to plot the window containing the linear network. } } \details{ This is the plot method for class \code{"linnet"}. } \value{ Null. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{linnet}} } \examples{ example(linnet) plot(letterA) } \keyword{spatial} spatstat/man/demopat.Rd0000755000176000001440000000115412237642732014654 0ustar ripleyusers\name{demopat} \alias{demopat} \docType{data} \title{ Artificial Data Point Pattern } \description{ This is an artificial dataset, for use in testing and demonstrating the capabilities of the \code{spatstat} package. It is a multitype point pattern in an irregular polygonal window. There are two types of points. The window contains a polygonal hole. 
} \format{ An object of class \code{"ppp"} representing the point pattern. See \code{\link{ppp.object}} for details of the format of a point pattern object. } \usage{data(demopat)} \source{Adrian Baddeley} \keyword{datasets} \keyword{spatial} spatstat/man/project.ppm.Rd0000644000176000001440000000727712237642733015476 0ustar ripleyusers\name{project.ppm} \alias{project.ppm} \title{ Force Point Process Model to be Valid } \description{ Ensures that a fitted point process model satisfies the integrability conditions for existence of the point process. } \usage{ project.ppm(object, ..., fatal=FALSE, trace=FALSE) } \arguments{ \item{object}{ Fitted point process model (object of class \code{"ppm"}). } \item{\dots}{Ignored.} \item{fatal}{ Logical value indicating whether to generate an error if the model cannot be projected to a valid model. } \item{trace}{ Logical value indicating whether to print a trace of the decision process. } } \details{ The model-fitting function \code{\link{ppm}} fits Gibbs point process models to point pattern data. By default, the fitted model returned by \code{\link{ppm}} may not actually exist as a point process. First, some of the fitted coefficients of the model may be \code{NA} or infinite values. This usually occurs when the data are insufficient to estimate all the parameters. The model is said to be \emph{unidentifiable} or \emph{confounded}. Second, unlike a regression model, which is well-defined for any finite values of the fitted regression coefficients, a Gibbs point process model is only well-defined if the fitted interaction parameters satisfy some constraints. A famous example is the Strauss process (see \code{\link{Strauss}}) which exists only when the interaction parameter \eqn{\gamma}{gamma} is less than or equal to 1. For values \eqn{\gamma > 1}{gamma > 1}, the probability density is not integrable and the process does not exist (and cannot be simulated). 
By default, \code{\link{ppm}} does not enforce the constraint that a fitted Strauss process (for example) must satisfy \eqn{\gamma \le 1}{gamma <= 1}. This is because a fitted parameter value of \eqn{\gamma > 1}{gamma > 1} could be useful information for data analysis, as it indicates that the Strauss model is not appropriate, and suggests a clustered model should be fitted. The function \code{project.ppm} modifies the model \code{object} so that the model is valid. \code{project.ppm} identifies the terms in the model \code{object} that are associated with illegal parameter values (i.e. parameter values which are either \code{NA}, infinite, or outside their permitted range). It considers all possible sub-models of \code{object} obtained by deleting one or more of these terms. It identifies which of these submodels are valid, and chooses the valid submodel with the largest pseudolikelihood. The result of \code{project.ppm} is the true maximum pseudolikelihood fit to the data. For large datasets or complex models, the algorithm used in \code{project.ppm} may be time-consuming, because it takes time to compute all the sub-models. A faster, approximate algorithm can be applied by setting \code{spatstat.options(project.fast=TRUE)}. This produces a valid submodel, which may not be the maximum pseudolikelihood submodel. Use the function \code{\link{valid.ppm}} to check whether a fitted model object specifies a well-defined point process. Use the expression \code{all(is.finite(coef(object)))} to determine whether all parameters are identifiable. } \value{ Another point process model (object of class \code{"ppm"}). 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{ppm}}, \code{\link{valid.ppm}}, \code{\link{spatstat.options}} } \examples{ fit <- ppm(redwood, ~1, Strauss(0.1)) coef(fit) fit2 <- project.ppm(fit) coef(fit2) } \keyword{spatial} \keyword{models} spatstat/man/markconnect.Rd0000755000176000001440000001440512237642733015531 0ustar ripleyusers\name{markconnect} \alias{markconnect} \title{ Mark Connection Function } \description{ Estimate the mark connection function of a multitype point pattern. } \usage{ markconnect(X, i, j, r=NULL, correction=c("isotropic", "Ripley", "translate"), method="density", \dots, normalise=FALSE) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{method}{ A character vector indicating the user's choice of density estimation technique to be used. Options are \code{"density"}, \code{"loess"}, \code{"sm"} and \code{"smrep"}. } \item{\dots}{ Arguments passed to the density estimation routine (\code{\link{density}}, \code{\link{loess}} or \code{sm.density}) selected by \code{method}. 
} \item{normalise}{ If \code{TRUE}, normalise the pair connection function by dividing it by \eqn{p_i p_j}{p[i]*p[j]}, the estimated probability that randomly-selected points will have marks \eqn{i} and \eqn{j}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark connection function \eqn{p_{ij}(r)}{p[i,j](r)} has been estimated } \item{theo}{the theoretical value of \eqn{p_{ij}(r)}{p[i,j](r)} when the marks attached to different points are independent } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{p_{ij}(r)}{p[i,j](r)} obtained by the edge corrections named. } \details{ The mark connection function \eqn{p_{ij}(r)}{p[i,j](r)} of a multitype point process \eqn{X} is a measure of the dependence between the types of two points of the process a distance \eqn{r} apart. Informally \eqn{p_{ij}(r)}{p[i,j](r)} is defined as the conditional probability, given that there is a point of the process at a location \eqn{u} and another point of the process at a location \eqn{v} separated by a distance \eqn{||u-v|| = r}, that the first point is of type \eqn{i} and the second point is of type \eqn{j}. See Stoyan and Stoyan (1994). If the marks attached to the points of \code{X} are independent and identically distributed, then \eqn{p_{ij}(r) \equiv p_i p_j}{p[i,j](r) = p[i]p[j]} where \eqn{p_i}{p[i]} denotes the probability that a point is of type \eqn{i}. Values larger than this, \eqn{p_{ij}(r) > p_i p_j}{p[i,j](r) > p[i]p[j]}, indicate positive association between the two types, while smaller values indicate negative association. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. 
It must be a multitype point pattern (a marked point pattern with factor-valued marks). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[i,j](r)} is estimated. There is a sensible default. This algorithm assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Kest}}. The edge corrections implemented here are \describe{ \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is implemented only for rectangular and polygonal windows (not for binary masks). } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries, but slow for complex windows. } } Note that the estimator assumes the process is stationary (spatially homogeneous). The mark connection function is estimated using density estimation techniques. The user can choose between \describe{ \item{\code{"density"}}{ which uses the standard kernel density estimation routine \code{\link{density}}, and works only for evenly-spaced \code{r} values; } \item{\code{"loess"}}{ which uses the function \code{loess} in the package \pkg{modreg}; } \item{\code{"sm"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is extremely slow; } \item{\code{"smrep"}}{ which uses the function \code{sm.density} in the package \pkg{sm} and is relatively fast, but may require manual control of the smoothing parameter \code{hmult}. } } } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \seealso{ Multitype pair correlation \code{\link{pcfcross}} and multitype K-functions \code{\link{Kcross}}, \code{\link{Kdot}}. 
Use \code{\link{alltypes}} to compute the mark connection functions between all pairs of types. Mark correlation \code{\link{markcorr}} and mark variogram \code{\link{markvario}} for numeric-valued marks. } \examples{ # Hughes' amacrine data # Cells marked as 'on'/'off' data(amacrine) M <- markconnect(amacrine, "on", "off") plot(M) # Compute for all pairs of types at once plot(alltypes(amacrine, markconnect)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/fryplot.Rd0000755000176000001440000001254312237642732014726 0ustar ripleyusers\name{fryplot} \alias{fryplot} \alias{frypoints} \title{Fry Plot of Point Pattern} \description{ Displays the Fry plot (Patterson plot) of a spatial point pattern. } \usage{ fryplot(X, ..., width=NULL, from=NULL, to=NULL) frypoints(X) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link{as.ppp}}. } \item{\dots}{Optional arguments to control the appearance of the plot.} \item{width}{Optional parameter indicating the width of a box for a zoomed-in view of the Fry plot near the origin.} \item{from,to}{ Optional. Subset indices specifying which points of \code{X} will be considered when forming the vectors (drawn from each point of \code{from}, to each point of \code{to}.) } } \details{ The function \code{fryplot} generates a Fry plot (or Patterson plot); \code{frypoints} returns the points of the Fry plot as a point pattern dataset. Fry (1979) and Hanna and Fry (1979) introduced a manual graphical method for investigating features of a spatial point pattern of mineral deposits. A transparent sheet, marked with an origin or centre point, is placed over the point pattern. 
The transparent sheet is shifted so that the origin lies over one of the data points, and the positions of all the \emph{other} data points are copied onto the transparent sheet. This procedure is repeated for each data point in turn. The resulting plot (the Fry plot) is a pattern of \eqn{n(n-1)} points, where \eqn{n} is the original number of data points. This procedure was previously proposed by Patterson (1934, 1935) for studying inter-atomic distances in crystals, and is also known as a Patterson plot. The function \code{fryplot} generates the Fry/Patterson plot. Standard graphical parameters such as \code{main}, \code{pch}, \code{lwd}, \code{col}, \code{bg}, \code{cex} can be used to control the appearance of the plot. To zoom in (to view only a subset of the Fry plot at higher magnification), use the argument \code{width} to specify the width of a rectangular field of view centred at the origin, or the standard graphical arguments \code{xlim} and \code{ylim} to specify another rectangular field of view. (The actual field of view may be slightly larger, depending on the graphics device.) The function \code{frypoints} returns the points of the Fry plot as a point pattern object. There may be a large number of points in this pattern, so this function should be used only if further analysis of the Fry plot is required. Fry plots are particularly useful for recognising anisotropy in regular point patterns. A void around the origin in the Fry plot suggests regularity (inhibition between points) and the shape of the void gives a clue to anisotropy in the pattern. Fry plots are also useful for detecting periodicity or rounding of the spatial coordinates. In mathematical terms, the Fry plot of a point pattern \code{X} is simply a plot of the vectors \code{X[i] - X[j]} connecting all pairs of distinct points in \code{X}. The Fry plot is related to the \eqn{K} function (see \code{\link{Kest}}) and the reduced second moment measure (see \code{\link{Kmeasure}}). 
For example, the number of points in the Fry plot lying within a circle of given radius is an unnormalised and uncorrected version of the \eqn{K} function. The Fry plot has a similar appearance to the plot of the reduced second moment measure \code{\link{Kmeasure}} when the smoothing parameter \code{sigma} is very small. The Fry plot does not adjust for the effect of the size and shape of the sampling window. The density of points in the Fry plot tapers off near the edges of the plot. This is an edge effect, a consequence of the bounded sampling window. In geological applications this is usually not important, because interest is focused on the behaviour near the origin where edge effects can be ignored. To correct for the edge effect, use \code{\link{Kmeasure}} or \code{\link{Kest}} or its relatives. } \value{ \code{fryplot} returns \code{NULL}. \code{frypoints} returns a point pattern (object of class \code{"ppp"}). } \references{ Fry, N. (1979) Random point distributions and strain measurement in rocks. \emph{Tectonophysics} \bold{60}, 89--105. Hanna, S.S. and Fry, N. (1979) A comparison of methods of strain determination in rocks from southwest Dyfed (Pembrokeshire) and adjacent areas. \emph{Journal of Structural Geology} \bold{1}, 155--162. Patterson, A.L. (1934) A Fourier series method for the determination of the component of inter-atomic distances in crystals. \emph{Physics Reviews} \bold{46}, 372--376. Patterson, A.L. (1935) A direct method for the determination of the components of inter-atomic distances in crystals. \emph{Zeitschrift fuer Krystallographie} \bold{90}, 517--554. 
} \seealso{ \code{\link{Kmeasure}}, \code{\link{Kest}} } \examples{ data(cells) fryplot(cells) Y <- frypoints(cells) data(amacrine) fryplot(amacrine, width=0.2, from=(marks(amacrine) == "on"), chars=c(3,16), cols=2:3, main="Fry plot centred at an On-cell") points(0,0) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/rmhmodel.Rd0000755000176000001440000000607112237642734015037 0ustar ripleyusers\name{rmhmodel} \alias{rmhmodel} \title{Define Point Process Model for Metropolis-Hastings Simulation.} \description{ Builds a description of a point process model for use in simulating the model by the Metropolis-Hastings algorithm. } \usage{ rmhmodel(...) } \arguments{ \item{\dots}{Arguments specifying the point process model in some format. } } \value{ An object of class \code{"rmhmodel"}, which is essentially a list of parameter values for the model. There is a \code{print} method for this class, which prints a sensible description of the model chosen. } \details{ Simulated realisations of many point process models can be generated using the Metropolis-Hastings algorithm \code{\link{rmh}}. The algorithm requires the model to be specified in a particular format: an object of class \code{"rmhmodel"}. The function \code{\link{rmhmodel}} takes a description of a point process model in some other format, and converts it into an object of class \code{"rmhmodel"}. It also checks that the parameters of the model are valid. The function \code{\link{rmhmodel}} is generic, with methods for \describe{ \item{fitted point process models:}{ an object of class \code{"ppm"}, obtained by a call to the model-fitting function \code{\link{ppm}}. See \code{\link{rmhmodel.ppm}}. } \item{lists:}{ a list of parameter values in a certain format. See \code{\link{rmhmodel.list}}. 
} \item{default:}{ parameter values specified as separate arguments to \code{\dots}. See \code{\link{rmhmodel.default}}. } } } \references{ Diggle, P. J. (2003) \emph{Statistical Analysis of Spatial Point Patterns} (2nd ed.) Arnold, London. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. Diggle, P.J., Gates, D.J., and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. Biometrika \bold{74}, 763 -- 770. \emph{Scandinavian Journal of Statistics} \bold{21}, 359--373. Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{rmhmodel.ppm}}, \code{\link{rmhmodel.default}}, \code{\link{rmhmodel.list}}, \code{\link{rmh}}, \code{\link{rmhcontrol}}, \code{\link{rmhstart}}, \code{\link{ppm}}, \code{\link{Strauss}}, \code{\link{Softcore}}, \code{\link{StraussHard}}, \code{\link{Triplets}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{DiggleGratton}}, \code{\link{PairPiece}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/fv.Rd0000755000176000001440000001556412237642732013650 0ustar ripleyusers\name{fv} \alias{fv} \title{ Create a Function Value Table } \description{ Advanced Use Only. This low-level function creates an object of class \code{"fv"} from raw numerical data. 
} \usage{ fv(x, argu = "r", ylab = NULL, valu, fmla = NULL, alim = NULL, labl = names(x), desc = NULL, unitname = NULL, fname = NULL, yexp = ylab) } \arguments{ \item{x}{ A data frame with at least 2 columns containing the values of the function argument and the corresponding values of (one or more versions of) the function. } \item{argu}{ String. The name of the column of \code{x} that contains the values of the function argument. } \item{ylab}{ Either \code{NULL}, or an \R language expression representing the mathematical name of the function. See Details. } \item{valu}{ String. The name of the column of \code{x} that should be taken as containing the function values, in cases where a single column is required. } \item{fmla}{ Either \code{NULL}, or a \code{formula} specifying the default plotting behaviour. See Details. } \item{alim}{ Optional. The default range of values of the function argument for which the function will be plotted. Numeric vector of length 2. } \item{labl}{ Optional. Plot labels for the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{desc}{ Optional. Descriptions of the columns of \code{x}. A vector of strings, with one entry for each column of \code{x}. } \item{unitname}{ Optional. Name of the unit (usually a unit of length) in which the function argument is expressed. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } \item{fname}{ Optional. The name of the function itself. A character string. } \item{yexp}{ Optional. Alternative form of \code{ylab} more suitable for annotating an axis of the plot. See Details. } } \details{ This documentation is provided for experienced programmers who want to modify the internal behaviour of \pkg{spatstat}. Other users please see \code{\link{fv.object}}. The low-level function \code{fv} is used to create an object of class \code{"fv"} from raw numerical data. 
The data frame \code{x} contains the numerical data. It should have one column (typically but not necessarily named \code{"r"}) giving the values of the function argument for which the function has been evaluated; and at least one other column, containing the corresponding values of the function. Typically there is more than one column of function values. These columns typically give the values of different versions or estimates of the same function, for example, different estimates of the \eqn{K} function obtained using different edge corrections. However they may also contain the values of related functions such as the derivative or hazard rate. \code{argu} specifies the name of the column of \code{x} that contains the values of the function argument (typically \code{argu="r"} but this is not compulsory). \code{valu} specifies the name of another column that contains the \sQuote{recommended} estimate of the function. It will be used to provide function values in those situations where a single column of data is required. For example, \code{\link{envelope}} computes its simulation envelopes using the recommended value of the summary function. \code{fmla} specifies the default plotting behaviour. It should be a formula, or a string that can be converted to a formula. Variables in the formula are names of columns of \code{x}. See \code{\link{plot.fv}} for the interpretation of this formula. \code{alim} specifies the recommended range of the function argument. This is used in situations where statistical theory or statistical practice indicates that the computed estimates of the function are not trustworthy outside a certain range of values of the function argument. By default, \code{\link{plot.fv}} will restrict the plot to this range. \code{fname} is a string giving the name of the function itself. For example, the \eqn{K} function would have \code{fname="K"}. 
\code{ylab} is a mathematical expression for the function value, used when labelling an axis of the plot, or when printing a description of the function. It should be an \R language object. For example the \eqn{K} function's mathematical name \eqn{K(r)} is rendered by \code{ylab=substitute(K(r), NULL)}. If \code{yexp} is present, then \code{ylab} will be used only for printing, and \code{yexp} will be used for annotating axes in a plot. (Otherwise \code{yexp} defaults to \code{ylab}). For example the cross-type \eqn{K} function \eqn{K_{1,2}(r)}{K[1,2](r)} is rendered by something like \code{ylab=substitute(Kcross[i,j](r), list(i=1,j=2))} and \code{yexp=substitute(Kcross[list(i,j)](r), list(i=1,j=2))} to get the most satisfactory behaviour. \code{labl} is a character vector specifying plot labels for each column of \code{x}. These labels will appear on the plot axes (in non-default plots), legends and printed output. Entries in \code{labl} may contain the string \code{"\%s"} which will be replaced by \code{fname}. For example the border-corrected estimate of the \eqn{K} function has label \code{"\%sbord(r)"} which becomes \code{"Kbord(r)"}. \code{desc} is a character vector containing intelligible explanations of each column of \code{x}. Entries in \code{desc} may contain the string \code{"\%s"} which will be replaced by \code{ylab}. For example the border correction estimate of the \eqn{K} function has description \code{"border correction estimate of \%s"}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}. } \seealso{ See \code{\link{plot.fv}} for plotting an \code{"fv"} object. See \code{\link{as.function.fv}} to convert an \code{"fv"} object to an \R function. Use \code{\link{cbind.fv}} to combine several \code{"fv"} objects. Use \code{\link{bind.fv}} to glue additional columns onto an existing \code{"fv"} object. 
\emph{Undocumented} functions for modifying an \code{"fv"} object include \code{fvnames}, \code{fvnames<-}, \code{tweak.fv.entry} and \code{rebadge.fv}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ df <- data.frame(r=seq(0,5,by=0.1)) df <- transform(df, a=pi*r^2, b=3*r^2) X <- fv(df, "r", substitute(A(r), NULL), "a", cbind(a, b) ~ r, alim=c(0,4), labl=c("r", "\%s[true](r)", "\%s[approx](r)"), desc=c("radius of circle", "true area \%s", "rough area \%s"), fname="A") X } \keyword{spatial} \keyword{classes} spatstat/man/rknn.Rd0000755000176000001440000000404212237642734014174 0ustar ripleyusers\name{rknn} \alias{dknn} \alias{pknn} \alias{qknn} \alias{rknn} \title{ Theoretical Distribution of Nearest Neighbour Distance } \description{ Density, distribution function, quantile function and random generation for the random distance to the \eqn{k}th nearest neighbour in a Poisson point process in \eqn{d} dimensions. } \usage{ dknn(x, k = 1, d = 2, lambda = 1) pknn(q, k = 1, d = 2, lambda = 1) qknn(p, k = 1, d = 2, lambda = 1) rknn(n, k = 1, d = 2, lambda = 1) } \arguments{ \item{x,q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations to be generated.} \item{k}{order of neighbour.} \item{d}{dimension of space.} \item{lambda}{intensity of Poisson point process.} } \details{ In a Poisson point process in \eqn{d}-dimensional space, let the random variable \eqn{R} be the distance from a fixed point to the \eqn{k}-th nearest random point, or the distance from a random point to the \eqn{k}-th nearest other random point. Then \eqn{R^d} has a Gamma distribution with shape parameter \eqn{k} and rate \eqn{\lambda * \alpha}{lambda * alpha} where \eqn{\alpha}{alpha} is a constant (equal to the volume of the unit ball in \eqn{d}-dimensional space). See e.g. Cressie (1991, page 61). 
These functions support calculation and simulation for the distribution of \eqn{R}. } \value{ A numeric vector: \code{dknn} returns the probability density, \code{pknn} returns cumulative probabilities (distribution function), \code{qknn} returns quantiles, and \code{rknn} generates random deviates. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for spatial data}. John Wiley and Sons, 1991. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ x <- seq(0, 5, length=20) densities <- dknn(x, k=3, d=2) cdfvalues <- pknn(x, k=3, d=2) randomvalues <- rknn(100, k=3, d=2) deciles <- qknn((1:9)/10, k=3, d=2) } \keyword{spatial} \keyword{distribution} spatstat/man/Concom.Rd0000644000176000001440000001344012237642731014436 0ustar ripleyusers\name{Concom} \alias{Concom} \title{The Connected Component Process Model} \description{ Creates an instance of the Connected Component point process model which can then be fitted to point pattern data. } \usage{ Concom(r) } \arguments{ \item{r}{Threshold distance} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the connected component process with disc radius \eqn{r}. } \details{ This function defines the interpoint interaction structure of a point process called the connected component process. It can be used to fit this model to point pattern data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the connected component interaction is yielded by the function \code{Concom()}. See the examples below. 
In \bold{standard form}, the connected component process (Baddeley and Moller, 1989) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{kappa} and interaction parameter \eqn{\gamma}{gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-C(x)} }{ f(x[1],\ldots,x[n]) = alpha . kappa^n(x) . gamma^(-C(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{C(x)} is defined below. Here \eqn{\alpha}{alpha} is a normalising constant. To define the term \code{C(x)}, suppose that we construct a planar graph by drawing an edge between each pair of points \eqn{x_i,x_j}{x[i],x[j]} which are less than \eqn{r} units apart. Two points belong to the same connected component of this graph if they are joined by a path in the graph. Then \eqn{C(x)} is the number of connected components of the graph. The interaction parameter \eqn{\gamma}{gamma} can be any positive number. If \eqn{\gamma = 1}{gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{kappa}. If \eqn{\gamma < 1}{gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{gamma > 1} the process is clustered. Thus, a connected-component interaction process can be used to model either clustered or regular point patterns. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \gamma^{-U(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) gamma^(-U(x)) } where \eqn{\beta}{beta} is the new intensity parameter and \eqn{U(x) = C(x) - n(x)} is the interaction potential. In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{beta} to the probability density (so the first order trend is \eqn{\beta}{beta}). 
The quantity \eqn{U(x)} is a true interaction potential, in the sense that \eqn{U(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together. When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-U(x)} increases by zero or a positive integer. The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is a positive integer \eqn{k} if there are \eqn{k} different connected components of \eqn{x} that lie close to \eqn{u}. Addition of the point \eqn{u} contributes a factor \eqn{\beta \eta^\delta}{beta * eta^delta} to the probability density, where \eqn{\delta}{delta} is the increase in potential. If desired, the original parameter \eqn{\kappa}{kappa} can be recovered from the canonical parameter by \eqn{\kappa = \beta\gamma}{kappa = beta * gamma}. The \emph{nonstationary} connected component process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. Note the only argument of \code{Concom()} is the threshold distance \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\gamma)}{log(gamma)} are estimated by \code{\link{ppm}()}, not fixed in \code{Concom()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \section{Edge correction}{ The interaction distance of this process is infinite. There are no well-established procedures for edge correction for fitting such models, and accordingly the model-fitting function \code{\link{ppm}} will give an error message saying that the user must specify an edge correction. A reasonable solution is to use the border correction at the same distance \code{r}, as shown in the Examples. 
} \examples{ # prints a sensible description of itself Concom(r=0.1) # Fit the stationary connected component process to redwood data ppm(redwood, ~1, Concom(r=0.07), rbord=0.07) # Fit the stationary connected component process to `cells' data ppm(cells, ~1, Concom(r=0.06), rbord=0.06) # eta=0 indicates hard core process. # Fit a nonstationary connected component model # with log-cubic polynomial trend \dontrun{ ppm(swedishpines, ~polynom(x/10,y/10,3), Concom(r=7), rbord=7) } } \references{ Baddeley, A.J. and Moller, J. (1989) Nearest-neighbour Markov point processes and random sets. \emph{International Statistical Review} \bold{57}, 89--121. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/gridweights.Rd0000755000176000001440000000455412237642732015552 0ustar ripleyusers\name{gridweights} \alias{gridweights} \title{Compute Quadrature Weights Based on Grid Counts} \description{ Computes quadrature weights for a given set of points, using the ``counting weights'' for a grid of rectangular tiles. } \usage{ gridweights(X, ntile, \dots, window=NULL, verbose=FALSE, npix=NULL, areas=NULL) } \arguments{ \item{X}{Data defining a point pattern.} \item{ntile}{Number of tiles in each row and column of the rectangular grid. An integer vector of length 1 or 2. } \item{\dots}{Ignored.} \item{window}{Default window for the point pattern} \item{verbose}{Logical flag. If \code{TRUE}, information will be printed about the computation of the grid weights. } \item{npix}{Dimensions of pixel grid to use when computing a digital approximation to the tile areas. } \item{areas}{Vector of areas of the tiles, if they are already known.} } \value{ Vector of nonnegative weights for each point in \code{X}. 
} \details{ This function computes a set of quadrature weights for a given pattern of points (typically comprising both ``data'' and `dummy'' points). See \code{\link{quad.object}} for an explanation of quadrature weights and quadrature schemes. The weights are computed by the ``counting weights'' rule based on a regular grid of rectangular tiles. First \code{X} and (optionally) \code{window} are converted into a point pattern object. Then the bounding rectangle of the window of the point pattern is divided into a regular \code{ntile[1] * ntile[2]} grid of rectangular tiles. The weight attached to a point of \code{X} is the area of the tile in which it lies, divided by the number of points of \code{X} lying in that tile. For non-rectangular windows the tile areas are currently calculated by approximating the window as a binary mask. The accuracy of this approximation is controlled by \code{npix}, which becomes the argument \code{dimyx} of \code{\link{as.mask}}. } \seealso{ \code{\link{quad.object}}, \code{\link{dirichlet.weights}} } \examples{ Q <- quadscheme(runifpoispp(10)) X <- as.ppp(Q) # data and dummy points together w <- gridweights(X, 10) w <- gridweights(X, c(10, 10)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/Gdot.Rd0000755000176000001440000002151612237642731014123 0ustar ripleyusers\name{Gdot} \alias{Gdot} \title{ Multitype Nearest Neighbour Distance Function (i-to-any) } \description{ For a multitype point pattern, estimate the distribution of the distance from a point of type \eqn{i} to the nearest other point of any type. } \usage{ Gdot(X, i, r=NULL, breaks=NULL, \dots, correction=c("km", "rs", "han")) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the distance distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} will be computed. 
It must be a multitype point pattern (a marked point pattern whose marks are a factor). See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{\dots}{Ignored.} \item{correction}{ Optional. Character string specifying the edge correction(s) to be used. Options are \code{"none"}, \code{"rs"}, \code{"km"}, \code{"hanisch"} and \code{"best"}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing six numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{G_{i\bullet}(r)}{Gi.(r)} has been estimated } \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{han}{the Hanisch-style estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{km}{the spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}(r)}{Gi.(r)} } \item{hazard}{the hazard rate \eqn{\lambda(r)}{lambda(r)} of \eqn{G_{i\bullet}(r)}{Gi.(r)} by the spatial Kaplan-Meier method } \item{raw}{the uncorrected estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)}, i.e. the empirical distribution of the distances from each point of type \eqn{i} to the nearest other point of any type. } \item{theo}{the theoretical value of \eqn{G_{i\bullet}(r)}{Gi.(r)} for a marked Poisson process with the same estimated intensity (see below). 
} } \details{ This function \code{Gdot} and its companions \code{\link{Gcross}} and \code{\link{Gmulti}} are generalisations of the function \code{\link{Gest}} to multitype point patterns. A multitype point pattern is a spatial pattern of points classified into a finite number of possible ``colours'' or ``types''. In the \pkg{spatstat} package, a multitype pattern is represented as a single point pattern object in which the points carry marks, and the mark value attached to each point determines the type of that point. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The argument will be interpreted as a level of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level.) The ``dot-type'' (type \eqn{i} to any type) nearest neighbour distance distribution function of a multitype point process is the cumulative distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} of the distance from a typical random point of the process with type \eqn{i} the nearest other point of the process, regardless of type. An estimate of \eqn{G_{i\bullet}(r)}{Gi.(r)} is a useful summary statistic in exploratory data analysis of a multitype point pattern. If the type \eqn{i} points were independent of all other points, then \eqn{G_{i\bullet}(r)}{Gi.(r)} would equal \eqn{G_{ii}(r)}{Gii(r)}, the nearest neighbour distance distribution function of the type \eqn{i} points alone. For a multitype Poisson point process with total intensity \eqn{\lambda}{lambda}, we have \deqn{G_{i\bullet}(r) = 1 - e^{ - \lambda \pi r^2} }{% Gi.(r) = 1 - exp( - lambda * pi * r^2)} Deviations between the empirical and theoretical \eqn{G_{i\bullet}}{Gi.} curves may suggest dependence of the type \eqn{i} points on the other points. 
This algorithm estimates the distribution function \eqn{G_{i\bullet}(r)}{Gi.(r)} from the point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial point process in the plane, observed through a bounded window. The window (which is specified in \code{X} as \code{X$window}) may have arbitrary shape. Biases due to edge effects are treated in the same manner as in \code{\link{Gest}}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{G_{i\bullet}(r)}{Gi.(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. The reduced-sample and Kaplan-Meier estimators are computed from histogram counts. In the case of the Kaplan-Meier estimator this introduces a discretisation error which is controlled by the fineness of the breakpoints. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Furthermore, the successive entries of \code{r} must be finely spaced. The algorithm also returns an estimate of the hazard rate function, \eqn{\lambda(r)}{lambda(r)}, of \eqn{G_{i\bullet}(r)}{Gi.(r)}. This estimate should be used with caution as \eqn{G_{i\bullet}(r)}{Gi.(r)} is not necessarily differentiable. The naive empirical distribution of distances from each point of the pattern \code{X} to the nearest other point of the pattern, is a biased estimate of \eqn{G_{i\bullet}}{Gi.}. However this is also returned by the algorithm, as it is sometimes useful in other contexts. Care should be taken not to use the uncorrected empirical \eqn{G_{i\bullet}}{Gi.} as if it were an unbiased estimator of \eqn{G_{i\bullet}}{Gi.}. } \references{ Cressie, N.A.C. \emph{Statistics for spatial data}. John Wiley and Sons, 1991. 
Diggle, P.J. \emph{Statistical analysis of spatial point patterns}. Academic Press, 1983. Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit : analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. Harkness, R.D and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303 Lotwick, H. W. and Silverman, B. W. (1982). Methods for analysing spatial processes of several types of points. \emph{J. Royal Statist. Soc. Ser. B} \bold{44}, 406--413. Ripley, B.D. \emph{Statistical inference for spatial processes}. Cambridge University Press, 1988. Stoyan, D, Kendall, W.S. and Mecke, J. \emph{Stochastic geometry and its applications}. 2nd edition. Springer Verlag, 1995. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{X$marks}. It is converted to a character string if it is not already a character string. The value \code{i=1} does \bold{not} refer to the first level of the factor. The function \eqn{G_{i\bullet}}{Gi.} does not necessarily have a density. The reduced sample estimator of \eqn{G_{i\bullet}}{Gi.} is pointwise approximately unbiased, but need not be a valid distribution function; it may not be a nondecreasing function of \eqn{r}. Its range is always within \eqn{[0,1]}. The spatial Kaplan-Meier estimator of \eqn{G_{i\bullet}}{Gi.} is always nondecreasing but its maximum value may be less than \eqn{1}. } \seealso{ \code{\link{Gcross}}, \code{\link{Gest}}, \code{\link{Gmulti}} } \examples{ # amacrine cells data G0. <- Gdot(amacrine, "off") plot(G0.) 
# synthetic example pp <- runifpoispp(30) pp <- pp \%mark\% factor(sample(0:1, npoints(pp), replace=TRUE)) G <- Gdot(pp, "0") G <- Gdot(pp, 0) # equivalent } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/as.interact.Rd0000755000176000001440000000403212237642732015434 0ustar ripleyusers\name{as.interact} \alias{as.interact} \alias{as.interact.fii} \alias{as.interact.interact} \alias{as.interact.ppm} \title{Extract Interaction Structure} \description{ Extracts the interpoint interaction structure from a point pattern model. } \usage{ as.interact(object) \method{as.interact}{fii}(object) \method{as.interact}{interact}(object) \method{as.interact}{ppm}(object) } \arguments{ \item{object}{A fitted point process model (object of class \code{"ppm"}) or an interpoint interaction structure (object of class \code{"interact"}). } } \details{ The function \code{as.interact} extracts the interpoint interaction structure from a suitable object. An object of class \code{"interact"} describes an interpoint interaction structure, before it has been fitted to point pattern data. The irregular parameters of the interaction (such as the interaction range) are fixed, but the regular parameters (such as interaction strength) are undetermined. Objects of this class are created by the functions \code{\link{Poisson}}, \code{\link{Strauss}} and so on. The main use of such objects is in a call to \code{\link{ppm}}. The function \code{as.interact} is generic, with methods for the classes \code{"ppm"}, \code{"fii"} and \code{"interact"}. The result is an object of class \code{"interact"} which can be printed. } \section{Note on parameters}{ This function does \bold{not} extract the fitted coefficients of the interaction. To extract the fitted interaction including the fitted coefficients, use \code{\link{fitin}}. 
} \value{ An object of class \code{"interact"} representing the interpoint interaction. This object can be printed and plotted. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{fitin}}, \code{\link{ppm}}. } \examples{ data(cells) model <- ppm(cells, ~1, Strauss(0.07)) f <- as.interact(model) f } \keyword{spatial} \keyword{models} spatstat/man/plot.quad.Rd0000755000176000001440000000366112237642733015140 0ustar ripleyusers\name{plot.quad} \alias{plot.quad} \title{plot a Spatial Quadrature Scheme} \description{ Plot a two-dimensional spatial quadrature scheme. } \usage{ \method{plot}{quad}(x, ..., main, dum=list()) } \arguments{ \item{x}{ The spatial quadrature scheme to be plotted. An object of class \code{"quad"}. } \item{\dots}{ extra arguments controlling the plotting of the data points of the quadrature scheme. } \item{dum}{ list of extra arguments controlling the plotting of the dummy points of the quadrature scheme. See below. } \item{main}{ text to be displayed as a title above the plot. } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for quadrature schemes (objects of class \code{"quad"}, see \code{\link{quad.object}}). First the data points of the quadrature scheme are plotted (in their observation window) using \code{\link{plot.ppp}} with any arguments specified in \code{...} Then the dummy points of the quadrature scheme are plotted using \code{\link{plot.ppp}} with any arguments specified in \code{dum}. By default the dummy points are superimposed onto the plot of data points. This can be overridden by including the argument \code{add=FALSE} in the list \code{dum} as shown in the examples. In this case the data and dummy point patterns are plotted separately. See \code{\link{par}} and \code{\link{plot.ppp}} for other possible arguments controlling the plots. 
} \seealso{ \code{\link{quad.object}}, \code{\link{plot.ppp}}, \code{\link{par}} } \examples{ data(nztrees) Q <- quadscheme(nztrees) plot(Q, main="NZ trees: quadrature scheme") oldpar <- par(mfrow=c(2,1)) plot(Q, main="NZ trees", dum=list(add=FALSE)) par(oldpar) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/print.quad.Rd0000755000176000001440000000167112237642733015315 0ustar ripleyusers\name{print.quad} \alias{print.quad} \title{Print a Quadrature Scheme} \description{ \code{print} method for a quadrature scheme. } \usage{ \method{print}{quad}(x,\dots) } \arguments{ \item{x}{ A quadrature scheme object, typically obtained from \code{\link{quadscheme}}. An object of class \code{"quad"}. } \item{\dots}{Ignored.} } \value{ none. } \details{ This is the \code{print} method for the class \code{"quad"}. It prints simple information about the quadrature scheme. See \code{\link{quad.object}} for details of the class \code{"quad"}. } \seealso{ \code{\link{quadscheme}}, \code{\link{quad.object}}, \code{\link{plot.quad}}, \code{\link{summary.quad}} } \examples{ data(cells) Q <- quadscheme(cells) Q } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{print} spatstat/man/endpoints.psp.Rd0000755000176000001440000000500212237642732016023 0ustar ripleyusers\name{endpoints.psp} \alias{endpoints.psp} \title{Endpoints of Line Segment Pattern} \description{ Extracts the endpoints of each line segment in a line segment pattern. } \usage{ endpoints.psp(x, which="both") } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{which}{ String specifying which endpoint or endpoints should be returned. See Details. } } \value{ Point pattern (object of class \code{"ppp"}). 
} \details{ This function extracts one endpoint, or both endpoints, from each of the line segments in \code{x}, and returns these points as a point pattern object. The argument \code{which} determines which endpoint or endpoints of each line segment should be returned: \describe{ \item{\code{which="both"}}{ (the default): both endpoints of each line segment are returned. The result is a point pattern with twice as many points as there are line segments in \code{x}. } \item{\code{which="first"}}{ select the first endpoint of each line segment (returns the points with coordinates \code{x$ends$x0, x$ends$y0}). } \item{\code{which="second"}}{ select the second endpoint of each line segment (returns the points with coordinates \code{x$ends$x1, x$ends$y1}). } \item{\code{which="left"}}{ select the left-most endpoint (the endpoint with the smaller \eqn{x} coordinate) of each line segment. } \item{\code{which="right"}}{ select the right-most endpoint (the endpoint with the greater \eqn{x} coordinate) of each line segment. } \item{\code{which="lower"}}{ select the lower endpoint (the endpoint with the smaller \eqn{y} coordinate) of each line segment. } \item{\code{which="upper"}}{ select the upper endpoint (the endpoint with the greater \eqn{y} coordinate) of each line segment. } } The result is a point pattern. It also has an attribute \code{"id"} which is an integer vector identifying the segment which contributed each point. 
} \seealso{ \code{\link{psp.object}}, \code{\link{ppp.object}}, \code{\link{midpoints.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(a) b <- endpoints.psp(a, "left") plot(b, add=TRUE) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/nndist.Rd0000755000176000001440000001510312237642733014522 0ustar ripleyusers\name{nndist} \alias{nndist} \alias{nndist.ppp} \alias{nndist.default} \title{Nearest neighbour distances} \description{ Computes the distance from each point to its nearest neighbour in a point pattern. Alternatively computes the distance to the second nearest neighbour, or third nearest, etc. } \usage{ nndist(X, \dots) \method{nndist}{ppp}(X, \dots, k=1, by=NULL, method="C") \method{nndist}{default}(X, Y=NULL, \dots, k=1, by=NULL, method="C") } \arguments{ \item{X,Y}{ Arguments specifying the locations of a set of points. For \code{nndist.ppp}, the argument \code{X} should be a point pattern (object of class \code{"ppp"}). For \code{nndist.default}, typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components \code{x} and \code{y}, or a matrix with two columns. } \item{\dots}{ Ignored by \code{nndist.ppp} and \code{nndist.default}. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } \item{by}{ Optional. A factor, which separates \code{X} into groups. The algorithm will compute the distance to the nearest point in each group. } \item{method}{String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } } \value{ Numeric vector or matrix containing the nearest neighbour distances for each point. 
If \code{k = 1} (the default), the return value is a numeric vector \code{v} such that \code{v[i]} is the nearest neighbour distance for the \code{i}th data point. If \code{k} is a single integer, then the return value is a numeric vector \code{v} such that \code{v[i]} is the \code{k}th nearest neighbour distance for the \code{i}th data point. If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the \code{k[j]}th nearest neighbour distance for the \code{i}th data point. If the argument \code{by} is given, then the result is a data frame containing the distances described above, from each point of \code{X}, to the nearest point in each subset of \code{X} defined by the factor \code{by}. } \details{ This function computes the Euclidean distance from each point in a point pattern to its nearest neighbour (the nearest other point of the pattern). If \code{k} is specified, it computes the distance to the \code{k}th nearest neighbour. The function \code{nndist} is generic, with a method for point patterns (objects of class \code{"ppp"}), and a default method for coordinate vectors. There is also a method for line segment patterns, \code{\link{nndist.psp}}. The method for point patterns expects a single point pattern argument \code{X} and returns the vector of its nearest neighbour distances. The default method expects that \code{X} and \code{Y} will determine the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components named \code{x} and \code{y}, or a matrix or data frame with two columns. The argument \code{k} may be a single integer, or an integer vector. If it is a vector, then the \eqn{k}th nearest neighbour distances are computed for each value of \eqn{k} specified in the vector. 
If the argument \code{by} is given, it should be a \code{factor}, of length equal to the number of points in \code{X}. This factor effectively partitions \code{X} into subsets, each subset associated with one of the levels of \code{X}. The algorithm will then compute, for each point of \code{X}, the distance to the nearest neighbour \emph{in each subset}. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by two to three orders of magnitude and uses much less memory. If there is only one point (if \code{x} has length 1), then a nearest neighbour distance of \code{Inf} is returned. If there are no points (if \code{x} has length zero) a numeric vector of length zero is returned. To identify \emph{which} point is the nearest neighbour of a given point, use \code{\link{nnwhich}}. To use the nearest neighbour distances for statistical inference, it is often advisable to use the edge-corrected empirical distribution, computed by \code{\link{Gest}}. To find the nearest neighbour distances from one point pattern to another point pattern, use \code{\link{nncross}}. } \section{Nearest neighbours of each type}{ If \code{X} is a multitype point pattern and \code{by=marks(X)}, then the algorithm will compute, for each point of \code{X}, the distance to the nearest neighbour of each type. See the Examples. To find the minimum distance from \emph{any} point of type \code{i} to the nearest point of type \code{j}, for all combinations of \code{i} and \code{j}, use the \R function \code{\link[stats]{aggregate}} as suggested in the Examples. } \section{Warnings}{ An infinite or \code{NA} value is returned if the distance is not defined (e.g. if there is only one point in the point pattern). 
} \seealso{ \code{\link{nndist.psp}}, \code{\link{pairdist}}, \code{\link{Gest}}, \code{\link{nnwhich}}, \code{\link{nncross}}. } \examples{ data(cells) # nearest neighbours d <- nndist(cells) # second nearest neighbours d2 <- nndist(cells, k=2) # first, second and third nearest d1to3 <- nndist(cells, k=1:3) x <- runif(100) y <- runif(100) d <- nndist(x, y) # Stienen diagram plot(cells \%mark\% (nndist(cells)/2), markscale=1) # distance to nearest neighbour of each type nnda <- nndist(ants, by=marks(ants)) head(nnda) # For nest number 1, the nearest Cataglyphis nest is 87.32125 units away # Use of 'aggregate': # minimum distance between each pair of types aggregate(nnda, by=list(from=marks(ants)), min) # Always a symmetric matrix # mean nearest neighbour distances aggregate(nnda, by=list(from=marks(ants)), mean) # The mean distance from a Messor nest to # the nearest other Messor nest is 59.02549 units } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/Kcross.inhom.Rd0000755000176000001440000002450112237642731015600 0ustar ripleyusers\name{Kcross.inhom} \alias{Kcross.inhom} \title{ Inhomogeneous Cross K Function } \description{ For a multitype point pattern, estimate the inhomogeneous version of the cross \eqn{K} function, which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}, adjusted for spatially varying intensity. } \usage{ Kcross.inhom(X, i, j, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction = c("border", "isotropic", "Ripley", "translate"), sigma=NULL, varcov=NULL, lambdaIJ=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). 
See under Details. } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{i}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{i} points in \code{X}, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity of the sub-process of points of type \code{j}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the type \code{j} points in \code{X}, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{r}{ Optional. Numeric vector giving the values of the argument \eqn{r} at which the cross K function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{ Optional. An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"translation"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. } \item{\dots}{ Ignored. 
} \item{sigma}{ Standard deviation of isotropic Gaussian smoothing kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted. } \item{varcov}{ Variance-covariance matrix of anisotropic Gaussian kernel, used in computing leave-one-out kernel estimates of \code{lambdaI}, \code{lambdaJ} if they are omitted. Incompatible with \code{sigma}. } \item{lambdaIJ}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points of types \code{i} and \code{j} respectively. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{ij}(r)}{Kij(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{ij}(r)}{Kij(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{ij}(r)}{Kij(r)} obtained by the edge corrections named. } \details{ This is a generalisation of the function \code{\link{Kcross}} to include an adjustment for spatially inhomogeneous intensity, in a manner similar to the function \code{\link{Kinhom}}. The inhomogeneous cross-type \eqn{K} function is described by Moller and Waagepetersen (2003, pages 48-49 and 51-53). Briefly, given a multitype point process, suppose the sub-process of points of type \eqn{j} has intensity function \eqn{\lambda_j(u)}{lambda[j](u)} at spatial locations \eqn{u}. Suppose we place a mass of \eqn{1/\lambda_j(\zeta)}{1/lambda[j](z)} at each point \eqn{\zeta}{z} of type \eqn{j}. Then the expected total mass per unit area is 1. 
The inhomogeneous ``cross-type'' \eqn{K} function \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} equals the expected total mass within a radius \eqn{r} of a point of the process of type \eqn{i}. If the process of type \eqn{i} points were independent of the process of type \eqn{j} points, then \eqn{K_{ij}^{\mbox{inhom}}(r)}{K[ij]inhom(r)} would equal \eqn{\pi r^2}{pi * r^2}. Deviations between the empirical \eqn{K_{ij}}{Kij} curve and the theoretical curve \eqn{\pi r^2}{pi * r^2} suggest dependence between the points of types \eqn{i} and \eqn{j}. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. It must be a marked point pattern, and the mark vector \code{X$marks} must be a factor. The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{X$marks}. (Warning: this means that an integer value \code{i=3} will be interpreted as the number 3, \bold{not} the 3rd smallest level). If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. The argument \code{lambdaI} supplies the values of the intensity of the sub-process of points of type \code{i}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the type \code{i} intensity at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the type \code{i} intensity evaluated only at the data points of type \code{i}. The length of this vector must equal the number of type \code{i} points in \code{X}. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, Moller and Waagepetersen (2000). 
The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. Similarly \code{lambdaJ} should contain estimated values of the intensity of the sub-process of points of type \code{j}. It may be either a pixel image, a function, a numeric vector, or omitted. The optional argument \code{lambdaIJ} is for advanced use only. It is a matrix containing estimated values of the products of these two intensities for each pair of data points of types \code{i} and \code{j} respectively. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must exceed the radius of the largest disc contained in the window. The argument \code{correction} chooses the edge correction as explained e.g. in \code{\link{Kest}}. The pair correlation function can also be applied to the result of \code{Kcross.inhom}; see \code{\link{pcf}}. } \references{ Moller, J. and Waagepetersen, R. Statistical Inference and Simulation for Spatial Point Processes Chapman and Hall/CRC Boca Raton, 2003. } \section{Warnings}{ The arguments \code{i} and \code{j} are always interpreted as levels of the factor \code{X$marks}. They are converted to character strings if they are not already character strings. The value \code{i=1} does \bold{not} refer to the first level of the factor. 
} \seealso{ \code{\link{Kcross}}, \code{\link{Kinhom}}, \code{\link{Kdot.inhom}}, \code{\link{Kmulti.inhom}}, \code{\link{pcf}} } \examples{ # Lansing Woods data data(lansing) lansing <- lansing[seq(1,lansing$n, by=10)] ma <- split(lansing)$maple wh <- split(lansing)$whiteoak # method (1): estimate intensities by nonparametric smoothing lambdaM <- density.ppp(ma, sigma=0.15, at="points") lambdaW <- density.ppp(wh, sigma=0.15, at="points") K <- Kcross.inhom(lansing, "whiteoak", "maple", lambdaW, lambdaM) # method (2): leave-one-out K <- Kcross.inhom(lansing, "whiteoak", "maple", sigma=0.15) # method (3): fit parametric intensity model fit <- ppm(lansing, ~marks * polynom(x,y,2)) # evaluate fitted intensities at data points # (these are the intensities of the sub-processes of each type) inten <- fitted(fit, dataonly=TRUE) # split according to types of points lambda <- split(inten, lansing$marks) K <- Kcross.inhom(lansing, "whiteoak", "maple", lambda$whiteoak, lambda$maple) # synthetic example: type A points have intensity 50, # type B points have intensity 100 * x lamB <- as.im(function(x,y){50 + 100 * x}, owin()) X <- superimpose(A=runifpoispp(50), B=rpoispp(lamB)) K <- Kcross.inhom(X, "A", "B", lambdaI=as.im(50, X$window), lambdaJ=lamB) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/envelope.envelope.Rd0000755000176000001440000000677012237642732016665 0ustar ripleyusers\name{envelope.envelope} \alias{envelope.envelope} \title{ Recompute Envelopes } \description{ Given a simulation envelope (object of class \code{"envelope"}), compute another envelope from the same simulation data using different parameters. } \usage{ \method{envelope}{envelope}(Y, fun = NULL, ..., transform=NULL, global=FALSE, VARIANCE=FALSE) } \arguments{ \item{Y}{ A simulation envelope (object of class \code{"envelope"}). 
} \item{fun}{ Optional. Summary function to be applied to the simulated point patterns. } \item{\dots,transform,global,VARIANCE}{ Parameters controlling the type of envelope that is re-computed. See \code{\link{envelope}}. } } \details{ This function can be used to re-compute a simulation envelope from previously simulated data, using different parameter settings for the envelope: for example, a different significance level, or a global envelope instead of a pointwise envelope. The function \code{\link{envelope}} is generic. This is the method for the class \code{"envelope"}. The argument \code{Y} should be a simulation envelope (object of class \code{"envelope"}) produced by any of the methods for \code{\link{envelope}}. Additionally, \code{Y} must contain either \itemize{ \item the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savepatterns=TRUE}); \item the summary functions of the simulated point patterns that were used to create the original envelope (so \code{Y} should have been created by calling \code{\link{envelope}} with \code{savefuns=TRUE}). } If the argument \code{fun} is given, it should be a summary function that can be applied to the simulated point patterns that were used to create \code{Y}. The envelope of the summary function \code{fun} for these point patterns will be computed using the parameters specified in \code{\dots}. If \code{fun} is not given, then: \itemize{ \item If \code{Y} contains the summary functions that were used to compute the original envelope, then the new envelope will be computed from these original summary functions. \item Otherwise, if \code{Y} contains the simulated point patterns, then the \eqn{K} function \code{\link{Kest}} will be applied to each of these simulated point patterns, and the new envelope will be based on the \eqn{K} functions. 
} The new envelope will be computed using the parameters specified in \code{\dots}. See \code{\link{envelope}} for a full list of envelope parameters. Frequently-used parameters include \code{nrank} and \code{nsim} (to change the number of simulations used and the significance level of the envelope), \code{global} (to change from pointwise to global envelopes) and \code{VARIANCE} (to compute the envelopes from the sample moments instead of the ranks). } \value{ An envelope (object of class \code{"envelope"}). } \seealso{ \code{\link{envelope}} } \examples{ data(cells) E <- envelope(cells, Kest, nsim=19, savefuns=TRUE, savepatterns=TRUE) E2 <- envelope(E, nrank=2) Eg <- envelope(E, global=TRUE) EG <- envelope(E, Gest) EL <- envelope(E, transform=expression(sqrt(./pi))) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{htest} \keyword{hplot} \keyword{iteration} spatstat/man/formula.fv.Rd0000644000176000001440000000400012237642732015270 0ustar ripleyusers\name{formula.fv} \alias{formula.fv} \alias{formula<-} \alias{formula<-.fv} \title{ Extract or Change the Plot Formula for a Function Value Table } \description{ Extract or change the default plotting formula for an object of class \code{"fv"} (function value table). } \usage{ \method{formula}{fv}(x, \dots) formula(x, \dots) <- value \method{formula}{fv}(x, \dots) <- value } \arguments{ \item{x}{ An object of class \code{"fv"}, containing the values of several estimates of a function. } \item{\dots}{ Arguments passed to other methods. } \item{value}{ New value of the formula. Either a \code{formula} or a character string. } } \details{ A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) is a convenient way of storing and plotting several different estimates of the same function. 
The default behaviour of \code{plot(x)} for a function value table \code{x} is determined by a formula associated with \code{x} called its \emph{plot formula}. See \code{\link{plot.fv}} for explanation about these formulae. The function \code{formula.fv} is a method for the generic command \code{\link{formula}}. It extracts the plot formula associated with the object. The function \code{formula<-} is generic. It changes the formula associated with an object. The function \code{formula<-.fv} is the method for \code{formula<-} for the class \code{"fv"}. It changes the plot formula associated with the object. } \value{ The result of \code{formula.fv} is a character string containing the plot formula. The result of \code{formula<-.fv} is a new object of class \code{"fv"}. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{fv}}, \code{\link{plot.fv}}, \code{\link[stats]{formula}}. } \examples{ K <- Kest(cells) formula(K) formula(K) <- (iso ~ r) } \keyword{spatial} \keyword{methods} spatstat/man/dilated.areas.Rd0000755000176000001440000000444012237642732015724 0ustar ripleyusers\name{dilated.areas} \Rdversion{1.1} \alias{dilated.areas} \title{ Areas of Morphological Dilations } \description{ Computes the areas of successive morphological dilations. } \usage{ dilated.areas(X, r, W=as.owin(X), ..., constrained=TRUE, exact = FALSE) } \arguments{ \item{X}{ Object to be dilated. A point pattern (object of class \code{"ppp"}), a line segment pattern (object of class \code{"psp"}), or a window (object of class \code{"owin"}). } \item{r}{ Numeric vector of radii for the dilations. } \item{W}{ Window (object of class \code{"owin"}) inside which the areas will be computed, if \code{constrained=TRUE}. } \item{\dots}{Ignored.} \item{constrained}{ Logical flag indicating whether areas should be restricted to the window \code{W}. 
} \item{exact}{ Logical flag indicating whether areas should be computed using analytic geometry (which is slower but more accurate). Currently available only when \code{X} is a point pattern. } } \details{ This function computes the areas of the dilations of \code{X} by each of the radii \code{r[i]}. Areas may also be computed inside a specified window \code{W}. The morphological dilation of a set \eqn{X} by a distance \eqn{r > 0} is the subset consisting of all points \eqn{x}{x} such that the distance from \eqn{x} to \eqn{X} is less than or equal to \eqn{r}. When \code{X} is a point pattern, the dilation by a distance \eqn{r} is the union of discs of radius \eqn{r} centred at the points of \code{X}. The argument \code{r} should be a vector of nonnegative numbers. If \code{exact=TRUE} and if \code{X} is a point pattern, then the areas are computed using analytic geometry, which is slower but much more accurate. Otherwise the computation is performed using \code{\link{distmap}}. To compute the dilated object itself, use \code{\link{dilation}}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{dilation}}, \code{\link{eroded.areas}} } \examples{ X <- runifpoint(10) a <- dilated.areas(X, c(0.1,0.2), W=square(1), exact=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/as.lpp.Rd0000644000176000001440000000506312237642732014420 0ustar ripleyusers\name{as.lpp} \Rdversion{1.1} \alias{as.lpp} \title{ Convert Data to a Point Pattern on a Linear Network } \description{ Convert various kinds of data to a point pattern on a linear network. } \usage{ as.lpp(x, y=NULL, seg=NULL, tp=NULL, \dots, marks=NULL, L=NULL, check=FALSE) } \arguments{ \item{x,y}{ Vectors of cartesian coordinates, or any data acceptable to \code{\link[grDevices]{xy.coords}}. 
Alternatively \code{x} can be a point pattern on a linear network (object of class \code{"lpp"}) or a planar point pattern (object of class \code{"ppp"}). } \item{seg,tp}{ Optional local coordinates. Vectors of the same length as \code{x,y}. See Details. } \item{\dots}{Ignored.} \item{marks}{ Optional marks for the point pattern. A vector or factor with one entry for each point, or a data frame or hyperframe with one row for each point. } \item{L}{ Linear network (object of class \code{"linnet"}) on which the points lie. } \item{check}{ Logical. Whether to check the validity of the spatial coordinates. } } \details{ This function converts data in various formats into a point pattern on a linear network (object of class \code{"lpp"}). The possible formats are: \itemize{ \item \code{x} is already a point pattern on a linear network (object of class \code{"lpp"}). Then \code{x} is returned unchanged. \item \code{x} is a planar point pattern (object of class \code{"ppp"}). Then \code{x} is converted to a point pattern on the linear network \code{L} using \code{\link{lpp}}. \item \code{x,y,seg,tp} are vectors of equal length. These specify that the \code{i}th point has Cartesian coordinates \code{(x[i],y[i])}, and lies on segment number \code{seg[i]} of the network \code{L}, at a fractional position \code{tp[i]} along that segment (with \code{tp=0} representing one endpoint and \code{tp=1} the other endpoint of the segment). \item \code{seg,tp} are \code{NULL}, and \code{x,y} are data in a format acceptable to \code{\link[grDevices]{xy.coords}} specifying the Cartesian coordinates. } } \value{ A point pattern on a linear network (object of class \code{"lpp"}). } \seealso{ \code{\link{lpp}}. 
} \examples{ A <- as.psp(simplenet) X <- runifpointOnLines(10, A) is.ppp(X) Y <- as.lpp(X, L=simplenet) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/fitted.ppm.Rd0000755000176000001440000000770712237642732015307 0ustar ripleyusers\name{fitted.ppm} \alias{fitted.ppm} \title{ Fitted Conditional Intensity for Point Process Model } \description{ Given a point process model fitted to a point pattern, compute the fitted conditional intensity of the model at the points of the pattern, or at the points of the quadrature scheme used to fit the model. } \usage{ \method{fitted}{ppm}(object, \dots, type="lambda", dataonly=FALSE, new.coef=NULL, drop=FALSE, check=TRUE, repair=TRUE) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) } \item{\dots}{ Ignored. } \item{type}{ String (partially matched) indicating whether the fitted value is the conditional intensity (\code{"lambda"}) or the trend (\code{"trend"}). } \item{dataonly}{ Logical. If \code{TRUE}, then values will only be computed at the points of the data point pattern. If \code{FALSE}, then values will be computed at all the points of the quadrature scheme used to fit the model, including the points of the data point pattern. } \item{new.coef}{ Numeric vector of parameter values to replace the fitted model parameters \code{coef(object)}. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. 
} \item{repair}{ Logical value indicating whether to repair the internal format of \code{object}, if it is found to be damaged. } } \value{ A vector containing the values of the fitted conditional intensity or (if \code{type="trend"}) the fitted spatial trend. Entries in this vector correspond to the quadrature points (data or dummy points) used to fit the model. The quadrature points can be extracted from \code{object} by \code{union.quad(quad.ppm(object))}. } \details{ The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the model-fitting algorithm \code{\link{ppm}}). This function evaluates the conditional intensity \eqn{\hat\lambda(u, x)}{lambdahat(u,x)} or spatial trend \eqn{\hat b(u)}{bhat(u)} of the fitted point process model for certain locations \eqn{u}, where \code{x} is the original point pattern dataset to which the model was fitted. The locations \eqn{u} at which the fitted conditional intensity/trend is evaluated, are the points of the quadrature scheme used to fit the model in \code{\link{ppm}}. They include the data points (the points of the original point pattern dataset \code{x}) and other ``dummy'' points in the window of observation. The argument \code{drop} is explained in \code{\link{quad.ppm}}. Use \code{\link{predict.ppm}} to compute the fitted conditional intensity at other locations or with other values of the explanatory variables. } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005). Residual analysis for spatial point processes (with discussion). \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. 
} \seealso{ \code{\link{ppm.object}}, \code{\link{ppm}}, \code{\link{predict.ppm}} } \examples{ data(cells) str <- ppm(cells, ~x, Strauss(r=0.15)) lambda <- fitted(str) # extract quadrature points in corresponding order quadpoints <- union.quad(quad.ppm(str)) # plot conditional intensity values # as circles centred on the quadrature points quadmarked <- setmarks(quadpoints, lambda) plot(quadmarked) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/rMatClust.Rd0000755000176000001440000001266112237642734015150 0ustar ripleyusers\name{rMatClust} \alias{rMatClust} \title{Simulate Matern Cluster Process} \description{ Generate a random point pattern, a simulated realisation of the \ifelse{latex}{\out{Mat\'ern}}{Matern} Cluster Process. } \usage{ rMatClust(kappa, r, mu, win = owin(c(0,1),c(0,1))) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{r}{ Radius parameter of the clusters. } \item{mu}{ Mean number of points per cluster (a single positive number) or reference intensity for the cluster points (a function or a pixel image). } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). Additionally, some intermediate results of the simulation are returned as attributes of this point pattern. See \code{\link{rNeymanScott}}. } \details{ This algorithm generates a realisation of \ifelse{latex}{\out{Mat\'ern}}{Matern}'s cluster process, a special case of the Neyman-Scott process, inside the window \code{win}. 
In the simplest case, where \code{kappa} and \code{mu} are single numbers, the algorithm generates a uniform Poisson point process of \dQuote{parent} points with intensity \code{kappa}. Then each parent point is replaced by a random cluster of \dQuote{offspring} points, the number of points per cluster being Poisson (\code{mu}) distributed, and their positions being placed independently and uniformly inside a disc of radius \code{r} centred on the parent point. The resulting point pattern is a realisation of the classical \dQuote{stationary Matern cluster process} generated inside the window \code{win}. This point process has intensity \code{kappa * mu}. The algorithm can also generate spatially inhomogeneous versions of the \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process: \itemize{ \item The parent points can be spatially inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process that generates the parent points. \item The offspring points can be inhomogeneous. If the argument \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is interpreted as the reference density for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu/(pi * r^2)} inside the disc of radius \code{r} centred on the parent point, and zero intensity outside this disc. Equivalently we first generate, for each parent point, a Poisson (\eqn{M}) random number of offspring (where \eqn{M} is the maximum value of \code{mu}) placed independently and uniformly in the disc of radius \code{r} centred on the parent location, and then randomly thin the offspring points, with retention probability \code{mu/M}. \item Both the parent points and the offspring points can be inhomogeneous, as described above. 
} Note that if \code{kappa} is a pixel image, its domain must be larger than the window \code{win}. This is because an offspring point inside \code{win} could have its parent point lying outside \code{win}. In order to allow this, the simulation algorithm first expands the original window \code{win} by a distance \code{r} and generates the Poisson process of parent points on this larger window. If \code{kappa} is a pixel image, its domain must contain this larger window. The intensity of the \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process is \code{kappa * mu} if either \code{kappa} or \code{mu} is a single number. In the general case the intensity is an integral involving \code{kappa}, \code{mu} and \code{r}. The \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process model with homogeneous parents (i.e. where \code{kappa} is a single number) can be fitted to data using \code{\link{kppm}} or related functions. Currently it is not possible to fit the \ifelse{latex}{\out{Mat\'ern}}{Matern} cluster process model with inhomogeneous parents. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rGaussPoisson}}, \code{\link{rNeymanScott}}, \code{\link{matclust.estK}}, \code{\link{matclust.estpcf}}, \code{\link{kppm}}. } \examples{ # homogeneous X <- rMatClust(10, 0.05, 4) # inhomogeneous Z <- as.im(function(x,y){ 4 * exp(2 * x - 1) }, owin()) Y <- rMatClust(10, 0.05, Z) } \references{ \ifelse{latex}{\out{Mat\'ern}}{Matern}, B. (1960) \emph{Spatial Variation}. Meddelanden \ifelse{latex}{\out{fr\r{a}n}}{fraan} Statens Skogsforskningsinstitut, volume 59, number 5. Statens Skogsforskningsinstitut, Sweden. \ifelse{latex}{\out{Mat\'ern}}{Matern}, B. (1986) \emph{Spatial Variation}. Lecture Notes in Statistics 36, Springer-Verlag, New York. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/methods.boxx.Rd0000755000176000001440000000274012237642733015650 0ustar ripleyusers\name{methods.boxx} \Rdversion{1.1} \alias{methods.boxx} %DoNotExport \alias{print.boxx} \alias{unitname.boxx} \alias{unitname<-.boxx} \title{ Methods for Multi-Dimensional Box } \description{ Methods for class \code{"boxx"}. } \usage{ \method{print}{boxx}(x, ...) \method{unitname}{boxx}(x) \method{unitname}{boxx}(x) <- value } \arguments{ \item{x}{ Object of class \code{"boxx"} representing a multi-dimensional box. } \item{\dots}{ Other arguments passed to \code{print.default}. } \item{value}{ Name of the unit of length. See \code{\link{unitname}}. } } \details{ These are methods for the generic functions \code{\link{print}} and \code{\link{unitname}} for the class \code{"boxx"} of multi-dimensional boxes. The \code{print} method prints a description of the box, while the \code{unitname} method extracts the name of the unit of length in which the box coordinates are expressed. } \value{ For \code{print.boxx} the value is \code{NULL}. For \code{unitname.boxx} an object of class \code{"units"}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{boxx}}, \code{\link{print}}, \code{\link{unitname}} } \examples{ X <- boxx(c(0,10),c(0,10),c(0,5),c(0,1), unitname=c("metre", "metres")) X unitname(X) # Northern European usage unitname(X) <- "meter" } \keyword{spatial} \keyword{methods} spatstat/man/scalardilate.Rd0000644000176000001440000000505012237642734015651 0ustar ripleyusers\name{scalardilate} %DontDeclareMethods \alias{scalardilate} \alias{scalardilate.im} \alias{scalardilate.owin} \alias{scalardilate.ppp} \alias{scalardilate.psp} \alias{scalardilate.default} \title{Apply Scalar Dilation} \description{ Applies scalar dilation to a plane geometrical object, such as a point pattern or a window, relative to a specified origin. } \usage{ scalardilate(X, f, \dots) \method{scalardilate}{im}(X, f, \dots, origin=NULL) \method{scalardilate}{owin}(X, f, \dots, origin=NULL) \method{scalardilate}{ppp}(X, f, \dots, origin=NULL) \method{scalardilate}{psp}(X, f, \dots, origin=NULL) \method{scalardilate}{default}(X, f, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}), a pixel image (class \code{"im"}) and so on. } \item{f}{ Scalar dilation factor. A finite number greater than zero. } \item{\dots}{Ignored by the methods.} \item{origin}{ Origin for the scalar dilation. Either a vector of 2 numbers, or one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"} (partially matched). } } \value{ Another object of the same type, representing the result of applying the scalar dilation. } \details{ This command performs scalar dilation of the object \code{X} by the factor \code{f} relative to the origin specified by \code{origin}. 
The function \code{scalardilate} is generic, with methods for windows (class \code{"owin"}), point patterns (class \code{"ppp"}), pixel images (class \code{"im"}), line segment patterns (class \code{"psp"}) and a default method. If the argument \code{origin} is not given, then every spatial coordinate is multiplied by the factor \code{f}. If \code{origin} is given, then scalar dilation is performed relative to the specified origin. Effectively, \code{X} is shifted so that \code{origin} is moved to \code{c(0,0)}, then scalar dilation is performed, then the result is shifted so that \code{c(0,0)} is moved to \code{origin}. This command is a special case of an affine transformation: see \code{\link{affine}}. } \seealso{ \code{\link{affine}}, \code{\link{shift}} } \examples{ plot(letterR) plot(scalardilate(letterR, 0.7, origin="bot"), col="red", add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/Iest.Rd0000755000176000001440000001263712237642731014136 0ustar ripleyusers\name{Iest} \alias{Iest} \title{Estimate the I-function} \description{ Estimates the summary function \eqn{I(r)} for a multitype point pattern. } \usage{ Iest(X, ..., eps=NULL, r=NULL, breaks=NULL, correction=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of \eqn{I(r)} will be computed. An object of class \code{"ppp"}, or data in any format acceptable to \code{\link{as.ppp}()}. } \item{\dots}{Ignored.} \item{eps}{ the resolution of the discrete approximation to Euclidean distance (see below). There is a sensible default. } \item{r}{Optional. Numeric vector of values for the argument \eqn{r} at which \eqn{I(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \code{r}. 
} \item{breaks}{ An alternative to the argument \code{r}. Not normally invoked by the user. See Details section. } \item{correction}{ Optional. Vector of character strings specifying the edge correction(s) to be used by \code{\link{Jest}}. } } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{I} has been estimated} \item{rs}{the ``reduced sample'' or ``border correction'' estimator of \eqn{I(r)} computed from the border-corrected estimates of \eqn{J} functions} \item{km}{the spatial Kaplan-Meier estimator of \eqn{I(r)} computed from the Kaplan-Meier estimates of \eqn{J} functions} \item{han}{the Hanisch-style estimator of \eqn{I(r)} computed from the Hanisch-style estimates of \eqn{J} functions} \item{un}{the uncorrected estimate of \eqn{I(r)} computed from the uncorrected estimates of \eqn{J} } \item{theo}{the theoretical value of \eqn{I(r)} for a stationary Poisson process: identically equal to \eqn{0} } } \note{ Sizeable amounts of memory may be needed during the calculation. } \details{ The \eqn{I} function summarises the dependence between types in a multitype point process (Van Lieshout and Baddeley, 1999) It is based on the concept of the \eqn{J} function for an unmarked point process (Van Lieshout and Baddeley, 1996). See \code{\link{Jest}} for information about the \eqn{J} function. The \eqn{I} function is defined as \deqn{ % I(r) = \sum_{i=1}^m p_i J_{ii}(r) % - J_{\bullet\bullet}(r)}{ % I(r) = (sum p[i] Jii(r)) - J(r) } where \eqn{J_{\bullet\bullet}}{J} is the \eqn{J} function for the entire point process ignoring the marks, while \eqn{J_{ii}}{Jii} is the \eqn{J} function for the process consisting of points of type \eqn{i} only, and \eqn{p_i}{p[i]} is the proportion of points which are of type \eqn{i}. 
The \eqn{I} function is designed to measure dependence between points of different types, even if the points are not Poisson. Let \eqn{X} be a stationary multitype point process, and write \eqn{X_i}{X[i]} for the process of points of type \eqn{i}. If the processes \eqn{X_i}{X[i]} are independent of each other, then the \eqn{I}-function is identically equal to \eqn{0}. Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0} typically indicate negative and positive association, respectively, between types. See Van Lieshout and Baddeley (1999) for further information. An estimate of \eqn{I} derived from a multitype spatial point pattern dataset can be used in exploratory data analysis and formal inference about the pattern. The estimate of \eqn{I(r)} is compared against the constant function \eqn{0}. Deviations \eqn{I(r) < 0} or \eqn{I(r) > 0} may suggest negative and positive association, respectively. This algorithm estimates the \eqn{I}-function from the multitype point pattern \code{X}. It assumes that \code{X} can be treated as a realisation of a stationary (spatially homogeneous) random spatial marked point process in the plane, observed through a bounded window. The argument \code{X} is interpreted as a point pattern object (of class \code{"ppp"}, see \code{\link{ppp.object}}) and can be supplied in any of the formats recognised by \code{\link{as.ppp}()}. It must be a multitype point pattern (it must have a \code{marks} vector which is a \code{factor}). The function \code{\link{Jest}} is called to compute estimates of the \eqn{J} functions in the formula above. In fact three different estimates are computed using different edge corrections. See \code{\link{Jest}} for information. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. 
\emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \seealso{ \code{\link{Jest}} } \examples{ data(amacrine) Ic <- Iest(amacrine) plot(Ic, main="Amacrine Cells data") # values are below I= 0, suggesting negative association # between 'on' and 'off' cells. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/plot.plotppm.Rd0000755000176000001440000000632612237642733015702 0ustar ripleyusers\name{plot.plotppm} \alias{plot.plotppm} \title{Plot a plotppm Object Created by plot.ppm} \description{ The function plot.ppm produces objects which specify plots of fitted point process models. The function plot.plotppm carries out the actual plotting of these objects. } \usage{ \method{plot}{plotppm}(x, data = NULL, trend = TRUE, cif = TRUE, se = TRUE, pause = interactive(), how = c("persp", "image", "contour"), ...) } \arguments{ \item{x}{ An object of class \code{plotppm} produced by \code{\link{plot.ppm}()} }. \item{data}{ The point pattern (an object of class \code{ppp}) to which the point process model was fitted (by \code{\link{ppm}}). } \item{trend}{ Logical scalar; should the trend component of the fitted model be plotted? } \item{cif}{ Logical scalar; should the complete conditional intensity of the fitted model be plotted? } \item{se}{ Logical scalar; should the estimated standard error of the fitted intensity be plotted? } \item{pause}{ Logical scalar indicating whether to pause with a prompt after each plot. Set \code{pause=FALSE} if plotting to a file. } \item{how}{ Character string or character vector indicating the style or styles of plots to be performed. } \item{\dots}{ Extra arguments to the plotting functions \code{\link{persp}}, \code{\link{image}} and \code{\link{contour}}. 
} } \details{ If argument \code{data} is supplied then the point pattern will be superimposed on the image and contour plots. Sometimes a fitted model does not have a trend component, or the trend component may constitute all of the conditional intensity (if the model is Poisson). In such cases the object \code{x} will not contain a trend component, or will contain only a trend component. This will also be the case if one of the arguments \code{trend} and \code{cif} was set equal to \code{FALSE} in the call to \code{plot.ppm()} which produced \code{x}. If this is so then only the item which is present will be plotted. Explicitly setting \code{trend=TRUE}, or \code{cif=TRUE}, respectively, will then give an error. } \value{ None. } \section{Warning}{ Arguments which are passed to \code{persp}, \code{image}, and \code{contour} via the \dots argument get passed to any of the other functions listed in the \code{how} argument, and won't be recognized by them. This leads to a lot of annoying but harmless warning messages. Arguments to \code{persp} may be supplied via \code{\link{spatstat.options}()} which alleviates the warning messages in this instance. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{plot.ppm}()} } \examples{ \dontrun{ data(cells) Q <- quadscheme(cells) m <- ppm(Q, ~1, Strauss(0.05)) mpic <- plot(m) # Perspective plot only, with altered parameters: plot(mpic,how="persp", theta=-30,phi=40,d=4) # All plots, with altered parameters for perspective plot: op <- spatstat.options(par.persp=list(theta=-30,phi=40,d=4)) plot(mpic) # Revert spatstat.options(op) } } \keyword{spatial} \keyword{hplot} \keyword{models} spatstat/man/sumouter.Rd0000755000176000001440000000470512237642734015115 0ustar ripleyusers\name{sumouter} \alias{sumouter} \alias{quadform} \alias{bilinearform} \title{Compute Quadratic Forms} \description{ Calculates certain quadratic forms of matrices. } \usage{ sumouter(x, w=NULL) quadform(x, v) bilinearform(x, v, y) } \arguments{ \item{x,y}{A matrix, whose rows are the vectors in the quadratic form.} \item{w}{Optional vector of weights} \item{v}{Matrix determining the quadratic form} } \value{ A vector or matrix. } \details{ The matrix \code{x} will be interpreted as a collection of row vectors. The command \code{sumouter} computes the sum of the outer products of these vectors, weighted by the entries of \code{w}: \deqn{ M = \sum_i w_i x_i x_i^\top }{ M = sum[i] (w[i] * outer(x[i,], x[i,])) } where the sum is over all rows of \code{x} (after removing any rows containing \code{NA} or other non-finite values). If \code{w} is missing, the weights will be taken as 1. The result is a \eqn{p \times p}{p * p} matrix where \code{p = ncol(x)}. The command \code{quadform} evaluates the quadratic form, defined by the matrix \code{v}, for each of the row vectors of \code{x}: \deqn{ y_i = x_i V x_i^\top }{ y[i] = x[i,] \%*\% v \%*\% t(x[i,]) } The result \code{y} is a numeric vector of length \code{n} where \code{n = nrow(x)}. 
If \code{x[i,]} contains \code{NA} or other non-finite values, then \code{y[i] = NA}. The command \code{bilinearform} evaluates the more general bilinear form defined by the matrix \code{v}. Here \code{x} and \code{y} must be matrices of the same dimensions. For each of the row vectors of \code{x} and corresponding row vector of \code{y}, the bilinear form is \deqn{ z_i = x_i V y_i^\top }{ z[i] = x[i,] \%*\% v \%*\% t(y[i,]) } The result \code{z} is a numeric vector of length \code{n} where \code{n = nrow(x)}. If \code{x[i,]} or \code{y[i,]} contains \code{NA} or other non-finite values, then \code{z[i] = NA}. } \examples{ x <- matrix(1:12, 4, 3) dimnames(x) <- list(c("Wilma", "Fred", "Barney", "Betty"), letters[1:3]) x sumouter(x) w <- 4:1 sumouter(x, w) v <- matrix(1, 3, 3) quadform(x, v) # should be the same as quadform(x, v) bilinearform(x, v, x) # See what happens with NA's x[3,2] <- NA sumouter(x, w) quadform(x, v) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{array} spatstat/man/linearpcfcross.Rd0000644000176000001440000000605512237642732016242 0ustar ripleyusers\name{linearpcfcross} \alias{linearpcfcross} \title{ Multitype Pair Correlation Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the multitype pair correlation function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearpcfcross(X, i, j, r=NULL, \dots, correction="Ang") } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-\eqn{j} pair correlation function \eqn{g_{ij}(r)}{g[ij](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. 
Defaults to the first level of \code{marks(X)}. } \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link[spatstat]{pcfcross}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{ij}(r)}{g[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfdot}}, \code{\link[spatstat]{linearpcf}}, \code{\link[spatstat]{pcfcross}}. 
} \examples{ data(chicago) g <- linearpcfcross(chicago, "assault") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/bramblecanes.Rd0000755000176000001440000000441612237642732015645 0ustar ripleyusers\name{bramblecanes} \alias{bramblecanes} \docType{data} \title{Hutchings' Bramble Canes data} \description{ Data giving the locations and ages of bramble canes in a field. A marked point pattern. } \format{ An object of class \code{"ppp"} representing the point pattern of plant locations. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of plant \cr \code{y} \tab Cartesian \eqn{y}-coordinate of plant \cr \code{marks} \tab factor with levels 0,1, 2 indicating age } See \code{\link{ppp.object}} for details of the format. } \usage{data(bramblecanes)} \source{Hutchings (1979), data published in Diggle (1983)} \section{Notes}{ These data record the \eqn{(x,y)} locations and ages of bramble canes in a field \eqn{9} metres square, rescaled to the unit square. The canes were classified according to age as either newly emergent, one or two years old. These are encoded as marks 0, 1 and 2 respectively in the dataset. The data were recorded and analysed by Hutchings (1979) and further analysed by Diggle (1981a, 1981b, 1983), Diggle and Milne (1983), and Van Lieshout and Baddeley (1999). All analyses found that the pattern of newly emergent canes exhibits clustering, which Hutchings attributes to ``vigorous vegetative reproduction''. } \references{ Diggle, P. J. (1981a) Some graphical methods in the analysis of spatial point patterns. In \emph{Interpreting multivariate data}, V. Barnett (Ed.) John Wiley and Sons. Diggle, P. J. (1981b). Statistical analysis of spatial point patterns. \emph{N.Z. Statist.} \bold{16}, 22--41. Diggle, P.J. (1983) \emph{Statistical analysis of spatial point patterns}. Academic Press. Diggle, P. J. and Milne, R. K. 
(1983) Bivariate Cox processes: some models for bivariate spatial point patterns. \emph{Journal of the Royal Statistical Soc. Series B} \bold{45}, 11--21. Hutchings, M. J. (1979) Standing crop and pattern in pure stands of Mercurialis perennis and Rubus fruticosus in mixed deciduous woodland. \emph{Oikos} \bold{31}, 351--357. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. } \keyword{datasets} \keyword{spatial} spatstat/man/pcf.Rd0000755000176000001440000000735712237642733014007 0ustar ripleyusers\name{pcf} \alias{pcf} \title{Pair Correlation Function} \description{ Estimate the pair correlation function. } \usage{ pcf(X, \dots) } \arguments{ \item{X}{ Either the observed data point pattern, or an estimate of its \eqn{K} function, or an array of multitype \eqn{K} functions (see Details). } \item{\dots}{ Other arguments passed to the appropriate method. } } \value{ Either a function value table (object of class \code{"fv"}, see \code{\link{fv.object}}) representing a pair correlation function, or a function array (object of class \code{"fasp"}, see \code{\link{fasp.object}}) representing an array of pair correlation functions. } \details{ The pair correlation function of a stationary point process is \deqn{ g(r) = \frac{K'(r)}{2\pi r} }{ g(r) = K'(r)/ ( 2 * pi * r) } where \eqn{K'(r)} is the derivative of \eqn{K(r)}, the reduced second moment function (aka ``Ripley's \eqn{K} function'') of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. For a stationary Poisson process, the pair correlation function is identically equal to 1. Values \eqn{g(r) < 1} suggest inhibition between points; values greater than 1 suggest clustering. 
We also apply the same definition to other variants of the classical \eqn{K} function, such as the multitype \eqn{K} functions (see \code{\link{Kcross}}, \code{\link{Kdot}}) and the inhomogeneous \eqn{K} function (see \code{\link{Kinhom}}). For all these variants, the benchmark value of \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} corresponds to \eqn{g(r) = 1}. This routine computes an estimate of \eqn{g(r)} either directly from a point pattern, or indirectly from an estimate of \eqn{K(r)} or one of its variants. This function is generic, with methods for the classes \code{"ppp"}, \code{"fv"} and \code{"fasp"}. If \code{X} is a point pattern (object of class \code{"ppp"}) then the pair correlation function is estimated using a traditional kernel smoothing method (Stoyan and Stoyan, 1994). See \code{\link{pcf.ppp}} for details. If \code{X} is a function value table (object of class \code{"fv"}), then it is assumed to contain estimates of the \eqn{K} function or one of its variants (typically obtained from \code{Kest} or \code{Kinhom}). This routine computes an estimate of \eqn{g(r)} using smoothing splines to approximate the derivative. See \code{\link{pcf.fv}} for details. If \code{X} is a function value array (object of class \code{"fasp"}), then it is assumed to contain estimates of several \eqn{K} functions (typically obtained from \code{\link{Kmulti}} or \code{\link{alltypes}}). This routine computes an estimate of \eqn{g(r)} for each cell in the array, using smoothing splines to approximate the derivatives. See \code{\link{pcf.fasp}} for details. } \references{ Stoyan, D. and Stoyan, H. (1994) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. 
} \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcf.fv}}, \code{\link{pcf.fasp}}, \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{Kcross}}, \code{\link{Kdot}}, \code{\link{Kmulti}}, \code{\link{alltypes}} } \examples{ # ppp object data(simdat) \testonly{ simdat <- simdat[seq(1,npoints(simdat), by=4)] } p <- pcf(simdat) plot(p) # fv object K <- Kest(simdat) p2 <- pcf(K, spar=0.8, method="b") plot(p2) # multitype pattern; fasp object amaK <- alltypes(amacrine, "K") amap <- pcf(amaK, spar=1, method="b") plot(amap) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/nnwhich.ppx.Rd0000755000176000001440000000554312237642733015476 0ustar ripleyusers\name{nnwhich.ppx} \alias{nnwhich.ppx} \title{Nearest Neighbours in Any Dimensions} \description{ Finds the nearest neighbour of each point in a multi-dimensional point pattern. } \usage{ \method{nnwhich}{ppx}(X, \dots, k=1) } \arguments{ \item{X}{ Multi-dimensional point pattern (object of class \code{"ppx"}). } \item{\dots}{ Arguments passed to \code{\link{coords.ppx}} to determine which coordinates should be used. } \item{k}{ Integer, or integer vector. The algorithm will compute the distance to the \code{k}th nearest neighbour. } } \value{ Numeric vector or matrix giving, for each point, the index of its nearest neighbour (or \code{k}th nearest neighbour). If \code{k = 1} (the default), the return value is a numeric vector \code{v} giving the indices of the nearest neighbours (the nearest neighbour of the \code{i}th point is the \code{j}th point where \code{j = v[i]}). If \code{k} is a single integer, then the return value is a numeric vector giving the indices of the \code{k}th nearest neighbours. 
If \code{k} is a vector, then the return value is a matrix \code{m} such that \code{m[i,j]} is the index of the \code{k[j]}th nearest neighbour for the \code{i}th data point. } \details{ For each point in the given multi-dimensional point pattern, this function finds its nearest neighbour (the nearest other point of the pattern). By default it returns a vector giving, for each point, the index of the point's nearest neighbour. If \code{k} is specified, the algorithm finds each point's \code{k}th nearest neighbour. The function \code{nnwhich} is generic. This is the method for the class \code{"ppx"}. If there are no points in the pattern, a numeric vector of length zero is returned. If there is only one point, then the nearest neighbour is undefined, and a value of \code{NA} is returned. In general if the number of points is less than or equal to \code{k}, then a vector of \code{NA}'s is returned. To evaluate the \emph{distance} between a point and its nearest neighbour, use \code{\link{nndist}}. To find the nearest neighbours from one point pattern to another point pattern, use \code{\link{nncross}}. By default, both spatial and temporal coordinates are extracted. To obtain the spatial distance between points in a space-time point pattern, set \code{temporal=FALSE}. } \section{Warnings}{ A value of \code{NA} is returned if there is only one point in the point pattern. 
} \seealso{ \code{\link{nnwhich}}, \code{\link{nndist}}, \code{\link{nncross}} } \examples{ df <- data.frame(x=runif(5),y=runif(5),z=runif(5),w=runif(5)) X <- ppx(data=df) m <- nnwhich(X) m2 <- nnwhich(X, k=2) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/bdist.pixels.Rd0000755000176000001440000000444712237642732015643 0ustar ripleyusers\name{bdist.pixels} \alias{bdist.pixels} \title{Distance to Boundary of Window} \description{ Computes the distances from each pixel in a window to the boundary of the window. } \usage{ bdist.pixels(w, \dots, style="image") } \arguments{ \item{w}{A window (object of class \code{"owin"}).} \item{\dots}{Arguments passed to \code{\link{as.mask}} to determine the pixel resolution.} \item{style}{Character string determining the format of the output: either \code{"matrix"}, \code{"coords"} or \code{"image"}. } } \value{ If \code{style="image"}, a pixel image (object of class \code{"im"}) containing the distances from each pixel in the image raster to the boundary of the window. If \code{style="matrix"}, a matrix giving the distances from each pixel in the image raster to the boundary of the window. Rows of this matrix correspond to the \eqn{y} coordinate and columns to the \eqn{x} coordinate. If \code{style="coords"}, a list with three components \code{x,y,z}, where \code{x,y} are vectors of length \eqn{m,n} giving the \eqn{x} and \eqn{y} coordinates respectively, and \code{z} is an \eqn{m \times n}{m x n} matrix such that \code{z[i,j]} is the distance from \code{(x[i],y[j])} to the boundary of the window. Rows of this matrix correspond to the \eqn{x} coordinate and columns to the \eqn{y} coordinate. This result can be plotted with \code{persp}, \code{image} or \code{contour}. 
} \details{ This function computes, for each pixel \eqn{u} in the window \code{w}, the shortest distance \eqn{d(u, W^c)}{dist(u, W')} from \eqn{u} to the boundary of \eqn{W}. If the window is not of type \code{"mask"} then it is first converted to that type. The arguments \code{"\dots"} are passed to \code{\link{as.mask}} to determine the pixel resolution. } \seealso{ \code{\link{owin.object}}, \code{\link{erosion}}, \code{\link{bdist.points}}, \code{\link{bdist.tiles}}. } \examples{ u <- owin(c(0,1),c(0,1)) d <- bdist.pixels(u, eps=0.01) image(d) d <- bdist.pixels(u, eps=0.01, style="matrix") mean(d >= 0.1) # value is approx (1 - 2 * 0.1)^2 = 0.64 } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/im.Rd0000755000176000001440000001046212237642732013632 0ustar ripleyusers\name{im} \alias{im} \title{Create a Pixel Image Object} \description{ Creates an object of class \code{"im"} representing a two-dimensional pixel image. } \usage{ im(mat, xcol=seq_len(ncol(mat)), yrow=seq_len(nrow(mat)), xrange=NULL, yrange=NULL, unitname=NULL) } \arguments{ \item{mat}{ matrix or vector containing the pixel values of the image. } \item{xcol}{ vector of \eqn{x} coordinates for the pixel gid } \item{yrow}{ vector of \eqn{y} coordinates for the pixel grid } \item{xrange,yrange}{ Optional. Vectors of length 2 giving the \eqn{x} and \eqn{y} limits of the enclosing rectangle. (Ignored if \code{xcol}, \code{yrow} are present.) } \item{unitname}{ Optional. Name of unit of length. Either a single character string, or a vector of two character strings giving the singular and plural forms, respectively. } } \details{ This function creates an object of class \code{"im"} representing a two-dimensional pixel image. See \code{\link{im.object}} for details of this class. 
The matrix \code{mat} contains the \sQuote{greyscale} values for a rectangular grid of pixels. Note carefully that the entry \code{mat[i,j]} gives the pixel value at the location \code{(xcol[j],yrow[i])}. That is, the \bold{row} index of the matrix \code{mat} corresponds to increasing \bold{y} coordinate, while the column index of \code{mat} corresponds to increasing \bold{x} coordinate. Thus \code{yrow} has one entry for each row of \code{mat} and \code{xcol} has one entry for each column of \code{mat}. Under the usual convention in \R, a correct display of the image would be obtained by transposing the matrix, e.g. \code{image.default(xcol, yrow, t(mat))}, if you wanted to do it by hand. The entries of \code{mat} may be numeric (real or integer), complex, logical, character, or factor values. If \code{mat} is not a matrix, it will be converted into a matrix with \code{nrow(mat) = length(yrow)} and \code{ncol(mat) = length(xcol)}. To make a factor-valued image, note that \R has a quirky way of handling matrices with factor-valued entries. The command \code{\link{matrix}} cannot be used directly, because it destroys factor information. To make a factor-valued image, do one of the following: \itemize{ \item Create a \code{factor} containing the pixel values, say \code{mat <- factor(.....)}, and then assign matrix dimensions to it by \code{dim(mat) <- c(nr, nc)} where \code{nr, nc} are the numbers of rows and columns. The resulting object \code{mat} is both a factor and a vector. \item Supply \code{mat} as a one-dimensional factor and specify the arguments \code{xcol} and \code{yrow} to determine the dimensions of the image. \item Use the functions \code{\link{cut.im}} or \code{\link{eval.im}} to make factor-valued images from other images). } For a description of the methods available for pixel image objects, see \code{\link{im.object}}. To convert other kinds of data to a pixel image (for example, functions or windows), use \code{\link{as.im}}. 
} \seealso{ \code{\link{im.object}}, \code{\link{as.im}}, \code{\link{as.matrix.im}}, \code{\link{[.im}}, \code{\link{eval.im}} } \section{Warnings}{ The internal representation of images is likely to change in future releases of \pkg{spatstat}. The safe way to extract pixel values from an image object is to use \code{\link{as.matrix.im}} or \code{\link{[.im}}. } \examples{ vec <- rnorm(1200) mat <- matrix(vec, nrow=30, ncol=40) whitenoise <- im(mat) whitenoise <- im(mat, xrange=c(0,1), yrange=c(0,1)) whitenoise <- im(mat, xcol=seq(0,1,length=40), yrow=seq(0,1,length=30)) whitenoise <- im(vec, xcol=seq(0,1,length=40), yrow=seq(0,1,length=30)) plot(whitenoise) # Factor-valued images: f <- factor(letters[1:12]) dim(f) <- c(3,4) Z <- im(f) # Factor image from other image: cutwhite <- cut(whitenoise, 3) plot(cutwhite) # Factor image from raw data cutmat <- cut(mat, 3) dim(cutmat) <- c(30,40) cutwhite <- im(cutmat) plot(cutwhite) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{datagen} spatstat/man/identify.ppp.Rd0000755000176000001440000000346112237642732015637 0ustar ripleyusers\name{identify.ppp} \alias{identify.ppp} \title{Identify Points in a Point Pattern} \description{ If a point pattern is plotted in the graphics window, this function will find the point of the pattern which is nearest to the mouse position, and print its mark value (or its serial number if there is no mark). } \usage{ \method{identify}{ppp}(x, \dots) } \arguments{ \item{x}{ A point pattern (object of class \code{"ppp"}). } \item{\dots}{ Arguments passed to \code{\link{identify.default}}. } } \value{ If \code{x} is unmarked, the result is a vector containing the serial numbers of the points in the pattern \code{x} that were identified. 
If \code{x} is marked, the result is a 2-column matrix, the first column containing the serial numbers and the second containing the marks for these points. } \details{ This is a method for the generic function \code{\link{identify}} for point pattern objects. The point pattern \code{x} should first be plotted using \code{\link{plot.ppp}}. Then \code{identify(x)} reads the position of the graphics pointer each time the left mouse button is pressed. It then finds the point of the pattern \code{x} closest to the mouse position. If this closest point is sufficiently close to the mouse pointer, its index (and its mark if any) will be returned as part of the value of the call. Each time a point of the pattern is identified, text will be displayed next to the point, showing its serial number (if \code{x} is unmarked) or its mark value (if \code{x} is marked). } \seealso{ \code{\link{identify}}, \code{\link{clickppp}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{iplot} spatstat/man/vcov.slrm.Rd0000644000176000001440000000700012237642734015147 0ustar ripleyusers\name{vcov.slrm} \alias{vcov.slrm} \title{Variance-Covariance Matrix for a Fitted Spatial Logistic Regression} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a point process model that was fitted by spatial logistic regression. } \usage{ \method{vcov}{slrm}(object, \dots, what=c("vcov", "corr", "fisher", "Fisher")) } \arguments{ \item{object}{A fitted point process model of class \code{"slrm"}.} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix. 
} } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"slrm"}, typically produced by \code{\link{slrm}}. It represents a Poisson point process model fitted by spatial logistic regression. The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.slrm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.slrm}(object)}. The row and column names of the matrix are also identical to the names in \code{\link{coef.slrm}(object)}. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. Standard errors for the fitted intensity can be obtained using \code{\link{predict.slrm}}. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. This can occur because of numerical overflow or collinearity in the covariates. } \value{ A square matrix. 
} \examples{ X <- rpoispp(42) fit <- slrm(X ~ x + y) vcov(fit) vcov(fit, what="corr") vcov(fit, what="f") } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz}. } \seealso{ \code{\link{vcov}} for the generic, \code{\link{slrm}} for information about fitted models, \code{\link{predict.slrm}} for other kinds of calculation about the model, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. {doi: 10.1214/10-EJS581} } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/PairPiece.Rd0000755000176000001440000000765312237642731015075 0ustar ripleyusers\name{PairPiece} \alias{PairPiece} \title{The Piecewise Constant Pairwise Interaction Point Process Model} \description{ Creates an instance of a pairwise interaction point process model with piecewise constant potential function. The model can then be fitted to point pattern data. } \usage{ PairPiece(r) } \arguments{ \item{r}{vector of jump points for the potential function} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. The process is a pairwise interaction process, whose interaction potential is piecewise constant, with jumps at the distances given in the vector \eqn{r}. } \details{ A pairwise interaction point process in a bounded region is a stochastic point process with probability density of the form \deqn{ f(x_1,\ldots,x_n) = \alpha \prod_i b(x_i) \prod_{i < j} h(x_i, x_j) }{ f(x_1,\ldots,x_n) = alpha . product { b(x[i]) } product { h(x_i, x_j) } } where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern. 
The first product on the right hand side is over all points of the pattern; the second product is over all unordered pairs of points of the pattern. Thus each point \eqn{x_i}{x[i]} of the pattern contributes a factor \eqn{b(x_i)}{b(x[i])} to the probability density, and each pair of points \eqn{x_i, x_j}{x[i], x[j]} contributes a factor \eqn{h(x_i,x_j)}{h(x[i], x[j])} to the density. The pairwise interaction term \eqn{h(u, v)} is called \emph{piecewise constant} if it depends only on the distance between \eqn{u} and \eqn{v}, say \eqn{h(u,v) = H(||u-v||)}, and \eqn{H} is a piecewise constant function (a function which is constant except for jumps at a finite number of places). The use of piecewise constant interaction terms was first suggested by Takacs (1986). The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the piecewise constant pairwise interaction is yielded by the function \code{PairPiece()}. See the examples below. The entries of \code{r} must be strictly increasing, positive numbers. They are interpreted as the points of discontinuity of \eqn{H}. It is assumed that \eqn{H(s) =1} for all \eqn{s > r_{max}}{s > rmax} where \eqn{r_{max}}{rmax} is the maximum value in \code{r}. Thus the model has as many regular parameters (see \code{\link{ppm}}) as there are entries in \code{r}. The \eqn{i}-th regular parameter \eqn{\theta_i}{theta[i]} is the logarithm of the value of the interaction function \eqn{H} on the interval \eqn{[r_{i-1},r_i)}{[r[i-1],r[i])}. If \code{r} is a single number, this model is similar to the Strauss process, see \code{\link{Strauss}}. The difference is that in \code{PairPiece} the interaction function is continuous on the right, while in \code{\link{Strauss}} it is continuous on the left. 
The analogue of this model for multitype point processes has not yet been implemented. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}}, \code{\link{Strauss}} \code{\link{rmh.ppm}} } \examples{ PairPiece(c(0.1,0.2)) # prints a sensible description of itself data(cells) \dontrun{ ppm(cells, ~1, PairPiece(r = c(0.05, 0.1, 0.2))) # fit a stationary piecewise constant pairwise interaction process } ppm(cells, ~polynom(x,y,3), PairPiece(c(0.05, 0.1))) # nonstationary process with log-cubic polynomial trend } \references{ Takacs, R. (1986) Estimator for the pair potential of a Gibbsian point process. \emph{Statistics} \bold{17}, 429--433. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/bw.relrisk.Rd0000755000176000001440000000717412237642732015315 0ustar ripleyusers\name{bw.relrisk} \alias{bw.relrisk} \title{ Cross Validated Bandwidth Selection for Relative Risk Estimation } \description{ Uses cross-validation to select a smoothing bandwidth for the estimation of relative risk. } \usage{ bw.relrisk(X, method = "likelihood", nh = spatstat.options("n.bandwidth"), hmin=NULL, hmax=NULL, warn=TRUE) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{method}{ Character string determining the cross-validation method. Current options are \code{"likelihood"}, \code{"leastsquares"} or \code{"weightedleastsquares"}. } \item{nh}{ Number of trial values of smoothing bandwith \code{sigma} to consider. The default is 32. } \item{hmin, hmax}{ Optional. Numeric values. Range of trial values of smoothing bandwith \code{sigma} to consider. There is a sensible default. } \item{warn}{ Logical. If \code{TRUE}, issue a warning if the minimum of the cross-validation criterion occurs at one of the ends of the search interval. 
} } \details{ This function selects an appropriate bandwidth for the nonparametric estimation of relative risk using \code{\link{relrisk}}. Consider the indicators \eqn{y_{ij}}{y[i,j]} which equal \eqn{1} when data point \eqn{x_i}{x[i]} belongs to type \eqn{j}, and equal \eqn{0} otherwise. For a particular value of smoothing bandwidth, let \eqn{\hat p_j(u)}{p*[j](u)} be the estimated probabilities that a point at location \eqn{u} will belong to type \eqn{j}. Then the bandwidth is chosen to minimise either the likelihood, the squared error, or the approximately standardised squared error, of the indicators \eqn{y_{ij}}{y[i,j]} relative to the fitted values \eqn{\hat p_j(x_i)}{p*[j](x[i])}. See Diggle (2003). The result is a numerical value giving the selected bandwidth \code{sigma}. The result also belongs to the class \code{"bw.optim"} allowing it to be printed and plotted. The plot shows the cross-validation criterion as a function of bandwidth. The range of values for the smoothing bandwidth \code{sigma} is set by the arguments \code{hmin, hmax}. There is a sensible default, based on multiples of Stoyan's rule of thumb \code{\link{bw.stoyan}}. If the optimal bandwidth is achieved at an endpoint of the interval \code{[hmin, hmax]}, the algorithm will issue a warning (unless \code{warn=FALSE}). If this occurs, then it is probably advisable to expand the interval by changing the arguments \code{hmin, hmax}. Computation time depends on the number \code{nh} of trial values considered, and also on the range \code{[hmin, hmax]} of values considered, because larger values of \code{sigma} require calculations involving more pairs of data points. } \value{ A numerical value giving the selected bandwidth. The result also belongs to the class \code{"bw.optim"} which can be plotted. 
} \seealso{ \code{\link{relrisk}}, \code{\link{bw.stoyan}} } \examples{ data(urkiola) \testonly{op <- spatstat.options(n.bandwidth=8)} b <- bw.relrisk(urkiola) b plot(b) b <- bw.relrisk(urkiola, hmax=20) plot(b) \testonly{spatstat.options(op)} } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. Kelsall, J.E. and Diggle, P.J. (1995) Kernel estimation of relative risk. \emph{Bernoulli} \bold{1}, 3--16. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/cut.im.Rd0000755000176000001440000000330512237642732014422 0ustar ripleyusers\name{cut.im} \alias{cut.im} \title{Convert Pixel Image from Numeric to Factor} \description{ Transform the values of a pixel image from numeric values into a factor. } \usage{ \method{cut}{im}(x, \dots) } \arguments{ \item{x}{ A pixel image. An object of class \code{"im"}. } \item{\dots}{ Arguments passed to \code{\link{cut.default}}. They determine the breakpoints for the mapping from numerical values to factor values. See \code{\link{cut.default}}. } } \value{ A pixel image (object of class \code{"im"}) with pixel values that are a factor. See \code{\link{im.object}}. } \details{ This simple function applies the generic \code{\link{cut}} operation to the pixel values of the image \code{x}. The range of pixel values is divided into several intervals, and each interval is associated with a level of a factor. The result is another pixel image, with the same window and pixel grid as \code{x}, but with the numeric value of each pixel discretised by replacing it by the factor level. This function is a convenient way to inspect an image and to obtain summary statistics. See the examples. To select a subset of an image, use the subset operator \code{\link{[.im}} instead. 
} \seealso{ \code{\link{cut}}, \code{\link{im.object}} } \examples{ # artificial image data Z <- setcov(square(1)) Y <- cut(Z, 3) Y <- cut(Z, breaks=seq(0,1,length=5)) # cut at the quartiles # (divides the image into 4 equal areas) Y <- cut(Z, quantile(Z)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/rmhcontrol.Rd0000755000176000001440000003151412237642734015417 0ustar ripleyusers\name{rmhcontrol} \alias{rmhcontrol} \alias{rmhcontrol.default} \title{Set Control Parameters for Metropolis-Hastings Algorithm.} \description{ Sets up a list of parameters controlling the iterative behaviour of the Metropolis-Hastings algorithm. } \usage{ rmhcontrol(\dots) \method{rmhcontrol}{default}(\dots, p=0.9, q=0.5, nrep=5e5, expand=NULL, periodic=NULL, ptypes=NULL, x.cond=NULL, fixall=FALSE, nverb=0, nsave=NULL, nburn=nsave, track=FALSE) } \arguments{ \item{\dots}{Arguments passed to methods.} \item{p}{Probability of proposing a shift (as against a birth/death).} \item{q}{Conditional probability of proposing a death given that a birth or death will be proposed.} \item{nrep}{Total number of steps (proposals) of Metropolis-Hastings algorithm that should be run.} \item{expand}{ Simulation window or expansion rule. Either a window (object of class \code{"owin"}) or a numerical expansion factor, specifying that simulations are to be performed in a domain other than the original data window, then clipped to the original data window. This argument is passed to \code{\link{rmhexpand}}. A numerical expansion factor can be in several formats: see \code{\link{rmhexpand}}. } \item{periodic}{ Logical value (or \code{NULL}) indicating whether to simulate ``periodically'', i.e. identifying opposite edges of the rectangular simulation window. 
A \code{NULL} value means ``undecided.'' } \item{ptypes}{For multitype point processes, the distribution of the mark attached to a new random point (when a birth is proposed)} \item{x.cond}{Conditioning points for conditional simulation.} \item{fixall}{(Logical) for multitype point processes, whether to fix the number of points of each type.} \item{nverb}{Progress reports will be printed every \code{nverb} iterations} \item{nsave,nburn}{ If these values are specified, then intermediate states of the simulation algorithm will be saved every \code{nsave} iterations, after an initial burn-in period of \code{nburn} iterations. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations. } } \value{ An object of class \code{"rmhcontrol"}, which is essentially a list of parameter values for the algorithm. There is a \code{print} method for this class, which prints a sensible description of the parameters chosen. } \details{ The Metropolis-Hastings algorithm, implemented as \code{\link{rmh}}, generates simulated realisations of point process models. The function \code{rmhcontrol} sets up a list of parameters which control the iterative behaviour and termination of the Metropolis-Hastings algorithm, for use in a subsequent call to \code{\link{rmh}}. It also checks that the parameters are valid. (A separate function \code{\link{rmhstart}} determines the initial state of the algorithm, and \code{\link{rmhmodel}} determines the model to be simulated.) The parameters are as follows: \describe{ \item{p}{The probability of proposing a ``shift'' (as opposed to a birth or death) in the Metropolis-Hastings algorithm. If \eqn{p = 1} then the algorithm only alters existing points, so the number of points never changes, i.e. we are simulating conditionally upon the number of points. The number of points is determined by the initial state (specified by \code{\link{rmhstart}}). 
If \eqn{p=1} and \code{fixall=TRUE} and the model is a multitype point process model, then the algorithm only shifts the locations of existing points and does not alter their marks (types). This is equivalent to simulating conditionally upon the number of points of each type. These numbers are again specified by the initial state. If \eqn{p = 1} then no expansion of the simulation window is allowed (see \code{expand} below). The default value of \code{p} can be changed by setting the parameter \code{rmh.p} in \code{\link{spatstat.options}}. } \item{q}{The conditional probability of proposing a death (rather than a birth) given that a shift is not proposed. This is of course ignored if \code{p} is equal to 1. The default value of \code{q} can be changed by setting the parameter \code{rmh.q} in \code{\link{spatstat.options}}. } \item{nrep}{The number of repetitions or iterations to be made by the Metropolis-Hastings algorithm. It should be large. The default value of \code{nrep} can be changed by setting the parameter \code{rmh.nrep} in \code{\link{spatstat.options}}. } \item{expand}{ Either a number or a window (object of class \code{"owin"}). Indicates that the process is to be simulated on a domain other than the original data window \code{w}, then clipped to \code{w} when the algorithm has finished. This would often be done in order to approximate the simulation of a stationary process (Geyer, 1999) or more generally a process existing in the whole plane, rather than just in the window \code{w}. If \code{expand} is a window object, it is taken as the larger domain in which simulation is performed. If \code{expand} is numeric, it is interpreted as an expansion factor or expansion distance for determining the simulation domain from the data window. It should be a \emph{named} scalar, such as \code{expand=c(area=2)}, \code{expand=c(distance=0.1)}, \code{expand=c(length=1.2)}. See \code{\link{rmhexpand}()} for more details. 
If the name is omitted, it defaults to \code{area}. Expansion is not permitted if the number of points has been fixed by setting \code{p = 1} or if the starting configuration has been specified via the argument \code{x.start} in \code{\link{rmhstart}}. If \code{expand} is \code{NULL}, this is interpreted to mean \dQuote{not yet decided}. An expansion rule will be determined at a later stage, using appropriate defaults. See \code{\link{rmhexpand}}. } \item{periodic}{A logical value (or \code{NULL}) determining whether to simulate \dQuote{periodically}. If \code{periodic} is \code{TRUE}, and if the simulation window is a rectangle, then the simulation algorithm effectively identifies opposite edges of the rectangle. Points near the right-hand edge of the rectangle are deemed to be close to points near the left-hand edge. Periodic simulation usually gives a better approximation to a stationary point process. For periodic simulation, the simulation window must be a rectangle. (The simulation window is determined by \code{expand} as described above.) The value \code{NULL} means \sQuote{undecided}. The decision is postponed until \code{\link{rmh}} is called. Depending on the point process model to be simulated, \code{rmh} will then set \code{periodic=TRUE} if the simulation window is expanded \emph{and} the expanded simulation window is rectangular; otherwise \code{periodic=FALSE}. Note that \code{periodic=TRUE} is only permitted when the simulation window (i.e. the expanded window) is rectangular. } \item{ptypes}{A vector of probabilities (summing to 1) to be used in assigning a random type to a new point. Defaults to a vector each of whose entries is \eqn{1/nt} where \eqn{nt} is the number of types for the process. Convergence of the simulation algorithm should be improved if \code{ptypes} is close to the relative frequencies of the types which will result from the simulation. 
} \item{x.cond}{ If this argument is given, then \emph{conditional simulation} will be performed, and \code{x.cond} specifies the location of the fixed points as well as the type of conditioning. It should be either a point pattern (object of class \code{"ppp"}) or a \code{list(x,y)} or a \code{data.frame}. See the section on Conditional Simulation. } \item{fixall}{A logical scalar specifying whether to condition on the number of points of each type. Meaningful only if a marked process is being simulated, and if \eqn{p = 1}. A warning message is given if \code{fixall} is set equal to \code{TRUE} when it is not meaningful. } \item{nverb}{An integer specifying how often ``progress reports'' (which consist simply of the number of repetitions completed) should be printed out. If nverb is left at 0, the default, the simulation proceeds silently. } \item{nsave,nburn}{If these integers are given, then the current state of the simulation algorithm (i.e. the current random point pattern) will be saved every \code{nsave} iterations, starting from iteration \code{nburn}. } \item{track}{ Logical flag indicating whether to save the transition history of the simulations (i.e. information specifying what type of proposal was made, and whether it was accepted or rejected, for each iteration). } } } \section{Conditional Simulation}{ For a Gibbs point process \eqn{X}, the Metropolis-Hastings algorithm easily accommodates several kinds of conditional simulation: \describe{ \item{conditioning on the total number of points:}{ We fix the total number of points \eqn{N(X)} to be equal to \eqn{n}. We simulate from the conditional distribution of \eqn{X} given \eqn{N(X) = n}. } \item{conditioning on the number of points of each type:}{ In a multitype point process, where \eqn{Y_j}{Y[[j]]} denotes the process of points of type \eqn{j}, we fix the number \eqn{N(Y_j)}{N(Y[[j]])} of points of type \eqn{j} to be equal to \eqn{n_j}{n[j]}, for \eqn{j=1,2,\ldots,m}{j=1,2,...,m}. 
We simulate from the conditional distribution of \eqn{X} given \eqn{N(Y_j)=n_j}{N(Y[[j]]) = n[j]} for \eqn{j=1,2,\ldots,m}{j=1,2,...,m}. } \item{conditioning on the realisation in a subwindow:}{ We require that the point process \eqn{X} should, within a specified sub-window \eqn{V}, coincide with a specified point pattern \eqn{y}. We simulate from the conditional distribution of \eqn{X} given \eqn{X \cap V = y}{(X intersect V) = y}. } \item{Palm conditioning:}{ We require that the point process \eqn{X} include a specified list of points \eqn{y}. We simulate from the point process with probability density \eqn{g(x) = c f(x \cup y)}{g(x) = c * f(x union y)} where \eqn{f} is the probability density of the original process \eqn{X}, and \eqn{c} is a normalising constant. } } To achieve each of these types of conditioning we do as follows: \describe{ \item{conditioning on the total number of points:}{ Set \code{p=1}. The number of points is determined by the initial state of the simulation: see \code{\link{rmhstart}}. } \item{conditioning on the number of points of each type:}{ Set \code{p=1} and \code{fixall=TRUE}. The number of points of each type is determined by the initial state of the simulation: see \code{\link{rmhstart}}. } \item{conditioning on the realisation in a subwindow:}{ Set \code{x.cond} to be a point pattern (object of class \code{"ppp"}). Its window \code{V=x.cond$window} becomes the conditioning subwindow \eqn{V}. } \item{Palm conditioning:}{ Set \code{x.cond} to be a \code{list(x,y)} or \code{data.frame} with two columns containing the coordinates of the points, or a \code{list(x,y,marks)} or \code{data.frame} with three columns containing the coordinates and marks of the points. } } The arguments \code{x.cond}, \code{p} and \code{fixall} can be combined. } \references{ Geyer, C.J. (1999) Likelihood Inference for Spatial Point Processes. Chapter 3 in O.E. Barndorff-Nielsen, W.S. Kendall and M.N.M. 
Van Lieshout (eds) \emph{Stochastic Geometry: Likelihood and Computation}, Chapman and Hall / CRC, Monographs on Statistics and Applied Probability, number 80. Pages 79--140. } \seealso{ \code{\link{rmh}}, \code{\link{rmhmodel}}, \code{\link{rmhstart}}, \code{\link{rmhexpand}}, \code{\link{spatstat.options}} } \examples{ # parameters given as named arguments c1 <- rmhcontrol(p=0.3,periodic=TRUE,nrep=1e6,nverb=1e5) # parameters given as a list liz <- list(p=0.9, nrep=1e4) c2 <- rmhcontrol(liz) # parameters given in rmhcontrol object c3 <- rmhcontrol(c1) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/pairdist.pp3.Rd0000755000176000001440000000377512237642733015557 0ustar ripleyusers\name{pairdist.pp3} \alias{pairdist.pp3} \title{Pairwise distances in Three Dimensions} \description{ Computes the matrix of distances between all pairs of points in a three-dimensional point pattern. } \usage{ \method{pairdist}{pp3}(X, \dots, periodic=FALSE, squared=FALSE) } \arguments{ \item{X}{ A point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ This is a method for the generic function \code{pairdist}. Given a three-dimensional point pattern \code{X} (an object of class \code{"pp3"}), this function computes the Euclidean distances between all pairs of points in \code{X}, and returns the matrix of distances. 
Alternatively if \code{periodic=TRUE} and the window containing \code{X} is a box, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite faces of the box are regarded as equivalent. This is meaningless if the window is not a box. If \code{squared=TRUE} then the \emph{squared} Euclidean distances \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}. The squared distances are faster to calculate, and are sufficient for many purposes (such as finding the nearest neighbour of a point). } \seealso{ \code{\link{pairdist}}, \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{K3est}} } \examples{ X <- runifpoint3(20) d <- pairdist(X) d <- pairdist(X, periodic=TRUE) d <- pairdist(X, squared=TRUE) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} based on two-dimensional code by Pavel Grabarnik. } \keyword{spatial} \keyword{math} spatstat/man/bw.stoyan.Rd0000755000176000001440000000355112237642732015152 0ustar ripleyusers\name{bw.stoyan} \alias{bw.stoyan} \title{ Stoyan's Rule of Thumb for Bandwidth Selection } \description{ Computes a rough estimate of the appropriate bandwidth for kernel smoothing estimators of the pair correlation function and other quantities. } \usage{ bw.stoyan(X, co=0.15) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{co}{ Coefficient appearing in the rule of thumb. See Details. } } \details{ Estimation of the pair correlation function and other quantities by smoothing methods requires a choice of the smoothing bandwidth. Stoyan and Stoyan (1995, equation (15.16), page 285) proposed a rule of thumb for choosing the smoothing bandwidth. 
For the Epanechnikov kernel, the rule of thumb is to set the kernel's half-width \eqn{h} to \eqn{0.15/\sqrt{\lambda}}{0.15/sqrt(lambda)} where \eqn{\lambda}{lambda} is the estimated intensity of the point pattern, typically computed as the number of points of \code{X} divided by the area of the window containing \code{X}. For a general kernel, the corresponding rule is to set the standard deviation of the kernel to \eqn{\sigma = 0.15/\sqrt{5\lambda}}{sigma = 0.15/sqrt(5 * lambda)}. The coefficient \eqn{0.15} can be tweaked using the argument \code{co}. } \value{ A numerical value giving the selected bandwidth (the standard deviation of the smoothing kernel). } \seealso{ \code{\link{pcf}}, \code{\link{bw.relrisk}} } \examples{ data(shapley) bw.stoyan(shapley) } \references{ Stoyan, D. and Stoyan, H. (1995) Fractals, random shapes and point fields: methods of geometrical statistics. John Wiley and Sons. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/summary.splitppp.Rd0000755000176000001440000000215612237642734016577 0ustar ripleyusers\name{summary.splitppp} \alias{summary.splitppp} \title{Summary of a Split Point Pattern} \description{ Prints a useful summary of a split point pattern. } \usage{ \method{summary}{splitppp}(object, \dots) } \arguments{ \item{object}{ Split point pattern (object of class \code{"splitppp"}, effectively a list of point patterns, usually created by \code{\link{split.ppp}}). } \item{\dots}{ Ignored. } } \details{ This is a method for the generic function \code{\link{summary}}. An object of the class \code{"splitppp"} is effectively a list of point patterns (objects of class \code{"ppp"}) representing different sub-patterns of an original point pattern. This function extracts a useful summary of each of the sub-patterns. 
} \seealso{ \code{\link{summary}}, \code{\link{split}}, \code{\link{split.ppp}} } \examples{ data(amacrine) # multitype point pattern summary(split(amacrine)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/mean.im.Rd0000755000176000001440000000465012237642733014554 0ustar ripleyusers\name{mean.im} \alias{max.im} \alias{min.im} \alias{mean.im} \alias{median.im} \alias{range.im} \alias{sum.im} \title{Maximum, Minimum, Mean, Median, Range or Sum of Pixel Values in an Image} \description{ Calculates the mean, median, range, sum, maximum or minimum of the pixel values in a pixel image. } \usage{ \method{max}{im}(x, \dots) \method{min}{im}(x, \dots) \method{mean}{im}(x, \dots) \method{median}{im}(x, \dots) \method{range}{im}(x, \dots) \method{sum}{im}(x, \dots) } \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{Arguments passed to \code{\link{mean.default}}.} } \details{ These functions calculate the mean, median, range, sum, maximum or minimum of the pixel values in the image \code{x}. An object of class \code{"im"} describes a pixel image. See \code{\link{im.object}} for details of this class. The function \code{mean.im} is a method for the generic function \code{\link{mean}} for the class \code{"im"}. Similarly \code{median.im} is a method for the generic \code{\link{median}} and \code{range.im} is a method for \code{\link{range}}. If the image \code{x} is logical-valued, the mean value of \code{x} is the fraction of pixels that have the value \code{TRUE}. The median is not defined. If the image \code{x} is factor-valued, then the mean of \code{x} is the mean of the integer codes of the pixel values. The median and range are not defined. Any arguments in \code{...} are passed to the default method, for example \code{\link{mean.default}}. 
In particular, using the argument \code{trim} will compute the trimmed mean, as explained in the help for \code{\link{mean.default}}. Other information about an image can be obtained using \code{\link{summary.im}} or \code{\link{quantile.im}}. } \value{ A single number. } \seealso{ \code{\link{mean}}, \code{\link{median}}, \code{\link{range}}, \code{\link{sum}}, \code{\link{mean.default}}, \code{\link{median.default}}, \code{\link{range.default}}, \code{\link{quantile.im}}, \code{\link{im.object}}, \code{\link{summary.im}}. } \examples{ X <- as.im(function(x,y) {x^2}, unit.square()) mean(X) median(X) range(X) mean(X, trim=0.05) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/layout.boxes.Rd0000644000176000001440000000274612250607776015670 0ustar ripleyusers\name{layout.boxes} \alias{layout.boxes} \title{ Generate a Row or Column Arrangement of Rectangles. } \description{ A simple utility to generate a row or column of boxes (rectangles) for use in point-and-click panels. } \usage{ layout.boxes(B, n, horizontal = FALSE, aspect = 0.5, usefrac = 0.9) } \arguments{ \item{B}{ Bounding rectangle for the boxes. An object of class \code{"owin"}. } \item{n}{ Integer. The number of boxes. } \item{horizontal}{ Logical. If \code{TRUE}, arrange the boxes in a horizontal row. If \code{FALSE} (the default), arrange them in a vertical column. } \item{aspect}{ Aspect ratio (height/width) of each box. } \item{usefrac}{ Number between 0 and 1. The fraction of height or width of \code{B} that should be occupied by boxes. } } \details{ This simple utility generates a list of boxes (rectangles) inside the bounding box \code{B} arranged in a regular row or column. It is useful for generating the positions of the panel buttons in the function \code{\link{simplepanel}}. } \value{ A list of rectangles. 
} \examples{ B <- owin(c(0,10),c(0,1)) boxes <- layout.boxes(B, 5, horizontal=TRUE) plot(B, main="", col="blue") niets <- lapply(boxes, plot, add=TRUE, col="grey") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{simplepanel}} } \keyword{utilities} spatstat/man/levelset.Rd0000755000176000001440000000423012237642732015044 0ustar ripleyusers\name{levelset} \alias{levelset} \title{Level Set of a Pixel Image} \description{ Given a pixel image, find all pixels which have values less than a specified threshold value (or greater than a threshold, etc), and assemble these pixels into a window. } \usage{ levelset(X, thresh, compare="<=") } \arguments{ \item{X}{A pixel image (object of class "im")}. \item{thresh}{Threshold value. A single number or value compatible with the pixel values in \code{X}}. \item{compare}{Character string specifying one of the comparison operators \code{"<", ">", "==", "<=", ">=", "!="}. } } \details{ If \code{X} is a pixel image with numeric values, then \code{levelset(X, thresh)} finds the region of space where the pixel values are less than or equal to the threshold value \code{thresh}. This region is returned as a spatial window. The argument \code{compare} specifies how the pixel values should be compared with the threshold value. Instead of requiring pixel values to be less than or equal to \code{thresh}, you can specify that they must be less than (\code{<}), greater than (\code{>}), equal to (\code{==}), greater than or equal to (\code{>=}), or not equal to (\code{!=}) the threshold value \code{thresh}. If \code{X} has non-numeric pixel values (for example, logical or factor values) it is advisable to use only the comparisons \code{==} and \code{!=}, unless you really know what you are doing. For more complicated logical comparisons, see \code{\link{solutionset}}. 
} \value{ A spatial window (object of class \code{"owin"}, see \code{\link{owin.object}}) containing the pixels satisfying the constraint. } \seealso{ \code{\link{im.object}}, \code{\link{as.owin}}, \code{\link{solutionset}}. } \examples{ # test image X <- as.im(function(x,y) { x^2 - y^2 }, unit.square()) W <- levelset(X, 0.2) W <- levelset(X, -0.3, ">") # compute area of level set area.owin(levelset(X, 0.1)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{programming} \keyword{manip} spatstat/man/quantile.im.Rd0000755000176000001440000000222412237642733015451 0ustar ripleyusers\name{quantile.im} \alias{quantile.im} \title{Sample Quantiles of Pixel Image} \description{ Compute the sample quantiles of the pixel values of a given pixel image. } \usage{ \method{quantile}{im}(x, \dots) } \arguments{ \item{x}{ A pixel image. An object of class \code{"im"}. } \item{\dots}{ Optional arguments passed to \code{\link{quantile.default}}. They determine the probabilities for which quantiles should be computed. See \code{\link{quantile.default}}. } } \value{ A vector of quantiles. } \details{ This simple function applies the generic \code{\link{quantile}} operation to the pixel values of the image \code{x}. This function is a convenient way to inspect an image and to obtain summary statistics. See the examples. 
} \seealso{ \code{\link{quantile}}, \code{\link{cut.im}}, \code{\link{im.object}} } \examples{ # artificial image data Z <- setcov(square(1)) # find the quartiles quantile(Z) # find the deciles quantile(Z, probs=(0:10)/10) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/simdat.Rd0000755000176000001440000000141712237642734014510 0ustar ripleyusers\name{simdat} \alias{simdat} \docType{data} \title{ Simulated Point Pattern } \description{ This point pattern data set was simulated (using the Metropolis-Hastings algorithm) from a model fitted to the Numata Japanese black pine data set referred to in Baddeley and Turner (2000). } \format{ An object of class \code{"ppp"} in a square window of size 10 by 10 units. See \code{\link{ppp.object}} for details of the format of a point pattern object. } \usage{data(simdat)} \source{Rolf Turner \email{r.turner@auckland.ac.nz}} \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. } \keyword{datasets} \keyword{spatial} spatstat/man/ord.family.Rd0000755000176000001440000000340712237642733015273 0ustar ripleyusers\name{ord.family} \alias{ord.family} \title{Ord Interaction Process Family} \description{ An object describing the family of all Ord interaction point processes } \details{ \bold{Advanced Use Only!} This structure would not normally be touched by the user. It describes the family of point process models introduced by Ord (1977). If you need to create a specific Ord-type model for use in analysis, use the function \code{\link{OrdThresh}} or \code{\link{Ord}}. 
Anyway, \code{ord.family} is an object of class \code{"isf"} containing a function \code{ord.family$eval} for evaluating the sufficient statistics of any Ord type point process model taking an exponential family form. } \seealso{ \code{\link{pairwise.family}}, \code{\link{pairsat.family}}, \code{\link{Poisson}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{Softcore}}, \code{\link{Geyer}}, \code{\link{SatPiece}}, \code{\link{Saturated}}, \code{\link{Ord}}, \code{\link{OrdThresh}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/linearKinhom.Rd0000755000176000001440000000567612237642732015660 0ustar ripleyusers\name{linearKinhom} \alias{linearKinhom} \title{ Inhomogeneous Linear K Function } \description{ Computes an estimate of the inhomogeneous linear \eqn{K} function for a point pattern on a linear network. } \usage{ linearKinhom(X, lambda=NULL, r=NULL, ..., correction="Ang", normalise=TRUE) } \arguments{ \item{X}{ Point pattern on linear network (object of class \code{"lpp"}). } \item{lambda}{ Intensity values for the point pattern. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{ Optional. 
Numeric vector of values of the function argument \eqn{r}. There is a sensible default. } \item{\dots}{ Ignored. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the data points), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \details{ This command computes the inhomogeneous version of the linear \eqn{K} function from point pattern data on a linear network. If \code{lambda = NULL} the result is equivalent to the homogeneous \eqn{K} function \code{\link{linearK}}. If \code{lambda} is given, then it is expected to provide estimated values of the intensity of the point process at each point of \code{X}. The argument \code{lambda} may be a numeric vector (of length equal to the number of points in \code{X}), or a \code{function(x,y)} that will be evaluated at the points of \code{X} to yield numeric values, or a pixel image (object of class \code{"im"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). If \code{correction="none"}, the calculations do not include any correction for the geometry of the linear network. If \code{correction="Ang"}, the pair counts are weighted using Ang's correction (Ang, 2010). } \value{ Function value table (object of class \code{"fv"}). } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \references{ Ang, Q.W. (2010) Statistical methodology for spatial point patterns on a linear network. MSc thesis, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. 
} \seealso{ \code{\link{lpp}} } \examples{ data(simplenet) X <- rpoislpp(5, simplenet) fit <- lppm(X, ~x) K <- linearKinhom(X, lambda=fit) plot(K) } \keyword{spatial} \keyword{nonparametric} spatstat/man/complement.owin.Rd0000755000176000001440000000417212237642732016344 0ustar ripleyusers\name{complement.owin} \alias{complement.owin} \title{Take Complement of a Window} \description{ Take the set complement of a window, within its enclosing rectangle or in a larger rectangle. } \usage{ complement.owin(w, frame=as.rectangle(w)) } \arguments{ \item{w}{ an object of class \code{"owin"} describing a window of observation for a point pattern. } \item{frame}{ Optional. The enclosing rectangle, with respect to which the set complement is taken. } } \value{ Another object of class \code{"owin"} representing the complement of the window, i.e. the inside of the window becomes the outside. } \details{ This yields a window object (of class \code{"owin"}, see \code{\link{owin.object}}) representing the set complement of \code{w} with respect to the rectangle \code{frame}. By default, \code{frame} is the enclosing box of \code{w} (originally specified by the arguments \code{xrange} and \code{yrange} given to \code{\link{owin}} when \code{w} was created). If \code{frame} is specified, it must be a rectangle (an object of class \code{"owin"} whose type is \code{"rectangle"}) and it must be larger than the enclosing box of \code{w}. This rectangle becomes the enclosing box for the resulting window. If \code{w} is a rectangle, then \code{frame} must be specified. Otherwise an error will occur (since the complement of \code{w} in itself is empty). For rectangular and polygonal windows, the complement is computed by reversing the sign of each boundary polygon, while for binary masks it is computed by negating the pixel values. 
} \seealso{ \code{\link{owin}}, \code{\link{owin.object}} } \examples{ # rectangular a <- owin(c(0,1),c(0,1)) b <- owin(c(-1,2),c(-1,2)) bmina <- complement.owin(a, frame=b) # polygonal data(demopat) w <- demopat$window outside <- complement.owin(w) # mask w <- as.mask(demopat$window) outside <- complement.owin(w) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/plot.pp3.Rd0000755000176000001440000000157612237642733014713 0ustar ripleyusers\name{plot.pp3} \Rdversion{1.1} \alias{plot.pp3} \title{ Plot a three-dimensional point pattern } \description{ Plots a three-dimensional point pattern. } \usage{ \method{plot}{pp3}(x, ...) } \arguments{ \item{x}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Arguments passed to \code{scatterplot3d} controlling the plot. } } \details{ This is the plot method for objects of class \code{"pp3"}. This function requires the \pkg{scatterplot3d} package. The coordinates of the point pattern are passed to the function \code{scatterplot3d} along with any extra arguments \code{\dots}. } \value{ Null. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{pp3}} } \keyword{spatial} \keyword{hplot} spatstat/man/swedishpines.Rd0000755000176000001440000000244012237642734015731 0ustar ripleyusers\name{swedishpines} \alias{swedishpines} \docType{data} \title{ Swedish Pines Point Pattern } \description{ The data give the locations of pine saplings in a Swedish forest. } \format{ An object of class \code{"ppp"} representing the point pattern of tree locations in a 10 x 10 metre square. Cartesian coordinates are in decimetres (multiples of 0.1 metre). See \code{\link{ppp.object}} for details of the format of a point pattern object. 
} \usage{data(swedishpines)} \source{Strand (1972), Ripley (1981)} \section{Note}{ For previous analyses see Ripley (1981, pp. 172-175), Venables and Ripley (1997, p. 483), Baddeley and Turner (2000). } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ripley, B.D. (1981) \emph{Spatial statistics}. John Wiley and Sons. Strand, L. (1972). A model for stand growth. \emph{IUFRO Third Conference Advisory Group of Forest Statisticians}, INRA, Institut National de la Recherche Agronomique, Paris. Pages 207--216. Venables, W.N. and Ripley, B.D. (1997) \emph{Modern applied statistics with S-PLUS}. Second edition. Springer Verlag. } \keyword{datasets} \keyword{spatial} spatstat/man/trim.rectangle.Rd0000755000176000001440000000262312237642734016145 0ustar ripleyusers\name{trim.rectangle} \alias{trim.rectangle} \title{Cut margins from rectangle} \description{ Trims a margin from a rectangle. } \usage{ trim.rectangle(W, xmargin=0, ymargin=xmargin) } \arguments{ \item{W}{ A window (object of class \code{"owin"}). Must be of type \code{"rectangle"}. } \item{xmargin}{Width of horizontal margin to be trimmed. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at left and right. } \item{ymargin}{Height of vertical margin to be trimmed. A single nonnegative number, or a vector of length 2 indicating margins of unequal width at bottom and top. } } \value{ Another object of class \code{"owin"} representing the window after margins are trimmed. } \details{ This is a simple convenience function to trim off a margin of specified width and height from each side of a rectangular window. Unequal margins can also be trimmed. 
} \seealso{ \code{\link{owin.object}} } \examples{ w <- square(10) # trim a margin of width 1 from all four sides square9 <- trim.rectangle(w, 1) # trim margin of width 3 from the right side # and margin of height 4 from top edge. v <- trim.rectangle(w, c(0,3), c(0,4)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/smooth.ppp.Rd0000755000176000001440000001453312237642734015341 0ustar ripleyusers\name{smooth.ppp} \alias{smooth.ppp} \alias{Smooth.ppp} \alias{markmean} \alias{markvar} \title{Spatial smoothing of observations at irregular points} \description{ Performs spatial smoothing of numeric values observed at a set of irregular locations. Uses Gaussian kernel smoothing and least-squares cross-validated bandwidth selection. } \usage{ smooth.ppp(X, ..., weights = rep(1, npoints(X)), at="pixels") \method{Smooth}{ppp}(X, ..., weights = rep(1, npoints(X)), at="pixels") markmean(X, ...) markvar(X, ...) } \arguments{ \item{X}{A marked point pattern (object of class \code{"ppp"}).} \item{\dots}{Arguments passed to \code{\link{bw.smoothppp}} and \code{\link{density.ppp}} to control the kernel smoothing and the pixel resolution of the result.} \item{weights}{Optional weights attached to the observations.} \item{at}{ String specifying whether to compute the smoothed values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } } \details{ The function \code{smooth.ppp} or \code{Smooth.ppp} performs spatial smoothing of numeric values observed at a set of irregular locations. The functions \code{markmean} and \code{markvar} are wrappers for \code{smooth.ppp} which compute the spatially-varying mean and variance of the marks of a point pattern. \code{Smooth.ppp} is a method for the generic function \code{\link{Smooth}} for the class \code{"ppp"} of point patterns. 
Thus you can type simply \code{Smooth(X)}, but not \code{smooth(X)}. Smoothing is performed by Gaussian kernel weighting. If the observed values are \eqn{v_1,\ldots,v_n}{v[1],...,v[n]} at locations \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} respectively, then the smoothed value at a location \eqn{u} is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i}{\sum_i k(u-x_i)} }{ g(u) = (sum of k(u-x[i]) v[i])/(sum of k(u-x[i])) } where \eqn{k} is a Gaussian kernel. This is known as the Nadaraya-Watson smoother (Nadaraya, 1964, 1989; Watson, 1964). By default, the smoothing kernel bandwidth is chosen by least squares cross-validation (see below). The argument \code{X} must be a marked point pattern (object of class \code{"ppp"}, see \code{\link{ppp.object}}). The points of the pattern are taken to be the observation locations \eqn{x_i}{x[i]}, and the marks of the pattern are taken to be the numeric values \eqn{v_i}{v[i]} observed at these locations. The marks are allowed to be a data frame (in \code{Smooth.ppp}, \code{smooth.ppp} and \code{markmean}). Then the smoothing procedure is applied to each column of marks. The numerator and denominator are computed by \code{\link{density.ppp}}. The arguments \code{...} control the smoothing kernel parameters and determine whether edge correction is applied. The smoothing kernel bandwidth can be specified by either of the arguments \code{sigma} or \code{varcov} which are passed to \code{\link{density.ppp}}. If neither of these arguments is present, then by default the bandwidth is selected by least squares cross-validation, using \code{\link{bw.smoothppp}}. The optional argument \code{weights} allows numerical weights to be applied to the data. 
If a weight \eqn{w_i}{w[i]} is associated with location \eqn{x_i}{x[i]}, then the smoothed function is (ignoring edge corrections) \deqn{ g(u) = \frac{\sum_i k(u-x_i) v_i w_i}{\sum_i k(u-x_i) w_i} }{ g(u) = (sum of k(u-x[i]) v[i] w[i])/(sum of k(u-x[i]) w[i]) } An alternative to kernel smoothing is inverse-distance weighting, which is performed by \code{\link{idw}}. } \section{Very small bandwidth}{ If the chosen bandwidth \code{sigma} is very small, kernel smoothing is mathematically equivalent to nearest-neighbour interpolation; the result will be computed by \code{\link{nnmark}}. This is unless \code{at="points"} and \code{leaveoneout=FALSE}, when the original mark values are returned. } \value{ \emph{If \code{X} has a single column of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a pixel image (object of class \code{"im"}). Pixel values are values of the interpolated function. \item If \code{at="points"}, the result is a numeric vector of length equal to the number of points in \code{X}. Entries are values of the interpolated function at the points of \code{X}. } \emph{If \code{X} has a data frame of marks:} \itemize{ \item If \code{at="pixels"} (the default), the result is a named list of pixel images (object of class \code{"im"}). There is one image for each column of marks. This list also belongs to the class \code{listof}, for which there is a plot method. \item If \code{at="points"}, the result is a data frame with one row for each point of \code{X}, and one column for each column of marks. Entries are values of the interpolated function at the points of \code{X}. } The return value has attributes \code{"sigma"} and \code{"varcov"} which report the smoothing bandwidth that was used. } \seealso{ \code{\link{Smooth}}, \code{\link{density.ppp}}, \code{\link{bw.smoothppp}}, \code{\link{nnmark}}, \code{\link{ppp.object}}, \code{\link{im.object}}. See \code{\link{idw}} for inverse-distance weighted smoothing. 
To perform interpolation, see also the \code{akima} package. } \examples{ # Longleaf data - tree locations, marked by tree diameter data(longleaf) # Local smoothing of tree diameter (automatic bandwidth selection) Z <- Smooth(longleaf) # Kernel bandwidth sigma=5 plot(Smooth(longleaf, 5)) # mark variance plot(markvar(longleaf, sigma=5)) # data frame of marks: trees marked by diameter and height data(finpines) plot(Smooth(finpines, sigma=2)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \references{ Nadaraya, E.A. (1964) On estimating regression. \emph{Theory of Probability and its Applications} \bold{9}, 141--142. Nadaraya, E.A. (1989) \emph{Nonparametric estimation of probability densities and regression curves}. Kluwer, Dordrecht. Watson, G.S. (1964) Smooth regression analysis. \emph{Sankhya A} \bold{26}, 359--372. } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/quadrat.test.mppm.Rd0000644000176000001440000000744212241443112016577 0ustar ripleyusers\name{quadrat.test.mppm} \alias{quadrat.test.mppm} \title{Chi-Squared Test for Multiple Point Process Model Based on Quadrat Counts} \description{ Performs a chi-squared goodness-of-fit test of a Poisson point process model fitted to multiple point patterns. } \usage{ \method{quadrat.test}{mppm}(X, ...) } \arguments{ \item{X}{ An object of class \code{"mppm"} representing a point process model fitted to multiple point patterns. It should be a Poisson model. } \item{\dots}{ Arguments passed to \code{\link[spatstat]{quadrat.test.ppm}} which determine the size of the quadrats. } } \details{ This function performs a \eqn{\chi^2}{chi^2} test of goodness-of-fit for a Poisson point process model, based on quadrat counts. It can also be used to perform a test of Complete Spatial Randomness for a list of point patterns. 
The function \code{quadrat.test} is generic, with methods for point patterns (class \code{"ppp"}), point process models (class \code{"ppm"}) and multiple point process models (class \code{"mppm"}). For this function, the argument \code{X} should be a multiple point process model (object of class \code{"mppm"}) obtained by fitting a point process model to a list of point patterns using the function \code{\link{mppm}}. To perform the test, the data point patterns are extracted from \code{X}. For each point pattern \itemize{ \item the window of observation is divided into rectangular tiles, and the number of data points in each tile is counted, as described in \code{\link[spatstat]{quadratcount}}. \item The expected number of points in each quadrat is calculated, as determined by the fitted model. } Then we perform a single \eqn{\chi^2}{chi^2} test of goodness-of-fit based on these observed and expected counts. } \section{Testing Complete Spatial Randomness}{ If the intention is to test Complete Spatial Randomness (CSR) there are two options: \itemize{ \item CSR with the same intensity of points in each point pattern; \item CSR with a different, unrelated intensity of points in each point pattern. } In the first case, suppose \code{P} is a list of point patterns we want to test. Then fit the multiple model \code{fit1 <- mppm(P, ~1)} which signifies a Poisson point process model with a constant intensity. Then apply \code{quadrat.test(fit1)}. In the second case, fit the model \code{fit2 <- mppm(P, ~id)} which signifies a Poisson point process with a different constant intensity for each point pattern. Then apply \code{quadrat.test(fit2)}. } \value{ An object of class \code{"htest"}. Printing the object gives comprehensible output about the outcome of the test. The \eqn{p}-value of the test is stored in the component \code{p.value}. The return value also belongs to the special class \code{"quadrat.test"}. 
Plotting the object will display, for each window, the position of the quadrats, annotated by their observed and expected counts and the Pearson residuals. See the examples. The return value also has an attribute \code{"components"} which is a list containing the results of \eqn{\chi^2}{chi^2} tests of goodness-of-fit for each individual point pattern. } \seealso{ \code{\link{mppm}}, \code{\link[spatstat]{quadrat.test}} } \examples{ data(waterstriders) H <- hyperframe(X=waterstriders) # Poisson with constant intensity for all patterns fit1 <- mppm(X~1, H) quadrat.test(fit1, nx=2) # uniform Poisson with different intensity for each pattern fit2 <- mppm(X ~ id, H) quadrat.test(fit2, nx=2) } \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \keyword{spatial} \keyword{htest} spatstat/man/as.fv.Rd0000644000176000001440000000605012237642732014235 0ustar ripleyusers\name{as.fv} %DontDeclareMethods \alias{as.fv} \alias{as.fv.fv} \alias{as.fv.fasp} \alias{as.fv.data.frame} \alias{as.fv.matrix} \alias{as.fv.minconfit} \alias{as.fv.kppm} \alias{as.fv.bw.optim} \title{Convert Data To Class fv} \description{ Converts data into a function table (an object of class \code{"fv"}). } \usage{ as.fv(x) \method{as.fv}{fv}(x) \method{as.fv}{data.frame}(x) \method{as.fv}{matrix}(x) \method{as.fv}{fasp}(x) \method{as.fv}{minconfit}(x) \method{as.fv}{kppm}(x) \method{as.fv}{bw.optim}(x) } \arguments{ \item{x}{Data which will be converted into a function table} } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This command converts data \code{x}, that could be interpreted as the values of a function, into a function value table (object of the class \code{"fv"} as described in \code{\link{fv.object}}). This object can then be plotted easily using \code{\link{plot.fv}}. 
The dataset \code{x} may be any of the following: \itemize{ \item an object of class \code{"fv"}; \item a matrix or data frame with at least two columns; \item an object of class \code{"fasp"}, representing an array of \code{"fv"} objects. \item an object of class \code{"minconfit"}, giving the results of a minimum contrast fit by the command \code{\link{mincontrast}}. \item an object of class \code{"kppm"}, representing a fitted Cox or cluster point process model, obtained from the model-fitting command \code{\link{kppm}}; \item an object of class \code{"bw.optim"}, representing an optimal choice of smoothing bandwidth by a cross-validation method, obtained from commands like \code{\link{bw.diggle}}. } The function \code{as.fv} is generic, with methods for each of the classes listed above. The behaviour is as follows: \itemize{ \item If \code{x} is an object of class \code{"fv"}, it is returned unchanged. \item If \code{x} is a matrix or data frame, the first column is interpreted as the function argument, and subsequent columns are interpreted as values of the function computed by different methods. \item If \code{x} is an object of class \code{"fasp"} representing an array of \code{"fv"} objects, these are combined into a single \code{"fv"} object. \item If \code{x} is an object of class \code{"minconfit"}, or an object of class \code{"kppm"} that was fitted by minimum contrast, the result is a function table containing the observed summary function and the best fit summary function. \item If \code{x} is an object of class \code{"bw.optim"}, the result is a function table of the optimisation criterion as a function of the smoothing bandwidth. 
} } \examples{ r <- seq(0, 1, length=101) x <- data.frame(r=r, y=r^2) as.fv(x) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/varblock.Rd0000755000176000001440000001041012237642734015023 0ustar ripleyusers\name{varblock} \alias{varblock} \title{ Estimate Variance of Summary Statistic by Subdivision } \description{ This command estimates the variance of any summary statistic (such as the \eqn{K}-function) by spatial subdivision of a single point pattern dataset. } \usage{ varblock(X, fun = Kest, blocks = quadrats(X, nx = nx, ny = ny), ..., nx = 3, ny = nx) } \arguments{ \item{X}{ Point pattern dataset (object of class \code{"ppp"}). } \item{fun}{ Function that computes the summary statistic. } \item{blocks}{ Optional. A tessellation that specifies the division of the space into blocks. } \item{\dots}{ Arguments passed to \code{fun}. } \item{nx,ny}{ Optional. Number of rectangular blocks in the \eqn{x} and \eqn{y} directions. Incompatible with \code{blocks}. } } \details{ This command computes an estimate of the variance of the summary statistic \code{fun(X)} from a single point pattern dataset \code{X} using a subdivision method. It can be used to plot \bold{confidence intervals} for the true value of a summary function such as the \eqn{K}-function. The window containing \code{X} is divided into pieces by an \code{nx * ny} array of rectangles (or is divided into pieces of more general shape, according to the argument \code{blocks} if it is present). The summary statistic \code{fun} is applied to each of the corresponding sub-patterns of \code{X} as described below. Then the pointwise sample mean, sample variance and sample standard deviation of these summary statistics are computed. The two-standard-deviation confidence intervals are computed. The variance is estimated by equation (4.21) of Diggle (2003, page 52). 
This assumes that the point pattern \code{X} is stationary. For further details see Diggle (2003, pp 52--53). The estimate of the summary statistic from each block is computed as follows. For most functions \code{fun}, the estimate from block \code{B} is computed by finding the subset of \code{X} consisting of points that fall inside \code{B}, and applying \code{fun} to these points, by calling \code{fun(X[B])}. However if \code{fun} is the \eqn{K}-function \code{\link{Kest}}, or any function which has an argument called \code{domain}, the estimate for each block \code{B} is computed by calling \code{fun(X, domain=B)}. In the case of the \eqn{K}-function this means that the estimate from block \code{B} is computed by counting pairs of points in which the \emph{first} point lies in \code{B}, while the second point may lie anywhere. } \section{Errors}{ If the blocks are too small, there may be insufficient data in some blocks, and the function \code{fun} may report an error. If this happens, you need to take larger blocks. An error message about incompatibility may occur. The different function estimates may be incompatible in some cases, for example, because they use different default edge corrections (typically because the tiles of the tessellation are not the same kind of geometric object as the window of \code{X}, or because the default edge correction depends on the number of points). To prevent this, specify the choice of edge correction, in the \code{correction} argument to \code{fun}, if it has one. An alternative to \code{varblock} is Loh's mark bootstrap \code{\link{lohboot}}. } \value{ A function value table (object of class \code{"fv"}) that contains the result of \code{fun(X)} as well as the sample mean, sample variance and sample standard deviation of the block estimates, together with the upper and lower two-standard-deviation confidence limits. } \references{ Diggle, P.J. (2003) \emph{Statistical analysis of spatial point patterns}, Second edition. 
Arnold. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{tess}}, \code{\link{quadrats}} for basic manipulation. \code{\link{lohboot}} for an alternative bootstrap technique. } \examples{ v <- varblock(amacrine, Kest, nx=4, ny=2) v <- varblock(amacrine, Kcross, nx=4, ny=2) if(interactive()) plot(v, iso ~ r, shade=c("hiiso", "loiso")) } \keyword{nonparametric} \keyword{spatial} spatstat/man/longleaf.Rd0000755000176000001440000000340312237642733015012 0ustar ripleyusers\name{longleaf} \alias{longleaf} \docType{data} \title{ Longleaf Pines Point Pattern } \description{ Locations and sizes of Longleaf pine trees. A marked point pattern. The data record the locations and diameters of 584 Longleaf pine (\emph{Pinus palustris}) trees in a 200 x 200 metre region in southern Georgia (USA). They were collected and analysed by Platt, Evans and Rathbun (1988). This is a marked point pattern; the mark associated with a tree is its diameter at breast height (\code{dbh}), a convenient measure of its size. Several analyses have considered only the ``adult'' trees which are conventionally defined as those trees with \code{dbh} greater than or equal to 30 cm. The pattern is regarded as spatially inhomogeneous. } \format{ An object of class \code{"ppp"} representing the point pattern of tree locations. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of tree \cr \code{y} \tab Cartesian \eqn{y}-coordinate of tree \cr \code{marks} \tab diameter at breast height, in centimetres. } See \code{\link{ppp.object}} for details of the format of a point pattern object. } \usage{data(longleaf)} \examples{ data(longleaf) plot(longleaf) plot(cut(longleaf, breaks=c(0,30,Inf), labels=c("Sapling","Adult"))) } \source{Platt, Evans and Rathbun (1988)} \references{ Platt, W. J., Evans, G. W. and Rathbun, S. L. 
(1988) The population dynamics of a long-lived Conifer (Pinus palustris). \emph{The American Naturalist} \bold{131}, 491--525. Rathbun, S. L. and Cressie, N. (1994) A space-time survival point process for a longleaf pine forest in southern Georgia. \emph{Journal of the American Statistical Association} \bold{89}, 1164--1173. } \keyword{datasets} \keyword{spatial} spatstat/man/reflect.Rd0000644000176000001440000000245012237642733014645 0ustar ripleyusers\name{reflect} %DontDeclareMethods \alias{reflect} \alias{reflect.im} \alias{reflect.default} \title{Reflect In Origin} \description{ Reflects a geometrical object through the origin. } \usage{ reflect(X) \method{reflect}{im}(X) \method{reflect}{default}(X) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), or a window (object of class \code{"owin"}).} } \value{ Another object of the same type, representing the result of reflection. } \details{ The object \code{X} is reflected through the origin. That is, each point in \code{X} with coordinates \eqn{(x,y)} is mapped to the position \eqn{(-x, -y)}. This is equivalent to applying the affine transformation with matrix \code{diag(c(-1,-1))}. It is also equivalent to rotation about the origin by 180 degrees. The command \code{reflect} is generic, with a method for pixel images and a default method. } \seealso{ \code{\link{affine}}, \code{\link{flipxy}} } \examples{ plot(reflect(as.im(letterR))) plot(reflect(letterR), add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/shift.ppp.Rd0000755000176000001440000000400212237642734015133 0ustar ripleyusers\name{shift.ppp} \alias{shift.ppp} \title{Apply Vector Translation To Point Pattern} \description{ Applies a vector shift to a point pattern. 
} \usage{ \method{shift}{ppp}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \value{ Another point pattern (of class \code{"ppp"}) representing the result of applying the vector shift. } \details{ The point pattern, and its window, are translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. If \code{origin} is given, then it should be one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}. The argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the origin. If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin. 
} \seealso{ \code{\link{shift}}, \code{\link{shift.owin}}, \code{\link{periodify}}, \code{\link{rotate}}, \code{\link{affine}} } \examples{ data(cells) X <- shift(cells, c(2,3)) \dontrun{ plot(X) # no discernible difference except coordinates are different } plot(cells, pch=16) plot(shift(cells, c(0.03,0.03)), add=TRUE) shift(cells, origin="mid") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/exactMPLEstrauss.Rd0000644000176000001440000001044612237642732016433 0ustar ripleyusers\name{exactMPLEstrauss} \alias{exactMPLEstrauss} \title{ Exact Maximum Pseudolikelihood Estimate for Stationary Strauss Process } \description{ Computes, to very high accuracy, the Maximum Pseudolikelihood Estimates of the parameters of a stationary Strauss point process. } \usage{ exactMPLEstrauss(X, R, ngrid = 2048, plotit = FALSE, project=TRUE) } \arguments{ \item{X}{ Data to which the Strauss process will be fitted. A point pattern dataset (object of class \code{"ppp"}). } \item{R}{ Interaction radius of the Strauss process. A non-negative number. } \item{ngrid}{ Grid size for calculation of integrals. An integer, giving the number of grid points in the \eqn{x} and \eqn{y} directions. } \item{plotit}{ Logical. If \code{TRUE}, the log pseudolikelihood is plotted on the current device. } \item{project}{ Logical. If \code{TRUE} (the default), the parameter \eqn{\gamma}{gamma} is constrained to lie in the interval \eqn{[0,1]}. If \code{FALSE}, this constraint is not applied. } } \details{ This function is intended mainly for technical investigation of algorithm performance. Its practical use is quite limited. It fits the stationary Strauss point process model to the point pattern dataset \code{X} by maximum pseudolikelihood (with the border edge correction) using an algorithm with very high accuracy. 
This algorithm is more accurate than the \emph{default} behaviour of the model-fitting function \code{\link{ppm}} because the discretisation is much finer. Ripley (1988) and Baddeley and Turner (2000) derived the log pseudolikelihood for the stationary Strauss process, and eliminated the parameter \eqn{\beta}{beta}, obtaining an exact formula for the partial log pseudolikelihood as a function of the interaction parameter \eqn{\gamma}{gamma} only. The algorithm evaluates this expression to a high degree of accuracy, using numerical integration on a \code{ngrid * ngrid} lattice, uses \code{\link[stats]{optim}} to maximise the log pseudolikelihood with respect to \eqn{\gamma}{gamma}, and finally recovers \eqn{\beta}{beta}. The result is a vector of length 2, containing the fitted coefficients \eqn{\log\beta}{log(beta)} and \eqn{\log\gamma}{log(gamma)}. These values correspond to the entries that would be obtained with \code{coef(ppm(X, ~1, Strauss(R)))}. The fitted coefficients are typically accurate to within \eqn{10^{-6}}{10^(-6)} as shown in Baddeley and Turner (2013). Note however that (by default) \code{exactMPLEstrauss} constrains the parameter \eqn{\gamma}{gamma} to lie in the interval \eqn{[0,1]} in which the point process is well defined (Kelly and Ripley, 1976) whereas \code{\link{ppm}} does not constrain the value of \eqn{\gamma}{gamma} (by default). This behaviour is controlled by the argument \code{project} to \code{\link{ppm}} and \code{exactMPLEstrauss}. The default for \code{\link{ppm}} is \code{project=FALSE}, while the default for \code{exactMPLEstrauss} is \code{project=TRUE}. } \value{ Vector of length 2. } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A. and Turner, R. (2013) Bias correction for parameter estimates of spatial point process models. 
\emph{Journal of Statistical Computation and Simulation} \bold{2012}. {doi: 10.1080/00949655.2012.755976} Kelly, F.P. and Ripley, B.D. (1976) On Strauss's model for clustering. \emph{Biometrika} \bold{63}, 357--360. Ripley, B.D. (1988) \emph{Statistical inference for spatial processes}. Cambridge University Press. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{ppm}} } \examples{ \testonly{ exactMPLEstrauss(cells, 0.1, ngrid=128) exactMPLEstrauss(cells, 0.1, ngrid=128, project=FALSE) } if(interactive()) { exactMPLEstrauss(cells, 0.1) coef(ppm(cells, ~1, Strauss(0.1))) coef(ppm(cells, ~1, Strauss(0.1), nd=128)) exactMPLEstrauss(redwood, 0.04) exactMPLEstrauss(redwood, 0.04, project=FALSE) coef(ppm(redwood, ~1, Strauss(0.04))) } } \keyword{spatial} \keyword{models} spatstat/man/heather.Rd0000755000176000001440000000732012237642732014644 0ustar ripleyusers\name{heather} \alias{heather} \docType{data} \title{Diggle's Heather Data} \description{ The spatial mosaic of vegetation of the heather plant (\emph{Calluna vulgaris}) recorded in a 10 by 20 metre sampling plot in Sweden. } \format{ A list with three entries, representing the same data at different spatial resolutions: \tabular{ll}{ \code{coarse} \tab original heather data, 100 by 200 pixels \cr \code{medium} \tab current heather data, 256 by 512 pixels \cr \code{fine} \tab finest resolution data, 778 by 1570 pixels } Each of these entries is an object of class \code{"owin"} containing a binary pixel mask. } \usage{data(heather)} \source{Peter Diggle} \section{Notes on data}{ These data record the spatial mosaic of vegetation of the heather plant (\emph{Calluna vulgaris}) in a 10 by 20 metre sampling plot near Jadraas, Sweden. They were recorded and first analysed by Diggle(1981). 
The dataset \code{heather} contains three different versions of the data that have been analysed by different writers over the decades. \describe{ \item{coarse:}{ Data as originally digitised by Diggle in 1983 at 100 by 200 pixels resolution (i.e. 10 pixels = 1 metre). These data were entered by hand in the form of a run-length encoding (original file no longer available) and translated by a program into a 100 by 200 pixel binary image. There are known to be some errors in the image which arise from errors in counting the run-length so that occasionally there will be an unexpected 'spike' on one single column. } \item{fine:}{ A fine scale digitisation of the original map, prepared by CWI (Centre for Computer Science, Amsterdam, Netherlands) in 1994. The original hand-drawn map was scanned by Adrian Baddeley, and processed by Chris Jonker, Henk Heijmans and Adrian Baddeley to yield a clean binary image of 778 by 1570 pixels resolution. } \item{medium:}{ The version of the heather data currently supplied on Professor Diggle's website. This is a 256 by 512 pixel image. The method used to create this image is not stated. } } } \section{History of analysis of data}{ The data were recorded, presented and analysed by Diggle (1983). He proposed a Boolean model consisting of discs of random size with centres generated by of a Poisson point process. Renshaw and Ford (1983) reported that spectral analysis of the data suggested the presence of strong row and column effects. However, this may have been attributable to errors in the run-length encoding of the original data. Hall (1985) and Hall (1988, pp 301-318) took a bootstrap approach. Ripley (1988, pp. 121-122, 131-135] used opening and closing functions to argue that a Boolean model of discs is inappropriate. Cressie (1991, pp. 763-770) tried a more general Boolean model. } \references{ Cressie, N.A.C. (1991) \emph{Statistics for Spatial Data}. John Wiley and Sons, New York. Diggle, P.J. 
(1981) Binary mosaics and the spatial pattern of heather. \emph{Biometrics} \bold{37}, 531-539. Hall, P. (1985) Resampling a coverage pattern. \emph{Stochastic Processes and their Applications} \bold{20} 231-246. Hall, P. (1988) \emph{An introduction to the theory of coverage processes}. John Wiley and Sons, New York. Renshaw, E. and Ford, E.D. (1983) The interpretation of process from pattern using two-dimensional spectral analysis: Methods and problems of interpretation. \emph{Applied Statistics} \bold{32} 51-63. Ripley, B.D. (1988) \emph{Statistical Inference for Spatial Processes}. Cambridge University Press. } \keyword{datasets} \keyword{spatial} spatstat/man/rGaussPoisson.Rd0000755000176000001440000000371512237642734016051 0ustar ripleyusers\name{rGaussPoisson} \alias{rGaussPoisson} \title{Simulate Gauss-Poisson Process} \description{ Generate a random point pattern, a simulated realisation of the Gauss-Poisson Process. } \usage{ rGaussPoisson(kappa, r, p2, win = owin(c(0,1),c(0,1))) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{r}{ Diameter of each cluster that consists of exactly 2 points. } \item{p2}{ Probability that a cluster contains exactly 2 points. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). Additionally, some intermediate results of the simulation are returned as attributes of this point pattern. See \code{\link{rNeymanScott}}. } \details{ This algorithm generates a realisation of the Gauss-Poisson point process inside the window \code{win}. The process is constructed by first generating a Poisson point process of parent points with intensity \code{kappa}. 
Then each parent point is either retained (with probability \code{1 - p2}) or replaced by a pair of points at a fixed distance \code{r} apart (with probability \code{p2}). In the case of clusters of 2 points, the line joining the two points has uniform random orientation. In this implementation, parent points are not restricted to lie in the window; the parent process is effectively the uniform Poisson process on the infinite plane. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rMatClust}}, \code{\link{rNeymanScott}} } \examples{ pp <- rGaussPoisson(30, 0.07, 0.5) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/gridcentres.Rd0000755000176000001440000000374612237642732015545 0ustar ripleyusers\name{gridcentres} \alias{gridcentres} \alias{gridcenters} \title{Rectangular grid of points} \description{ Generates a rectangular grid of points in a window } \usage{ gridcentres(window, nx, ny) } \arguments{ \item{window}{A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of points in each row of the rectangular grid. } \item{ny}{Number of points in each column of the rectangular grid. } } \value{ A list with two components \code{x} and \code{y}, which are numeric vectors giving the coordinates of the points of the rectangular grid. } \details{ This function creates a rectangular grid of points in the window. The bounding rectangle of the \code{window} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. The function returns the \eqn{x,y} coordinates of the centres of these tiles. Note that some of these grid points may lie outside the window, if \code{window} is not of type \code{"rectangle"}. 
The function \code{\link{inside.owin}} can be used to select those grid points which do lie inside the window. See the examples. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) and for other miscellaneous purposes. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{inside.owin}}, \code{\link{stratrand}} } \examples{ w <- unit.square() xy <- gridcentres(w, 10,15) \dontrun{ plot(w) points(xy) } bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2), y=c(0.1,0.1,0.5,0.7,0.3)) w <- owin(c(0,1), c(0,1), poly=bdry) xy <- gridcentres(w, 30, 30) ok <- inside.owin(xy$x, xy$y, w) \dontrun{ plot(w) points(xy$x[ok], xy$y[ok]) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/slrm.Rd0000755000176000001440000001535212237642734014207 0ustar ripleyusers\name{slrm} \alias{slrm} \title{Spatial Logistic Regression} \description{ Fits a spatial logistic regression model to a spatial point pattern. } \usage{ slrm(formula, ..., data = NULL, offset = TRUE, link = "logit", dataAtPoints=NULL, splitby=NULL) } \arguments{ \item{formula}{The model formula. See Details.} \item{\dots}{ Optional arguments passed to \code{\link[spatstat]{pixellate}} determining the pixel resolution for the discretisation of the point pattern. } \item{data}{ Optional. A list containing data required in the formula. The names of entries in the list should correspond to variable names in the formula. The entries should be point patterns, pixel images or windows. } \item{offset}{ Logical flag indicating whether the model formula should be augmented by an offset equal to the logarithm of the pixel area. } \item{link}{The link function for the regression model. A character string, specifying a link function for binary regression. } \item{dataAtPoints}{Optional. 
Exact values of the covariates at the data points. A data frame, with column names corresponding to variables in the \code{formula}, with one row for each point in the point pattern dataset. } \item{splitby}{ Optional. Character string identifying a window. The window will be used to split pixels into sub-pixels. } } \details{ This function fits a Spatial Logistic Regression model (Tukey, 1972; Agterberg, 1974) to a spatial point pattern dataset. The logistic function may be replaced by another link function. The \code{formula} specifies the form of the model to be fitted, and the data to which it should be fitted. The \code{formula} must be an \R formula with a left and right hand side. The left hand side of the \code{formula} is the name of the point pattern dataset, an object of class \code{"ppp"}. The right hand side of the \code{formula} is an expression, in the usual \R formula syntax, representing the functional form of the linear predictor for the model. Each variable name that appears in the formula may be \itemize{ \item one of the reserved names \code{x} and \code{y}, referring to the Cartesian coordinates; \item the name of an entry in the list \code{data}, if this argument is given; \item the name of an object in the parent environment, that is, in the environment where the call to \code{slrm} was issued. } Each object appearing on the right hand side of the formula may be \itemize{ \item a pixel image (object of class \code{"im"}) containing the values of a covariate; \item a window (object of class \code{"owin"}), which will be interpreted as a logical covariate which is \code{TRUE} inside the window and \code{FALSE} outside it; \item a \code{function} in the \R language, with arguments \code{x,y}, which can be evaluated at any location to obtain the values of a covariate. } See the Examples below. The fitting algorithm discretises the point pattern onto a pixel grid. 
The value in each pixel is 1 if there are any points of the point pattern in the pixel, and 0 if there are no points in the pixel. The dimensions of the pixel grid will be determined as follows: \itemize{ \item The pixel grid will be determined by the extra arguments \code{\dots} if they are specified (for example the argument \code{dimyx} can be used to specify the number of pixels). \item Otherwise, if the right hand side of the \code{formula} includes the names of any pixel images containing covariate values, these images will determine the pixel grid for the discretisation. The covariate image with the finest grid (the smallest pixels) will be used. \item Otherwise, the default pixel grid size is given by \code{spatstat.options("npixel")}. } If \code{link="logit"} (the default), the algorithm fits a Spatial Logistic Regression model. This model states that the probability \eqn{p} that a given pixel contains a data point, is related to the covariates through \deqn{\log\frac{p}{1-p} = \eta}{log(p/(1-p)) = eta} where \eqn{\eta}{eta} is the linear predictor of the model (a linear combination of the covariates, whose form is specified by the \code{formula}). If \code{link="cloglog"} then the algorithm fits a model stating that \deqn{\log(-\log(1-p)) = \eta}{log(-log(1-p)) = eta}. If \code{offset=TRUE} (the default), the model formula will be augmented by adding an offset term equal to the logarithm of the pixel area. This ensures that the fitted parameters are approximately independent of pixel size. If \code{offset=FALSE}, the offset is not included, and the traditional form of Spatial Logistic Regression is fitted. } \value{ An object of class \code{"slrm"} representing the fitted model. There are many methods for this class, including methods for \code{print}, \code{fitted}, \code{predict}, \code{anova}, \code{coef}, \code{logLik}, \code{terms}, \code{update}, \code{formula} and \code{vcov}. Automated stepwise model selection is possible using \code{\link{step}}. 
Confidence intervals for the parameters can be computed using \code{\link[stats]{confint}}. } \seealso{ \code{\link{anova.slrm}}, \code{\link{coef.slrm}}, \code{\link{fitted.slrm}}, \code{\link{logLik.slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{vcov.slrm}} } \references{ Agterberg, F.P. (1974) Automatic contouring of geological maps to detect target areas for mineral exploration. \emph{Journal of the International Association for Mathematical Geology} \bold{6}, 373--395. Baddeley, A., Berman, M., Fisher, N.I., Hardegen, A., Milne, R.K., Schuhmacher, D., Shah, R. and Turner, R. (2010) Spatial logistic regression and change-of-support for spatial Poisson point processes. \emph{Electronic Journal of Statistics} \bold{4}, 1151--1201. {doi: 10.1214/10-EJS581} Tukey, J.W. (1972) Discussion of paper by F.P. Agterberg and S.C. Robinson. \emph{Bulletin of the International Statistical Institute} \bold{44} (1) p. 596. Proceedings, 38th Congress, International Statistical Institute. } \examples{ X <- copper$SouthPoints slrm(X ~ 1) slrm(X ~ x+y) slrm(X ~ x+y, link="cloglog") # specify a grid of 2-km-square pixels slrm(X ~ 1, eps=2) Y <- copper$SouthLines Z <- distmap(Y) slrm(X ~ Z) slrm(X ~ Z, dataAtPoints=list(Z=nncross(X,Y,what="dist"))) dat <- list(A=X, V=Z) slrm(A ~ V, data=dat) } \author{Adrian Baddeley \email{adrian@maths.uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/concatxy.Rd0000755000176000001440000000244612237642732015060 0ustar ripleyusers\name{concatxy} \alias{concatxy} \title{Concatenate x,y Coordinate Vectors} \description{ Concatenate any number of pairs of \code{x} and \code{y} coordinate vectors. } \usage{ concatxy(\dots) } \arguments{ \item{\dots}{ Any number of arguments, each of which is a structure containing elements \code{x} and \code{y}. 
} } \value{ A list with two components \code{x} and \code{y}, which are the concatenations of all the corresponding \code{x} and \code{y} vectors in the argument list. } \details{ This function can be used to superimpose two or more point patterns of unmarked points (but see also \code{\link{superimpose}} which is recommended). It assumes that each of the arguments in \code{\dots} is a structure containing (at least) the elements \code{x} and \code{y}. It concatenates all the \code{x} elements into a vector \code{x}, and similarly for \code{y}, and returns these concatenated vectors. } \seealso{ \code{\link{superimpose}}, \code{\link{quadscheme}} } \examples{ dat <- runifrect(30) xy <- list(x=runif(10),y=runif(10)) new <- concatxy(dat, xy) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/is.multitype.ppm.Rd0000755000176000001440000000463712237642732016475 0ustar ripleyusers\name{is.multitype.ppm} \alias{is.multitype.ppm} \alias{is.multitype.lppm} \title{Test Whether A Point Process Model is Multitype} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points that classify the points into several types. } \usage{ \method{is.multitype}{ppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. Alternatively a model of class \code{"lppm"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a multitype point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. 
For example the \code{\link{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a multitype point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. whether the fitted model ignores the marks in the data). Currently we have not implemented a test for this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link{is.multitype}}, \code{\link{is.multitype.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.multitype(fit1) # TRUE fit2 <- ppm(X, ~ 1, Poisson()) is.multitype(fit2) # TRUE # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.multitype(fit3) # FALSE } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/vcov.ppm.Rd0000755000176000001440000002140712237642734015000 0ustar ripleyusers\name{vcov.ppm} \alias{vcov.ppm} \title{Variance-Covariance Matrix for a Fitted Point Process Model} \description{ Returns the variance-covariance matrix of the estimates of the parameters of a fitted point process model. 
} \usage{ \method{vcov}{ppm}(object, \dots, what = "vcov", verbose = TRUE, gam.action=c("warn", "fatal", "silent"), matrix.action=c("warn", "fatal", "silent"), logi.action=c("warn", "fatal", "silent"), hessian=FALSE) } \arguments{ \item{object}{A fitted point process model (an object of class \code{"ppm"}.)} \item{\dots}{Ignored.} \item{what}{Character string (partially-matched) that specifies what matrix is returned. Options are \code{"vcov"} for the variance-covariance matrix, \code{"corr"} for the correlation matrix, and \code{"fisher"} or \code{"Fisher"} for the Fisher information matrix. } \item{verbose}{Logical. If \code{TRUE}, a message will be printed if various minor problems are encountered. } \item{gam.action}{String indicating what to do if \code{object} was fitted by \code{gam}. } \item{matrix.action}{String indicating what to do if the matrix is ill-conditioned (so that its inverse cannot be calculated). } \item{logi.action}{String indicating what to do if \code{object} was fitted via the logistic regression approximation using a non-standard dummy point process. } \item{hessian}{ Logical. Use the negative Hessian matrix of the log pseudolikelihood instead of the Fisher information. } } \details{ This function computes the asymptotic variance-covariance matrix of the estimates of the canonical parameters in the point process model \code{object}. It is a method for the generic function \code{\link{vcov}}. \code{object} should be an object of class \code{"ppm"}, typically produced by \code{\link{ppm}}. The canonical parameters of the fitted model \code{object} are the quantities returned by \code{coef.ppm(object)}. The function \code{vcov} calculates the variance-covariance matrix for these parameters. 
The argument \code{what} provides three options: \describe{ \item{\code{what="vcov"}}{ return the variance-covariance matrix of the parameter estimates } \item{\code{what="corr"}}{ return the correlation matrix of the parameter estimates } \item{\code{what="fisher"}}{ return the observed Fisher information matrix. } } In all three cases, the result is a square matrix. The rows and columns of the matrix correspond to the canonical parameters given by \code{\link{coef.ppm}(object)}. The row and column names of the matrix are also identical to the names in \code{\link{coef.ppm}(object)}. For models fitted by the Berman-Turner approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the maximum pseudolikelihood (using the default \code{method="mpl"} in the call to \code{\link{ppm}}), the implementation works as follows. \itemize{ \item If the fitted model \code{object} is a Poisson process, the calculations are based on standard asymptotic theory for the maximum likelihood estimator (Kutoyants, 1998). The observed Fisher information matrix of the fitted model \code{object} is first computed, by summing over the Berman-Turner quadrature points in the fitted model. The asymptotic variance-covariance matrix is calculated as the inverse of the observed Fisher information. The correlation matrix is then obtained by normalising. \item If the fitted model is not a Poisson process (i.e. it is some other Gibbs point process) then the calculations are based on Coeurjolly and Rubak (2012). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. } For models fitted by the Huang-Ogata method (\code{method="ho"} in the call to \code{\link{ppm}}), the implementation uses the Monte Carlo estimate of the Fisher information matrix that was computed when the original model was fitted. 
For models fitted by the logistic regression approximation to the maximum pseudolikelihood (\code{method="logi"} in the call to \code{\link{ppm}}), calculations are based on (Baddeley et al., 2013). A consistent estimator of the variance-covariance matrix is computed by summing terms over all pairs of data points. If required, the Fisher information is calculated as the inverse of the variance-covariance matrix. In this case the calculations depend on the type of dummy pattern used, and currently only the types \code{"stratrand"}, \code{"binomial"} and \code{"poisson"} as generated by \code{\link{quadscheme.logi}} are implemented. For other types the behavior depends on the argument \code{logi.action}. If \code{logi.action="fatal"} an error is produced. Otherwise, for types \code{"grid"} and \code{"transgrid"} the formulas for \code{"stratrand"} are used which in many cases should be conservative. For an arbitrary user specified dummy pattern (type \code{"given"}) the formulas for \code{"poisson"} are used which in many cases should be conservative. If \code{logi.action="warn"} a warning is issued otherwise the calculation proceeds without a warning. The argument \code{verbose} makes it possible to suppress some diagnostic messages. The asymptotic theory is not correct if the model was fitted using \code{gam} (by calling \code{\link{ppm}} with \code{use.gam=TRUE}). The argument \code{gam.action} determines what to do in this case. If \code{gam.action="fatal"}, an error is generated. If \code{gam.action="warn"}, a warning is issued and the calculation proceeds using the incorrect theory for the parametric case, which is probably a reasonable approximation in many applications. If \code{gam.action="silent"}, the calculation proceeds without a warning. If \code{hessian=TRUE} then the negative Hessian (second derivative) matrix of the log pseudolikelihood, and its inverse, will be computed. 
For non-Poisson models, this is not a valid estimate of variance, but is useful for other calculations. Note that standard errors and 95\% confidence intervals for the coefficients can also be obtained using \code{confint(object)} or \code{coef(summary(object))}. } \section{Error messages}{ An error message that reports \emph{system is computationally singular} indicates that the determinant of the Fisher information matrix was either too large or too small for reliable numerical calculation. This can occur because of numerical overflow or collinearity in the covariates. To check this, rescale the coordinates of the data points and refit the model. See the Examples. In a Gibbs model, a singular matrix may also occur if the fitted model is a hard core process: this is a feature of the variance estimator. } \value{ A square matrix. } \examples{ X <- rpoispp(42) fit <- ppm(X, ~ x + y) vcov(fit) vcov(fit, what="Fish") # example of singular system data(demopat) m <- ppm(demopat, ~polynom(x,y,2)) \dontrun{ try(v <- vcov(m)) } # rescale x, y coordinates to range [0,1] x [0,1] approximately demopat <- rescale(demopat, 10000) m <- ppm(demopat, ~polynom(x,y,2)) v <- vcov(m) # Gibbs example fitS <- ppm(swedishpines, ~1, Strauss(9)) coef(fitS) sqrt(diag(vcov(fitS))) } \author{ Original code for Poisson point process was written by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz}. New code for stationary Gibbs point processes was generously contributed by Ege Rubak and Jean-Francois Coeurjolly. New code for generic Gibbs process written by Adrian Baddeley. New code for logistic method contributed by Ege Rubak. } \seealso{ \code{\link{vcov}} for the generic, \code{\link{ppm}} for information about fitted models, \code{\link[stats]{confint}} for confidence intervals. } \references{ Baddeley, A., Coeurjolly, J.-F., Rubak, E. and Waagepetersen, R. 
(2013) \emph{A logistic regression estimating function for spatial Gibbs point processes.} Research Report, Centre for Stochastic Geometry and Bioimaging, Denmark. \url{www.csgb.dk} Coeurjolly, J.-F. and Rubak, E. (2012) \emph{Fast covariance estimation for innovations computed from a spatial Gibbs point process}. Research Report, Centre for Stochastic Geometry and Bioimaging, Denmark. \url{www.csgb.dk} Kutoyants, Y.A. (1998) \bold{Statistical Inference for Spatial Poisson Processes}, Lecture Notes in Statistics 134. New York: Springer 1998. } \keyword{spatial} \keyword{methods} \keyword{models} spatstat/man/kppm.Rd0000755000176000001440000002277112237642732014202 0ustar ripleyusers\name{kppm} \alias{kppm} \title{Fit Cluster or Cox Point Process Model} \description{ Fit a homogeneous or inhomogeneous cluster process or Cox point process model to a point pattern. } \usage{ kppm(X, trend = ~1, clusters = c("Thomas","MatClust","Cauchy","VarGamma","LGCP"), covariates = NULL, ..., method = c("mincon", "clik"), weightfun=NULL, control=list(), statistic="K", statargs=list(), rmax = NULL, covfunargs=NULL, use.gam=FALSE, nd=NULL, eps=NULL) } \arguments{ \item{X}{ Point pattern dataset (object of class \code{"ppp"}) to which the model should be fitted. See Details. } \item{trend}{ An \R formula, with no left hand side, specifying the form of the log intensity. } \item{clusters}{ Character string determining the cluster model. Partially matched. Options are \code{"Thomas"}, \code{"MatClust"}, \code{"Cauchy"}, \code{"VarGamma"} and \code{"LGCP"}. } \item{covariates}{ The values of any spatial covariates (other than the Cartesian coordinates) required by the model. A named list of pixel images, functions, windows or numeric constants. } \item{\dots}{ Additional arguments. See Details. } \item{method}{ The fitting method. Either \code{"mincon"} for minimum contrast, or \code{"clik"} for composite likelihood. Partially matched. 
} \item{weightfun}{ Optional weighting function \eqn{w} in the composite likelihood. A \code{function} in the \R language. See Details. } \item{control}{ List of control parameters passed to the optimization algorithm \code{\link[stats]{optim}}. } \item{statistic}{ Name of the summary statistic to be used for minimum contrast estimation: either \code{"K"} or \code{"pcf"}. } \item{statargs}{ Optional list of arguments to be used when calculating the \code{statistic}. See Details. } \item{rmax}{ Maximum value of interpoint distance to use in the composite likelihood. } \item{covfunargs,use.gam,nd,eps}{ Arguments passed to \code{\link{ppm}} when fitting the intensity. } } \details{ This function fits a clustered point process model to the point pattern dataset \code{X}. The model may be either a \emph{Poisson cluster process} or another \emph{Cox process}. The type of model is determined by the argument \code{clusters}. Currently the options are \code{clusters="Thomas"} for the Thomas process, \code{clusters="MatClust"} for the Matern cluster process, \code{clusters="Cauchy"} for the Neyman-Scott cluster process with Cauchy kernel, \code{clusters="VarGamma"} for the Neyman-Scott cluster process with Variance Gamma kernel, and \code{clusters="LGCP"} for the log-Gaussian Cox process. The first four models are Poisson cluster processes. The algorithm first estimates the intensity function of the point process using \code{\link{ppm}}. The argument \code{X} may be a point pattern (object of class \code{"ppp"}) or a quadrature scheme (object of class \code{"quad"}). The intensity is specified by the \code{trend} argument. If the trend formula is \code{~1} (the default) then the model is \emph{homogeneous}. The algorithm begins by estimating the intensity as the number of points divided by the area of the window. Otherwise, the model is \emph{inhomogeneous}. 
The algorithm begins by fitting a Poisson process with log intensity of the form specified by the formula \code{trend}. (See \code{\link{ppm}} for further explanation). The clustering parameters of the model are then fitted either by minimum contrast estimation, or by maximum composite likelihood. \describe{ \item{Minimum contrast:}{ If \code{method = "mincon"} (the default) clustering parameters of the model will be fitted by minimum contrast estimation, that is, by matching the theoretical \eqn{K}-function of the model to the empirical \eqn{K}-function of the data, as explained in \code{\link{mincontrast}}. For a homogeneous model (\code{ trend = ~1 }) the empirical \eqn{K}-function of the data is computed using \code{\link{Kest}}, and the parameters of the cluster model are estimated by the method of minimum contrast. For an inhomogeneous model, the inhomogeneous \eqn{K} function is estimated by \code{\link{Kinhom}} using the fitted intensity. Then the parameters of the cluster model are estimated by the method of minimum contrast using the inhomogeneous \eqn{K} function. This two-step estimation procedure is due to Waagepetersen (2007). If \code{statistic="pcf"} then instead of using the \eqn{K}-function, the algorithm will use the pair correlation function \code{\link{pcf}} for homogeneous models and the inhomogeneous pair correlation function \code{\link{pcfinhom}} for inhomogeneous models. In this case, the smoothing parameters of the pair correlation can be controlled using the argument \code{statargs}, as shown in the Examples. Additional arguments \code{\dots} will be passed to the appropriate fitting function \code{\link{thomas.estK}} or \code{\link{thomas.estpcf}} or \code{\link{matclust.estK}} or \code{\link{matclust.estpcf}} or \code{\link{lgcp.estK}} or \code{\link{lgcp.estpcf}} or \code{\link{cauchy.estK}} or \code{\link{cauchy.estpcf}} or \code{\link{vargamma.estK}} or \code{\link{vargamma.estpcf}} to control the minimum contrast fitting algorithm. 
} \item{Composite likelihood:}{ If \code{method = "clik"} the clustering parameters of the model will be fitted by maximising the second-order composite likelihood (Guan, 2006). The log composite likelihood is \deqn{ \sum_{i,j} w(d_{ij}) \log\rho(d_{ij}; \theta) - \left( \sum_{i,j} w(d_{ij}) \right) \log \int_D \int_D w(\|u-v\|) \rho(\|u-v\|; \theta)\, du\, dv }{ sum[i,j] w(d[i,j]) log(rho(d[i,j]; theta)) - (sum[i,j] w(d[i,j])) log(integral[D,D] w(||u-v||) rho(||u-v||; theta) du dv) } where the sums are taken over all pairs of data points \eqn{x_i, x_j}{x[i], x[j]} separated by a distance \eqn{d_{ij} = \| x_i - x_j\|}{d[i,j] = ||x[i] - x[j]||} less than \code{rmax}, and the double integral is taken over all pairs of locations \eqn{u,v} in the spatial window of the data. Here \eqn{\rho(d;\theta)}{rho(d;theta)} is the pair correlation function of the model with cluster parameters \eqn{\theta}{theta}. The function \eqn{w} in the composite likelihood is a weighting function and may be chosen arbitrarily. It is specified by the argument \code{weightfun}. If this is missing or \code{NULL} then \eqn{w} is taken to equal 1. } } In both methods, the optimisation is performed by the generic optimisation algorithm \code{\link[stats]{optim}}. The behaviour of this algorithm can be modified using the argument \code{control}. Useful control arguments include \code{trace}, \code{maxit} and \code{abstol} (documented in the help for \code{\link[stats]{optim}}). } \value{ An object of class \code{"kppm"} representing the fitted model. There are methods for printing, plotting, predicting, simulating and updating objects of this class. } \seealso{ methods for \code{kppm} objects: \code{\link{plot.kppm}}, \code{\link{fitted.kppm}}, \code{\link{predict.kppm}}, \code{\link{simulate.kppm}}, \code{\link{update.kppm}}, \code{\link{vcov.kppm}}, \code{\link[spatstat:methods.kppm]{methods.kppm}}, \code{\link{as.ppm.kppm}}, \code{\link{Kmodel.kppm}}, \code{\link{pcfmodel.kppm}}. 
Minimum contrast fitting algorithms: \code{\link{thomas.estK}}, \code{\link{matclust.estK}}, \code{\link{lgcp.estK}}, \code{\link{cauchy.estK}}, \code{\link{vargamma.estK}}, \code{\link{thomas.estpcf}}, \code{\link{matclust.estpcf}}, \code{\link{lgcp.estpcf}}, \code{\link{cauchy.estpcf}}, \code{\link{vargamma.estpcf}}, \code{\link{mincontrast}}. Summary statistics: \code{\link{Kest}}, \code{\link{Kinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}}. See also \code{\link{ppm}} } \references{ Guan, Y. (2006) A composite likelihood approach in fitting spatial point process models. \emph{Journal of the American Statistical Association} \bold{101}, 1502--1512. Jalilian, A., Guan, Y. and Waagepetersen, R. (2012) Decomposition of variance for spatial Cox processes. \emph{Scandinavian Journal of Statistics}, in press. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \examples{ data(redwood) kppm(redwood, ~1, "Thomas") kppm(redwood, ~1, "Thomas", method="c") kppm(redwood, ~x, "MatClust") kppm(redwood, ~x, "MatClust", statistic="pcf", statargs=list(stoyan=0.2)) kppm(redwood, ~1, "LGCP", statistic="pcf") kppm(redwood, ~x, cluster="Cauchy", statistic="K") kppm(redwood, cluster="VarGamma", nu.ker = 0.5, statistic="pcf") if(require(RandomFields)) { kppm(redwood, ~x, "LGCP", statistic="pcf", covmodel=list(model="matern", nu=0.3), control=list(maxit=10)) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, and Rolf Turner \email{r.turner@auckland.ac.nz} with contributions from Abdollah Jalilian and Rasmus Waagepetersen. } \keyword{spatial} \keyword{models} spatstat/man/transect.im.Rd0000644000176000001440000000366312237642734015460 0ustar ripleyusers\name{transect.im} \alias{transect.im} \title{ Pixel Values Along a Transect } \description{ Extract the pixel values of a pixel image at each point along a linear transect. 
} \usage{ transect.im(X, ..., from="bottomleft", to="topright", click=FALSE, add=FALSE) } \arguments{ \item{X}{ A pixel image (object of class \code{"im"}). } \item{\dots}{ Ignored. } \item{from,to}{ Optional. Start point and end point of the transect. Pairs of \eqn{(x,y)} coordinates in a format acceptable to \code{\link{xy.coords}}, or keywords \code{"bottom"}, \code{"left"}, \code{"top"}, \code{"right"}, \code{"bottomleft"} etc. } \item{click}{ Optional. Logical value. If \code{TRUE}, the linear transect is determined interactively by the user, who clicks two points on the current plot. } \item{add}{ Logical. If \code{click=TRUE}, this argument determines whether to perform interactive tasks on the current plot (\code{add=TRUE}) or to start by plotting \code{X} (\code{add=FALSE}). } } \details{ The pixel values of the image \code{X} along a line segment will be extracted. The result is a function table (\code{"fv"} object) which can be plotted directly. If \code{click=TRUE}, then the user is prompted to click two points on the plot of \code{X}. These endpoints define the transect. Otherwise, the transect is defined by the endpoints \code{from} and \code{to}. The default is a diagonal transect from bottom left to top right of the frame. } \value{ An object of class \code{"fv"} which can be plotted. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{im}} } \examples{ Z <- density(redwood) plot(transect.im(Z)) \dontrun{ plot(transect.im(Z, click=TRUE)) } } \keyword{spatial} \keyword{manip} \keyword{iplot} spatstat/man/rDGS.Rd0000644000176000001440000000600212237642733014015 0ustar ripleyusers\name{rDGS} \alias{rDGS} \title{Perfect Simulation of the Diggle-Gates-Stibbard Process} \description{ Generate a random pattern of points, a simulated realisation of the Diggle-Gates-Stibbard process, using a perfect simulation algorithm. 
} \usage{ rDGS(beta, rho, W = owin()) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{rho}{ interaction range (a non-negative number). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. } } \details{ This function generates a realisation of the Diggle-Gates-Stibbard point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. Diggle, Gates and Stibbard (1987) proposed a pairwise interaction point process in which each pair of points separated by a distance \eqn{d} contributes a factor \eqn{e(d)} to the probability density, where \deqn{ e(d) = \sin^2\left(\frac{\pi d}{2\rho}\right) }{ e(d) = sin^2((pi * d)/(2 * rho)) } for \eqn{d < \rho}{d < rho}, and \eqn{e(d)} is equal to 1 for \eqn{d \ge \rho}{d >= rho}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ A point pattern (object of class \code{"ppp"}). } \references{ Berthelsen, K.K. and Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Diggle, P.J., Gates, D.J., and Stibbard, A. 
(1987)
A nonparametric estimator for pairwise-interaction point processes.
\emph{Biometrika} \bold{74}, 763 -- 770.
} \examples{ L <- infline(p=1:3, theta=pi/4) W <- square(4) clip.infline(L, W) } \keyword{spatial} \keyword{math} spatstat/man/methods.slrm.Rd0000644000176000001440000000343512237642733015644 0ustar ripleyusers\name{methods.slrm} \alias{methods.slrm} %DoNotExport \alias{formula.slrm} \alias{update.slrm} \alias{print.slrm} \alias{terms.slrm} \alias{labels.slrm} \title{ Methods for Spatial Logistic Regression Models } \description{ These are methods for the class \code{"slrm"}. } \usage{ \method{formula}{slrm}(x, \dots) \method{print}{slrm}(x, ...) \method{terms}{slrm}(x, \dots) \method{labels}{slrm}(object, \dots) \method{update}{slrm}(object, ..., evaluate = TRUE, env = parent.frame()) } \arguments{ \item{x,object}{ An object of class \code{"slrm"}, representing a fitted spatial logistic regression model. } \item{\dots}{ Arguments passed to other methods. } \item{evaluate}{ Logical value. If \code{TRUE}, evaluate the updated call to \code{slrm}, so that the model is refitted; if \code{FALSE}, simply return the updated call. } \item{env}{ Optional environment in which the model should be updated. } } \details{ These functions are methods for the generic commands \code{\link{formula}}, \code{\link{update}}, \code{\link{print}}, \code{\link{terms}} and \code{\link{labels}} for the class \code{"slrm"}. An object of class \code{"slrm"} represents a fitted spatial logistic regression model. It is obtained from \code{\link{slrm}}. } \value{ See the help files for the corresponding generic functions. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{slrm}}, \code{\link{plot.slrm}}, \code{\link{predict.slrm}}, \code{\link{simulate.slrm}}, \code{\link{vcov.slrm}}, \code{\link{coef.slrm}}. 
} \examples{ data(redwood) fit <- slrm(redwood ~ x) coef(fit) formula(fit) tf <- terms(fit) labels(fit) } \keyword{spatial} \keyword{methods} spatstat/man/marktable.Rd0000755000176000001440000000370712237642733015174 0ustar ripleyusers\name{marktable} \alias{marktable} \title{Tabulate Marks in Neighbourhood of Every Point in a Point Pattern} \description{ Visit each point in a point pattern, find the neighbouring points, and compile a frequency table of the marks of these neighbour points. } \usage{ marktable(X, R, exclude=TRUE) } \arguments{ \item{X}{ A marked point pattern. An object of class \code{"ppp"}. } \item{R}{ Neighbourhood radius. } \item{exclude}{ Logical. If \code{exclude=TRUE}, the neighbours of a point do not include the point itself. If \code{exclude=FALSE}, a point belongs to its own neighbourhood. } } \value{ A contingency table (object of class \code{"table"}) with one row for each point in \code{X}, and one column for each possible mark value. } \details{ This algorithm visits each point in the point pattern \code{X}, inspects all the neighbouring points within a radius \code{R} of the current point, and compiles a frequency table of the marks attached to the neighbours. The dataset \code{X} must be a multitype point pattern, that is, \code{marks(X)} must be a \code{factor}. The result is a two-dimensional contingency table with one row for each point in the pattern, and one column for each possible mark value. The \code{[i,j]} entry in the table gives the number of neighbours of point \code{i} that have mark \code{j}. To perform more complicated calculations on the neighbours of every point, use \code{\link{markstat}} or \code{\link{applynbd}}. 
} \seealso{ \code{\link{markstat}}, \code{\link{applynbd}}, \code{\link{Kcross}}, \code{\link{ppp.object}}, \code{\link{table}} } \examples{ data(amacrine) head(marktable(amacrine, 0.1)) head(marktable(amacrine, 0.1, exclude=FALSE)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{programming} spatstat/man/harmonise.im.Rd0000644000176000001440000000353412237642732015615 0ustar ripleyusers\name{harmonise.im} \alias{harmonise.im} \alias{harmonize.im} \title{Make Pixel Images Compatible} \description{ Convert several pixel images to a common pixel raster. } \usage{ harmonise.im(\dots) harmonize.im(\dots) } \arguments{ \item{\dots}{ Any number of pixel images (objects of class \code{"im"}) or data which can be converted to pixel images by \code{\link{as.im}}. } } \details{ This function makes any number of pixel images compatible, by converting them all to a common pixel grid. At least one of the arguments \code{\dots} must be a pixel image. Some arguments may be windows (objects of class \code{"owin"}), functions (\code{function(x,y)}) or numerical constants. These will be converted to images using \code{\link{as.im}}. The common pixel grid is determined by inspecting all the pixel images in the argument list, computing the bounding box of all the images, then finding the image with the highest spatial resolution, and extending its pixel grid to cover the bounding box. The return value is a list with entries corresponding to the input arguments. If the arguments were named (\code{name=value}) then the return value also carries these names. If you just want to determine the appropriate pixel resolution, without converting the images, use \code{\link{commonGrid}}. } \value{ A list, of length equal to the number of arguments \code{\dots}, whose entries are pixel images. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \examples{ A <- setcov(square(1)) B <- function(x,y) { x } G <- density(runifpoint(42)) harmonise.im(X=A, Y=B, Z=G) } \seealso{ \code{\link{commonGrid}}, \code{\link{compatible.im}}, \code{\link{as.im}} } \keyword{spatial} \keyword{manip} spatstat/man/subfits.Rd0000644000176000001440000000460412241443112014664 0ustar ripleyusers\name{subfits} \alias{subfits} \alias{subfits.new} \alias{subfits.old} \title{Extract List of Individual Point Process Models} \description{ Takes a Gibbs point process model that has been fitted to several point patterns simultaneously, and produces a list of fitted point process models for the individual point patterns. } \usage{ subfits(object, what="models", verbose=FALSE) subfits.old(object, what="models", verbose=FALSE) subfits.new(object, what="models", verbose=FALSE) } \arguments{ \item{object}{ An object of class \code{"mppm"} representing a point process model fitted to several point patterns. } \item{what}{ What should be returned. Either \code{"models"} to return the fitted models, or \code{"interactions"} to return the fitted interactions only. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ \code{object} is assumed to have been generated by \code{\link{mppm}}. It represents a point process model that has been fitted to a list of several point patterns, with covariate data. For each of the \emph{individual} point pattern datasets, this function derives the corresponding fitted model for that dataset only (i.e. a point process model for the \eqn{i}th point pattern, that is consistent with \code{object}). If \code{what="models"}, the result is a list of point process models (a list of objects of class \code{"ppm"}), one model for each point pattern dataset in the original fit. 
If \code{what="interactions"}, the result is a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). Two different algorithms are provided, as \code{subfits.old} and \code{subfits.new}. Currently \code{subfits} is the same as the old algorithm \code{subfits.old} because the newer algorithm is too memory-hungry. } \value{ A list of point process models (a list of objects of class \code{"ppm"}) or a list of fitted interpoint interactions (a list of objects of class \code{"fii"}). } \examples{ data(waterstriders) fit <- mppm(Wat~x, data=hyperframe(Wat=waterstriders)) subfits(fit) } \author{Adrian Baddeley \email{adrian.baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{rolf@math.unb.ca} \url{http://www.math.unb.ca/~rolf} } \seealso{ \code{\link[spatstat]{ppm}} } \keyword{spatial} \keyword{models} spatstat/man/as.ppp.Rd0000755000176000001440000001206112237642732014423 0ustar ripleyusers\name{as.ppp} \alias{as.ppp} \alias{as.ppp.ppp} \alias{as.ppp.psp} \alias{as.ppp.quad} \alias{as.ppp.matrix} \alias{as.ppp.data.frame} \alias{as.ppp.influence.ppm} \alias{as.ppp.default} \title{Convert Data To Class ppp} \description{ Tries to coerce any reasonable kind of data to a point pattern (an object of class \code{"ppp"}) for use by the \pkg{spatstat} package). } \usage{ as.ppp(X, \dots, fatal=TRUE) \method{as.ppp}{ppp}(X, \dots, fatal=TRUE) \method{as.ppp}{psp}(X, \dots, fatal=TRUE) \method{as.ppp}{quad}(X, \dots, fatal=TRUE) \method{as.ppp}{matrix}(X, W=NULL, \dots, fatal=TRUE) \method{as.ppp}{data.frame}(X, W=NULL, \dots, fatal=TRUE) \method{as.ppp}{influence.ppm}(X, \dots) \method{as.ppp}{default}(X, W=NULL, \dots, fatal=TRUE) } \arguments{ \item{X}{Data which will be converted into a point pattern} \item{W}{Data which define a window for the pattern when \code{X} does not contain a window} \item{\dots}{Ignored.} \item{fatal}{Logical value. 
See Details.} } \value{ An object of class \code{"ppp"} (see \code{\link{ppp.object}}) describing the point pattern and its window of observation. The value \code{NULL} may also be returned; see Details. } \details{ Converts the dataset \code{X} to a point pattern (an object of class \code{"ppp"}; see \code{\link{ppp.object}} for an overview). This function is normally used to convert an existing point pattern dataset, stored in another format, to the \code{"ppp"} format. To create a new point pattern from raw data such as \eqn{x,y} coordinates, it is normally easier to use the creator function \code{\link{ppp}}. The dataset \code{X} may be: \itemize{ \item an object of class \code{"ppp"} \item an object of class \code{"psp"} \item an object of class \code{"spp"} as defined in the \pkg{spatial} library \item an object of class \code{"quad"} representing a quadrature scheme (see \code{\link{quad.object}}) \item a matrix or data frame with at least two columns \item a structure with entries \code{x}, \code{y} which are numeric vectors of equal length \item a numeric vector of length 2, interpreted as the coordinates of a single point. } In the last three cases, we need the second argument \code{W} which is converted to a window object by the function \code{\link{as.owin}}. In the first four cases, \code{W} will be ignored. If \code{X} is a line segment pattern (an object of class \code{psp}) the point pattern returned consists of the endpoints of the segments. If \code{X} is marked then the point pattern returned will also be marked, the mark associated with a point being the mark of the segment of which that point was an endpoint. If \code{X} is a matrix or data frame, the first and second columns will be interpreted as the \eqn{x} and \eqn{y} coordinates respectively. Any additional columns will be interpreted as marks. The argument \code{fatal} indicates what to do when \code{W} is missing and \code{X} contains no information about the window. 
If \code{fatal=TRUE}, a fatal error will be generated; if \code{fatal=FALSE}, the value \code{NULL} is returned. An \code{spp} object is the representation of a point pattern in the \pkg{spatial} library. Our implementation recognises the following formats: \itemize{ \item a structure with entries \code{x}, \code{y} \code{xl}, \code{xu}, \code{yl}, \code{yu} \item a structure with entries \code{x}, \code{y} and \code{area}, where \code{area} is a structure with entries \code{xl}, \code{xu}, \code{yl}, \code{yu} } (used in \pkg{spatial} versions 1 to 6 and version 7.1 respectively) where \code{x} and \code{y} are vectors of equal length giving the point coordinates, and \code{xl}, \code{xu}, \code{yl}, \code{yu} are numbers giving the dimensions of a rectangular window. The function \code{as.ppp} is generic, with methods for the classes \code{"ppp"}, \code{"psp"}, \code{"quad"}, \code{"matrix"}, \code{"data.frame"} and a default method. Point pattern datasets can also be created by the function \code{\link{ppp}}. } \seealso{ \code{\link{ppp}}, \code{\link{ppp.object}}, \code{\link{as.owin}}, \code{\link{owin.object}} } \section{Warnings}{ If the format of \code{spp} objects is changed in future versions of the \pkg{spatial} library, then \code{as.ppp} may not be able to interpret them. It currently handles all versions of \pkg{spatial} up to 7.1-4. 
} \examples{ xy <- matrix(runif(40), ncol=2) pp <- as.ppp(xy, c(0,1,0,1)) # Venables-Ripley format # check for 'spatial' package spatialpath <- system.file(package="spatial") if(nchar(spatialpath) > 0) { require(spatial) towns <- ppinit("towns.dat") pp <- as.ppp(towns) # converted to our format detach(package:spatial) } xyzt <- matrix(runif(40), ncol=4) Z <- as.ppp(xyzt, square(1)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/lohboot.Rd0000644000176000001440000000756112237642733014677 0ustar ripleyusers\name{lohboot} \alias{lohboot} \title{Bootstrap Confidence Bands for Summary Function} \description{ Computes a bootstrap confidence band for a summary function of a point process. } \usage{ lohboot(X, fun=c("pcf", "Kest", "Lest", "pcfinhom", "Kinhom", "Linhom"), \dots, nsim=200, confidence=0.95, global=FALSE, type=7) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{fun}{ Name of the summary function to be computed: one of the strings \code{"pcf"}, \code{"Kest"}, \code{"Lest"}, \code{"pcfinhom"}, \code{"Kinhom"} or \code{"Linhom"}. } \item{\dots}{ Arguments passed to the corresponding local version of the summary function (see Details). } \item{nsim}{ Number of bootstrap simulations. } \item{confidence}{ Confidence level, as a fraction between 0 and 1. } \item{global}{ Logical. If \code{FALSE} (the default), pointwise confidence intervals are constructed. If \code{TRUE}, a global (simultaneous) confidence band is constructed. } \item{type}{ Integer. Argument passed to \code{\link[stats]{quantile}} controlling the way the quantiles are calculated. 
} } \value{ A function value table (object of class \code{"fv"}) containing columns giving the estimate of the summary function, the upper and lower limits of the bootstrap confidence interval, and the theoretical value of the summary function for a Poisson process. } \details{ This algorithm computes confidence bands for the true value of the summary statistic \code{fun} using the bootstrap method of Loh (2008). If \code{fun="pcf"}, for example, the algorithm computes a pointwise \code{(100 * confidence)}\% confidence interval for the true value of the pair correlation function \code{\link{pcf}} for the point process. It starts by computing the array of \emph{local} pair correlation functions, \code{\link{localpcf}}, of the data pattern \code{X}. This array consists of the contributions to \code{\link{pcf}} from each data point. Then these contributions are resampled \code{nsim} times with replacement; from each resampled dataset the total contribution is computed, yielding \code{nsim} random pair correlation functions. The pointwise \code{alpha/2} and \code{1 - alpha/2} quantiles of these functions are computed, where \code{alpha = 1 - confidence}. To control the smoothing and estimation algorithm, use the arguments \code{\dots}, which are passed to the local version of the summary function, as shown below: \tabular{ll}{ \bold{fun} \tab \bold{local version} \cr \code{\link{pcf}} \tab \code{\link{localpcf}} \cr \code{\link{Kest}} \tab \code{\link{localK}} \cr \code{\link{Lest}} \tab \code{\link{localK}} \cr \code{\link{pcfinhom}} \tab \code{\link{localpcfinhom}} \cr \code{\link{Kinhom}} \tab \code{\link{localKinhom}} \cr \code{\link{Linhom}} \tab \code{\link{localKinhom}} } For \code{fun="Lest"}, the calculations are first performed as if \code{fun="Kest"}, and then the square-root transformation is applied to obtain the \eqn{L}-function. An alternative to \code{lohboot} is \code{\link{varblock}}. } \references{ Loh, J.M. 
(2008) A valid and fast spatial bootstrap for correlation functions. \emph{The Astrophysical Journal}, \bold{681}, 726--734. } \seealso{ Summary functions \code{\link{Kest}}, \code{\link{pcf}}, \code{\link{Kinhom}}, \code{\link{pcfinhom}}, \code{\link{localK}}, \code{\link{localpcf}}, \code{\link{localKinhom}}, \code{\link{localpcfinhom}}. See \code{\link{varblock}} for an alternative bootstrap technique. } \examples{ p <- lohboot(simdat, stoyan=0.5) plot(p) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/markcorrint.Rd0000644000176000001440000001050512237642733015554 0ustar ripleyusers\name{markcorrint} \alias{markcorrint} \title{Mark Correlation Integral} \description{ Estimates the mark correlation integral of a marked point pattern. } \usage{ markcorrint(X, f = NULL, r = NULL, correction = c("isotropic", "Ripley", "translate"), ..., f1 = NULL, normalise = TRUE, returnL = FALSE, fargs = NULL) } \arguments{ \item{X}{The observed point pattern. An object of class \code{"ppp"} or something acceptable to \code{\link{as.ppp}}. } \item{f}{Optional. Test function \eqn{f} used in the definition of the mark correlation function. An \R function with at least two arguments. There is a sensible default. } \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the mark correlation function \eqn{k_f(r)}{k[f](r)} should be evaluated. There is a sensible default. } \item{correction}{ A character vector containing any selection of the options \code{"isotropic"}, \code{"Ripley"} or \code{"translate"}. It specifies the edge correction(s) to be applied. } \item{\dots}{ Ignored. } \item{f1}{ An alternative to \code{f}. If this argument is given, then \eqn{f} is assumed to take the form \eqn{f(u,v)=f_1(u)f_1(v)}{f(u,v)=f1(u) * f1(v)}. 
} \item{normalise}{ If \code{normalise=FALSE}, compute only the numerator of the expression for the mark correlation. } \item{returnL}{ Compute the analogue of the K-function if \code{returnL=FALSE} or the analogue of the L-function if \code{returnL=TRUE}. } \item{fargs}{ Optional. A list of extra arguments to be passed to the function \code{f} or \code{f1}. } } \details{ Given a marked point pattern \code{X}, this command estimates the weighted indefinite integral \deqn{ K_f(r) = 2 \pi \int_0^r s k_f(s) ds }{ K[f](r) = 2 * pi * integral[0,r] (s * k[f](s)) ds } of the mark correlation function \eqn{k_f(r)}{k[f](r)}. See \code{\link{markcorr}} for a definition of the mark correlation function. The use of the weighted indefinite integral was advocated by Penttinen et al (1992). The relationship between \eqn{K_f}{K[f]} and \eqn{k_f}{k[f]} is analogous to the relationship between the classical K-function \eqn{K(r)} and the pair correlation function \eqn{g(r)}. If \code{returnL=FALSE} then the function \eqn{K_f(r)}{K[f](r)} is returned; otherwise the function \deqn{ L_f(r) = \sqrt{K_f(r)/pi} }{ L[f](r) = sqrt(K[f](r)/pi) } is returned. } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the mark correlation integral \eqn{K_f(r)}{K[f](r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_f(r)}{K[f](r)} when the marks attached to different points are independent, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the mark correlation integral \eqn{K_f(r)}{K[f](r)} obtained by the edge corrections named (if \code{returnL=FALSE}). } \references{ Penttinen, A., Stoyan, D. and Henttonen, H. M. (1992) Marked point processes in forest statistics. \emph{Forest Science} \bold{38} (1992) 806-824. 
Illian, J., Penttinen, A., Stoyan, H. and Stoyan, D. (2008) \emph{Statistical analysis and modelling of spatial point patterns}. Chichester: John Wiley. } \seealso{ \code{\link{markcorr}} to estimate the mark correlation function. } \examples{ # CONTINUOUS-VALUED MARKS: # (1) Spruces # marks represent tree diameter data(spruces) # mark correlation function ms <- markcorrint(spruces) plot(ms) # (2) simulated data with independent marks X <- rpoispp(100) X <- X \%mark\% runif(X$n) Xc <- markcorrint(X) plot(Xc) # MULTITYPE DATA: # Hughes' amacrine data # Cells marked as 'on'/'off' data(amacrine) M <- markcorrint(amacrine, function(m1,m2) {m1==m2}, correction="translate") plot(M) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/psp.Rd0000755000176000001440000000732612237642733014035 0ustar ripleyusers\name{psp} \alias{psp} \title{Create a Line Segment Pattern} \description{ Creates an object of class \code{"psp"} representing a line segment pattern in the two-dimensional plane. } \usage{ psp(x0,y0, x1, y1, window, marks=NULL, check=spatstat.options("checksegments")) } \arguments{ \item{x0}{Vector of \eqn{x} coordinates of first endpoint of each segment} \item{y0}{Vector of \eqn{y} coordinates of first endpoint of each segment} \item{x1}{Vector of \eqn{x} coordinates of second endpoint of each segment} \item{y1}{Vector of \eqn{y} coordinates of second endpoint of each segment} \item{window}{window of observation, an object of class \code{"owin"}} \item{marks}{(optional) vector or data frame of mark values} \item{check}{Logical value indicating whether to check that the line segments lie inside the window.} } \value{ An object of class \code{"psp"} describing a line segment pattern in the two-dimensional plane (see \code{\link{psp.object}}). 
} \details{ In the \pkg{spatstat} library, a spatial pattern of line segments is described by an object of class \code{"psp"}. This function creates such objects. The vectors \code{x0}, \code{y0}, \code{x1} and \code{y1} must be numeric vectors of equal length. They are interpreted as the cartesian coordinates of the endpoints of the line segments. A line segment pattern is assumed to have been observed within a specific region of the plane called the observation window. An object of class \code{"psp"} representing a point pattern contains information specifying the observation window. This window must always be specified when creating a point pattern dataset; there is intentionally no default action of ``guessing'' the window dimensions from the data points alone. The argument \code{window} must be an object of class \code{"owin"}. It is a full description of the window geometry, and could have been obtained from \code{\link{owin}} or \code{\link{as.owin}}, or by just extracting the observation window of another dataset, or by manipulating such windows. See \code{\link{owin}} or the Examples below. The optional argument \code{marks} is given if the line segment pattern is marked, i.e. if each line segment carries additional information. For example, line segments which are classified into two or more different types, or colours, may be regarded as having a mark which identifies which colour they are. The object \code{marks} must be a vector of the same length as \code{x0}, or a data frame with number of rows equal to the length of \code{x0}. The interpretation is that \code{marks[i]} or \code{marks[i,]} is the mark attached to the \eqn{i}th line segment. If the marks are real numbers then \code{marks} should be a numeric vector, while if the marks takes only a finite number of possible values (e.g. colours or types) then \code{marks} should be a \code{factor}. See \code{\link{psp.object}} for a description of the class \code{"psp"}. 
Users would normally invoke \code{psp} to create a line segment pattern, and the function \code{\link{as.psp}} to convert data in another format into a line segment pattern. } \seealso{ \code{\link{psp.object}}, \code{\link{as.psp}}, \code{\link{owin.object}}, \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{marks.psp}} } \examples{ X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) m <- data.frame(A=1:10, B=letters[1:10]) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin(), marks=m) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/paracou.Rd0000755000176000001440000000376112237642733014664 0ustar ripleyusers\name{paracou} \alias{paracou} \docType{data} \title{ Kimboto trees at Paracou, French Guiana } \description{ This dataset is a point pattern of adult and juvenile Kimboto trees (\emph{Pradosia cochlearia} or \emph{P. ptychandra}) recorded at Paracou in French Guiana. See Flores (2005). The dataset \code{paracou} is a point pattern (object of class \code{"ppp"}) containing the spatial coordinates of each tree, marked by age (a factor with levels \code{adult} and \code{juvenile}. The survey region is a rectangle approximately 400 by 525 metres. Coordinates are given in metres. Note that the data contain duplicated points (two points at the same location). To determine which points are duplicates, use \code{\link{duplicated.ppp}}. To remove the duplication, use \code{\link{unique.ppp}}. } \usage{data(paracou)} \examples{ plot(paracou, cols=2:3, chars=c(16,3)) } \source{ Data kindly contributed by Olivier Flores. All data belong to CIRAD \url{http://www.cirad.fr} and UMR EcoFoG \url{http://www.ecofog.gf} and are included in \pkg{spatstat} with permission. 
Original data sources: juvenile and some adult trees collected by Flores (2005); adult tree data sourced from CIRAD Paracou experimental plots dataset (2003 campaign). } \references{ Flores, O. (2005) \emph{\ifelse{latex}{\out{D{\'e}terminisme de la r{\'e}g{\'e}n{\'e}ration chez quinze esp{\`e}ces d'arbres tropicaux en for{\^e}t guyanaise: les effets de l'environnement et de la limitation par la dispersion.}}{ Determinisme de la regeneration chez quinze espces d'arbres tropicaux en foret guyanaise: les effets de l'environnement et de la limitation par la dispersion.}} PhD Thesis, University of Montpellier 2, Montpellier, France. Picard, N, Bar-Hen, A., Mortier, F. and Chadoeuf, J. (2009) The multi-scale marked area-interaction point process: a model for the spatial pattern of trees. \emph{Scandinavian Journal of Statistics} \bold{36} 23--41 } \keyword{datasets} \keyword{spatial} spatstat/man/linearKcross.inhom.Rd0000644000176000001440000001056012237642732016771 0ustar ripleyusers\name{linearKcross.inhom} \alias{linearKcross.inhom} \title{ Inhomogeneous multitype K Function (Cross-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype \eqn{K} function which counts the expected number of points of type \eqn{j} within a given distance of a point of type \eqn{i}. } \usage{ linearKcross.inhom(X, i, j, lambdaI, lambdaJ, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross type \eqn{K} function \eqn{K_{ij}(r)}{Kij(r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. 
} \item{j}{Number or character string identifying the type (mark value) of the points in \code{X} to which distances are measured. Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdaJ}{ Intensity values for the points of type \code{j}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the \eqn{K}-function \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{lambdaI} and \code{lambdaJ} if they are functions. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link[spatstat]{Kcross.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The arguments \code{i} and \code{j} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} and \code{j} are missing, they default to the first and second level of the marks factor, respectively. 
The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{ij}(r)}{Kij(r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The arguments \code{i} and \code{j} are interpreted as levels of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearKdot}}, \code{\link[spatstat]{linearK}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lamJ <- function(x,y,const=lam[["robbery"]]){ rep(const, length(x)) } K <- linearKcross.inhom(chicago, "assault", "robbery", lamI, lamJ) \dontrun{ fit <- lppm(chicago, ~marks + x) linearKcross.inhom(chicago, "assault", "robbery", fit, fit) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/pixelquad.Rd0000755000176000001440000000576612237642733015235 0ustar ripleyusers\name{pixelquad} \alias{pixelquad} \title{Quadrature Scheme Based on Pixel Grid} \description{ Makes a quadrature scheme with a dummy point at every pixel of a pixel image. } \usage{ pixelquad(X, W = as.owin(X)) } \arguments{ \item{X}{Point pattern (object of class \code{"ppp"}) containing the data points for the quadrature scheme. } \item{W}{ Specifies the pixel grid. A pixel image (object of class \code{"im"}), a window (object of class \code{"owin"}), or anything that can be converted to a window by \code{\link{as.owin}}. 
} } \value{ An object of class \code{"quad"} describing the quadrature scheme (data points, dummy points, and quadrature weights) suitable as the argument \code{Q} of the function \code{\link{ppm}()} for fitting a point process model. The quadrature scheme can be inspected using the \code{print} and \code{plot} methods for objects of class \code{"quad"}. } \details{ This is a method for producing a quadrature scheme for use by \code{\link{ppm}}. It is an alternative to \code{\link{quadscheme}}. The function \code{\link{ppm}} fits a point process model to an observed point pattern using the Berman-Turner quadrature approximation (Berman and Turner, 1992; Baddeley and Turner, 2000) to the pseudolikelihood of the model. It requires a quadrature scheme consisting of the original data point pattern, an additional pattern of dummy points, and a vector of quadrature weights for all these points. Such quadrature schemes are represented by objects of class \code{"quad"}. See \code{\link{quad.object}} for a description of this class. Given a grid of pixels, this function creates a quadrature scheme in which there is one dummy point at the centre of each pixel. The counting weights are used (the weight attached to each quadrature point is 1 divided by the number of quadrature points falling in the same pixel). The argument \code{X} specifies the locations of the data points for the quadrature scheme. Typically this would be a point pattern dataset. The argument \code{W} specifies the grid of pixels for the dummy points of the quadrature scheme. It should be a pixel image (object of class \code{"im"}), a window (object of class \code{"owin"}), or anything that can be converted to a window by \code{\link{as.owin}}. If \code{W} is a pixel image or a binary mask (a window of type \code{"mask"}) then the pixel grid of \code{W} will be used. 
If \code{W} is a rectangular or polygonal window, then it will first be converted to a binary mask using \code{\link{as.mask}} at the default pixel resolution. } \examples{ W <- owin(c(0,1),c(0,1)) X <- runifpoint(42, W) W <- as.mask(W,dimyx=128) pixelquad(X,W) } \seealso{ \code{\link{quadscheme}}, \code{\link{quad.object}}, \code{\link{ppm}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/plot.owin.Rd0000755000176000001440000001533112247607350015154 0ustar ripleyusers\name{plot.owin} \alias{plot.owin} \title{Plot a Spatial Window} \description{ Plot a two-dimensional window of observation for a spatial point pattern } \usage{ \method{plot}{owin}(x, main, add=FALSE, \dots, box, edge=0.04, type=c("w","n"), hatch=FALSE, angle=45, spacing=diameter(x)/50, invert=FALSE) } \arguments{ \item{x}{ The window to be plotted. An object of class \code{\link{owin}}, or data which can be converted into this format by \code{\link{as.owin}()}. } \item{main}{ text to be displayed as a title above the plot. } \item{add}{ logical flag: if \code{TRUE}, draw the window in the current plot; if \code{FALSE}, generate a new plot. } \item{\dots}{ extra arguments passed to the generic \code{\link{plot}} function. } \item{box}{ logical flag; if \code{TRUE}, plot the enclosing rectangular box } \item{edge}{ nonnegative number; the plotting region will have coordinate limits that are \code{1 + edge} times as large as the limits of the rectangular box that encloses the pattern. } \item{type}{ Type of plot: either \code{"w"} or \code{"n"}. If \code{type="w"} (the default), the window is plotted. If \code{type="n"} and \code{add=TRUE}, a new plot is initialised and the coordinate system is established, but nothing is drawn. } \item{hatch}{ logical flag; if \code{TRUE}, the interior of the window will be shaded by a grid of parallel lines. 
} \item{angle}{ orientation of the shading lines (in degrees anticlockwise from the \eqn{x} axis) when \code{hatch=TRUE}. } \item{spacing}{ spacing between the shading lines, when \code{hatch=TRUE}. } \item{invert}{ logical flag; when the window is a binary pixel mask, the mask colours will be inverted if \code{invert=TRUE}. } } \value{ none. } \details{ This is the \code{plot} method for the class \code{\link{owin}}. The action is to plot the boundary of the window on the current plot device, using equal scales on the \code{x} and \code{y} axes. If the window \code{x} is of type \code{"rectangle"} or \code{"polygonal"}, the boundary of the window is plotted as a polygon or series of polygons. If \code{x} is of type \code{"mask"} the discrete raster approximation of the window is displayed as a binary image (white inside the window, black outside). Graphical parameters controlling the display (e.g. setting the colours) may be passed directly via the \code{...} arguments, or indirectly reset using \code{\link{spatstat.options}}. When \code{x} is of type \code{"rectangle"} or \code{"polygonal"}, it is plotted by the \R function \code{\link{polygon}}. To control the appearance (colour, fill density, line density etc) of the polygon plot, determine the required argument of \code{\link{polygon}} and pass it through \code{...} For example, to paint the interior of the polygon in red, use the argument \code{col="red"}. To draw the polygon edges in green, use \code{border="green"}. To suppress the drawing of polygon edges, use \code{border=NA}. When \code{x} is of type \code{"mask"}, it is plotted by \code{\link{image.default}}. The appearance of the image plot can be controlled by passing arguments to \code{\link{image.default}} through \code{...}. The default appearance can also be changed by setting the parameter \code{par.binary} of \code{\link{spatstat.options}}. 
To zoom in (to view only a subset of the window at higher magnification), use the graphical arguments \code{xlim} and \code{ylim} to specify the desired rectangular field of view. (The actual field of view may be larger, depending on the graphics device). } \section{Notes on Filled Polygons with Holes}{ The function \code{\link{polygon}} can only handle polygons without holes. To plot polygons with holes in a solid colour, we have implemented two workarounds. \describe{ \item{polypath function:}{ The first workaround uses the relatively new function \code{\link{polypath}} which \emph{does} have the capability to handle polygons with holes. However, not all graphics devices support \code{\link{polypath}}. The older devices \code{\link{xfig}} and \code{\link{pictex}} do not support \code{\link{polypath}}. On a Windows system, the default graphics device #ifdef windows \code{\link{windows}} #endif #ifndef windows \code{windows} #endif supports \code{\link{polypath}}. #ifdef unix On a Linux system, the default graphics device \code{X11(type="Xlib")} does \emph{not} support \code{\link{polypath}} but \code{X11(type="cairo")} does support it. See \code{\link{X11}} and the section on Cairo below. #endif } \item{polygon decomposition:}{ The other workaround involves decomposing the polygonal window into pieces which do not have holes. This code is experimental but works in all our test cases. If this code fails, a warning will be issued, and the filled colours will not be plotted. } } } #ifdef unix \section{Cairo graphics on a Linux system}{ Linux systems support the graphics device \code{X11(type="cairo")} (see \code{\link{X11}}) provided the external library \pkg{cairo} is installed on the computer. See \url{http://www.cairographics.org/download} for instructions on obtaining and installing \pkg{cairo}. After having installed \pkg{cairo} one needs to re-install \R from source so that it has \pkg{cairo} capabilites. 
To check whether your current installation of R has \pkg{cairo} capabilities, type (in \R) \code{capabilities()["cairo"]}. The default type for \code{\link{X11}} is controlled by \code{\link[grDevices]{X11.options}}. You may find it convenient to make \pkg{cairo} the default, e.g. via your \code{.Rprofile}. The magic incantation to put into \code{.Rprofile} is \preformatted{ setHook(packageEvent("graphics", "onLoad"), function(...) grDevices::X11.options(type="cairo")) } } #endif \seealso{ \code{\link{owin.object}}, \code{\link{plot.ppp}}, \code{\link{polygon}}, \code{\link{image.default}}, \code{\link{spatstat.options}} } \examples{ # rectangular window plot(as.owin(nztrees)) abline(v=148, lty=2) # polygonal window w <- as.owin(demopat) plot(w) plot(w, col="red", border="green", lwd=2) plot(w, hatch=TRUE, lwd=2) # binary mask we <- as.mask(w) plot(we) op <- spatstat.options(par.binary=list(col=grey(c(0.5,1)))) plot(we) spatstat.options(op) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/plot.ppp.Rd0000755000176000001440000002312712247607350015001 0ustar ripleyusers\name{plot.ppp} \alias{plot.ppp} \title{plot a Spatial Point Pattern} \description{ Plot a two-dimensional spatial point pattern } \usage{ \method{plot}{ppp}(x, main, \dots, chars=NULL, cols=NULL, use.marks, which.marks=NULL, add=FALSE, type=c("p","n"), maxsize=NULL, markscale=NULL,zap=0.01) } \arguments{ \item{x}{ The spatial point pattern to be plotted. An object of class \code{"ppp"}, or data which can be converted into this format by \code{\link{as.ppp}()}. } \item{main}{ text to be displayed as a title above the plot. } \item{\dots}{ extra arguments that will be passed to the plotting functions \code{\link{plot.default}}, \code{\link{points}} and/or \code{\link{symbols}}. } \item{chars}{ plotting character(s) used to plot points. 
} \item{cols}{ the colour(s) used to plot points. } \item{use.marks}{ logical flag; if \code{TRUE}, plot points using a different plotting symbol for each mark; if \code{FALSE}, only the locations of the points will be plotted, using \code{\link{points}()}. } \item{which.marks}{ Index determining which column of marks to use, if the marks of \code{x} are a data frame. A character or integer vector identifying one or more columns of marks. If \code{add=FALSE} then the default is to plot all columns of marks, in a series of separate plots. If \code{add=TRUE} then only one column of marks can be plotted, and the default is \code{which.marks=1} indicating the first column of marks. } \item{add}{ logical flag; if \code{TRUE}, just the points are plotted, over the existing plot. A new plot is not created, and the window is not plotted. } \item{type}{ Type of plot: either \code{"p"} or \code{"n"}. If \code{type="p"} (the default), both the points and the observation window are plotted. If \code{type="n"}, only the window is plotted. } \item{maxsize}{ maximum size of the circles/squares plotted when \code{x} is a marked point pattern with numerical marks. Incompatible with \code{markscale}. } \item{markscale}{ physical scale factor determining the sizes of the circles/squares plotted when \code{x} is a marked point pattern with numerical marks. Incompatible with \code{maxsize}. } \item{zap}{ Fraction between 0 and 1. When \code{x} is a marked point pattern with numerical marks, \code{zap} is the smallest mark value (expressed as a fraction of the maximum possible mark) that will be plotted. Any points which have marks smaller in absolute value than \code{zap * max(abs(marks(x)))} will not be plotted. } } \value{ \code{NULL}, or a vector giving the correspondence between mark values and plotting characters. } \details{ This is the \code{plot} method for point pattern datasets (of class \code{"ppp"}, see \code{\link{ppp.object}}). 
First the observation window \code{x$window} is plotted. Then the points themselves are plotted, in a fashion that depends on their marks, as follows. \describe{ \item{unmarked point pattern:}{ If the point pattern does not have marks, or if \code{use.marks = FALSE}, then the locations of all points will be plotted using a single plot character } \item{multitype point pattern:}{ If \code{x$marks} is a factor, then each level of the factor is represented by a different plot character. } \item{continuous marks:}{ If \code{x$marks} is a numeric vector, the marks are rescaled to the unit interval and each point is represented by a circle with radius proportional to the rescaled mark (if the value is positive) or a square with side proportional to the absolute value of the rescaled mark (if the value is negative). } \item{other kinds of marks:}{ If \code{x$marks} is neither numeric nor a factor, then each possible mark will be represented by a different plotting character. The default is to represent the \eqn{i}th smallest mark value by \code{points(..., pch=i)}. } } If there are several columns of marks, and if \code{which.marks} is missing or \code{NULL}, then \itemize{ \item if \code{add=FALSE}, the default is to plot all columns of marks, in a series of separate plots, placed side-by-side. The plotting is coordinated by \code{\link{plot.listof}}, which calls \code{plot.ppp} to make each of the individual plots. \item If \code{add=TRUE}, then only one column of marks can be plotted, and the default is \code{which.marks=1} indicating the first column of marks. } Plotting of the window \code{x$window} is performed by \code{\link{plot.owin}}. This plot may be modified through the \code{...} arguments. In particular the extra argument \code{border} determines the colour of the window. Plotting of the points themselves is performed by the function \code{\link{points}}, except for the case of continuous marks, where it is performed by \code{\link{symbols}}. 
Their plotting behaviour may be modified through the \code{...} arguments. The argument \code{chars} determines the plotting character or characters used to display the points (in all cases except for the case of continuous marks). For an unmarked point pattern, this should be a single integer or character determining a plotting character (see \code{par("pch")}). For a multitype point pattern, \code{chars} should be a vector of integers or characters, of the same length as \code{levels(x$marks)}, and then the \eqn{i}th level or type will be plotted using character \code{chars[i]}. If \code{chars} is absent, but there is an extra argument \code{pch}, then this will determine the plotting character for all points. The argument \code{cols} determines the colour or colours used to display the points. For an unmarked point pattern, or a marked point pattern with continuous marks, this should be a character string determining a colour. For a multitype point pattern, \code{cols} should be a character vector, of the same length as \code{levels(x$marks)}. The \eqn{i}th level or type will be plotted using colour \code{cols[i]}. If \code{cols} is absent, the colour used to plot \emph{all} the points may be determined by the extra argument \code{fg} (for multitype point patterns) or the extra argument \code{col} (for all other cases). Note that \code{col} will also reset the colour of the window. The arguments \code{maxsize} and \code{markscale} are incompatible. They control the physical size of the circles and squares which represent the marks in a point pattern with continuous marks. If \code{markscale} is given, then a mark value of \code{m} is plotted as a circle of radius \code{m * markscale} (if \code{m} is positive) or a square of side \code{abs(m) * markscale} (if \code{m} is negative). If \code{maxsize} is given, then the largest mark in absolute value, \code{mmax=max(abs(x$marks))}, will be scaled to have physical size \code{maxsize}. 
The user can set the default values of these plotting parameters using \code{\link{spatstat.options}("par.points")}. To zoom in (to view only a subset of the point pattern at higher magnification), use the graphical arguments \code{xlim} and \code{ylim} to specify the rectangular field of view. The value returned by this plot function can be used to make a suitable legend, as shown in the examples. } \section{Removing White Space Around The Plot}{ A frequently-asked question is: How do I remove the white space around the plot? Currently \code{plot.ppp} uses the base graphics system of \R, so the space around the plot is controlled by parameters to \code{\link{par}}. To reduce the white space, change the parameter \code{mar}. Typically, \code{par(mar=rep(0.5, 4))} is adequate, if there are no annotations or titles outside the window. } \seealso{ \code{\link{iplot}}, \code{\link{ppp.object}}, \code{\link{plot}}, \code{\link{par}}, \code{\link{points}}, \code{\link{plot.owin}}, \code{\link{symbols}} } \examples{ data(cells) plot(cells) plot(cells, pch=16) # make the plotting symbols larger (for publication at reduced scale) plot(cells, cex=2) # set it in spatstat.options oldopt <- spatstat.options(par.points=list(cex=2)) plot(cells) spatstat.options(oldopt) # multitype data(lansing) plot(lansing) # marked by a real number data(longleaf) plot(longleaf) # just plot the points plot(longleaf, use.marks=FALSE) plot(unmark(longleaf)) # equivalent # controlling COLOURS of points plot(cells, cols="blue") plot(lansing, cols=c("black", "yellow", "green", "blue","red","pink")) plot(longleaf, fg="blue") # make window purple plot(lansing, border="purple") # make everything purple plot(lansing, border="purple", cols="purple", col.main="purple") # controlling PLOT CHARACTERS plot(lansing, chars = 11:16) plot(lansing, chars = c("o","h","m",".","o","o")) # controlling MARK SCALE plot(longleaf, markscale=0.1) # draw circles of DIAMETER equal to nearest neighbour distance plot(cells 
\%mark\% nndist(cells), markscale=1/2) # making the legend data(amacrine) v <- plot(amacrine) legend(0.2, 1.2, pch=v, legend=names(v)) # point pattern with multiple marks data(finpines) plot(finpines, which.marks="height") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/Extract.layered.Rd0000644000176000001440000000354612237642732016265 0ustar ripleyusers\name{Extract.layered} \alias{[.layered} \title{Extract Subset of a Layered Object} \description{ Extract some or all of the layers of a layered object, or extract a spatial subset of each layer. } \usage{ \method{[}{layered}(x, i, j, drop=FALSE, ...) } \arguments{ \item{x}{ A layered object (class \code{"layered"}). } \item{i}{ Subset index for the list of layers. A logical vector, integer vector or character vector specifying which layers are to be retained. } \item{j}{ Subset index to be applied to the data in each layer. Typically a spatial window (class \code{"owin"}). } \item{drop}{ Logical. If \code{i} specifies only a single layer and \code{drop=TRUE}, then the contents of this layer will be returned. } \item{\dots}{ Ignored. } } \value{ Usually an object of class \code{"layered"}. } \details{ A layered object represents data that should be plotted in successive layers, for example, a background and a foreground. See \code{\link{layered}}. This function extracts a designated subset of a layered object. It is a method for \code{\link{[}} for the class \code{"layered"}. The index \code{i} specifies which layers will be retained. It should be a valid subset index for the list of layers. The index \code{j} will be applied to each layer. It is typically a spatial window (class \code{"owin"}) so that each of the layers will be restricted to the same spatial region. 
Alternatively \code{j} may be any subset index which is permissible for the \code{"["} method for each of the layers. } \seealso{ \code{\link{layered}} } \examples{ example(layered) L[-2] L[, square(0.5)] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/plot.tess.Rd0000755000176000001440000000270212237642733015157 0ustar ripleyusers\name{plot.tess} \alias{plot.tess} \title{Plot a tessellation} \description{ Plots a tessellation. } \usage{ \method{plot}{tess}(x, ..., main, add=FALSE, col=NULL) } \arguments{ \item{x}{Tessellation (object of class \code{"tess"}) to be plotted.} \item{\dots}{Arguments controlling the appearance of the plot.} \item{main}{Heading for the plot. A character string.} \item{add}{Logical. Determines whether the tessellation plot is added to the existing plot. } \item{col}{ Colour of the tile boundaries. A character string. Ignored for pixel tessellations. } } \details{ This is a method for the generic \code{\link{plot}} function for the class \code{"tess"} of tessellations (see \code{\link{tess}}). The arguments \code{\dots} control the appearance of the plot. They are passed to \code{\link{segments}}, \code{\link{plot.owin}} or \code{\link{plot.im}}, depending on the type of tessellation. } \value{ None. 
} \seealso{ \code{\link{tess}} } \examples{ A <- tess(xgrid=0:4,ygrid=0:4) plot(A, col="blue", lwd=2, lty=2) B <- A[c(1, 2, 5, 7, 9)] plot(B, hatch=TRUE) v <- as.im(function(x,y){factor(round(5 * (x^2 + y^2)))}, W=owin()) levels(v) <- letters[seq(length(levels(v)))] E <- tess(image=v) plot(E) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/pcfcross.Rd0000755000176000001440000001031112237642733015041 0ustar ripleyusers\name{pcfcross} \alias{pcfcross} \title{Multitype pair correlation function (cross-type)} \description{ Calculates an estimate of the cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross(X, i, j, ...) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{\dots}{ Arguments passed to \code{\link{pcf.ppp}}. } } \details{ The cross-type pair correlation function is a generalisation of the pair correlation function \code{\link{pcf}} to multitype point patterns. 
For two locations \eqn{x} and \eqn{y} separated by a distance \eqn{r}, the probability \eqn{p(r)} of finding a point of type \eqn{i} at location \eqn{x} and a point of type \eqn{j} at location \eqn{y} is \deqn{ p(r) = \lambda_i \lambda_j g_{i,j}(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i] * lambda[j] * g[i,j](r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity of the points of type \eqn{i}. For a completely random Poisson marked point process, \eqn{p(r) = \lambda_i \lambda_j}{p(r) = lambda[i] * lambda[j]} so \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. Indeed for any marked point pattern in which the points of type \code{i} are independent of the points of type \code{j}, the theoretical value of the cross-type pair correlation is \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1}. For a stationary multitype point process, the cross-type pair correlation function between marks \eqn{i} and \eqn{j} is formally defined as \deqn{ g_{i,j}(r) = \frac{K_{i,j}^\prime(r)}{2\pi r} }{ g[i,j](r) = K[i,j]'(r)/ ( 2 * pi * r) } where \eqn{K_{i,j}^\prime}{K[i,j]'(r)} is the derivative of the cross-type \eqn{K} function \eqn{K_{i,j}(r)}{K[i,j](r)} of the point process. See \code{\link{Kest}} for information about \eqn{K(r)}. The command \code{pcfcross} computes a kernel estimate of the cross-type pair correlation function between marks \eqn{i} and \eqn{j}. It uses \code{\link{pcf.ppp}} to compute kernel estimates of the pair correlation functions for several unmarked point patterns, and uses the bilinear properties of second moments to obtain the cross-type pair correlation. See \code{\link{pcf.ppp}} for a list of arguments that control the kernel estimation. The companion function \code{\link{pcfdot}} computes the corresponding analogue of \code{\link{Kdot}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. 
Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{g_{i,j}}{g[i,j]} has been estimated } \item{theo}{the theoretical value \eqn{g_{i,j}(r) = 1}{g[i,j](r) = 1} for independent marks. } together with columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{g_{i,j}}{g[i,j]} obtained by the edge corrections named. } \seealso{ Mark connection function \code{\link{markconnect}}. Multitype pair correlation \code{\link{pcfdot}}. Pair correlation \code{\link{pcf}},\code{\link{pcf.ppp}}. \code{\link{Kcross}} } \examples{ data(amacrine) p <- pcfcross(amacrine, "off", "on") p <- pcfcross(amacrine, "off", "on", stoyan=0.1) plot(p) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/scan.test.Rd0000644000176000001440000001241712237642734015130 0ustar ripleyusers\name{scan.test} \alias{scan.test} \title{ Spatial Scan Test } \description{ Performs the Spatial Scan Test for clustering in a spatial point pattern, or for clustering of one type of point in a bivariate spatial point pattern. } \usage{ scan.test(X, r, ..., method = c("poisson", "binomial"), nsim = 19, baseline = NULL, case = 2, alternative = c("greater", "less", "two.sided"), verbose = TRUE) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). } \item{r}{ Radius of circle to use. A single number. } \item{\dots}{ Optional. Arguments passed to \code{\link{as.mask}} to determine the spatial resolution of the computations. } \item{method}{ Either \code{"poisson"} or \code{"binomial"} specifying the type of likelihood. } \item{nsim}{ Number of simulations for computing Monte Carlo p-value. 
} \item{baseline}{ Baseline for the Poisson intensity, if \code{method="poisson"}. A pixel image or a function. } \item{case}{ Which type of point should be interpreted as a case, if \code{method="binomial"}. Integer or character string. } \item{alternative}{ Alternative hypothesis: \code{"greater"} if the alternative postulates that the mean number of points inside the circle will be greater than expected under the null. } \item{verbose}{ Logical. Whether to print progress reports. } } \details{ The spatial scan test (Kulldorff, 1997) is applied to the point pattern \code{X}. In a nutshell, \itemize{ \item If \code{method="poisson"} then a significant result would mean that there is a circle of radius \code{r}, located somewhere in the spatial domain of the data, which contains a significantly higher than expected number of points of \code{X}. That is, the pattern \code{X} exhibits spatial clustering. \item If \code{method="binomial"} then \code{X} must be a bivariate (two-type) point pattern. By default, the first type of point is interpreted as a control (non-event) and the second type of point as a case (event). A significant result would mean that there is a circle of radius \code{r} which contains a significantly higher than expected number of cases. That is, the cases are clustered together, conditional on the locations of all points. } Following is a more detailed explanation. \itemize{ \item If \code{method="poisson"} then the scan test based on Poisson likelihood is performed (Kulldorff, 1997). The dataset \code{X} is treated as an unmarked point pattern. By default (if \code{baseline} is not specified) the null hypothesis is complete spatial randomness CSR (i.e. a uniform Poisson process). The alternative hypothesis is a Poisson process with one intensity \eqn{\beta_1}{beta1} inside some circle of radius \code{r} and another intensity \eqn{\beta_0}{beta0} outside the circle. 
If \code{baseline} is given, then it should be a pixel image or a \code{function(x,y)}. The null hypothesis is an inhomogeneous Poisson process with intensity proportional to \code{baseline}. The alternative hypothesis is an inhomogeneous Poisson process with intensity \code{beta1 * baseline} inside some circle of radius \code{r}, and \code{beta0 * baseline} outside the circle. \item If \code{method="binomial"} then the scan test based on binomial likelihood is performed (Kulldorff, 1997). The dataset \code{X} must be a bivariate point pattern, i.e. a multitype point pattern with two types. The null hypothesis is that all permutations of the type labels are equally likely. The alternative hypothesis is that some circle of radius \code{r} has a higher proportion of points of the second type, than expected under the null hypothesis. } The result of \code{scan.test} is a hypothesis test (object of class \code{"htest"}) which can be plotted to report the results. The component \code{p.value} contains the \eqn{p}-value. The result of \code{scan.test} can also be plotted (using the plot method for the class \code{"scan.test"}). The plot is a pixel image of the Likelihood Ratio Test Statistic (2 times the log likelihood ratio) as a function of the location of the centre of the circle. This pixel image can be extracted from the object using \code{\link{as.im}}. } \value{ An object of class \code{"htest"} (hypothesis test) which also belongs to the class \code{"scan.test"}. Printing this object gives the result of the test. Plotting this object displays the Likelihood Ratio Test Statistic as a function of the location of the centre of the circle. } \references{ Kulldorff, M. (1997) A spatial scan statistic. \emph{Communications in Statistics --- Theory and Methods} \bold{26}, 1481--1496. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{relrisk}} } \examples{ nsim <- if(interactive()) 19 else 2 data(redwood) scan.test(redwood, 0.1, method="poisson", nsim=nsim) data(chorley) scan.test(chorley, 1, method="binomial", case="larynx", nsim=nsim) } \keyword{htest} \keyword{spatial} spatstat/man/closepairs.Rd0000644000176000001440000001117012243543437015362 0ustar ripleyusers\name{closepairs} \alias{closepairs} \alias{crosspairs} \alias{closepaircounts} \alias{crosspaircounts} \title{ Close Pairs of Points } \description{ Low-level functions to find all close pairs of points. } \usage{ closepaircounts(X, r) crosspaircounts(X, Y, r) closepairs(X, rmax, ordered=TRUE, what=c("all", "indices")) crosspairs(X, Y, rmax, what=c("all", "indices")) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{r,rmax}{ Maximum distance between pairs of points to be counted as close pairs. } \item{ordered}{ Logical value indicating whether all ordered pairs of close points should be returned. If \code{ordered=TRUE}, each pair will appear twice in the output, as \code{(i,j)} and again as \code{(j,i)}. If \code{ordered=FALSE}, then each pair will appear only once, as the pair \code{(i,j)} such that \code{i < j}. } \item{what}{ String specifying the data to be returned for each close pair of points. If \code{what="all"} (the default) then the returned information includes the indices \code{i,j} of each pair, their \code{x,y} coordinates, and the distance between them. If \code{what="indices"} then only the indices \code{i,j} are returned. } } \details{ These are the efficient low-level functions used by \pkg{spatstat} to find all close pairs of points in a point pattern or all close pairs between two point patterns. \code{closepaircounts(X,r)} counts the number of neighbours for each point in the pattern \code{X}. 
That is, for each point \code{X[i]}, it counts the number of other points \code{X[j]} with \code{j != i} such that \code{d(X[i],X[j]) <= r} where \code{d} denotes Euclidean distance. The result is an integer vector \code{v} such that \code{v[i]} is the number of neighbours of \code{X[i]}. \code{crosspaircounts(X,Y,r)} counts, for each point in the pattern \code{X}, the number of neighbours in the pattern \code{Y}. That is, for each point \code{X[i]}, it counts the number of points \code{Y[j]} such that \code{d(X[i],Y[j]) <= r}. The result is an integer vector \code{v} such that \code{v[i]} is the number of neighbours of \code{X[i]} in the pattern \code{Y}. \code{closepairs(X,rmax)} identifies all pairs of neighbours in the pattern \code{X} and returns them. The result is a list with the following components: \describe{ \item{i}{Integer vector of indices of the first point in each pair.} \item{j}{Integer vector of indices of the second point in each pair.} \item{xi,yi}{Coordinates of the first point in each pair.} \item{xj,yj}{Coordinates of the second point in each pair.} \item{dx}{Equal to \code{xj-xi}} \item{dy}{Equal to \code{yj-yi}} \item{d}{Euclidean distance between each pair of points.} } If \code{what="indices"} then only the components \code{i} and \code{j} are returned. This is slightly faster. \code{crosspairs(X,Y,rmax)} identifies all pairs of neighbours \code{(X[i], Y[j])} between the patterns \code{X} and \code{Y}, and returns them. The result is a list with the same format as for \code{closepairs}. } \section{Warning about accuracy}{ The results of these functions may not agree exactly with the correct answer (as calculated by a human) and may not be consistent between different computers and different installations of \R. The discrepancies arise in marginal cases where the interpoint distance is equal to, or very close to, the threshold \code{rmax}. 
Floating-point numbers in a computer are not mathematical Real Numbers: they are approximations using finite-precision binary arithmetic. The approximation is accurate to a tolerance of about \code{.Machine$double.eps}. If the true interpoint distance \eqn{d} and the threshold \code{rmax} are equal, or if their difference is no more than \code{.Machine$double.eps}, the result may be incorrect. } \value{ For \code{closepaircounts} and \code{crosspaircounts}, an integer vector of length equal to the number of points in \code{X}. For \code{closepairs} and \code{crosspairs}, a list with components \code{i} and \code{j}, and possibly other components as described under Details. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{Kest}}, \code{\link{Kcross}}, \code{\link{nndist}}, \code{\link{nncross}}, \code{\link{applynbd}}, \code{\link{markstat}}. } \examples{ a <- closepaircounts(cells, 0.1) sum(a) Y <- split(amacrine) b <- crosspaircounts(Y$on, Y$off, 0.1) } \keyword{spatial} \keyword{math} spatstat/man/volume.Rd0000755000176000001440000000172612237642734014541 0ustar ripleyusers\name{volume} \alias{volume} \title{Volume of an Object} \description{ Computes the volume of a spatial object such as a three-dimensional box. } \usage{ volume(x) } \arguments{ \item{x}{ An object whose volume will be computed. } } \value{ The numerical value of the volume of the object. } \details{ This function computes the volume of an object such as a three-dimensional box. The function \code{volume} is generic, with methods for the classes \code{"box3"} (three-dimensional boxes) and \code{"boxx"} (multi-dimensional boxes). There is also a method for the class \code{"owin"} (two-dimensional windows), which is identical to \code{\link{area.owin}}. 
} \seealso{ \code{\link{area.owin}}, \code{\link{volume.box3}}, \code{\link{volume.boxx}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/by.im.Rd0000755000176000001440000000345012237642732014242 0ustar ripleyusers\name{by.im} \alias{by.im} \title{Apply Function to Image Broken Down by Factor} \description{ Splits a pixel image into sub-images and applies a function to each sub-image. } \usage{ \method{by}{im}(data, INDICES, FUN, ...) } \arguments{ \item{data}{A pixel image (object of class \code{"im"}).} \item{INDICES}{Grouping variable. Either a tessellation (object of class \code{"tess"}) or a factor-valued pixel image. } \item{FUN}{Function to be applied to each sub-image of \code{data}.} \item{\dots}{Extra arguments passed to \code{FUN}.} } \details{ This is a method for the generic function \code{\link{by}} for pixel images (class \code{"im"}). The pixel image \code{data} is first divided into sub-images according to \code{INDICES}. Then the function \code{FUN} is applied to each subset. The results of each computation are returned in a list. The grouping variable \code{INDICES} may be either \itemize{ \item a tessellation (object of class \code{"tess"}). Each tile of the tessellation delineates a subset of the spatial domain. \item a pixel image (object of class \code{"im"}) with factor values. The levels of the factor determine subsets of the spatial domain. } } \value{ A list containing the results of each evaluation of \code{FUN}. 
} \seealso{ \code{\link{split.im}}, \code{\link{tess}}, \code{\link{im}} } \examples{ W <- square(1) X <- as.im(function(x,y){sqrt(x^2+y^2)}, W) Y <- dirichlet(runifpoint(12, W)) # mean pixel value in each subset unlist(by(X, Y, mean)) # trimmed mean unlist(by(X, Y, mean, trim=0.05)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{manip} spatstat/man/waka.Rd0000755000176000001440000000247612237642735014161 0ustar ripleyusers\name{waka} \alias{waka} \docType{data} \title{ Trees in Waka national park } \description{ This dataset is a spatial point pattern of trees recorded at Waka National Park, Gabon. See Balinga et al (2006). The dataset \code{waka} is a point pattern (object of class \code{"ppp"}) containing the spatial coordinates of each tree, marked by the tree diameter at breast height \code{dbh}. The survey region is a 100 by 100 metre square. Coordinates are given in metres, while the \code{dbh} is in centimetres. } \usage{data(waka)} \examples{ data(waka) plot(waka, markscale=0.005) title(sub="Tree diameters to scale") plot(waka, markscale=0.02) title(sub="Tree diameters 4x scale") } \source{ Nicolas Picard } \references{ Balinga, M., Sunderland, T., Walters, G., Issemb{\'e}, Y., Asaha, S. and Fombod, E. (2006) \emph{A vegetation assessment of the Waka national park, Gabon.} Herbier National du Gabon, LBG, MBG, WCS, FRP and Simthsonian Institution, Libreville, Gabon. CARPE Report, 154 pp. \url{http://carpe.umd.edu/resources/Documents/} Picard, N., Bar-Hen, A., Mortier, F. and Chadoeuf, J. (2009) The multi-scale marked area-interaction point process: a model for the spatial pattern of trees. 
\emph{Scandinavian Journal of Statistics} \bold{36} 23--41 } \keyword{datasets} \keyword{spatial} spatstat/man/Ginhom.Rd0000644000176000001440000001362412237642731014445 0ustar ripleyusers\name{Ginhom} \alias{Ginhom} \title{ Inhomogeneous Nearest Neighbour Function } \description{ Estimates the inhomogeneous nearest neighbour function \eqn{G} of a non-stationary point pattern. } \usage{ Ginhom(X, lambda = NULL, lmin = NULL, ..., sigma = NULL, varcov = NULL, r = NULL, breaks = NULL, ratio = FALSE) } \arguments{ \item{X}{ The observed data point pattern, from which an estimate of the inhomogeneous \eqn{G} function will be computed. An object of class \code{"ppp"} or in a format recognised by \code{\link{as.ppp}()} } \item{lambda}{ Optional. Values of the estimated intensity function. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lmin}{ Optional. The minimum possible value of the intensity over the spatial domain. A positive numerical value. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } \item{\dots}{ Extra arguments passed to \code{\link{as.mask}} to control the pixel resolution, or passed to \code{\link{density.ppp}} to control the smoothing bandwidth. } \item{r}{ vector of values for the argument \eqn{r} at which the inhomogeneous \eqn{K} function should be evaluated. Not normally given by the user; there is a sensible default. } \item{breaks}{ An alternative to the argument \code{r}. Not normally invoked by the user. See Details. } \item{ratio}{ Logical. 
If \code{TRUE}, the numerator and denominator of the estimate will also be saved, for use in analysing replicated point patterns. } } \details{ This command computes estimates of the inhomogeneous \eqn{G}-function (van Lieshout, 2010) of a point pattern. It is the counterpart, for inhomogeneous spatial point patterns, of the nearest-neighbour distance distribution function \eqn{G} for homogeneous point patterns computed by \code{\link{Gest}}. The argument \code{X} should be a point pattern (object of class \code{"ppp"}). The inhomogeneous \eqn{G} function is computed using the border correction, equation (7) in Van Lieshout (2010). The argument \code{lambda} should supply the (estimated) values of the intensity function \eqn{\lambda}{lambda} of the point process. It may be either \describe{ \item{a numeric vector}{ containing the values of the intensity function at the points of the pattern \code{X}. } \item{a pixel image}{ (object of class \code{"im"}) assumed to contain the values of the intensity function at all locations in the window. } \item{a fitted point process model}{ (object of class \code{"ppm"}) whose fitted \emph{trend} can be used as the fitted intensity. } \item{a function}{ which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother. } } If \code{lambda} is a numeric vector, then its length should be equal to the number of points in the pattern \code{X}. The value \code{lambda[i]} is assumed to be the the (estimated) value of the intensity \eqn{\lambda(x_i)}{lambda(x[i])} for the point \eqn{x_i}{x[i]} of the pattern \eqn{X}. Each value must be a positive number; \code{NA}'s are not allowed. If \code{lambda} is a pixel image, the domain of the image should cover the entire window of the point pattern. 
If it does not (which may occur near the boundary because of discretisation error), then the missing pixel values will be obtained by applying a Gaussian blur to \code{lambda} using \code{\link{blur}}, then looking up the values of this blurred image for the missing locations. (A warning will be issued in this case.) If \code{lambda} is a function, then it will be evaluated in the form \code{lambda(x,y)} where \code{x} and \code{y} are vectors of coordinates of the points of \code{X}. It should return a numeric vector with length equal to the number of points in \code{X}. If \code{lambda} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, Moller and Waagepetersen (2000). The estimate \code{lambda[i]} for the point \code{X[i]} is computed by removing \code{X[i]} from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point \code{X[i]}. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. } \references{ Van Lieshout, M.N.M. and Baddeley, A.J. (1996) A nonparametric measure of spatial interaction in point patterns. \emph{Statistica Neerlandica} \bold{50}, 344--361. Van Lieshout, M.N.M. (2010) A J-function for inhomogeneous point processes. \emph{Statistica Neerlandica} \bold{65}, 183--201. } \seealso{ \code{\link{Finhom}}, \code{\link{Jinhom}}, \code{\link{Gest}} } \examples{ \dontrun{ plot(Ginhom(swedishpines, sigma=bw.diggle, adjust=2)) } plot(Ginhom(swedishpines, sigma=10)) } \author{ Original code by Marie-Colette van Lieshout. 
C implementation and R adaptation by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/bronzefilter.Rd0000755000176000001440000000406512237642732015734 0ustar ripleyusers\name{bronzefilter} \alias{bronzefilter} \docType{data} \title{Bronze gradient filter data} \description{ These data represent a spatially inhomogeneous pattern of circular section profiles of particles, observed in a longitudinal plane section through a gradient sinter filter made from bronze powder, prepared by Ricardo Bernhardt, Dresden. The material was produced by sedimentation of bronze powder with varying grain diameter and subsequent sintering, as described in Bernhardt et al. (1997). The data are supplied as a marked point pattern of circle centres marked by circle radii. The coordinates of the centres and the radii are recorded in mm. The field of view is an \eqn{18 \times 7}{18 * 7} mm rectangle. The data were first analysed by Hahn et al. (1999). } \format{ An object of class \code{"ppp"} representing the point pattern of cell locations. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of bronze grain profile centre\cr \code{y} \tab Cartesian \eqn{y}-coordinate of bronze grain profile centre\cr \code{marks} \tab radius of bronze grain profile } See \code{\link{ppp.object}} for details of the format. All coordinates are recorded in mm. } \usage{data(bronzefilter)} \examples{ data(bronzefilter) plot(bronzefilter, markscale=1) } \source{ R.\ Bernhardt (section image), H.\ Wendrock (coordinate measurement). Adjusted, formatted and communicated by U.\ Hahn. } \references{ Bernhardt, R., Meyer-Olbersleben, F. and Kieback, B. (1997) Fundamental investigation on the preparation of gradient structures by sedimentation of different powder fractions under gravity. \emph{Proc. of the 4th Int. Conf. On Composite Engineering, July 6--12 1997, ICCE/4}, Hawaii, Ed. 
David Hui, 147--148. Hahn U., Micheletti, A., Pohlink, R., Stoyan D. and Wendrock, H.(1999) Stereological analysis and modelling of gradient structures. \emph{Journal of Microscopy}, \bold{195}, 113--124. } \keyword{datasets} \keyword{spatial} spatstat/man/psstA.Rd0000755000176000001440000001455512237642733014327 0ustar ripleyusers\name{psstA} \Rdversion{1.1} \alias{psstA} \title{ Pseudoscore Diagnostic For Fitted Model against Area-Interaction Alternative } \description{ Given a point process model fitted to a point pattern dataset, this function computes the pseudoscore diagnostic of goodness-of-fit for the model, against moderately clustered or moderately inhibited alternatives of area-interaction type. } \usage{ psstA(object, r = NULL, breaks = NULL, ..., trend = ~1, interaction = Poisson(), rbord = reach(interaction), ppmcorrection = "border", correction = "all", truecoef = NULL, hi.res = NULL, nr=spatstat.options("psstA.nr"), ngrid=spatstat.options("psstA.ngrid")) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{ppmcorrection}{ Optional. Character string specifying the edge correction for the pseudolikelihood to be used in fitting the point process model. Passed to \code{\link{ppm}}. } \item{correction}{ Optional. Character string specifying which diagnostic quantities will be computed. 
Options are \code{"all"} and \code{"best"}. The default is to compute all diagnostic quantities. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } \item{nr}{ Optional. Number of \code{r} values to be used if \code{r} is not specified. } \item{ngrid}{ Integer. Number of points in the square grid used to compute the approximate area. } } \details{ This function computes the pseudoscore test statistic which can be used as a diagnostic for goodness-of-fit of a fitted point process model. Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. The alternative hypothesis is a family of hybrid models obtained by combining the fitted model with the area-interaction process (see \code{\link{AreaInter}}). The family of alternatives includes models that are slightly more regular than the fitted model, and others that are slightly more clustered than the fitted model. 
The pseudoscore, evaluated at the null model, is \deqn{ V(r) = \sum_i A(x_i, x, r) - \int_W A(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( A(x[i], x, r)) - integral( A(u,x,r) lambda(u,x) du) } where \deqn{ A(u,x,r) = B(x\cup\{u\},r) - B(x\setminus u, r) }{ A(u,x,r) = B(x union u, r) - B(x setminus u, r) } where \eqn{B(x,r)} is the area of the union of the discs of radius \eqn{r} centred at the points of \eqn{x} (i.e. \eqn{B(x,r)} is the area of the dilation of \eqn{x} by a distance \eqn{r}). Thus \eqn{A(u,x,r)} is the \emph{unclaimed area} associated with \eqn{u}, that is, the area of that part of the disc of radius \eqn{r} centred at the point \eqn{u} that is not covered by any of the discs of radius \eqn{r} centred at points of \eqn{x}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \section{Warning}{ This computation can take a \bold{very long time}. To shorten the computation time, choose smaller values of the arguments \code{nr} and \code{ngrid}, or reduce the values of their defaults \code{spatstat.options("psstA.nr")} and \code{spatstat.options("psstA.ngrid")}. 
Computation time is roughly proportional to \code{nr * npoints * ngrid^2} where \code{npoints} is the number of points in the point pattern. } \references{ Baddeley, A., Rubak, E. and Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} Ege Rubak and Jesper Moller. } \seealso{ Alternative functions: \code{\link{psstG}}, \code{\link{psst}}, \code{\link{Gres}}, \code{\link{Kres}}. Point process models: \code{\link{ppm}}. Options: \code{\link{spatstat.options}} } \examples{ pso <- spatstat.options(psstA.ngrid=16,psstA.nr=10) X <- rStrauss(200,0.1,0.05) plot(psstA(X)) plot(psstA(X, interaction=Strauss(0.05))) spatstat.options(pso) } \keyword{spatial} \keyword{models} spatstat/man/owin.object.Rd0000755000176000001440000000721512237642733015451 0ustar ripleyusers\name{owin.object} \alias{owin.object} %DoNotExport \title{Class owin} \description{ A class \code{owin} to define the ``observation window'' of a point pattern } \details{ In the \pkg{spatstat} library, a point pattern dataset must include information about the window or region in which the pattern was observed. A window is described by an object of class \code{"owin"}. Windows of arbitrary shape are supported. An object of class \code{"owin"} has one of three types: \tabular{ll}{ \code{"rectangle"}: \tab a rectangle in the two-dimensional plane with edges parallel to the axes \cr \code{"polygonal"}: \tab a region whose boundary is a polygon or several polygons. The region may have holes and may consist of several disconnected pieces. \cr \code{"mask"}: \tab a binary image (a logical matrix) set to \code{TRUE} for pixels inside the window and \code{FALSE} outside the window. 
} Objects of class \code{"owin"} may be created by the function \code{\link{owin}} and converted from other types of data by the function \code{\link{as.owin}}. They may be manipulated by the functions \code{\link{as.rectangle}}, \code{\link{as.mask}}, \code{\link{complement.owin}}, \code{\link{rotate}}, \code{\link{shift}}, \code{\link{affine}}, \code{\link{erosion}}, \code{\link{dilation}}, \code{\link{opening}} and \code{\link{closing}}. Geometrical calculations available for windows include \code{\link{area.owin}}, \code{\link{perimeter}}, \code{\link{diameter.owin}}, \code{\link{bounding.box}}, \code{\link{eroded.areas}}, \code{\link{bdist.points}}, \code{\link{bdist.pixels}}, and \code{even.breaks.owin}. The mapping between continuous coordinates and pixel raster indices is facilitated by the functions \code{\link{raster.x}}, \code{\link{raster.y}} and \code{\link{nearest.raster.point}}. There is a \code{plot} method for window objects, \code{\link{plot.owin}}. This may be useful if you wish to plot a point pattern's window without the points for graphical purposes. There are also methods for \code{summary} and \code{print}. } \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{as.rectangle}}, \code{\link{as.mask}}, \code{\link{summary.owin}}, \code{\link{print.owin}}, \code{\link{complement.owin}}, \code{\link{erosion}}, \code{\link{dilation}}, \code{\link{opening}}, \code{\link{closing}}, \code{\link{affine.owin}}, \code{\link{shift.owin}}, \code{\link{rotate.owin}}, \code{\link{raster.x}}, \code{\link{raster.y}}, \code{\link{nearest.raster.point}}, \code{\link{plot.owin}}, \code{\link{area.owin}}, \code{\link{bounding.box}}, \code{\link{diameter}}, \code{\link{eroded.areas}}, \code{\link{bdist.points}}, \code{\link{bdist.pixels}} } \section{Warnings}{ In a window of type \code{"mask"}, the row index corresponds to increasing \eqn{y} coordinate, and the column index corresponds to increasing \eqn{x} coordinate. 
} \examples{ w <- owin() w <- owin(c(0,1), c(0,1)) # the unit square w <- owin(c(0,1), c(0,2)) \dontrun{ plot(w) # plots edges of a box 1 unit x 2 units v <- locator() # click on points in the plot window # to be the vertices of a polygon # traversed in anticlockwise order u <- owin(c(0,1), c(0,2), poly=v) plot(u) # plots polygonal boundary using polygon() plot(as.mask(u, eps=0.02)) # plots discrete pixel approximation to polygon } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{attribute} spatstat/man/plot.listof.Rd0000755000176000001440000000760112237642733015504 0ustar ripleyusers\name{plot.listof} \alias{plot.listof} \title{Plot a List of Things} \description{ Plots a list of things } \usage{ \method{plot}{listof}(x, \dots, main, arrange=TRUE, nrows=NULL, ncols=NULL, main.panel=NULL, mar.panel=c(2,1,1,2), panel.begin=NULL, panel.end=NULL, panel.args=NULL, plotcommand="plot", adorn.left=NULL, adorn.right=NULL, adorn.top=NULL, adorn.bottom=NULL, adorn.size=0.2) } \arguments{ \item{x}{ An object of the class \code{"listof"}. Essentially a list of objects. } \item{\dots}{ Arguments passed to \code{\link{plot}} when generating each plot panel. } \item{main}{ Overall heading for the plot. } \item{arrange}{ Logical flag indicating whether to plot the objects side-by-side on a single page (\code{arrange=TRUE}) or plot them individually in a succession of frames (\code{arrange=FALSE}). } \item{nrows,ncols}{ Optional. The number of rows/columns in the plot layout (assuming \code{arrange=TRUE}). You can specify either or both of these numbers. } \item{main.panel}{ Optional. A character string, or a vector of character strings, giving the headings for each of the objects. } \item{mar.panel}{ Value of the graphics parameter \code{mar} controlling the size of the margins outside each plot panel. See the help file for \code{\link{par}}. 
} \item{panel.begin,panel.end}{ Optional. Functions that will be executed before and after each panel is plotted. See Details. } \item{panel.args}{ Internal use only. } \item{plotcommand}{ Optional. Character string containing the name of the command that should be executed to plot each panel. } \item{adorn.left,adorn.right,adorn.top,adorn.bottom}{ Optional. Functions (with no arguments) that will be executed to generate additional plots at the margins (left, right, top and/or bottom, respectively) of the array of plots. } \item{adorn.size}{ Relative width (as a fraction of the other panels' widths) of the margin plots. } } \value{ Null. } \details{ This is the \code{plot} method for the class \code{"listof"}. An object of class \code{"listof"} (defined in the base R package) represents a list of objects, all belonging to a common class. The base R package defines a method for printing these objects, \code{\link{print.listof}}, but does not define a method for \code{plot}. So here we have provided a method for \code{plot}. In the \pkg{spatstat} package, the function \code{\link{density.splitppp}} produces an object of class \code{"listof"}, essentially a list of pixel images. These images can be plotted in a nice arrangement using \code{plot.listof}. See the Example. The arguments \code{panel.begin} and \code{panel.end} may be functions that will be executed before and after each panel is plotted. They will be called as \code{panel.begin(i, y, main=main.panel[i])} and \code{panel.end(i, y, add=TRUE)}. Alternatively, \code{panel.begin} and \code{panel.end} may be objects of some class that can be plotted with the generic \code{plot} command. They will be plotted before and after each panel is plotted. If all entries of \code{x} are pixel images, the function \code{\link{image.listof}} is called to control the plotting. The arguments \code{equal.ribbon} and \code{col} can be used to determine the colour map or maps applied. 
} \seealso{ \code{\link{print.listof}}, \code{\link{contour.listof}}, \code{\link{image.listof}}, \code{\link{density.splitppp}} } \examples{ # Multitype point pattern data(amacrine) plot(D <- density(split(amacrine))) plot(D, main="", equal.ribbon=TRUE, panel.end=function(i,y,...){contour(y, ...)}) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/multiplicity.ppp.Rd0000755000176000001440000000273512242557163016556 0ustar ripleyusers\name{multiplicity.ppp} \alias{multiplicity} \alias{multiplicity.ppp} \alias{multiplicity.ppx} \title{Count Multiplicity of Duplicate Points} \description{ Counts the number of duplicates for each point in a spatial point pattern. } \usage{ multiplicity(x) \method{multiplicity}{ppp}(x) \method{multiplicity}{ppx}(x) } \arguments{ \item{x}{ A spatial point pattern (object of class \code{"ppp"} or \code{"ppx"}) or some other type of spatial object. } } \value{ A vector of integers (multiplicities) of length equal to the number of points in \code{x}. } \details{ Two points in a point pattern are deemed to be identical if their \eqn{x,y} coordinates are the same, and their marks are also the same (if they carry marks). The Examples section illustrates how it is possible for a point pattern to contain a pair of identical points. For each point in \code{x}, this function counts how many points are identical to it, and returns the vector of counts. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{duplicated.ppp}}, \code{\link{unique.ppp}} } \examples{ X <- ppp(c(1,1,0.5), c(2,2,1), window=square(3)) m <- multiplicity(X) # unique points in X, marked by multiplicity first <- !duplicated(X) Y <- X[first] \%mark\% m[first] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{utilities} spatstat/man/circumradius.Rd0000755000176000001440000000232312237642732015714 0ustar ripleyusers\name{circumradius} \alias{circumradius} \alias{diameter.linnet} \title{ Circumradius and Diameter of a Linear Network } \description{ Compute the circumradius or diameter of a linear network measured using the shortest path distance. } \usage{ circumradius(x) \method{diameter}{linnet}(x) } \arguments{ \item{x}{ Linear network (object of class \code{"linnet"}). } } \details{ The diameter of a linear network (in the shortest path distance) is the maximum value of the shortest-path distance between any two points \eqn{u} and \eqn{v} on the network. The circumradius of a linear network (in the shortest path distance) is the minimum value, over all points \eqn{u} on the network, of the maximum shortest-path distance from \eqn{u} to another point \eqn{v} on the network. The function \code{diameter} is generic; the function \code{diameter.linnet} is the method for objects of class \code{linnet}. } \value{ A single numeric value. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{linnet}} } \examples{ data(simplenet) diameter(simplenet) circumradius(simplenet) } \keyword{spatial} \keyword{math} spatstat/man/Kcom.Rd0000755000176000001440000002161512237642731014117 0ustar ripleyusers\name{Kcom} \Rdversion{1.1} \alias{Kcom} \title{ Model Compensator of K Function } \description{ Given a point process model fitted to a point pattern dataset, this function computes the \emph{compensator} of the \eqn{K} function based on the fitted model (as well as the usual nonparametric estimates of \eqn{K} based on the data alone). Comparison between the nonparametric and model-compensated \eqn{K} functions serves as a diagnostic for the model. } \usage{ Kcom(object, r = NULL, breaks = NULL, ..., correction = c("border", "isotropic", "translate"), conditional = !is.poisson(object), restrict = FALSE, trend = ~1, interaction = Poisson(), rbord = reach(interaction), compute.var = TRUE, truecoef = NULL, hi.res = NULL) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{K(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{correction}{ Optional vector of character strings specifying the edge correction(s) to be used. See \code{\link{Kest}} for options. } \item{conditional}{ Optional. Logical value indicating whether to compute the estimates for the conditional case. See Details. 
} \item{restrict}{ Logical value indicating whether to compute the restriction estimator (\code{restrict=TRUE}) or the reweighting estimator (\code{restrict=FALSE}, the default). Applies only if \code{conditional=TRUE}. See Details. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{compute.var}{ Logical value indicating whether to compute the Poincare variance bound for the residual \eqn{K} function (calculation is only implemented for the isotropic correction). } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } } \details{ This command provides a diagnostic for the goodness-of-fit of a point process model fitted to a point pattern dataset. It computes an estimate of the \eqn{K} function of the dataset, together with a \emph{model compensator} of the \eqn{K} function, which should be approximately equal if the model is a good fit to the data. The first argument, \code{object}, is usually a fitted point process model (object of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a point pattern (object of class \code{"ppp"}). 
In that case, a point process model will be fitted to it, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. The algorithm first extracts the original point pattern dataset (to which the model was fitted) and computes the standard nonparametric estimates of the \eqn{K} function. It then also computes the \emph{model compensator} of the \eqn{K} function. The different function estimates are returned as columns in a data frame (of class \code{"fv"}). The argument \code{correction} determines the edge correction(s) to be applied. See \code{\link{Kest}} for explanation of the principle of edge corrections. The following table gives the options for the \code{correction} argument, and the corresponding column names in the result: \tabular{llll}{ \code{correction} \tab \bold{description of correction} \tab \bold{nonparametric} \tab \bold{compensator} \cr \code{"isotropic"} \tab Ripley isotropic correction \tab \code{iso} \tab \code{icom} \cr \code{"translate"} \tab Ohser-Stoyan translation correction \tab \code{trans} \tab \code{tcom} \cr \code{"border"} \tab border correction \tab \code{border} \tab \code{bcom} } The nonparametric estimates can all be expressed in the form \deqn{ \hat K(r) = \sum_i \sum_{j < i} e(x_i,x_j,r,x) I\{ d(x_i,x_j) \le r \} }{ K(r) = sum[i] sum[j < i] e(x[i], x[j], r, x) I( d(x[i],x[j]) <= r ) } where \eqn{x_i}{x[i]} is the \eqn{i}-th data point, \eqn{d(x_i,x_j)}{d(x[i],x[j])} is the distance between \eqn{x_i}{x[i]} and \eqn{x_j}{x[j]}, and \eqn{e(x_i,x_j,r,x)}{e(x[i],x[j],r,x)} is a term that serves to correct edge effects and to re-normalise the sum. 
The corresponding model compensator is \deqn{ {\bf C} \, \tilde K(r) = \int_W \lambda(u,x) \sum_j e(u,x_j,r,x \cup u) I\{ d(u,x_j) \le r\} }{ C K(r) = integral[u] lambda(u,x) sum[j] e(u, x[j], r, x+u) I( d(u,x[j]) <= r ) } where the integral is over all locations \eqn{u} in the observation window, \eqn{\lambda(u,x)}{lambda(u,x)} denotes the conditional intensity of the model at the location \eqn{u}, and \eqn{x \cup u}{x+u} denotes the data point pattern \eqn{x} augmented by adding the extra point \eqn{u}. If the fitted model is a Poisson point process, then the formulae above are exactly what is computed. If the fitted model is not Poisson, the formulae above are modified slightly to handle edge effects. The modification is determined by the arguments \code{conditional} and \code{restrict}. The value of \code{conditional} defaults to \code{FALSE} for Poisson models and \code{TRUE} for non-Poisson models. If \code{conditional=FALSE} then the formulae above are not modified. If \code{conditional=TRUE}, then the algorithm calculates the \emph{restriction estimator} if \code{restrict=TRUE}, and calculates the \emph{reweighting estimator} if \code{restrict=FALSE}. See Appendix D of Baddeley, Rubak and Moller (2011). Thus, by default, the reweighting estimator is computed for non-Poisson models. The nonparametric estimates of \eqn{K(r)} are approximately unbiased estimates of the \eqn{K}-function, assuming the point process is stationary. The model compensators are unbiased estimates \emph{of the mean values of the corresponding nonparametric estimates}, assuming the model is true. Thus, if the model is a good fit, the mean value of the difference between the nonparametric estimates and model compensators is approximately zero. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and Moller, J. 
(2011) Score, pseudo-score and residual diagnostics for spatial point process models. \emph{Statistical Science} \bold{26}, 613--646. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} Ege Rubak and Jesper Moller. } \seealso{ Related functions: \code{\link{Kres}}, \code{\link{Kest}}. Alternative functions: \code{\link{Gcom}}, \code{\link{psstG}}, \code{\link{psstA}}, \code{\link{psst}}. Point process models: \code{\link{ppm}}. } \examples{ fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{fit0 <- ppm(cells, ~1, nd=16)} if(interactive()) { plot(Kcom(fit0)) # compare the isotropic-correction estimates plot(Kcom(fit0), cbind(iso, icom) ~ r) # uniform Poisson is clearly not correct } fit1 <- ppm(cells, ~1, Strauss(0.08)) \testonly{fit1 <- ppm(cells, ~1, Strauss(0.08), nd=16)} K1 <- Kcom(fit1) K1 if(interactive()) { plot(K1) plot(K1, cbind(iso, icom) ~ r) plot(K1, cbind(trans, tcom) ~ r) # how to plot the difference between nonparametric estimates and compensators plot(K1, iso - icom ~ r) # fit looks approximately OK; try adjusting interaction distance } fit2 <- ppm(cells, ~1, Strauss(0.12)) \testonly{fit2 <- ppm(cells, ~1, Strauss(0.12), nd=16)} K2 <- Kcom(fit2) if(interactive()) { plot(K2) plot(K2, cbind(iso, icom) ~ r) plot(K2, iso - icom ~ r) } } \keyword{spatial} \keyword{models} spatstat/man/methods.lppm.Rd0000755000176000001440000000340012237642733015632 0ustar ripleyusers\name{methods.lppm} \alias{methods.lppm} %DoNotExport \alias{coef.lppm} \alias{extractAIC.lppm} \alias{formula.lppm} \alias{logLik.lppm} \alias{nobs.lppm} \alias{print.lppm} \alias{terms.lppm} \alias{update.lppm} \alias{vcov.lppm} \alias{as.linnet.lppm} \title{ Methods for Fitted Point Process Models on a Linear Network } \description{ These are methods for the class \code{"lppm"} of fitted point process models on a linear network. } \usage{ \method{coef}{lppm}(object, ...) \method{extractAIC}{lppm}(fit, ...) \method{formula}{lppm}(x, ...) 
\method{logLik}{lppm}(object, ...) \method{nobs}{lppm}(object, ...) \method{print}{lppm}(x, ...) \method{terms}{lppm}(x, ...) \method{update}{lppm}(object, ...) \method{vcov}{lppm}(object, ...) \method{as.linnet}{lppm}(X, ...) } \arguments{ \item{object,fit,x,X}{ An object of class \code{"lppm"} representing a fitted point process model on a linear network. } \item{\dots}{ Arguments passed to other methods, usually the method for the class \code{"ppm"}. } } \details{ These are methods for the generic commands \code{\link{coef}}, \code{\link{extractAIC}}, \code{\link{formula}}, \code{\link{logLik}}, \code{\link{nobs}}, \code{\link{print}}, \code{\link{terms}}, \code{\link{update}} and \code{\link{vcov}} for the class \code{"lppm"}. } \value{ See the default methods. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{lppm}}, \code{\link{plot.lppm}}. } \examples{ example(lpp) fit <- lppm(X, ~x) print(fit) coef(fit) formula(fit) terms(fit) logLik(fit) nobs(fit) extractAIC(fit) update(fit, ~1) vcov(fit) } \keyword{spatial} \keyword{models} spatstat/man/ppp.object.Rd0000755000176000001440000000760112237642733015273 0ustar ripleyusers\name{ppp.object} \alias{ppp.object} %DoNotExport \title{Class of Point Patterns} \description{ A class \code{"ppp"} to represent a two-dimensional point pattern. Includes information about the window in which the pattern was observed. Optionally includes marks. } \details{ This class represents a two-dimensional point pattern dataset. It specifies \itemize{ \item the locations of the points \item the window in which the pattern was observed \item optionally, ``marks'' attached to each point (extra information such as a type label). 
} If \code{X} is an object of type \code{ppp}, it contains the following elements: \tabular{ll}{ \code{x} \tab vector of \eqn{x} coordinates of data points \cr \code{y} \tab vector of \eqn{y} coordinates of data points \cr \code{n} \tab number of points \cr \code{window} \tab window of observation \cr \tab (an object of class \code{\link{owin}}) \cr \code{marks} \tab optional vector or data frame of marks } Users are strongly advised not to manipulate these entries directly. Objects of class \code{"ppp"} may be created by the function \code{\link{ppp}} and converted from other types of data by the function \code{\link{as.ppp}}. Note that you must always specify the window of observation; there is intentionally no default action of ``guessing'' the window dimensions from the data points alone. Standard point pattern datasets provided with the package include \code{\link{amacrine}}, \code{\link{betacells}}, \code{\link{bramblecanes}}, \code{\link{cells}}, \code{\link{demopat}}, \code{\link{ganglia}}, \code{\link{lansing}}, \code{\link{longleaf}}, \code{\link{nztrees}}, \code{\link{redwood}}, \code{\link{simdat}} and \code{\link{swedishpines}}. Point patterns may be scanned from your own data files by \code{\link{scanpp}} or by using \code{\link{read.table}} and \code{\link{as.ppp}}. They may be manipulated by the functions \code{\link{[.ppp}} and \code{\link{superimpose}}. Point pattern objects can be plotted just by typing \code{plot(X)} which invokes the \code{plot} method for point pattern objects, \code{\link{plot.ppp}}. See \code{\link{plot.ppp}} for further information. There are also methods for \code{summary} and \code{print} for point patterns. Use \code{summary(X)} to see a useful description of the data. Patterns may be generated at random by \code{\link{runifpoint}}, \code{\link{rpoispp}}, \code{\link{rMaternI}}, \code{\link{rMaternII}}, \code{\link{rSSI}}, \code{\link{rNeymanScott}}, \code{\link{rMatClust}}, and \code{\link{rThomas}}. 
Most functions which are intended to operate on a window (of class \code{\link{owin}}) will, if presented with a \code{\link{ppp}} object instead, automatically extract the window information from the point pattern. } \seealso{ \code{\link{owin}}, \code{\link{ppp}}, \code{\link{as.ppp}}, \code{\link{[.ppp}} } \section{Warnings}{ The internal representation of marks is likely to change in the next release of this package. } \examples{ x <- runif(100) y <- runif(100) X <- ppp(x, y, c(0,1),c(0,1)) X \dontrun{plot(X)} mar <- sample(1:3, 100, replace=TRUE) mm <- ppp(x, y, c(0,1), c(0,1), marks=mar) \dontrun{plot(mm)} # points with mark equal to 2 ss <- mm[ mm$marks == 2 , ] \dontrun{plot(ss)} # left half of pattern 'mm' lu <- owin(c(0,0.5),c(0,1)) mmleft <- mm[ , lu] \dontrun{plot(mmleft)} \dontrun{ # input data from file qq <- scanpp("my.table", unit.square()) # interactively build a point pattern plot(unit.square()) X <- as.ppp(locator(10), unit.square()) plot(X) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{attribute} spatstat/man/smooth.fv.Rd0000755000176000001440000000611112237642734015146 0ustar ripleyusers\name{smooth.fv} \alias{smooth.fv} \alias{Smooth.fv} \title{ Apply Smoothing to Function Values } \description{ Applies smoothing to the values in selected columns of a function value table. } \usage{ smooth.fv(x, which = "*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) \method{Smooth}{fv}(X, which = "*", ..., method=c("smooth.spline", "loess"), xinterval=NULL) } \arguments{ \item{x,X}{ Values to be smoothed. A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be smoothed. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. 
} \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} or \code{\link[stats]{loess}} to control the smoothing. } \item{method}{ Smoothing algorithm. A character string, partially matched to either \code{"smooth.spline"} or \code{"loess"}. } \item{xinterval}{ Optional. Numeric vector of length 2 specifying a range of \eqn{x} values. Smoothing will be performed only on the part of the function corresponding to this range. } } \details{ The command \code{smooth.fv} or \code{Smooth.fv} applies smoothing to the function values in a function value table (object of class \code{"fv"}). \code{Smooth.fv} is a method for the generic function \code{\link{Smooth}}. The smoothing is performed either by \code{\link[stats]{smooth.spline}} or by \code{\link[stats]{loess}}. Smoothing is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. The original function values are then replaced by the corresponding smooth interpolated function values. The optional argument \code{which} specifies which of the columns of function values in \code{x} will be smoothed. The default (indicated by the wildcard \code{which="*"}) is to smooth all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{x}. If the argument \code{xinterval} is given, then smoothing will be performed only in the specified range of \eqn{x} values. } \value{ Another function value table (object of class \code{"fv"}) of the same format. 
} 
\author{Adrian Baddeley
\email{Adrian.Baddeley@uwa.edu.au}
\url{http://www.maths.uwa.edu.au/~adrian/}
and Rolf Turner
\email{r.turner@auckland.ac.nz}
}
\seealso{
  \code{\link{Smooth}},
  \code{\link{with.fv}},
  \code{\link{fv.object}},
  \code{\link[stats]{smooth.spline}},
  \code{\link[stats]{loess}}
}
\examples{
   data(cells)
   G <- Gest(cells)
   plot(G)
   plot(Smooth(G, df=9), add=TRUE)
}
\keyword{spatial}
\keyword{nonparametric}
spatstat/man/methods.linfun.Rd0000644000176000001440000000503212237642733016155 0ustar ripleyusers\name{methods.linfun}
\Rdversion{1.1}
\alias{methods.linfun} %DoNotExport
\alias{print.linfun}
\alias{plot.linfun}
\alias{as.linim.linfun}
\alias{as.owin.linfun}
\alias{as.linnet.linfun}
\alias{as.function.linfun}
\title{
   Methods for Functions on Linear Network
}
\description{
  Methods for the class \code{"linfun"} of functions on a linear network.
}
\usage{
  \method{print}{linfun}(x, \dots)
  \method{plot}{linfun}(x, \dots, L=NULL, eps = NULL, dimyx = NULL, xy = NULL,
                        main="")
  \method{as.linim}{linfun}(X, L, \dots, eps = NULL, dimyx = NULL, xy = NULL)
  \method{as.owin}{linfun}(W, \dots)
  \method{as.linnet}{linfun}(X, \dots)
  \method{as.function}{linfun}(x, \dots)
}
\arguments{
  \item{X,x,W}{
    A function on a linear network
    (object of class \code{"linfun"}).
  }
  \item{L}{A linear network}
  \item{eps,dimyx,xy}{
    Arguments passed to \code{\link[spatstat]{as.mask}}
    to control the pixel resolution.
  }
  \item{\dots}{
    Extra arguments passed to \code{\link[spatstat]{plot.im}}
    or \code{\link{print.default}}.
  }
  \item{main}{Main title for plot.}
}
\details{
  These are methods for the generic functions
  \code{\link{plot}}, \code{\link{print}}
  and the \pkg{spatstat} generic functions
  \code{\link{as.owin}}, \code{\link{as.linnet}}, \code{\link{as.linim}}.

  An object of class \code{"linfun"} represents a
  mathematical function that could be evaluated at any location
  on a linear network. It is essentially an \R \code{function} with some
  extra attributes.
The method \code{as.linnet.linfun} extracts the linear network on which the function is defined. The method \code{as.owin.linfun} extracts the two-dimensional spatial window containing the linear network. The method \code{as.linim.linfun} converts the function to a pixel image on the linear network (an object of class \code{"linim"}). } \value{ For \code{print.linfun} the result is \code{NULL}. For \code{plot.linfun} the result is the same as for \code{\link[spatstat]{plot.im}}. For the conversion methods, the result is an object of the required type: \code{as.linim.linfun} returns an object of class \code{"linim"}, and so on. } \examples{ data(letterR) X <- runiflpp(3, simplenet) f <- nnfun(X) f plot(f) as.function(f) as.owin(f) as.linnet(f) as.linim(f) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/lurking.Rd0000755000176000001440000002376012237642733014706 0ustar ripleyusers\name{lurking} \alias{lurking} \title{Lurking variable plot} \description{ Plot spatial point process residuals against a covariate } \usage{ lurking(object, covariate, type="eem", cumulative=TRUE, clipwindow=default.clipwindow(object), rv, plot.sd, plot.it=TRUE, typename, covname, oldstyle=FALSE, check=TRUE, \dots, splineargs=list(spar=0.5)) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which diagnostics should be produced. This object is usually obtained from \code{\link{ppm}}. Alternatively, \code{object} may be a point pattern (object of class \code{"ppp"}). } \item{covariate}{ The covariate against which residuals should be plotted. Either a numeric vector, a pixel image, or an \code{expression}. See \emph{Details} below. } \item{type}{ String indicating the type of residuals or weights to be computed. Choices include \code{"eem"}, \code{"raw"}, \code{"inverse"} and \code{"pearson"}. 
See \code{\link{diagnose.ppm}} for all possible choices. } \item{cumulative}{ Logical flag indicating whether to plot a cumulative sum of marks (\code{cumulative=TRUE}) or the derivative of this sum, a marginal density of the smoothed residual field (\code{cumulative=FALSE}). } \item{clipwindow}{ If not \code{NULL} this argument indicates that residuals shall only be computed inside a subregion of the window containing the original point pattern data. Then \code{clipwindow} should be a window object of class \code{"owin"}. } \item{rv}{ Usually absent. If this argument is present, the point process residuals will not be calculated from the fitted model \code{object}, but will instead be taken directly from \code{rv}. } \item{plot.sd}{ Logical value indicating whether error bounds should be added to plot. The default is \code{TRUE} for Poisson models and \code{FALSE} for non-Poisson models. See Details. } \item{plot.it}{ Logical value indicating whether plots should be shown. If \code{plot.it=FALSE}, only the computed coordinates for the plots are returned. See \emph{Value}. } \item{typename}{ Usually absent. If this argument is present, it should be a string, and will be used (in the axis labels of plots) to describe the type of residuals. } \item{covname}{ A string name for the covariate, to be used in axis labels of plots. } \item{oldstyle}{ Logical flag indicating whether error bounds should be plotted using the approximation given in the original paper (\code{oldstyle=TRUE}), or using the correct asymptotic formula (\code{oldstyle=FALSE}). } \item{check}{ Logical flag indicating whether the integrity of the data structure in \code{object} should be checked. } \item{\dots}{ Arguments passed to \code{\link{plot.default}} and \code{\link{lines}} to control the plot behaviour. } \item{splineargs}{ A list of arguments passed to \code{smooth.spline} for the estimation of the derivatives in the case \code{cumulative=FALSE}. 
} 
}
\value{
  A list containing two dataframes \code{empirical} and \code{theoretical}.
  The first dataframe \code{empirical} contains columns
  \code{covariate} and \code{value} giving the coordinates of the
  lurking variable plot. The second dataframe \code{theoretical}
  contains columns \code{covariate}, \code{mean} and \code{sd}
  giving the coordinates of the plot of the theoretical mean
  and standard deviation.
}
\details{
  This function generates a `lurking variable' plot for a
  fitted point process model. 
  Residuals from the model represented by \code{object}
  are plotted against the covariate specified by \code{covariate}.
  This plot can be used to reveal departures from the fitted model,
  in particular, to reveal that the point pattern depends on the covariate.

  First the residuals from the fitted model (Baddeley et al, 2005)
  are computed at each quadrature point,
  or alternatively the `exponential energy marks' (Stoyan and Grabarnik,
  1991) are computed at each data point.
  The argument \code{type} selects the type of
  residual or weight. See \code{\link{diagnose.ppm}} for options
  and explanation.

  A lurking variable plot for point processes (Baddeley et al, 2005)
  displays either the cumulative sum of residuals/weights
  (if \code{cumulative = TRUE}) or a kernel-weighted average of the
  residuals/weights (if \code{cumulative = FALSE}) plotted against
  the covariate. The empirical plot (solid lines)
  is shown together with its expected value assuming the model is true
  (dashed lines) and optionally also the pointwise
  two-standard-deviation limits (dotted lines).

  To be more precise, let \eqn{Z(u)} denote the value of the covariate
  at a spatial location \eqn{u}.
\itemize{ \item If \code{cumulative=TRUE} then we plot \eqn{H(z)} against \eqn{z}, where \eqn{H(z)} is the sum of the residuals over all quadrature points where the covariate takes a value less than or equal to \eqn{z}, or the sum of the exponential energy weights over all data points where the covariate takes a value less than or equal to \eqn{z}. \item If \code{cumulative=FALSE} then we plot \eqn{h(z)} against \eqn{z}, where \eqn{h(z)} is the derivative of \eqn{H(z)}, computed approximately by spline smoothing. } For the point process residuals \eqn{E(H(z)) = 0}, while for the exponential energy weights \eqn{E(H(z)) = } area of the subset of the window satisfying \eqn{Z(u) <= z}{Z(u) \le z}. If the empirical and theoretical curves deviate substantially from one another, the interpretation is that the fitted model does not correctly account for dependence on the covariate. The correct form (of the spatial trend part of the model) may be suggested by the shape of the plot. If \code{plot.sd = TRUE}, then superimposed on the lurking variable plot are the pointwise two-standard-deviation error limits for \eqn{H(x)} calculated for the inhomogeneous Poisson process. The default is \code{plot.sd = TRUE} for Poisson models and \code{plot.sd = FALSE} for non-Poisson models. By default, the two-standard-deviation limits are calculated from the exact formula for the asymptotic variance of the residuals under the asymptotic normal approximation, equation (37) of Baddeley et al (2006). However, for compatibility with the original paper of Baddeley et al (2005), if \code{oldstyle=TRUE}, the two-standard-deviation limits are calculated using the innovation variance, an over-estimate of the true variance of the residuals. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}) typically produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). 
The argument \code{covariate} is either a numeric vector, a pixel image, or an R language expression. If it is a numeric vector, it is assumed to contain the values of the covariate for each of the quadrature points in the fitted model. The quadrature points can be extracted by \code{\link{quad.ppm}(object)}. If \code{covariate} is a pixel image, it is assumed to contain the values of the covariate at each location in the window. The values of this image at the quadrature points will be extracted. Alternatively, if \code{covariate} is an \code{expression}, it will be evaluated in the same environment as the model formula used in fitting the model \code{object}. It must yield a vector of the same length as the number of quadrature points. The expression may contain the terms \code{x} and \code{y} representing the cartesian coordinates, and may also contain other variables that were available when the model was fitted. Certain variable names are reserved words; see \code{\link{ppm}}. Note that lurking variable plots for the \eqn{x} and \eqn{y} coordinates are also generated by \code{\link{diagnose.ppm}}, amongst other types of diagnostic plots. This function is more general in that it enables the user to plot the residuals against any chosen covariate that may have been present. For advanced use, even the values of the residuals/weights can be altered. If the argument \code{rv} is present, the residuals will not be calculated from the fitted model \code{object} but will instead be taken directly from the object \code{rv}. If \code{type = "eem"} then \code{rv} should be similar to the return value of \code{\link{eem}}, namely, a numeric vector with length equal to the number of data points in the original point pattern. Otherwise, \code{rv} should be similar to the return value of \code{\link{residuals.ppm}}, that is, \code{rv} should be an object of class \code{"msr"} (see \code{\link{msr}}) representing a signed measure. 
} 
\seealso{
  \code{\link{residuals.ppm}},
  \code{\link{diagnose.ppm}},
  \code{\link{qqplot.ppm}},
  \code{\link{eem}},
  \code{\link{ppm}}
}
\references{
  Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005)
  Residual analysis for spatial point processes.
  \emph{Journal of the Royal Statistical Society, Series B}
  \bold{67}, 617--666.

  Baddeley, A., Moller, J. and Pakes, A.G. (2006) 
  Properties of residuals for spatial point processes.
  To appear.

  Stoyan, D. and Grabarnik, P. (1991)
  Second-order characteristics for stochastic structures connected with
  Gibbs point processes.
  \emph{Mathematische Nachrichten}, 151:95--100.
}
\examples{
  data(nztrees)
  lurking(nztrees, expression(x))
  fit <- ppm(nztrees, ~x, Poisson())
  lurking(fit, expression(x))
  lurking(fit, expression(x), cumulative=FALSE)
}
\author{Adrian Baddeley
  \email{Adrian.Baddeley@uwa.edu.au}
  \url{http://www.maths.uwa.edu.au/~adrian/}
  and Rolf Turner
  \email{r.turner@auckland.ac.nz}
}
\keyword{spatial}
\keyword{models}
\keyword{hplot}
spatstat/man/compareFit.Rd0000755000176000001440000001022612237642732015314 0ustar ripleyusers\name{compareFit}
\alias{compareFit}
\title{
  Residual Diagnostics for Multiple Fitted Models
}
\description{
  Compares several fitted point process models using the same residual
  diagnostic. 
}
\usage{
compareFit(object, Fun, r = NULL, breaks = NULL, ...,
         trend = ~1, interaction = Poisson(), rbord = NULL,
         modelnames = NULL, same = NULL, different = NULL)
}
\arguments{
  \item{object}{
    Object or objects to be analysed.
    Either a fitted point process model (object of class \code{"ppm"}),
    a point pattern (object of class \code{"ppp"}),
    or a list of these objects.
  }
  \item{Fun}{
    Diagnostic function to be computed for each model.
    One of the functions \code{Kcom}, \code{Kres}, \code{Gcom},
    \code{Gres}, \code{psst}, \code{psstA} or \code{psstG}
    or a string containing one of these names.
  }
  \item{r}{
    Optional. 
Vector of values of the argument \eqn{r} at which the diagnostic should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Extra arguments passed to \code{Fun}. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern or list of point patterns. See \code{\link{ppm}} for details. Each of these arguments can be a list, specifying different \code{trend}, \code{interaction} and/or \code{rbord} values to be used to generate different fitted models. } \item{modelnames}{ Character vector. Short descriptive names for the different models. } \item{same,different}{ Character strings or character vectors passed to \code{\link{collapse.fv}} to determine the format of the output. } } \details{ This is a convenient way to collect diagnostic information for several different point process models fitted to the same point pattern dataset, or for point process models of the same form fitted to several different datasets, etc. The first argument, \code{object}, is usually a list of fitted point process models (objects of class \code{"ppm"}), obtained from the model-fitting function \code{\link{ppm}}. For convenience, \code{object} can also be a list of point patterns (objects of class \code{"ppp"}). In that case, point process models will be fitted to each of the point pattern datasets, by calling \code{\link{ppm}} using the arguments \code{trend} (for the first order trend), \code{interaction} (for the interpoint interaction) and \code{rbord} (for the erosion distance in the border correction for the pseudolikelihood). See \code{\link{ppm}} for details of these arguments. Alternatively \code{object} can be a single point pattern (object of class \code{"ppp"}) and one or more of the arguments \code{trend}, \code{interaction} or \code{rbord} can be a list. 
In this case, point process models will be fitted to the same point pattern dataset, using each of the model specifications listed. The diagnostic function \code{Fun} will be applied to each of the point process models. The results will be collected into a single function value table. The \code{modelnames} are used to label the results from each fitted model. } \value{ Function value table (object of class \code{"fv"}). } \author{ Ege Rubak, Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Jesper Moller. } \seealso{ \code{\link{ppm}}, \code{\link{Kcom}}, \code{\link{Kres}}, \code{\link{Gcom}}, \code{\link{Gres}}, \code{\link{psst}}, \code{\link{psstA}}, \code{\link{psstG}}, \code{\link{collapse.fv}} } \examples{ nd <- 40 \testonly{ nd <- 10 } ilist <- list(Poisson(), Geyer(7, 2), Strauss(7)) iname <- c("Poisson", "Geyer", "Strauss") \testonly{ ilist <- ilist[c(1,3)] iname <- iname[c(1,3)] } K <- compareFit(swedishpines, Kcom, interaction=ilist, rbord=9, correction="translate", same="trans", different="tcom", modelnames=iname, nd=nd) K } \keyword{spatial} \keyword{models} spatstat/man/as.box3.Rd0000755000176000001440000000242512237642732014502 0ustar ripleyusers\name{as.box3} \Rdversion{1.1} \alias{as.box3} \title{ Convert Data to Three-Dimensional Box } \description{ Interprets data as the dimensions of a three-dimensional box. } \usage{ as.box3(...) } \arguments{ \item{\dots}{ Data that can be interpreted as giving the dimensions of a three-dimensional box. See Details. } } \details{ This function converts data in various formats to an object of class \code{"box3"} representing a three-dimensional box (see \code{\link{box3}}). 
The arguments \code{\dots} may be \itemize{ \item an object of class \code{"box3"} \item arguments acceptable to \code{box3} \item a numeric vector of length 6, interpreted as \code{c(xrange[1],xrange[2],yrange[1],yrange[2],zrange[1],zrange[2])} \item an object of class \code{"pp3"} representing a three-dimensional point pattern contained in a box. } } \value{ Object of class \code{"box3"}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{box3}}, \code{\link{pp3}} } \examples{ X <- c(0,10,0,10,0,5) as.box3(X) X <- pp3(runif(42),runif(42),runif(42), box3(c(0,1))) as.box3(X) } \keyword{spatial} \keyword{manip} spatstat/man/opening.Rd0000755000176000001440000000510612243310060014642 0ustar ripleyusers\name{opening} %DontDeclareMethods \alias{opening} \alias{opening.owin} \alias{opening.psp} \alias{opening.ppp} \title{Morphological Opening} \description{ Perform morphological opening of a window, a line segment pattern or a point pattern. } \usage{ opening(w, r, \dots) \method{opening}{owin}(w, r, \dots, polygonal=NULL) \method{opening}{ppp}(w, r, \dots) \method{opening}{psp}(w, r, \dots) } \arguments{ \item{w}{ A window (object of class \code{"owin"} or a line segment pattern (object of class \code{"psp"}) or a point pattern (object of class \code{"ppp"}). } \item{r}{positive number: the radius of the opening.} \item{\dots}{ extra arguments passed to \code{\link{as.mask}} controlling the pixel resolution, if a pixel approximation is used } \item{polygonal}{ Logical flag indicating whether to compute a polygonal approximation to the erosion (\code{polygonal=TRUE}) or a pixel grid approximation (\code{polygonal=FALSE}). } } \value{ If \code{r > 0}, an object of class \code{"owin"} representing the opened region. If \code{r=0}, the result is identical to \code{w}. 
} \details{ The morphological opening (Serra, 1982) of a set \eqn{W} by a distance \eqn{r > 0} is the subset of points in \eqn{W} that can be separated from the boundary of \eqn{W} by a circle of radius \eqn{r}. That is, a point \eqn{x} belongs to the opening if it is possible to draw a circle of radius \eqn{r} (not necessarily centred on \eqn{x}) that has \eqn{x} on the inside and the boundary of \eqn{W} on the outside. The opened set is a subset of \code{W}. For a small radius \eqn{r}, the opening operation has the effect of smoothing out irregularities in the boundary of \eqn{W}. For larger radii, the opening operation removes promontories in the boundary. For very large radii, the opened set is empty. The algorithm applies \code{\link{erosion}} followed by \code{\link{dilation}}. } \seealso{ \code{\link{closing}} for the opposite operation. \code{\link{dilation}}, \code{\link{erosion}} for the basic operations. \code{\link{owin}}, \code{\link{as.owin}} for information about windows. } \examples{ v <- opening(letterR, 0.3) plot(letterR, type="n", main="opening") plot(v, add=TRUE, col="grey") plot(letterR, add=TRUE) } \references{ Serra, J. (1982) Image analysis and mathematical morphology. Academic Press. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/psst.Rd0000755000176000001440000001164312237642733014221 0ustar ripleyusers\name{psst} \alias{psst} \title{ Pseudoscore Diagnostic For Fitted Model against General Alternative } \description{ Given a point process model fitted to a point pattern dataset, and any choice of functional summary statistic, this function computes the pseudoscore test statistic of goodness-of-fit for the model. 
} \usage{ psst(object, fun, r = NULL, breaks = NULL, ..., trend = ~1, interaction = Poisson(), rbord = reach(interaction), truecoef=NULL, hi.res=NULL, funargs = list(correction="best"), verbose=TRUE) } \arguments{ \item{object}{ Object to be analysed. Either a fitted point process model (object of class \code{"ppm"}) or a point pattern (object of class \code{"ppp"}) or quadrature scheme (object of class \code{"quad"}). } \item{fun}{ Summary function to be applied to each point pattern. } \item{r}{ Optional. Vector of values of the argument \eqn{r} at which the function \eqn{S(r)} should be computed. This argument is usually not specified. There is a sensible default. } \item{breaks}{ Optional alternative to \code{r} for advanced use. } \item{\dots}{ Ignored. } \item{trend,interaction,rbord}{ Optional. Arguments passed to \code{\link{ppm}} to fit a point process model to the data, if \code{object} is a point pattern. See \code{\link{ppm}} for details. } \item{truecoef}{ Optional. Numeric vector. If present, this will be treated as if it were the true coefficient vector of the point process model, in calculating the diagnostic. Incompatible with \code{hi.res}. } \item{hi.res}{ Optional. List of parameters passed to \code{\link{quadscheme}}. If this argument is present, the model will be re-fitted at high resolution as specified by these parameters. The coefficients of the resulting fitted model will be taken as the true coefficients. Then the diagnostic will be computed for the default quadrature scheme, but using the high resolution coefficients. } \item{funargs}{ List of additional arguments to be passed to \code{fun}. } \item{verbose}{ Logical value determining whether to print progress reports during the computation. } } \details{ Let \eqn{x} be a point pattern dataset consisting of points \eqn{x_1,\ldots,x_n}{x[1],...,x[n]} in a window \eqn{W}. 
Consider a point process model fitted to \eqn{x}, with conditional intensity \eqn{\lambda(u,x)}{lambda(u,x)} at location \eqn{u}. For the purpose of testing goodness-of-fit, we regard the fitted model as the null hypothesis. Given a functional summary statistic \eqn{S}, consider a family of alternative models obtained by exponential tilting of the null model by \eqn{S}. The pseudoscore for the null model is \deqn{ V(r) = \sum_i \Delta S(x_i, x, r) - \int_W \Delta S(u,x, r) \lambda(u,x) {\rm d} u }{ V(r) = sum( Delta S(x[i], x, r)) - integral( Delta S(u,x, r) lambda(u,x) du) } where the \eqn{\Delta}{Delta} operator is \deqn{ \Delta S(u,x, r) = S(x\cup\{u\}, r) - S(x\setminus u, r) }{ Delta S(u,x, r) = S(x union u, r) - S(x setminus u, r) } the difference between the values of \eqn{S} for the point pattern with and without the point \eqn{u}. According to the Georgii-Nguyen-Zessin formula, \eqn{V(r)} should have mean zero if the model is correct (ignoring the fact that the parameters of the model have been estimated). Hence \eqn{V(r)} can be used as a diagnostic for goodness-of-fit. This algorithm computes \eqn{V(r)} by direct evaluation of the sum and integral. It is computationally intensive, but it is available for any summary statistic \eqn{S(r)}. The diagnostic \eqn{V(r)} is also called the \bold{pseudoresidual} of \eqn{S}. On the right hand side of the equation for \eqn{V(r)} given above, the sum over points of \eqn{x} is called the \bold{pseudosum} and the integral is called the \bold{pseudocompensator}. } \value{ A function value table (object of class \code{"fv"}), essentially a data frame of function values. Columns in this data frame include \code{dat} for the pseudosum, \code{com} for the compensator and \code{res} for the pseudoresidual. There is a plot method for this class. See \code{\link{fv.object}}. } \references{ Baddeley, A., Rubak, E. and Moller, J. (2011) Score, pseudo-score and residual diagnostics for spatial point process models. 
\emph{Statistical Science} \bold{26}, 613--646. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} Ege Rubak and Jesper Moller. } \seealso{ Special cases: \code{\link{psstA}}, \code{\link{psstG}}. Alternative functions: \code{\link{Kres}}, \code{\link{Gres}}. } \examples{ data(cells) fit0 <- ppm(cells, ~1) # uniform Poisson \testonly{fit0 <- ppm(cells, ~1, nd=8)} G0 <- psst(fit0, Gest) G0 if(interactive()) plot(G0) } \keyword{spatial} \keyword{models} spatstat/man/nnmap.Rd0000644000176000001440000001144712237642733014340 0ustar ripleyusers\name{nnmap} \alias{nnmap} \title{ K-th Nearest Point Map } \description{ Given a point pattern, this function constructs pixel images giving the distance from each pixel to its \eqn{k}-th nearest neighbour in the point pattern, and the index of the \eqn{k}-th nearest neighbour. } \usage{ nnmap(X, k = 1, what = c("dist", "which"), \dots, W = as.owin(X), is.sorted.X = FALSE, sortby = c("range", "var", "x", "y")) } \arguments{ \item{X}{ Point pattern (object of class \code{"ppp"}). } \item{k}{ Integer, or integer vector. The algorithm will find the \code{k}th nearest neighbour. } \item{what}{ Character string specifying what information should be returned. Either the nearest neighbour distance (\code{"dist"}), the index of the nearest neighbour (\code{"which"}), or both. } \item{\dots}{ Arguments passed to \code{\link{as.mask}} to determine the pixel resolution of the result. } \item{W}{ Window (object of class \code{"owin"}) specifying the spatial domain in which the distances will be computed. Defaults to the window of \code{X}. } \item{is.sorted.X}{ Logical value attesting whether the point pattern \code{X} has been sorted. See Details. } \item{sortby}{ Determines which coordinate to use to sort the point pattern. See Details. 
} } \details{ Given a point pattern \code{X}, this function constructs two pixel images: \itemize{ \item a distance map giving, for each pixel, the distance to the nearest point of \code{X}; \item a nearest neighbour map giving, for each pixel, the identifier of the nearest point of \code{X}. } If the argument \code{k} is specified, then the \code{k}-th nearest neighbours will be found. If \code{what="dist"} then only the distance map is returned. If \code{what="which"} then only the nearest neighbour map is returned. The argument \code{k} may be an integer or an integer vector. If it is a single integer, then the \code{k}-th nearest neighbours are computed. If it is a vector, then the \code{k[i]}-th nearest neighbours are computed for each entry \code{k[i]}. For example, setting \code{k=1:3} will compute the nearest, second-nearest and third-nearest neighbours. } \section{Sorting data and pre-sorted data}{ Read this section if you care about the speed of computation. For efficiency, the algorithm sorts the point pattern \code{X} into increasing order of the \eqn{x} coordinate or increasing order of the the \eqn{y} coordinate. Sorting is only an intermediate step; it does not affect the output, which is always given in the same order as the original data. By default (if \code{sortby="range"}), the sorting will occur on the coordinate that has the larger range of values (according to the frame of the enclosing window of \code{X}). If \code{sortby = "var"}), sorting will occur on the coordinate that has the greater variance (in the pattern \code{X}). Setting \code{sortby="x"} or \code{sortby = "y"} will specify that sorting should occur on the \eqn{x} or \eqn{y} coordinate, respectively. If the point pattern \code{X} is already sorted, then the argument \code{is.sorted.X} should be set to \code{TRUE}, and \code{sortby} should be set equal to \code{"x"} or \code{"y"} to indicate which coordinate is sorted. 
} \section{Warning About Ties}{ Ties are possible: there may be two data points which lie exactly the same distance away from a particular pixel. This affects the results from \code{nnmap(what="which")}. The handling of ties is not well-defined: it is not consistent between different computers and different installations of \R. If there are ties, then different calls to \code{nnmap(what="which")} may give inconsistent results. For example, you may get a different answer from \code{nnmap(what="which",k=1)} and \code{nnmap(what="which", k=1:2)[[1]]}. } \value{ A pixel image, or a list of pixel images. By default (if \code{what=c("dist", "which")}), the result is a list with two components \code{dist} and \code{which} containing the distance map and the nearest neighbour map. If \code{what="dist"} then the result is a real-valued pixel image containing the distance map. If \code{what="which"} then the result is an integer-valued pixel image containing the nearest neighbour map. If \code{k} is a vector of several integers, then the result is similar except that each pixel image is replaced by a list of pixel images, one for each entry of \code{k}. } \seealso{ \code{\link{distmap}} } \examples{ plot(nnmap(cells, 2, what="which")) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Rolf Turner \email{r.turner@auckland.ac.nz}, and Jens Oehlschlaegel } \keyword{spatial} \keyword{math} spatstat/man/nearestsegment.Rd0000755000176000001440000000325012237642733016247 0ustar ripleyusers\name{nearestsegment} \alias{nearestsegment} \title{Find Line Segment Nearest to Each Point} \description{ Given a point pattern and a line segment pattern, this function finds the nearest line segment for each point. 
} \usage{ nearestsegment(X, Y) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{Y}{A line segment pattern (object of class \code{"psp"}).} } \details{ The distance between a point \code{x} and a straight line segment \code{y} is defined to be the shortest Euclidean distance between \code{x} and any location on \code{y}. This algorithm first calculates the distance from each point of \code{X} to each segment of \code{Y}. Then it determines, for each point \code{x} in \code{X}, which segment of \code{Y} is closest. The index of this segment is returned. } \value{ Integer vector \code{v} (of length equal to the number of points in \code{X}) identifying the nearest segment to each point. If \code{v[i] = j}, then \code{Y[j]} is the line segment lying closest to \code{X[i]}. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{project2segment}} to project each point of \code{X} to a point lying on one of the line segments. Use \code{\link{distmap.psp}} to identify the nearest line segment for each pixel in a grid. } \examples{ X <- runifpoint(3) Y <- as.psp(matrix(runif(20), 5, 4), window=owin()) v <- nearestsegment(X,Y) plot(Y) plot(X, add=TRUE) plot(X[1], add=TRUE, col="red") plot(Y[v[1]], add=TRUE, lwd=2, col="red") } \keyword{spatial} \keyword{math} spatstat/man/linearmarkequal.Rd0000644000176000001440000000434612237642732016403 0ustar ripleyusers\name{linearmarkequal} \alias{linearmarkequal} \title{ Mark Connection Function for Multitype Point Pattern on Linear Network } \description{ For a multitype point pattern on a linear network, estimate the mark connection function from points of type \eqn{i} to points of type \eqn{j}. } \usage{ linearmarkequal(X, r=NULL, \dots) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the mark connection function \eqn{p_{ij}(r)}{p[ij](r)} will be computed. 
An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{\dots}{ Arguments passed to \code{\link{linearpcfcross}} and \code{\link{linearpcf}}. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is the mark equality function for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{p_{ij}(r)}{p[ij](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. (to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \seealso{ \code{\link{linearpcfcross}}, \code{\link{linearpcf}}, \code{\link{linearmarkconnect}}, \code{\link[spatstat]{markconnect}}. 
} \examples{ if(interactive()) { X <- chicago } else { X <- runiflpp(20, simplenet) \%mark\% sample(c("A","B"), 20, replace=TRUE) } p <- linearmarkequal(X) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/is.multitype.Rd0000755000176000001440000000335412237642732015675 0ustar ripleyusers\name{is.multitype} \alias{is.multitype} \title{Test whether Object is Multitype} \description{ Generic function to test whether a given object (usually a point pattern or something related to a point pattern) has ``marks'' attached to the points which classify the points into several types. } \usage{ is.multitype(X, \dots) } \arguments{ \item{X}{ Object to be inspected } \item{\dots}{ Other arguments. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is multitype. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. Other objects related to point patterns, such as point process models, may involve marked points. This function tests whether the object \code{X} contains or involves marked points, \bold{and} that the marks are a factor. For example, the \code{\link{amacrine}} dataset is multitype (there are two types of cells, on and off), but the \code{\link{longleaf}} dataset is \emph{not} multitype (the marks are real numbers). This function is generic; methods are provided for point patterns (objects of class \code{"ppp"}) and point process models (objects of class \code{"ppm"}). 
} \seealso{ \code{\link{is.multitype.ppp}}, \code{\link{is.multitype.ppm}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/linfun.Rd0000644000176000001440000000334212237642732014514 0ustar ripleyusers\name{linfun} \Rdversion{1.1} \alias{linfun} \title{ Function on a Linear Network } \description{ Create a function on a linear network. } \usage{ linfun(f, L) } \arguments{ \item{f}{ A \code{function} in the \R language. } \item{L}{ A linear network (object of class \code{"linnet"}) on which \code{f} is defined. } } \details{ This creates an object of class \code{"linfun"}. This is a simple mechanism for handling a function defined on a linear network, to make it easier to display and manipulate. \code{f} should be a \code{function} in the \R language. It should be able to be called either in the form \code{f(x,y)} or \code{f(x,y,seg,tp)} where \code{x,y} are Cartesian coordinates of locations on the linear network, and \code{seg, tp} are the local coordinates. The function \code{f} should be vectorised: that is, if \code{x} and \code{y} are numeric vectors of the same length \code{n}, then \code{v <- f(x,y)} should be a vector of length \code{n}. \code{L} should be a linear network (object of class \code{"linnet"}) inside which the function \code{f} is well-defined. } \value{ It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{as.lpp}} for a way to handle the coordinates \code{x,y,seg,tp}. \code{\link{methods.linfun}} for methods applicable to \code{"linfun"} objects. \code{\link{distfun.lpp}}, \code{\link{nnfun.lpp}}. 
} \examples{ f <- linfun(function(x,y,seg,tp) { x+y }, simplenet) plot(f) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/as.psp.Rd0000755000176000001440000001553312237642732014435 0ustar ripleyusers\name{as.psp} \alias{as.psp} \alias{as.psp.psp} \alias{as.psp.data.frame} \alias{as.psp.matrix} \alias{as.psp.owin} \alias{as.psp.default} \title{Convert Data To Class psp} \description{ Tries to coerce any reasonable kind of data object to a line segment pattern (an object of class \code{"psp"}) for use by the \pkg{spatstat} package. } \usage{ as.psp(x, \dots, from=NULL, to=NULL) \method{as.psp}{psp}(x, \dots, check=FALSE, fatal=TRUE) \method{as.psp}{data.frame}(x, \dots, window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) \method{as.psp}{matrix}(x, \dots, window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) \method{as.psp}{owin}(x, \dots, check=spatstat.options("checksegments"), fatal=TRUE) \method{as.psp}{default}(x, \dots, window=NULL, marks=NULL, check=spatstat.options("checksegments"), fatal=TRUE) } \arguments{ \item{x}{Data which will be converted into a line segment pattern} \item{window}{Data which define a window for the pattern when \code{x} does not contain a window} \item{\dots}{Ignored.} \item{marks}{(Optional) vector or data frame of marks for the pattern} \item{check}{ Logical value indicating whether to check the validity of the data, e.g. to check that the line segments lie inside the window. } \item{fatal}{Logical value. See Details.} \item{from,to}{Point patterns (object of class \code{"ppp"}) containing the first and second endpoints (respectively) of each segment. Incompatible with \code{x}. } } \value{ An object of class \code{"psp"} (see \code{\link{psp.object}}) describing the line segment pattern and its window of observation. 
The value \code{NULL} may also be returned; see Details. } \details{ Converts the dataset \code{x} to a line segment pattern (an object of class \code{"psp"}; see \code{\link{psp.object}} for an overview). This function is normally used to convert an existing line segment pattern dataset, stored in another format, to the \code{"psp"} format. To create a new point pattern from raw data such as \eqn{x,y} coordinates, it is normally easier to use the creator function \code{\link{psp}}. The dataset \code{x} may be: \itemize{ \item an object of class \code{"psp"} \item a data frame with at least 4 columns \item a structure (list) with elements named \code{x0, y0, x1, y1} or elements named \code{xmid, ymid, length, angle} and possibly a fifth element named \code{marks} \item an object of class \code{"owin"} representing a spatial window; it must be of type \code{"rectangle"} or \code{"polygonal"}. The boundary edges of the window will be extracted as a line segment pattern. } If \code{x} is a data frame the interpretation of its columns is as follows: \itemize{ \item If there are columns named \code{x0, y0, x1, y1} then these will be interpreted as the coordinates of the endpoints of the segments and used to form the \code{ends} component of the \code{psp} object to be returned. \item If there are columns named \code{xmid, ymid, length, angle} then these will be interpreted as the coordinates of the segment midpoints, the lengths of the segments, and the orientations of the segments in radians and used to form the \code{ends} component of the \code{psp} object to be returned. \item If there is a column named \code{marks} then this will be interpreted as the marks of the pattern provided that the argument \code{marks} of this function is \code{NULL}. If argument \code{marks} is not \code{NULL} then the value of this argument is taken to be the marks of the pattern and the column named \code{marks} is ignored (with a warning). 
In either case the column named marks is deleted and omitted from further consideration. \item If there is no column named \code{marks} and if the \code{marks} argument of this function is \code{NULL}, and if after interpreting 4 columns of \code{x} as determining the \code{ends} component of the \code{psp} object to be returned, there remain other columns of \code{x}, then these remaining columns will be taken to form a data frame of marks for the \code{psp} object to be returned. } If \code{x} is a structure (list) with elements named \code{x0, y0, x1, y1, marks} or \code{xmid, ymid, length, angle, marks}, then the element named \code{marks} will be interpreted as the marks of the pattern provided that the argument \code{marks} of this function is \code{NULL}. If this argument is non-\code{NULL} then it is interpreted as the marks of the pattern and the element \code{marks} of \code{x} is ignored --- with a warning. Alternatively, you may specify two point patterns \code{from} and \code{to} containing the first and second endpoints of the line segments. The argument \code{window} is converted to a window object by the function \code{\link{as.owin}}. The argument \code{fatal} indicates what to do when the data cannot be converted to a line segment pattern. If \code{fatal=TRUE}, a fatal error will be generated; if \code{fatal=FALSE}, the value \code{NULL} is returned. The function \code{as.psp} is generic, with methods for the classes \code{"psp"}, \code{"data.frame"}, \code{"matrix"} and a default method. Point pattern datasets can also be created by the function \code{\link{psp}}. } \section{Warnings}{ If only a proper subset of the names \code{x0,y0,x1,y1} or \code{xmid,ymid,length,angle} appear amongst the names of the columns of \code{x} where \code{x} is a data frame, then these special names are ignored. 
For example if the names of the columns were \code{xmid,ymid,length,degrees}, then these columns would be interpreted as if they represented \code{x0,y0,x1,y1} in that order. Whether it gets used or not, a column named \code{marks} is \emph{always} removed from \code{x} before any attempt to form the \code{ends} component of the \code{psp} object that is returned. } \seealso{ \code{\link{psp}}, \code{\link{psp.object}}, \code{\link{as.owin}}, \code{\link{owin.object}} } \examples{ mat <- matrix(runif(40), ncol=4) mx <- data.frame(v1=sample(1:4,10,TRUE), v2=factor(sample(letters[1:4],10,TRUE),levels=letters[1:4])) a <- as.psp(mat, window=owin(),marks=mx) mat <- cbind(as.data.frame(mat),mx) b <- as.psp(mat, window=owin()) # a and b are identical. stuff <- list(xmid=runif(10), ymid=runif(10), length=rep(0.1, 10), angle=runif(10, 0, 2 * pi)) a <- as.psp(stuff, window=owin()) b <- as.psp(from=runifpoint(10), to=runifpoint(10)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/distfun.lpp.Rd0000644000176000001440000000465212237642732015474 0ustar ripleyusers\name{distfun.lpp} \Rdversion{1.1} \alias{distfun.lpp} \title{ Distance Map on Linear Network } \description{ Compute the distance function of a point pattern on a linear network. } \usage{ \method{distfun}{lpp}(X, ...) } \arguments{ \item{X}{ A point pattern on a linear network (object of class \code{"lpp"}). } \item{\dots}{ Extra arguments are ignored. } } \details{ On a linear network \eqn{L}, the \dQuote{geodesic distance function} of a set of points \eqn{A} in \eqn{L} is the mathematical function \eqn{f} such that, for any location \eqn{s} on \eqn{L}, the function value \code{f(s)} is the shortest-path distance from \eqn{s} to \eqn{A}. 
The command \code{distfun.lpp} is a method for the generic command \code{\link[spatstat]{distfun}} for the class \code{"lpp"} of point patterns on a linear network. If \code{X} is a point pattern on a linear network, \code{f <- distfun(X)} returns a \emph{function} in the \R language that represents the distance function of \code{X}. Evaluating the function \code{f} in the form \code{v <- f(x,y)}, where \code{x} and \code{y} are any numeric vectors of equal length containing coordinates of spatial locations, yields the values of the distance function at these locations. More efficiently \code{f} can take the arguments \code{x, y, seg, tp} where \code{seg} and \code{tp} are the local coordinates on the network. The function \code{f} obtained from \code{f <- distfun(X)} also belongs to the class \code{"linfun"}. It can be printed and plotted immediately as shown in the Examples. It can be converted to a pixel image using \code{\link{as.linim}}. } \value{ A \code{function} with arguments \code{x,y} and optional arguments \code{seg,tp}. It also belongs to the class \code{"linfun"} which has methods for \code{plot}, \code{print} etc. } \seealso{ \code{\link{linfun}}, \code{\link{methods.linfun}}. To identify \emph{which} point is the nearest neighbour, see \code{\link{nnfun.lpp}}. 
} \examples{ data(letterR) X <- runiflpp(3, simplenet) f <- distfun(X) f plot(f) # using a distfun as a covariate in a point process model: Y <- runiflpp(4, simplenet) fit <- lppm(Y, ~D, covariates=list(D=f)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/DiggleGratton.Rd0000755000176000001440000000517012237642731015756 0ustar ripleyusers\name{DiggleGratton} \alias{DiggleGratton} \title{Diggle-Gratton model} \description{ Creates an instance of the Diggle-Gratton pairwise interaction point process model, which can then be fitted to point pattern data. } \usage{ DiggleGratton(delta, rho) } \arguments{ \item{delta}{lower threshold \eqn{\delta}{delta}} \item{rho}{upper threshold \eqn{\rho}{rho}} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Diggle and Gratton (1984, pages 208-210) introduced the pairwise interaction point process with pair potential \eqn{h(t)} of the form \deqn{ h(t) = \left( \frac{t-\delta}{\rho-\delta} \right)^\kappa \quad\quad \mbox{ if } \delta \le t \le \rho }{ h(t) = ((t - delta)/(rho - delta))^kappa, { } delta <= t <= rho } with \eqn{h(t) = 0} for \eqn{t < \delta}{t < delta} and \eqn{h(t) = 1} for \eqn{t > \rho}{t > rho}. Here \eqn{\delta}{delta}, \eqn{\rho}{rho} and \eqn{\kappa}{kappa} are parameters. Note that we use the symbol \eqn{\kappa}{kappa} where Diggle and Gratton (1984) and Diggle, Gates and Stibbard (1987) use \eqn{\beta}{beta}, since in \pkg{spatstat} we reserve the symbol \eqn{\beta}{beta} for an intensity parameter. The parameters must all be nonnegative, and must satisfy \eqn{\delta \le \rho}{delta <= rho}. The potential is inhibitory, i.e.\ this model is only appropriate for regular point patterns. The strength of inhibition increases with \eqn{\kappa}{kappa}. 
For \eqn{\kappa=0}{kappa=0} the model is a hard core process with hard core radius \eqn{\delta}{delta}. For \eqn{\kappa=\infty}{kappa=infinity} the model is a hard core process with hard core radius \eqn{\rho}{rho}. The irregular parameters \eqn{\delta, \rho}{delta, rho} must be given in the call to \code{DiggleGratton}, while the regular parameter \eqn{\kappa}{kappa} will be estimated. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}}, \code{\link{Pairwise}} } \examples{ data(cells) ppm(cells, ~1, DiggleGratton(0.05, 0.1)) } \references{ Diggle, P.J., Gates, D.J. and Stibbard, A. (1987) A nonparametric estimator for pairwise-interaction point processes. \emph{Biometrika} \bold{74}, 763 -- 770. Diggle, P.J. and Gratton, R.J. (1984) Monte Carlo methods of inference for implicit statistical models. \emph{Journal of the Royal Statistical Society, series B} \bold{46}, 193 -- 212. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/angles.psp.Rd0000755000176000001440000000313212237642732015273 0ustar ripleyusers\name{angles.psp} \alias{angles.psp} \title{Orientation Angles of Line Segments} \description{ Computes the orientation angle of each line segment in a line segment pattern. } \usage{ angles.psp(x, directed=FALSE) } \arguments{ \item{x}{ A line segment pattern (object of class \code{"psp"}). } \item{directed}{ Logical flag. See details. } } \value{ Numeric vector. } \details{ For each line segment, the angle of inclination to the \eqn{x}-axis (in radians) is computed, and the angles are returned as a numeric vector. If \code{directed=TRUE}, the directed angle of orientation is computed. The angle respects the sense of direction from \code{(x0,y0)} to \code{(x1,y1)}. The values returned are angles in the full range from \eqn{-\pi}{-pi} to \eqn{\pi}{pi}. The angle is computed as \code{atan2(y1-y0,x1-x0)}. 
See \code{\link{atan2}}. If \code{directed=FALSE}, the undirected angle of orientation is computed. Angles differing by \eqn{pi} are regarded as equivalent. The values returned are angles in the range from \eqn{0} to \eqn{\pi}{pi}. These angles are computed by first computing the directed angle, then adding \eqn{\pi}{pi} to any negative angles. } \seealso{ \code{\link{summary.psp}}, \code{\link{midpoints.psp}}, \code{\link{lengths.psp}} } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- angles.psp(a) } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/tilenames.Rd0000644000176000001440000000167412237642734015212 0ustar ripleyusers\name{tilenames} \alias{tilenames} \alias{tilenames<-} \title{Names of Tiles in a Tessellation} \description{ Extract or Change the Names of the Tiles in a Tessellation. } \usage{ tilenames(x) tilenames(x) <- value } \arguments{ \item{x}{A tessellation (object of class \code{"tess"}).} \item{value}{Character vector giving new names for the tiles.} } \details{ These functions extract or change the names of the tiles that make up the tessellation \code{x}. If the tessellation is a regular grid, the tile names cannot be changed. } \value{ \code{tilenames} returns a character vector. } \seealso{ \code{\link{tess}}, \code{\link{tiles}} } \examples{ D <- dirichlet(runifpoint(10)) tilenames(D) tilenames(D) <- paste("Cell", 1:10) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/dummify.Rd0000644000176000001440000000355312237642732014677 0ustar ripleyusers\name{dummify} \alias{dummify} \title{ Convert Data to Numeric Values by Constructing Dummy Variables } \description{ Converts data of any kind to numeric values. 
A factor is expanded to a set of dummy variables. } \usage{ dummify(x) } \arguments{ \item{x}{ Vector, factor, matrix or data frame to be converted. } } \details{ This function converts data (such as a factor) to numeric values in order that the user may calculate, for example, the mean, variance, covariance and correlation of the data. If \code{x} is a numeric vector or integer vector, it is returned unchanged. If \code{x} is a logical vector, it is converted to a 0-1 matrix with 2 columns. The first column contains a 1 if the logical value is \code{FALSE}, and the second column contains a 1 if the logical value is \code{TRUE}. If \code{x} is a complex vector, it is converted to a matrix with 2 columns, containing the real and imaginary parts. If \code{x} is a factor, the result is a matrix of 0-1 dummy variables. The matrix has one column for each possible level of the factor. The \code{(i,j)} entry is equal to 1 when the \code{i}th factor value equals the \code{j}th level, and is equal to 0 otherwise. If \code{x} is a matrix or data frame, the appropriate conversion is applied to each column of \code{x}. Note that, unlike \code{\link[stats]{model.matrix}}, this command converts a factor into a full set of dummy variables (one column for each level of the factor). } \value{ A numeric matrix. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \examples{ chara <- sample(letters[1:3], 8, replace=TRUE) logi <- (runif(8) < 0.3) comp <- round(4*runif(8) + 3*runif(8) * 1i, 1) nume <- 8:1 + 0.1 df <- data.frame(nume, chara, logi, comp) df dummify(df) } \keyword{math} spatstat/man/data.ppm.Rd0000755000176000001440000000242512237642732014731 0ustar ripleyusers\name{data.ppm} \alias{data.ppm} \title{Extract Original Data from a Fitted Point Process Model} \description{ Given a fitted point process model, this function extracts the original point pattern dataset to which the model was fitted. 
} \usage{ data.ppm(object) } \arguments{ \item{object}{ fitted point process model (an object of class \code{"ppm"}). } } \value{ A point pattern (object of class \code{"ppp"}). } \details{ An object of class \code{"ppm"} represents a point process model that has been fitted to data. It is typically produced by the model-fitting algorithm \code{\link{ppm}}. The object contains complete information about the original data point pattern to which the model was fitted. This function extracts the original data pattern. See \code{\link{ppm.object}} for a list of all operations that can be performed on objects of class \code{"ppm"}. } \seealso{ \code{\link{ppm.object}}, \code{\link{ppp.object}} } \examples{ data(cells) fit <- ppm(cells, ~1, Strauss(r=0.1)) X <- data.ppm(fit) # 'X' is identical to 'cells' } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/crossing.psp.Rd0000755000176000001440000000306012237642732015651 0ustar ripleyusers\name{crossing.psp} \alias{crossing.psp} \title{Crossing Points of Two Line Segment Patterns} \description{ Finds any crossing points between two line segment patterns. } \usage{ crossing.psp(A,B,fatal=TRUE) } \arguments{ \item{A,B}{ Line segment patterns (objects of class \code{"psp"}). } \item{fatal}{ Logical value indicating what to do if the windows of \code{A} and \code{B} do not overlap. See Details. } } \value{ Point pattern (object of class \code{"ppp"}). } \details{ This function finds any crossing points between the line segment patterns \code{A} and \code{B}. A crossing point occurs whenever one of the line segments in \code{A} intersects one of the line segments in \code{B}, at a nonzero angle of intersection. 
If the windows \code{as.owin(A)} and \code{as.owin(B)} do not overlap, then an error will be reported if \code{fatal=TRUE}, while if \code{fatal=FALSE} an error will not occur and the result will be \code{NULL}. } \seealso{ \code{\link{selfcrossing.psp}}, \code{\link{psp.object}}, \code{\link{ppp.object}}. } \examples{ a <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) b <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(a, col="green", main="crossing.psp") plot(b, add=TRUE, col="blue") P <- crossing.psp(a,b) plot(P, add=TRUE, col="red") } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/rat.Rd0000644000176000001440000000355712237642733014020 0ustar ripleyusers\name{rat} \alias{rat} \title{ Ratio object } \description{ Stores the numerator, denominator, and value of a ratio as a single object. } \usage{ rat(ratio, numerator, denominator, check = TRUE) } \arguments{ \item{ratio,numerator,denominator}{ Three objects belonging to the same class. } \item{check}{ Logical. Whether to check that the objects are \code{\link{compatible}}. } } \details{ The class \code{"rat"} is a simple mechanism for keeping track of the numerator and denominator when calculating a ratio. Its main purpose is simply to signal that the object is a ratio. The function \code{rat} creates an object of class \code{"rat"} given the numerator, the denominator and the ratio. No calculation is performed; the three objects are simply stored together. The arguments \code{ratio}, \code{numerator}, \code{denominator} can be objects of any kind. They should belong to the same class. It is assumed that the relationship \deqn{ \mbox{ratio} = \frac{\mbox{numerator}}{\mbox{denominator}} }{ ratio = numerator/denominator } holds in some version of arithmetic. However, no calculation is performed. 
By default the algorithm checks whether the three arguments \code{ratio}, \code{numerator}, \code{denominator} are compatible objects, according to \code{\link{compatible}}. The result is equivalent to \code{ratio} except for the addition of extra information. } \value{ An object equivalent to the object \code{ratio} except that it also belongs to the class \code{"rat"} and has additional attributes \code{numerator} and \code{denominator}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{compatible}}, \code{\link{pool}} } \keyword{spatial} \keyword{manip} spatstat/man/ganglia.Rd0000755000176000001440000000514412237642732014630 0ustar ripleyusers\name{ganglia} \alias{ganglia} \docType{data} \title{Beta Ganglion Cells in Cat Retina, Old Version} \description{ Point pattern of retinal ganglion cells identified as `on' or `off'. A marked point pattern. } \format{ An object of class \code{"ppp"} representing the point pattern of cell locations. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of cell \cr \code{y} \tab Cartesian \eqn{y}-coordinate of cell \cr \code{marks} \tab factor with levels \code{off} and \code{on} \cr \tab indicating ``off'' and ``on'' cells } See \code{\link{ppp.object}} for details of the format. } \usage{data(ganglia)} \source{W\"assle et al (1981), data supplied by Marie-Colette van Lieshout and attributed to Peter Diggle} \section{Notes}{ \bold{Important: these data are INCORRECT. See below.} The data represent a pattern of beta-type ganglion cells in the retina of a cat recorded in Figure 6(a) of W\"assle et al. (1981). The pattern was first analysed by W\"assle et al (1981) using nearest neighbour distances. The data used in their analysis are not available. 
The present dataset \code{\link{ganglia}} was scanned from Figure 6(a) of W\"assle et al (1981) in the early 1990's, but we have no further information. This dataset is the one analysed by Van Lieshout and Baddeley (1999) using multitype J functions, and by Stoyan (1995) using second order methods (pair correlation and mark correlation). It has now been discovered that these data are \bold{incorrect}. They are not faithful to the scale in Figure 6 of W\"assle et al (1981), and they contain some scanning errors. Hence they should not be used to address the original scientific question. They have been retained only for comparison with other analyses in the statistical literature. A new, corrected dataset, scanned from the original microscope image, has been provided under the name \code{\link{betacells}}. Use that dataset for any further study. } \section{Warnings}{ These data are incorrect. Use the new corrected dataset \code{\link{betacells}}. } \references{ Stoyan, D. (1995) Personal communication. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. W\"assle, H., Boycott, B. B. & Illing, R.-B. (1981). Morphology and mosaic of on- and off-beta cells in the cat retina and some functional considerations. \emph{Proc. Roy. Soc. London Ser. B} \bold{212}, 177--195. } \keyword{datasets} \keyword{spatial} spatstat/man/formula.ppm.Rd0000755000176000001440000000340612237642732015465 0ustar ripleyusers\name{formula.ppm} \alias{formula.ppm} \alias{terms.ppm} \title{ Model Formulae for Gibbs Point Process Models } \description{ Extract the trend formula, or the terms in the trend formula, in a fitted Gibbs point process model. } \usage{ \method{formula}{ppm}(x, \dots) \method{terms}{ppm}(x, \dots) } \arguments{ \item{x}{ An object of class \code{"ppm"}, representing a fitted point process model. } \item{\dots}{ Arguments passed to other methods. 
} } \details{ These functions are methods for the generic commands \code{\link{formula}} and \code{\link{terms}} for the class \code{"ppm"}. An object of class \code{"ppm"} represents a fitted Poisson or Gibbs point process model. It is obtained from the model-fitting function \code{\link{ppm}}. The method \code{formula.ppm} extracts the trend formula from the fitted model \code{x} (the formula originally specified as the argument \code{trend} to \code{\link{ppm}}). The method \code{terms.ppm} extracts the individual terms in the trend formula. } \value{ See the help files for the corresponding generic functions. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{ppm}}, \code{\link{as.owin}}, \code{\link{coef.ppm}}, \code{\link{extractAIC.ppm}}, \code{\link{fitted.ppm}}, \code{\link{logLik.ppm}}, \code{\link{model.frame.ppm}}, \code{\link{model.matrix.ppm}}, \code{\link{plot.ppm}}, \code{\link{predict.ppm}}, \code{\link{residuals.ppm}}, \code{\link{simulate.ppm}}, \code{\link{summary.ppm}}, \code{\link{update.ppm}}, \code{\link{vcov.ppm}}. } \examples{ data(cells) fit <- ppm(cells, ~x) formula(fit) terms(fit) } \keyword{spatial} \keyword{methods} spatstat/man/Extract.msr.Rd0000755000176000001440000000227712237642732015444 0ustar ripleyusers\name{Extract.msr} \alias{[.msr} \title{Extract Subset of Signed or Vector Measure} \description{ Extract a subset of a signed measure or vector-valued measure. } \usage{ \method{[}{msr}(x, i, j, \dots) } \arguments{ \item{x}{ A signed or vector measure. An object of class \code{"msr"} (see \code{\link{msr}}). } \item{i}{ Object defining the subregion or subset to be extracted. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or any type of index that applies to a matrix. } \item{j}{ Subset index selecting the vector coordinates to be extracted, if \code{x} is a vector-valued measure. 
} \item{\dots}{Ignored.} } \value{ An object of class \code{"msr"}. } \details{ This operator extracts a subset of the data which determines the signed measure or vector-valued measure \code{x}. The result is another measure. } \seealso{ \code{\link{msr}} } \examples{ example(msr) rp[square(0.5)] rs[ , 2:3] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/density.splitppp.Rd0000755000176000001440000000327712237642732016564 0ustar ripleyusers\name{density.splitppp} \alias{density.splitppp} \title{Kernel Smoothed Intensity of Split Point Pattern} \description{ Compute a kernel smoothed intensity function for each of the components of a split point pattern. } \usage{ \method{density}{splitppp}(x, \dots) } \arguments{ \item{x}{ Split point pattern (object of class \code{"splitppp"} created by \code{\link{split.ppp}}) to be smoothed. } \item{\dots}{ Arguments passed to \code{\link{density.ppp}} to control the smoothing, pixel resolution, edge correction etc. } } \value{ A list of pixel images (objects of class \code{"im"}). Can be plotted or printed. } \details{ This is a method for the generic function \code{density}. The argument \code{x} should be an object of class \code{"splitppp"}, effectively a list of point patterns. Typically \code{x} is obtained by applying the function \code{\link{split.ppp}} to a point pattern \code{y} by calling \code{split(y)}. This splits the points of \code{y} into several sub-patterns. A kernel estimate of the intensity function of each of the point patterns is computed using \code{\link{density.ppp}}. The return value is a list, each of whose entries is a pixel image (object of class \code{"im"}). The return value also belongs to the class \code{"listof"} and can be plotted or printed. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{im.object}} } \examples{ data(amacrine) Z <- density(split(amacrine), 0.05) plot(Z) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/bounding.box.Rd0000755000176000001440000000367412237642732015630 0ustar ripleyusers\name{bounding.box} \alias{bounding.box} \title{Bounding Box of a Window or Point Pattern} \description{ Find the smallest rectangle containing a given window(s) or point pattern(s). } \usage{ bounding.box(\dots) } \arguments{ \item{\dots}{One or more windows (objects of class \code{"owin"}), pixel images (objects of class \code{"im"}) or point patterns (objects of class \code{"ppp"}). } } \value{ A window (object of class \code{"owin"}) of type \code{"rectangle"} representing a rectangle. } \details{ This function finds the smallest rectangle (with sides parallel to the coordinate axes) that contains all the given objects. For a window (object of class \code{"owin"}), the bounding box is the smallest rectangle that contains all the vertices of the window (this is generally smaller than the enclosing frame, which is returned by \code{\link{as.rectangle}}). For a point pattern (object of class \code{"ppp"}), the bounding box is the smallest rectangle that contains all the points of the pattern, and is computed by \code{\link{bounding.box.xy}}. For a pixel image (object of class \code{"im"}), the image will be converted to a window using \code{\link{as.owin}}, and the bounding box of this window is obtained. If the argument is a list of several objects, then this function finds the smallest rectangle that contains all the bounding boxes of the objects. 
} \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{as.rectangle}} \code{\link{bounding.box.xy}} } \examples{ w <- owin(c(0,10),c(0,10), poly=list(x=c(1,2,3,2,1), y=c(2,3,4,6,7))) r <- bounding.box(w) # returns rectangle [1,3] x [2,7] w2 <- unit.square() r <- bounding.box(w, w2) # returns rectangle [0,3] x [0,7] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{utilities} spatstat/man/simplepanel.Rd0000644000176000001440000001717012250607776015542 0ustar ripleyusers\name{simplepanel} \alias{simplepanel} \alias{grow.simplepanel} \title{Simple Point-and-Click Interface Panels} \description{ These functions enable the user to create a simple, robust, point-and-click interface to any \R code. } \usage{ simplepanel(title, B, boxes, clicks, redraws=NULL, exit = NULL, env) grow.simplepanel(P, side = c("right", "left", "top", "bottom"), len = NULL, new.clicks, new.redraws=NULL, \dots, aspect) } \arguments{ \item{title}{ Character string giving the title of the interface panel. } \item{B}{ Bounding box of the panel coordinates. A rectangular window (object of class \code{"owin"}) } \item{boxes}{ A list of rectangular windows (objects of class \code{"owin"}) specifying the placement of the buttons and other interactive components of the panel. } \item{clicks}{ A list of \R functions, of the same length as \code{boxes}, specifying the operations to be performed when each button is clicked. See Details. } \item{redraws}{ Optional list of \R functions, of the same length as \code{boxes}, specifying how to redraw each button. See Details. } \item{exit}{ An \R function specifying actions to be taken when the interactive panel terminates. } \item{env}{ An \code{environment} that will be passed as an argument to all the functions in \code{clicks}, \code{redraws} and \code{exit}. 
} \item{P}{ An existing interaction panel (object of class \code{"simplepanel"}). } \item{side}{ Character string identifying which side of the panel \code{P} should be grown to accommodate the new buttons. } \item{len}{ Optional. Thickness of the new panel area that should be grown to accommodate the new buttons. A single number in the same units as the coordinate system of \code{P}. } \item{new.clicks}{ List of \R functions defining the operations to be performed when each of the new buttons is clicked. } \item{new.redraws}{ Optional. List of \R functions, of the same length as \code{new.clicks}, defining how to redraw each of the new buttons. } \item{\dots}{ Arguments passed to \code{\link{layout.boxes}} to determine the layout of the new buttons. } \item{aspect}{ Optional. Aspect ratio (height/width) of the new buttons. } } \details{ These functions enable the user to create a simple, robust, point-and-click interface to any \R code. The functions \code{simplepanel} and \code{grow.simplepanel} create an object of class \code{"simplepanel"}. Such an object defines the graphics to be displayed and the actions to be performed when the user interacts with the panel. The panel is activated by calling \code{\link{run.simplepanel}}. The function \code{simplepanel} creates a panel object from basic data. The function \code{grow.simplepanel} modifies an existing panel object \code{P} by growing an additional row or column of buttons. For \code{simplepanel}, \itemize{ \item The spatial layout of the panel is determined by the rectangles \code{B} and \code{boxes}. \item The argument \code{clicks} must be a list of functions specifying the action to be taken when each button is clicked. The list entries should have names. Each function should be of the form \code{function(env, xy)} where \code{env} is an \code{environment} that may contain shared data, and \code{xy} gives the coordinates of the mouse click, in the format \code{list(x, y)}. 
The function returns \code{TRUE} if the panel should continue running, and \code{FALSE} if the panel should terminate. \item The argument \code{redraws}, if given, must be a list of functions specifying the action to be taken when each button is to be redrawn. Each function should be of the form \code{function(button, name, env)} where \code{button} is a rectangle specifying the location of the button in the current coordinate system; \code{name} is a character string giving the name of the button; and \code{env} is the \code{environment} that may contain shared data. The function returns \code{TRUE} if the panel should continue running, and \code{FALSE} if the panel should terminate. If \code{redraws} is not given, the default action is to draw a pink rectangle showing the button position, draw the name of the button in the middle of this rectangle, and return \code{TRUE}. \item The argument \code{exit}, if given, must be a function specifying the action to be taken when the panel terminates. (Termination occurs when one of the \code{clicks} functions returns \code{FALSE}). The \code{exit} function should be of the form \code{function(env)} where \code{env} is the \code{environment} that may contain shared data. Its return value will be used as the return value of \code{\link{run.simplepanel}}. \item The argument \code{env} should be an \R environment. The panel buttons will have access to this environment, and will be able to read and write data in it. This mechanism is used to exchange data between the panel and other \R code. } For \code{grow.simplepanel}, \itemize{ \item the spatial layout of the new boxes is determined by the arguments \code{side}, \code{len}, \code{aspect} and by the additional \code{\dots} arguments passed to \code{\link{layout.boxes}}. \item the argument \code{new.clicks} should have the same format as \code{clicks}. It implicitly specifies the number of new buttons to be added, and the actions to be performed when they are clicked. 
\item the optional argument \code{new.redraws}, if given, should have the same format as \code{redraws}. It specifies the actions to be performed when the new buttons are clicked. } } \value{ An object of class \code{"simplepanel"}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{run.simplepanel}}, \code{\link{layout.boxes}} } \examples{ # make boxes (alternatively use layout.boxes()) Bminus <- square(1) Bvalue <- shift(Bminus, c(1.2, 0)) Bplus <- shift(Bvalue, c(1.2, 0)) Bdone <- shift(Bplus, c(1.2, 0)) myboxes <- list(Bminus, Bvalue, Bplus, Bdone) myB <- do.call(bounding.box,myboxes) # make environment containing an integer count myenv <- new.env() assign("answer", 0, envir=myenv) # what to do when finished: return the count. myexit <- function(e) { return(get("answer", envir=e)) } # button clicks # decrement the count Cminus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans - 1, envir=e) return(TRUE) } # display the count (clicking does nothing) Cvalue <- function(...) 
{ TRUE } # increment the count Cplus <- function(e, xy) { ans <- get("answer", envir=e) assign("answer", ans + 1, envir=e) return(TRUE) } # quit button Cdone <- function(e, xy) { return(FALSE) } myclicks <- list("-"=Cminus, value=Cvalue, "+"=Cplus, done=Cdone) # redraw the button that displays the current value of the count Rvalue <- function(button, nam, e) { plot(button, add=TRUE) ans <- get("answer", envir=e) text(centroid.owin(button), labels=ans) return(TRUE) } # make the panel P <- simplepanel("Counter", B=myB, boxes=myboxes, clicks=myclicks, redraws = list(NULL, Rvalue, NULL, NULL), exit=myexit, env=myenv) P } \keyword{iplot} \keyword{utilities} spatstat/man/plot.kstest.Rd0000755000176000001440000000602112237642733015514 0ustar ripleyusers\name{plot.kstest} \alias{plot.kstest} \title{Plot a Spatial Kolmogorov-Smirnov Test} \description{ Plot the result of a spatial Kolmogorov-Smirnov test } \usage{ \method{plot}{kstest}(x, ..., style=c("cdf", "PP", "QQ"), lwd=par("lwd"), col=par("col"), lty=par("lty"), lwd0=lwd, col0=col, lty0=lty) } \arguments{ \item{x}{ Object to be plotted. An object of class \code{"kstest"} produced by a method for \code{\link{kstest}}. } \item{\dots}{ extra arguments that will be passed to the plotting function \code{\link{plot.default}}. } \item{style}{ Style of plot. See Details. } \item{col,lwd,lty}{ The width, colour and type of lines used to plot the empirical curve (the empirical distribution, or PP plot or QQ plot). } \item{col0,lwd0,lty0}{ The width, colour and type of lines used to plot the reference curve (the predicted distribution, or the diagonal). } } \value{ \code{NULL}. } \details{ This is the \code{plot} method for the class \code{"kstest"}. An object of this class represents the outcome of a spatial Kolmogorov-Smirnov test, computed by \code{\link{kstest}}. 
If \code{style="cdf"} (the default), the plot displays the two cumulative distribution functions that are compared by the test: namely the empirical cumulative distribution function of the covariate at the data points, and the predicted cumulative distribution function of the covariate under the model, both plotted against the value of the covariate. The Kolmogorov-Smirnov test statistic is the maximum vertical separation between the two curves. If \code{style="PP"} then the P-P plot is drawn. The \eqn{x} coordinates of the plot are cumulative probabilities for the covariate under the model. The \eqn{y} coordinates are cumulative probabilities for the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic is the maximum vertical separation between the P-P plot and the diagonal reference line. If \code{style="QQ"} then the Q-Q plot is drawn. The \eqn{x} coordinates of the plot are quantiles of the covariate under the model. The \eqn{y} coordinates are quantiles of the covariate at the data points. The diagonal line \eqn{y=x} is also drawn for reference. The Kolmogorov-Smirnov test statistic cannot be read off the Q-Q plot. 
} \seealso{ \code{\link{kstest}} } \examples{ # synthetic data: nonuniform Poisson process X <- rpoispp(function(x,y) { 100 * exp(x) }, win=square(1)) # fit uniform Poisson process fit0 <- ppm(X, ~1) # test covariate = x coordinate xcoord <- function(x,y) { x } # test wrong model k <- kstest(fit0, xcoord) # plot result of test plot(k, lwd0=3) plot(k, style="PP") plot(k, style="QQ") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/rotate.ppp.Rd0000755000176000001440000000216712237642734015326 0ustar ripleyusers\name{rotate.ppp} \alias{rotate.ppp} \title{Rotate a Point Pattern} \description{ Rotates a point pattern } \usage{ \method{rotate}{ppp}(X, angle=pi/2, \dots) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{angle}{Angle of rotation.} \item{\dots}{Arguments passed to \code{\link{rotate.owin}} affecting the handling of the observation window, if it is a binary pixel mask.} } \value{ Another object of class \code{"ppp"} representing the rotated point pattern. } \details{ The points of the pattern, and the window of observation, are rotated about the origin by the angle specified. Angles are measured in radians, anticlockwise. The default is to rotate the pattern 90 degrees anticlockwise. If the points carry marks, these are preserved. 
} \seealso{ \code{\link{ppp.object}}, \code{\link{rotate.owin}} } \examples{ data(cells) X <- rotate(cells, pi/3) \dontrun{ plot(X) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/sessionLibs.Rd0000644000176000001440000000170012237642734015514 0ustar ripleyusers\name{sessionLibs} \alias{sessionLibs} \title{ Print Names and Version Numbers of Libraries Loaded } \description{ Prints the names and version numbers of libraries currently loaded by the user. } \usage{ sessionLibs() } \details{ This function prints a list of the libraries loaded by the user in the current session, giving just their name and version number. It obtains this information from \code{\link[utils]{sessionInfo}}. This function is not needed in an interactive \R session because the package startup messages will usually provide this information. Its main use is in an \code{\link{Sweave}} script, where it is needed because the package startup messages are not printed. } \value{ Null. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link[utils]{sessionInfo}} } \keyword{data} spatstat/man/rthin.Rd0000755000176000001440000000461512237642734014356 0ustar ripleyusers\name{rthin} \alias{rthin} \title{Random Thinning} \description{ Applies independent random thinning to a point pattern. } \usage{ rthin(X, P, \dots) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}) that will be thinned. } \item{P}{ Data giving the retention probabilities, i.e. the probability that each point in \code{X} will be retained. Either a single number, or a vector of numbers, or a \code{function(x,y)}, or a pixel image (object of class \code{"im"}). } \item{\dots}{ Additional arguments passed to \code{P}, if it is a function. 
} } \details{ In a random thinning operation, each point of the pattern \code{X} is randomly either deleted or retained (i.e. not deleted). The result is a point pattern, consisting of those points of \code{X} that were retained. Independent random thinning means that the retention/deletion of each point is independent of other points. The argument \code{P} determines the probability of \bold{retaining} each point. It may be \describe{ \item{a single number,}{so that each point will be retained with the same probability \code{P}; } \item{a vector of numbers,}{so that the \code{i}th point of \code{X} will be retained with probability \code{P[i]}; } \item{a function \code{P(x,y)},}{so that a point at a location \code{(x,y)} will be retained with probability \code{P(x,y)}; } \item{a pixel image,}{containing values of the retention probability for all locations in a region encompassing the point pattern. } } If \code{P} is a function, it should be \sQuote{vectorised}, that is, it should accept vector arguments \code{x,y} and should yield a numeric vector of the same length. The function may have extra arguments which are passed through the \code{\dots} argument. } \value{ A point pattern (object of class \code{"ppp"}). 
} \examples{ data(redwood) plot(redwood, main="thinning") # delete 20\% of points Y <- rthin(redwood, 0.8) points(Y, col="green", cex=1.4) # function f <- function(x,y) { ifelse(x < 0.4, 1, 0.5) } Y <- rthin(redwood, f) # pixel image Z <- as.im(f, redwood$window) Y <- rthin(redwood, Z) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/istat.Rd0000755000176000001440000000310012237642732014340 0ustar ripleyusers\name{istat} \alias{istat} \title{Point and Click Interface for Exploratory Analysis of Point Pattern} \description{ Compute various summary functions for a point pattern using a point-and-click interface. } \usage{ istat(x, xname) } \arguments{ \item{x}{ The spatial point pattern to be analysed. An object of class \code{"ppp"}. } \item{xname}{ Optional. Character string to use as the title of the dataset. } } \value{ \code{NULL}. } \details{ This command launches an interactive (point-and-click) interface which offers a choice of spatial summary functions that can be applied to the point pattern \code{x}. The selected summary function is computed for the point pattern \code{x} and plotted in a popup window. The selection of functions includes \code{\link{Kest}}, \code{\link{Lest}}, \code{\link{pcf}}, \code{\link{Fest}} ,\code{\link{Gest}} and \code{\link{Jest}}. For the function \code{\link{pcf}} it is possible to control the bandwidth parameter \code{bw}. There is also an option to show simulation envelopes of the summary function. } \section{Note}{ Before adjusting the bandwidth parameter \code{bw}, it is advisable to select \emph{No simulation envelopes} to save a lot of computation time. 
} \seealso{ \code{\link{iplot}} } \examples{ if(interactive()) { data(swedishpines) istat(swedishpines) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/boxx.Rd0000755000176000001440000000255012237642732014204 0ustar ripleyusers\name{boxx} \Rdversion{1.1} \alias{boxx} \title{ Multi-Dimensional Box } \description{ Creates an object representing a multi-dimensional box. } \usage{ boxx(..., unitname = NULL) } \arguments{ \item{\dots}{ Dimensions of the box. Vectors of length 2. } \item{unitname}{ Optional. Name of the unit of length. See Details. } } \details{ This function creates an object representing a multi-dimensional rectangular parallelepiped (box) with sides parallel to the coordinate axes. The object can be used to specify the domain of a multi-dimensional point pattern (see \code{\link{ppx}}) and in various geometrical calculations (see \code{\link{volume.boxx}}, \code{\link{diameter.boxx}}, \code{\link{eroded.volumes}}). The optional argument \code{unitname} specifies the name of the unit of length. See \code{\link{unitname}} for valid formats. } \value{ An object of class \code{"boxx"}. There is a print method for this class. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{ppx}}, \code{\link{volume.boxx}}, \code{\link{diameter.boxx}}, \code{\link{eroded.volumes.boxx}}. } \examples{ boxx(c(0,10),c(0,10),c(0,5),c(0,1), unitname=c("metre","metres")) } \keyword{spatial} \keyword{datagen} spatstat/man/Smooth.Rd0000644000176000001440000000162712237642734014500 0ustar ripleyusers\name{Smooth} \alias{Smooth} \title{Spatial smoothing of data} \description{ Generic function to perform spatial smoothing of spatial data. } \usage{ Smooth(X, ...) 
} \arguments{ \item{X}{Some kind of spatial data} \item{\dots}{Arguments passed to methods.} } \details{ This generic function calls an appropriate method to perform spatial smoothing on the spatial dataset \code{X}. Methods for this function include \itemize{ \item \code{\link{Smooth.ppp}} for point patterns \item \code{\link{Smooth.msr}} for measures \item \code{\link{Smooth.fv}} for function value tables } } \seealso{ \code{\link{Smooth.ppp}}, \code{\link{Smooth.msr}}, \code{\link{Smooth.fv}}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/methods.funxy.Rd0000755000176000001440000000355712237642733016050 0ustar ripleyusers\name{methods.funxy} \alias{methods.funxy} %DoNotExport \alias{contour.funxy} \alias{persp.funxy} \alias{plot.funxy} \Rdversion{1.1} \title{ Methods for Spatial Functions } \description{ Methods for objects of the class \code{"funxy"}. } \usage{ \method{contour}{funxy}(x, \dots) \method{persp}{funxy}(x, \dots) \method{plot}{funxy}(x, \dots) } \arguments{ \item{x}{ Object of class \code{"funxy"} representing a function of \eqn{x,y} coordinates. } \item{\dots}{ Named arguments controlling the plot. See Details. } } \details{ These are methods for the generic functions \code{\link{plot}}, \code{\link{contour}} and \code{\link{persp}} for the class \code{"funxy"} of spatial functions. Objects of class \code{"funxy"} are created, for example, by the commands \code{\link{distfun}} and \code{\link{funxy}}. The \code{plot}, \code{contour} and \code{persp} methods first convert \code{x} to a pixel image object using \code{\link{as.im}}, then display it using \code{\link{plot.im}}, \code{\link{contour.im}} or \code{\link{persp.im}}. 
Additional arguments \code{\dots} are either passed to \code{\link{as.im.function}} to control the spatial resolution of the pixel image, or passed to \code{\link{contour.im}}, \code{\link{persp.im}} or \code{\link{plot.im}} to control the appearance of the plot. } \value{ \code{NULL}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{funxy}}, \code{\link{distfun}}, \code{\link{as.im}}, \code{\link{plot.im}}, \code{\link{persp.im}}, \code{\link{contour.im}}, \code{\link{spatstat.options}} } \examples{ data(letterR) f <- distfun(letterR) contour(f) contour(f, W=owin(c(1,5),c(-1,4)), eps=0.1) } \keyword{spatial} \keyword{methods} spatstat/man/eval.fasp.Rd0000755000176000001440000000563412237642732015111 0ustar ripleyusers\name{eval.fasp} \alias{eval.fasp} \title{Evaluate Expression Involving Function Arrays} \description{ Evaluates any expression involving one or more function arrays (\code{fasp} objects) and returns another function array. } \usage{ eval.fasp(expr, envir, dotonly=TRUE) } \arguments{ \item{expr}{ An expression involving the names of objects of class \code{"fasp"}. } \item{envir}{ Optional. The environment in which to evaluate the expression. } \item{dotonly}{Logical. Passed to \code{\link{eval.fv}}.} } \details{ This is a wrapper to make it easier to perform pointwise calculations with the arrays of summary functions used in spatial statistics. A function array (object of class \code{"fasp"}) can be regarded as a matrix whose entries are functions. Objects of this kind are returned by the command \code{\link{alltypes}}. Suppose \code{X} is an object of class \code{"fasp"}. Then \code{eval.fasp(X+3)} effectively adds 3 to the value of every function in the array \code{X}, and returns the resulting object. 
Suppose \code{X} and \code{Y} are two objects of class \code{"fasp"} which are compatible (for example the arrays must have the same dimensions). Then \code{eval.fasp(X + Y)} will add the corresponding functions in each cell of the arrays \code{X} and \code{Y}, and return the resulting array of functions. Suppose \code{X} is an object of class \code{"fasp"} and \code{f} is an object of class \code{"fv"}. Then \code{eval.fasp(X + f)} will add the function \code{f} to the functions in each cell of the array \code{X}, and return the resulting array of functions. In general, \code{expr} can be any expression involving (a) the \emph{names} of objects of class \code{"fasp"} or \code{"fv"}, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.fasp} determines which of the \emph{variable names} in the expression \code{expr} refer to objects of class \code{"fasp"}. The expression is then evaluated for each cell of the array using \code{\link{eval.fv}}. The expression \code{expr} must be vectorised. There must be at least one object of class \code{"fasp"} in the expression. All such objects must be compatible. } \value{ Another object of class \code{"fasp"}. 
} \seealso{ \code{\link{fasp.object}}, \code{\link{Kest}} } \examples{ # manipulating the K function K <- alltypes(amacrine, "K") # expressions involving a fasp object eval.fasp(K + 3) L <- eval.fasp(sqrt(K/pi)) # expression involving two fasp objects D <- eval.fasp(K - L) # subtracting the unmarked K function from the cross-type K functions K0 <- Kest(unmark(amacrine)) DK <- eval.fasp(K - K0) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/zapsmall.im.Rd0000755000176000001440000000151312237642735015454 0ustar ripleyusers\name{zapsmall.im} \alias{zapsmall.im} \title{Rounding of Pixel Values} \description{ Modifies a pixel image, identifying those pixels that have values very close to zero, and replacing the value by zero. } \usage{ zapsmall.im(x, digits) } \arguments{ \item{x}{Pixel image (object of class \code{"im"}).} \item{digits}{ Argument passed to \code{\link{zapsmall}} indicating the precision to be used. } } \details{ The function \code{\link{zapsmall}} is applied to each pixel value of the image \code{x}. } \value{ Another pixel image. } \seealso{ \code{\link{zapsmall}} } \examples{ data(cells) D <- density(cells) zapsmall.im(D) } \author{Ege Rubak and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{methods} \keyword{univar} spatstat/man/deriv.fv.Rd0000644000176000001440000000574312237642732014753 0ustar ripleyusers\name{deriv.fv} \alias{deriv.fv} \title{ Calculate Derivative of Function Values } \description{ Applies numerical differentiation to the values in selected columns of a function value table. } \usage{ \method{deriv}{fv}(expr, which = "*", ..., method=c("spline", "numeric"), kinks=NULL) } \arguments{ \item{expr}{ Function values to be differentiated. 
A function value table (object of class \code{"fv"}, see \code{\link{fv.object}}). } \item{which}{ Character vector identifying which columns of the table should be differentiated. Either a vector containing names of columns, or one of the wildcard strings \code{"*"} or \code{"."} explained below. } \item{\dots}{ Extra arguments passed to \code{\link[stats]{smooth.spline}} to control the differentiation algorithm, if \code{method="spline"}. } \item{method}{ Differentiation method. A character string, partially matched to either \code{"spline"} or \code{"numeric"}. } \item{kinks}{ Optional vector of \eqn{x} values where the derivative is allowed to be discontinuous. } } \details{ This command performs numerical differentiation on the function values in a function value table (object of class \code{"fv"}). The differentiation is performed either by \code{\link[stats]{smooth.spline}} or by a naive numerical difference algorithm. The command \code{\link{deriv}} is generic. This is the method for objects of class \code{"fv"}. Differentiation is applied to every column (or to each of the selected columns) of function values in turn, using the function argument as the \eqn{x} coordinate and the selected column as the \eqn{y} coordinate. The original function values are then replaced by the corresponding derivatives. The optional argument \code{which} specifies which of the columns of function values in \code{x} will be differentiated. The default (indicated by the wildcard \code{which="*"}) is to differentiate all function values, i.e.\ all columns except the function argument. Alternatively \code{which="."} designates the subset of function values that are displayed in the default plot. Alternatively \code{which} can be a character vector containing the names of columns of \code{x}. If the argument \code{kinks} is given, it should be a numeric vector giving a value or values of \eqn{x} where the function is not differentiable. 
Differentiation will be performed separately on intervals between the discontinuity points. } \value{ Another function value table (object of class \code{"fv"}) of the same format. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{with.fv}}, \code{\link{fv.object}}, \code{\link[stats]{smooth.spline}} } \examples{ data(cells) G <- Gest(cells) plot(deriv.fv(G, which=".", spar=0.5)) } \keyword{spatial} \keyword{nonparametric} spatstat/man/residuals.ppm.Rd0000755000176000001440000001670712237642734016025 0ustar ripleyusers\name{residuals.ppm} \alias{residuals.ppm} \title{ Residuals for Fitted Point Process Model } \description{ Given a point process model fitted to a point pattern, compute residuals. } \usage{ \method{residuals}{ppm}(object, type="raw", \dots, check=TRUE, drop=FALSE, fittedvalues=fitted.ppm(object, check=check, drop=drop), coefs=NULL, quad=NULL) } \arguments{ \item{object}{ The fitted point process model (an object of class \code{"ppm"}) for which residuals should be calculated. } \item{type}{ String indicating the type of residuals to be calculated. Current options are \code{"raw"}, \code{"inverse"}, \code{"pearson"} and \code{"score"}. A partial match is adequate. } \item{\dots}{ Ignored. } \item{check}{ Logical value indicating whether to check the internal format of \code{object}. If there is any possibility that this object has been restored from a dump file, or has otherwise lost track of the environment where it was originally computed, set \code{check=TRUE}. } \item{drop}{ Logical value determining whether to delete quadrature points that were not used to fit the model. See \code{\link{quad.ppm}} for explanation. } \item{fittedvalues}{ Vector of fitted values for the conditional intensity at the quadrature points, from which the residuals will be computed. For expert use only. } \item{coefs}{ Optional. 
Numeric vector of coefficients for the model, replacing \code{coef(object)}. See the section on Modified Residuals below. } \item{quad}{ Optional. Data specifying how to re-fit the model. A list of arguments passed to \code{\link{quadscheme}}. See the section on Modified Residuals below. } } \value{ An object of class \code{"msr"} representing a signed measure or vector-valued measure (see \code{\link{msr}}). This object can be plotted. } \details{ This function computes several kinds of residuals for the fit of a point process model to a spatial point pattern dataset (Baddeley et al, 2005). Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. The argument \code{object} must be a fitted point process model (object of class \code{"ppm"}). Such objects are produced by the maximum pseudolikelihood fitting algorithm \code{\link{ppm}}). This fitted model object contains complete information about the original data pattern. Residuals are attached both to the data points and to some other points in the window of observation (namely, to the dummy points of the quadrature scheme used to fit the model). If the fitted model is correct, then the sum of the residuals over all (data and dummy) points in a spatial region \eqn{B} has mean zero. For further explanation, see Baddeley et al (2005). The type of residual is chosen by the argument \code{type}. Current options are \describe{ \item{\code{"raw"}:}{ the raw residuals \deqn{ r_j = z_j - w_j \lambda_j }{ r[j] = z[j] - w[j] lambda[j] } at the quadrature points \eqn{u_j}{u[j]}, where \eqn{z_j}{z[j]} is the indicator equal to 1 if \eqn{u_j}{u[j]} is a data point and 0 if \eqn{u_j}{u[j]} is a dummy point; \eqn{w_j}{w[j]} is the quadrature weight attached to \eqn{u_j}{u[j]}; and \deqn{\lambda_j = \hat\lambda(u_j,x)}{lambda[j] = lambda(u[j],x)} is the conditional intensity of the fitted model at \eqn{u_j}{u[j]}. 
These are the spatial analogue of the martingale residuals of a one-dimensional counting process. } \item{\code{"inverse"}:}{ the `inverse-lambda' residuals (Baddeley et al, 2005) \deqn{ r^{(I)}_j = \frac{r_j}{\lambda_j} = \frac{z_j}{\lambda_j} - w_j }{ rI[j] = r[j]/lambda[j] = z[j]/lambda[j] - w[j] } obtained by dividing the raw residuals by the fitted conditional intensity. These are a counterpart of the exponential energy marks (see \code{\link{eem}}). } \item{\code{"pearson"}:}{ the Pearson residuals (Baddeley et al, 2005) \deqn{ r^{(P)}_j = \frac{r_j}{\sqrt{\lambda_j}} = \frac{z_j}{\sqrt{\lambda_j}} - w_j \sqrt{\lambda_j} }{ rP[j] = r[j]/sqrt(lambda[j]) = z[j]/sqrt(lambda[j]) - w[j] sqrt(lambda[j]) } obtained by dividing the raw residuals by the square root of the fitted conditional intensity. The Pearson residuals are standardised, in the sense that if the model (true and fitted) is Poisson, then the sum of the Pearson residuals in a spatial region \eqn{B} has variance equal to the area of \eqn{B}. } \item{\code{"score"}:}{ the score residuals (Baddeley et al, 2005) \deqn{ r_j = (z_j - w_j \lambda_j) x_j }{ r[j] = (z[j] - w[j] lambda[j]) * x[j,] } obtained by multiplying the raw residuals \eqn{r_j}{r[j]} by the covariates \eqn{x_j}{x[j,]} for quadrature point \eqn{j}. The score residuals always sum to zero. } } Use \code{\link{plot.msr}} to plot the residuals directly, or \code{\link{diagnose.ppm}} to produce diagnostic plots based on these residuals. } \section{Modified Residuals}{ Sometimes we want to modify the calculation of residuals by using different values for the model parameters. This capability is provided by the arguments \code{coefs} and \code{quad}. If \code{coefs} is given, then the residuals will be computed by taking the model parameters to be \code{coefs}. This should be a numeric vector of the same length as the vector of fitted model parameters \code{coef(object)}. 
If \code{coefs} is missing and \code{quad} is given, then the model parameters will be determined by re-fitting the model using a new quadrature scheme specified by \code{quad}. Residuals will be computed for the original model \code{object} using these new parameter values. The argument \code{quad} should normally be a list of arguments in \code{name=value} format that will be passed to \code{\link{quadscheme}} (together with the original data points) to determine the new quadrature scheme. It may also be a quadrature scheme (object of class \code{"quad"}) to which the model should be fitted, or a point pattern (object of class \code{"ppp"}) specifying the \emph{dummy points} in a new quadrature scheme. } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. \emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. Baddeley, A., Moller, J. and Pakes, A.G. (2008) Properties of residuals for spatial point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{60}, 627--649. } \seealso{ \code{\link{msr}}, \code{\link{diagnose.ppm}}, \code{\link{ppm.object}}, \code{\link{ppm}} } \examples{ data(cells) fit <- ppm(cells, ~x, Strauss(r=0.15)) # Pearson residuals rp <- residuals(fit, type="pe") rp # simulated data X <- rStrauss(100,0.7,0.05) # fit Strauss model fit <- ppm(X, ~1, Strauss(0.05)) res.fit <- residuals(fit) # true model parameters truecoef <- c(log(100), log(0.7)) res.true <- residuals(fit, coefs=truecoef) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{methods} spatstat/man/whist.Rd0000755000176000001440000000450212237642735014364 0ustar ripleyusers\name{whist} \alias{whist} \title{ Weighted Histogram } \description{ Computes the weighted histogram of a set of observations with a given set of weights. 
} \usage{ whist(x, breaks, weights = NULL) } \arguments{ \item{x}{ Numeric vector of observed values. } \item{breaks}{ Vector of breakpoints for the histogram. } \item{weights}{ Numeric vector of weights for the observed values. } } \details{ This low-level function computes (but does not plot) the weighted histogram of a vector of observations \code{x} using a given vector of \code{weights}. The arguments \code{x} and \code{weights} should be numeric vectors of equal length. They may include \code{NA} or infinite values. The argument \code{breaks} should be a numeric vector whose entries are strictly increasing. These values define the boundaries between the successive histogram cells. The breaks \emph{do not} have to span the range of the observations. There are \code{N-1} histogram cells, where \code{N = length(breaks)}. An observation \code{x[i]} falls in the \code{j}th cell if \code{breaks[j] <= x[i] < breaks[j+1]} (for \code{j < N-1}) or \code{breaks[j] <= x[i] <= breaks[j+1]} (for \code{j = N-1}). The weighted histogram value \code{h[j]} for the \code{j}th cell is the sum of \code{weights[i]} for all observations \code{x[i]} that fall in the cell. Note that, in contrast to the function \code{\link{hist}}, the function \code{whist} does not require the breakpoints to span the range of the observations \code{x}. Values of \code{x} that fall outside the range of \code{breaks} are handled separately; their total weight is returned as an attribute of the histogram. } \value{ A numeric vector of length \code{N-1} containing the histogram values, where \code{N = length(breaks)}. The return value also has attributes \code{"low"} and \code{"high"} giving the total weight of all observations that are less than the lowest breakpoint, or greater than the highest breakpoint, respectively. 
} \examples{ x <- rnorm(100) b <- seq(-1,1,length=21) w <- runif(100) whist(x,b,w) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} with thanks to Peter Dalgaard. } \keyword{arith} spatstat/man/rNeymanScott.Rd0000755000176000001440000002040212237642734015650 0ustar ripleyusers\name{rNeymanScott} \alias{rNeymanScott} \title{Simulate Neyman-Scott Process} \description{ Generate a random point pattern, a realisation of the Neyman-Scott cluster process. } \usage{ rNeymanScott(kappa, rmax, rcluster, win = owin(c(0,1),c(0,1)), \dots, lmax=NULL) } \arguments{ \item{kappa}{ Intensity of the Poisson process of cluster centres. A single positive number, a function, or a pixel image. } \item{rmax}{ Maximum radius of a random cluster. } \item{rcluster}{ A function which generates random clusters, or other data specifying the random cluster mechanism. See Details. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{\dots}{ Arguments passed to \code{rcluster}. } \item{lmax}{ Optional. Upper bound on the values of \code{kappa} when \code{kappa} is a function or pixel image. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). Additionally, some intermediate results of the simulation are returned as attributes of this point pattern: see Details. } \details{ This algorithm generates a realisation of the general Neyman-Scott process, with the cluster mechanism given by the function \code{rcluster}. The clusters must have a finite maximum possible radius \code{rmax}. First, the algorithm generates a Poisson point process of \dQuote{parent} points with intensity \code{kappa}. Here \code{kappa} may be a single positive number, a function \code{kappa(x,y)}, or a pixel image object of class \code{"im"} (see \code{\link{im.object}}). See \code{\link{rpoispp}} for details. 
Second, each parent point is replaced by a random cluster of points. These clusters are combined together to yield a single point pattern which is then returned as the result of \code{rNeymanScott}. The argument \code{rcluster} specifies the cluster mechanism. It may be either: \itemize{ \item A \code{function} which will be called to generate each random cluster (the offspring points of each parent point). The function should expect to be called in the form \code{rcluster(x0,y0,\dots)} for a parent point at a location \code{(x0,y0)}. The return value of \code{rcluster} should specify the coordinates of the points in the cluster; it may be a list containing elements \code{x,y}, or a point pattern (object of class \code{"ppp"}). If it is a marked point pattern then the result of \code{rNeymanScott} will be a marked point pattern. \item A \code{list(mu, f)} where \code{mu} specifies the mean number of offspring points in each cluster, and \code{f} generates the random displacements (vectors pointing from the parent to the offspring). In this case, the number of offspring in a cluster is assumed to have a Poisson distribution, implying that the Neyman-Scott process is also a Cox process. The first element \code{mu} should be either a single nonnegative number (interpreted as the mean of the Poisson distribution of cluster size) or a pixel image or a \code{function(x,y)} giving a spatially varying mean cluster size (interpreted in the sense of Waagepetersen, 2007). The second element \code{f} should be a function that will be called once in the form \code{f(n)} to generate \code{n} independent and identically distributed displacement vectors (i.e. as if there were a cluster of size \code{n} with a parent at the origin \code{(0,0)}). The function should return a point pattern (object of class \code{"ppp"}) or something acceptable to \code{\link[grDevices]{xy.coords}} that specifies the coordinates of \code{n} points. 
} If required, the intermediate stages of the simulation (the parents and the individual clusters) can also be extracted from the return value of \code{rNeymanScott} through the attributes \code{"parents"} and \code{"parentid"}. The attribute \code{"parents"} is the point pattern of parent points. The attribute \code{"parentid"} is an integer vector specifying the parent for each of the points in the simulated pattern. Neyman-Scott models where \code{kappa} is a single number and \code{rcluster = list(mu,f)} can be fitted to data using the function \code{\link{kppm}}. } \section{Inhomogeneous Neyman-Scott Processes}{ There are several different ways of specifying a spatially inhomogeneous Neyman-Scott process: \itemize{ \item The point process of parent points can be inhomogeneous. If the argument \code{kappa} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then it is taken as specifying the intensity function of an inhomogeneous Poisson process according to which the parent points are generated. \item The number of points in a typical cluster can be spatially varying. If the argument \code{rcluster} is a list of two elements \code{mu, f} and the first entry \code{mu} is a \code{function(x,y)} or a pixel image (object of class \code{"im"}), then \code{mu} is interpreted as the reference intensity for offspring points, in the sense of Waagepetersen (2007). For a given parent point, the offspring constitute a Poisson process with intensity function equal to \code{mu(x, y) * g(x-x0, y-y0)} where \code{g} is the probability density of the offspring displacements generated by the function \code{f}. Equivalently, clusters are first generated with a constant expected number of points per cluster: the constant is \code{mumax}, the maximum of \code{mu}. Then the offspring are randomly \emph{thinned} (see \code{\link{rthin}}) with spatially-varying retention probabilities given by \code{mu/mumax}. 
\item The entire mechanism for generating a cluster can be dependent on the location of the parent point. If the argument \code{rcluster} is a function, then the cluster associated with a parent point at location \code{(x0,y0)} will be generated by calling \code{rcluster(x0, y0, \dots)}. The behaviour of this function could depend on the location \code{(x0,y0)} in any fashion. } Note that if \code{kappa} is an image, the spatial domain covered by this image must be large enough to include the \emph{expanded} window in which the parent points are to be generated. This expanded window consists of \code{\link{as.rectangle}(win)} extended by the amount \code{rmax} in each direction. This requirement means that \code{win} must be small enough so that the expansion of \code{as.rectangle(win)} is contained in the spatial domain of \code{kappa}. As a result, one may wind up having to simulate the process in a window smaller than what is really desired. In the first two cases, the intensity of the Neyman-Scott process is equal to \code{kappa * mu} if at least one of \code{kappa} or \code{mu} is a single number, and is otherwise equal to an integral involving \code{kappa}, \code{mu} and \code{f}. } \seealso{ \code{\link{rpoispp}}, \code{\link{rThomas}}, \code{\link{rGaussPoisson}}, \code{\link{rMatClust}}, \code{\link{rCauchy}}, \code{\link{rVarGamma}} } \examples{ # each cluster consists of 5 points in a disc of radius 0.2 nclust <- function(x0, y0, radius, n) { return(runifdisc(n, radius, centre=c(x0, y0))) } plot(rNeymanScott(10, 0.2, nclust, radius=0.2, n=5)) # multitype Neyman-Scott process (each cluster is a multitype process) nclust2 <- function(x0, y0, radius, n, types=c("a", "b")) { X <- runifdisc(n, radius, centre=c(x0, y0)) M <- sample(types, n, replace=TRUE) marks(X) <- M return(X) } plot(rNeymanScott(15,0.1,nclust2, radius=0.1, n=5)) } \references{ Neyman, J. and Scott, E.L. (1958) A statistical approach to problems of cosmology. 
\emph{Journal of the Royal Statistical Society, Series B} \bold{20}, 1--43. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/superimpose.Rd0000755000176000001440000001656712237642734015616 0ustar ripleyusers\name{superimpose} %DontDeclareMethods \alias{superimpose} \alias{superimpose.ppp} \alias{superimpose.psp} \alias{superimpose.default} \title{Superimpose Several Geometric Patterns} \description{ Superimpose any number of point patterns or line segment patterns. } \usage{ superimpose(\dots) \method{superimpose}{ppp}(\dots, W=NULL, check=TRUE) \method{superimpose}{psp}(\dots, W=NULL, check=TRUE) \method{superimpose}{default}(\dots, W=NULL, check=TRUE) } \arguments{ \item{\dots}{ Any number of arguments, each of which represents either a point pattern or a line segment pattern. } \item{W}{ Optional. Data determining the window for the resulting pattern. Either a window (object of class \code{"owin"}, or something acceptable to \code{\link{as.owin}}), or a function which returns a window, or one of the strings \code{"convex"}, \code{"rectangle"}, \code{"bbox"} or \code{"none"}. } \item{check}{ Logical value (passed to \code{\link{ppp}} or \code{\link{psp}} as appropriate) determining whether to check the geometrical validity of the resulting pattern. } } \value{ For \code{superimpose.ppp}, a point pattern (object of class \code{"ppp"}). For \code{superimpose.default}, either a point pattern (object of class \code{"ppp"}) or a \code{list(x,y)}. For \code{superimpose.psp}, a line segment pattern (object of class \code{"psp"}). } \details{ This function is used to superimpose several geometric patterns of the same kind, producing a single pattern of the same kind. 
The function \code{superimpose} is generic, with methods for the class \code{ppp} of point patterns, the class \code{psp} of line segment patterns, and a default method. The dispatch to a method is determined by the class of the first argument in \code{\dots}. \itemize{ \item \code{default}: If the first argument is \emph{not} an object of class \code{ppp} or \code{psp}, then the default method \code{superimpose.default} is executed. All arguments in \code{\dots} must have components \code{x} and \code{y} representing spatial coordinates. They may include objects of class \code{ppp}. The result will be either a \code{list(x,y)} or a point pattern (objects of class \code{ppp}) as explained below. \item \code{ppp}: If the first \code{\dots} argument is an object of class \code{ppp} then the method \code{superimpose.ppp} is executed. All arguments in \code{\dots} must be either \code{ppp} objects or lists with components \code{x} and \code{y}. The result will be an object of class \code{ppp}. \item \code{psp}: If the first \code{\dots} argument is an object of class \code{psp} then the \code{psp} method is dispatched and all \code{\dots} arguments must be \code{psp} objects. The result is a \code{psp} object. } The patterns are \emph{not} required to have the same window of observation. The window for the superimposed pattern is controlled by the argument \code{W}. \itemize{ \item If \code{W} is a window (object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}) then this determines the window for the superimposed pattern. \item If \code{W} is \code{NULL}, or the character string \code{"none"}, then windows are extracted from the geometric patterns, as follows. For \code{superimpose.psp}, all arguments \code{\dots} are line segment patterns (objects of class \code{"psp"}); their observation windows are extracted; the union of these windows is computed; and this union is taken to be the window for the superimposed pattern. 
For \code{superimpose.ppp} and \code{superimpose.default}, the arguments \code{\dots} are inspected, and any arguments which are point patterns (objects of class \code{"ppp"}) are selected; their observation windows are extracted, and the union of these windows is taken to be the window for the superimposed point pattern. For \code{superimpose.default} if none of the arguments is of class \code{"ppp"} then no window is computed and the result of \code{superimpose} is a \code{list(x,y)}. \item If \code{W} is one of the strings \code{"convex"}, \code{"rectangle"} or \code{"bbox"} then a window for the superimposed pattern is computed from the coordinates of the points or the line segments as follows. \describe{ \item{\code{"bbox"}:}{the bounding box of the points or line segments (see \code{\link{bounding.box.xy}});} \item{\code{"convex"}:}{the Ripley-Rasson estimator of a convex window (see \code{\link{ripras}});} \item{\code{"rectangle"}:}{the Ripley-Rasson estimator of a rectangular window (using \code{\link{ripras}} with argument \code{shape="rectangle"}).} } \item If \code{W} is a function, then this function is used to compute a window for the superimposed pattern from the coordinates of the points or the line segments. The function should accept input of the form \code{list(x,y)} and is expected to return an object of class \code{"owin"}. Examples of such functions are \code{\link{ripras}} and \code{\link{bounding.box.xy}}. } The arguments \code{\dots} may be \emph{marked} patterns. The marks of each component pattern must have the same format. Numeric and character marks may be ``mixed''. If there is such mixing then the numeric marks are coerced to character in the combining process. If the mark structures are all data frames, then these data frames must have the same number of columns and identical column names. 
If the arguments \code{\dots} are given in the form \code{name=value}, then the \code{name}s will be used as an extra column of marks attached to the elements of the corresponding patterns. } \seealso{ \code{\link{concatxy}}, \code{\link{quadscheme}}. } \examples{ # superimposing point patterns p1 <- runifrect(30) p2 <- runifrect(42) s1 <- superimpose(p1,p2) # Unmarked pattern. p3 <- list(x=rnorm(20),y=rnorm(20)) s2 <- superimpose(p3,p2,p1) # Default method gets called. s2a <- superimpose(p1,p2,p3) # Same as s2 except for order of points. s3 <- superimpose(clyde=p1,irving=p2) # Marked pattern; marks a factor # with levels "clyde" and "irving"; # warning given. marks(p1) <- factor(sample(LETTERS[1:3],30,TRUE)) marks(p2) <- factor(sample(LETTERS[1:3],42,TRUE)) s5 <- superimpose(clyde=p1,irving=p2) # Marked pattern with extra column marks(p2) <- data.frame(a=marks(p2),b=runif(42)) s6 <- try(superimpose(p1,p2)) # Gives an error. marks(p1) <- data.frame(a=marks(p1),b=1:30) s7 <- superimpose(p1,p2) # O.K. # how to make a 2-type point pattern with types "a" and "b" u <- superimpose(a = rpoispp(10), b = rpoispp(20)) # how to make a 2-type point pattern with types 1 and 2 u <- superimpose("1" = rpoispp(10), "2" = rpoispp(20)) # superimposing line segment patterns X <- rpoisline(10) Y <- as.psp(matrix(runif(40), 10, 4), window=owin()) Z <- superimpose(X, Y) # being unreasonable \dontrun{ crud <- superimpose(p1,p2,X,Y) # Gives an error, of course! } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/Extract.quad.Rd0000755000176000001440000000243012237642732015564 0ustar ripleyusers\name{Extract.quad} \alias{[.quad} \title{Subset of Quadrature Scheme} \description{ Extract a subset of a quadrature scheme. } \usage{ \method{[}{quad}(x, ...) } \arguments{ \item{x}{ A quadrature scheme (object of class \code{"quad"}). 
} \item{\dots}{ Arguments passed to \code{\link{[.ppp}} to determine the subset. } } \value{ A quadrature scheme (object of class \code{"quad"}). } \details{ This function extracts a designated subset of a quadrature scheme. The function \code{[.quad} is a method for \code{\link{[}} for the class \code{"quad"}. It extracts a designated subset of a quadrature scheme. The subset to be extracted is determined by the arguments \code{\dots} which are interpreted by \code{\link{[.ppp}}. Thus it is possible to take the subset consisting of all quadrature points that lie inside a given region, or a subset of quadrature points identified by numeric indices. } \seealso{ \code{\link{quad.object}}, \code{\link{[.ppp}}. } \examples{ data(nztrees) Q <- quadscheme(nztrees) W <- owin(c(0,148),c(0,95)) # a subwindow Q[W] } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/flipxy.Rd0000755000176000001440000000236112237642732014537 0ustar ripleyusers\name{flipxy} %DontDeclareMethods \alias{flipxy} \alias{flipxy.owin} \alias{flipxy.ppp} \alias{flipxy.psp} \alias{flipxy.im} \title{Exchange X and Y Coordinates} \description{ Exchanges the \eqn{x} and \eqn{y} coordinates in a spatial dataset. } \usage{ flipxy(X) \method{flipxy}{owin}(X) \method{flipxy}{ppp}(X) \method{flipxy}{psp}(X) \method{flipxy}{im}(X) } \arguments{ \item{X}{Spatial dataset. An object of class \code{"owin"}, \code{"ppp"}, \code{"psp"} or \code{"im"}. } } \value{ Another object of the same type, representing the result of swapping the \eqn{x} and \eqn{y} coordinates. } \details{ This function swaps the \eqn{x} and \eqn{y} coordinates of a spatial dataset. This could also be performed using the command \code{\link{affine}}, but \code{flipxy} is faster. The function \code{\link{flipxy}} is generic, with methods for the classes of objects listed above. 
} \seealso{ \code{\link{affine}}, \code{\link{reflect}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ data(cells) X <- flipxy(cells) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/linim.Rd0000755000176000001440000000541312237642732014335 0ustar ripleyusers\name{linim} \alias{linim} \title{ Create Pixel Image on Linear Network } \description{ Creates an object of class \code{"linim"} that represents a pixel image on a linear network. } \usage{ linim(L, Z, ..., df=NULL) } \arguments{ \item{L}{ Linear network (object of class \code{"linnet"}). } \item{Z}{ Pixel image (object of class \code{"im"}). } \item{\dots}{Ignored.} \item{df}{ Advanced use only. Data frame giving full details of the mapping between the pixels of \code{Z} and the lines of \code{L}. See Details. } } \details{ This command creates an object of class \code{"linim"} that represents a pixel image defined on a linear network. Typically such objects are used to represent the result of smoothing or model-fitting on the network. Most users will not need to call \code{linim} directly. The argument \code{L} is a linear network (object of class \code{"linnet"}). It gives the exact spatial locations of the line segments of the network, and their connectivity. The argument \code{Z} is a pixel image object of class \code{"im"} that gives a pixellated approximation of the function values. For increased efficiency, advanced users may specify the optional argument \code{df}. This is a data frame giving the precomputed mapping between the pixels of \code{Z} and the line segments of \code{L}. 
It should have columns named \code{xc, yc} containing the coordinates of the pixel centres, \code{x,y} containing the projections of these pixel centres onto the linear network, \code{mapXY} identifying the line segment on which each projected point lies, and \code{tp} giving the parametric position of \code{(x,y)} along the segment. } \value{ Object of class \code{"linim"} that also inherits the class \code{"im"}. There is a special method for plotting this class. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{plot.linim}}, \code{\link{linnet}}, % \code{\link{eval.linim}}, \code{\link{im}}. } \examples{ example(linnet) M <- as.mask.psp(as.psp(letterA)) Z <- as.im(function(x,y) {x-y}, W=M) X <- linim(letterA, Z) X } \references{ Ang, Q.W. (2010) \emph{Statistical methodology for events on a network}. Master's thesis, School of Mathematics and Statistics, University of Western Australia. Ang, Q.W., Baddeley, A. and Nair, G. (2012) Geometrically corrected second-order analysis of events on a linear network, with applications to ecology and criminology. To appear in \emph{Scandinavian Journal of Statistics}. McSwiggan, G., Nair, M.G. and Baddeley, A. (2012) Fitting Poisson point process models to events on a linear network. Manuscript in preparation. } \keyword{spatial} spatstat/man/stratrand.Rd0000755000176000001440000000422212237642734015226 0ustar ripleyusers\name{stratrand} \alias{stratrand} \title{Stratified random point pattern} \description{ Generates a \dQuote{stratified random} pattern of points in a window, by dividing the window into rectangular tiles and placing \code{k} random points in each tile. } \usage{ stratrand(window, nx, ny, k = 1) } \arguments{ \item{window}{A window. An object of class \code{\link{owin}}, or data in any format acceptable to \code{\link{as.owin}()}. } \item{nx}{Number of tiles in each row. } \item{ny}{Number of tiles in each column. 
} \item{k}{Number of random points to generate in each tile. } } \value{ A list with two components \code{x} and \code{y}, which are numeric vectors giving the coordinates of the random points. } \details{ The bounding rectangle of \code{window} is divided into a regular \eqn{nx \times ny}{nx * ny} grid of rectangular tiles. In each tile, \code{k} random points are generated independently with a uniform distribution in that tile. Note that some of these grid points may lie outside the window, if \code{window} is not of type \code{"rectangle"}. The function \code{\link{inside.owin}} can be used to select those grid points which do lie inside the window. See the examples. This function is useful in creating dummy points for quadrature schemes (see \code{\link{quadscheme}}) as well as in simulating random point patterns. } \seealso{ \code{\link{quad.object}}, \code{\link{quadscheme}}, \code{\link{inside.owin}}, \code{\link{gridcentres}} } \examples{ w <- unit.square() xy <- stratrand(w, 10, 10) \dontrun{ plot(w) points(xy) } # polygonal boundary bdry <- list(x=c(0.1,0.3,0.7,0.4,0.2), y=c(0.1,0.1,0.5,0.7,0.3)) w <- owin(c(0,1), c(0,1), poly=bdry) xy <- stratrand(w, 10, 10, 3) \dontrun{ plot(w) points(xy) } # determine which grid points are inside polygon ok <- inside.owin(xy$x, xy$y, w) \dontrun{ plot(w) points(xy$x[ok], xy$y[ok]) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/waterstriders.Rd0000644000176000001440000000426012241443112016105 0ustar ripleyusers\name{waterstriders} \alias{waterstriders} \docType{data} \title{ Waterstriders data. Three independent replications of a point pattern formed by insects. } \description{ The territorial behaviour of an insect group called waterstriders was studied in a series of laboratory experiments by Dr Matti Nummelin (University of Helsinki). 
The data were analysed in the pioneering PhD thesis of Antti Penttinen (1984). The dataset \code{waterstriders} is a list of three point patterns. Each point pattern gives the locations of larvae of the waterstrider \emph{Limnoporus (Gerris) rufoscutellatus} (larval stage V) in a homogeneous area about 48 cm square. The point patterns can be assumed to be independent. It is known that this species of waterstriders exhibits territorialism at older larvae stages and at the adult stage. Therefore, if any deviation from Complete Spatial Randomness exists in these three point patterns, it is expected to be towards inhibition. The data were obtained from photographs which were scanned manually. The waterstriders are in a pool which is larger than the picture. A guard area (width about 2.5 cm) has been deleted because it is a source of inhomogeneity to interactions. Penttinen (1984, chapter 5) fitted a pairwise interaction model with a Strauss/hardcore interaction (see \code{\link[spatstat]{StraussHard}}) with hard core radius 1.5 cm and interaction radius 4.5 cm. } \format{ \code{waterstriders} is a list of three point patterns (objects of class \code{"ppp"}). It also has class \code{"listof"} so that it can be plotted and printed directly. The point pattern coordinates are in centimetres. } \usage{data(waterstriders)} \source{ Data were collected by Dr. Matti Nummelin (University of Helsinki, Finland). Data kindly provided by Prof. Antti Penttinen, University of Jyv\"askyl\"a, Finland. } \references{ Penttinen, A. (1984) Modelling interaction in spatial point patterns: parameter estimation by the maximum likelihood method. \emph{Jyv\"askyl\"a Studies in Computer Science, Economics and Statistics} \bold{7}, University of {Jyv\"askyl\"a}, Finland. 
} \keyword{datasets} \keyword{spatial} spatstat/man/linnet.Rd0000755000176000001440000000404012237642732014511 0ustar ripleyusers\name{linnet} \alias{linnet} \title{ Create a Linear Network } \description{ Creates an object of class \code{"linnet"} representing a network of line segments. } \usage{ linnet(vertices, m, edges) } \arguments{ \item{vertices}{ Point pattern (object of class \code{"ppp"}) specifying the vertices of the network. } \item{m}{ Adjacency matrix. A matrix of logical values equal to \code{TRUE} when the corresponding vertices are joined by a line. (Specify either \code{m} or \code{edges}.) } \item{edges}{ Edge list. A two-column matrix of integers, specifying all pairs of vertices that should be joined by an edge. (Specify either \code{m} or \code{edges}.) } } \details{ An object of class \code{"linnet"} represents a network of straight line segments in two dimensions. The function \code{linnet} creates such an object from the minimal information: the spatial location of each vertex (endpoint, crossing point or meeting point of lines) and information about which vertices are joined by an edge. This function can take some time to execute, because the algorithm computes various properties of the network that are stored in the resulting object. } \value{ Object of class \code{"linnet"} representing the linear network. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{simplenet}} for an example of a linear network. \code{\link[spatstat:methods.linnet]{methods.linnet}} for methods applicable to \code{linnet} objects. \code{\link{ppp}}, \code{\link{psp}}. 
} \examples{ # letter 'A' specified by adjacency matrix v <- ppp(x=(-2):2, y=3*c(0,1,2,1,0), c(-3,3), c(-1,7)) m <- matrix(FALSE, 5,5) for(i in 1:4) m[i,i+1] <- TRUE m[2,4] <- TRUE m <- m | t(m) letterA <- linnet(v, m) plot(letterA) # letter 'A' specified by edge list edg <- cbind(1:4, 2:5) edg <- rbind(edg, c(2,4)) letterA <- linnet(v, edges=edg) } \keyword{spatial} spatstat/man/is.empty.Rd0000755000176000001440000000245612237642732015001 0ustar ripleyusers\name{is.empty} %DontDeclareMethods \alias{is.empty} \alias{is.empty.owin} \alias{is.empty.ppp} \alias{is.empty.psp} \alias{is.empty.default} \title{Test Whether An Object Is Empty} \description{ Checks whether the argument is an empty window, an empty point pattern, etc. } \usage{ is.empty(x) \method{is.empty}{owin}(x) \method{is.empty}{ppp}(x) \method{is.empty}{psp}(x) \method{is.empty}{default}(x) } \arguments{ \item{x}{ A window (object of class \code{"owin"}), a point pattern (object of class \code{"ppp"}), or a line segment pattern (object of class \code{"psp"}). } } \details{ This function tests whether the object \code{x} represents an empty spatial object, such as an empty window, a point pattern with zero points, or a line segment pattern with zero line segments. An empty window can be obtained as the output of \code{\link{intersect.owin}}, \code{\link{erosion}}, \code{\link{opening}}, \code{\link{complement.owin}} and some other operations. An empty point pattern or line segment pattern can be obtained as the result of simulation. } \value{ Logical value. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/spruces.Rd0000755000176000001440000000346312237642734014716 0ustar ripleyusers\name{spruces} \alias{spruces} \docType{data} \title{ Spruces Point Pattern } \description{ The data give the locations of Norwegian spruce trees in a natural forest stand in Saxonia, Germany. Each tree is marked with its diameter at breast height. } \format{ An object of class \code{"ppp"} representing the point pattern of tree locations in a 56 x 38 metre sampling region. Each tree is marked with its diameter at breast height. All values are given in metres. See \code{\link{ppp.object}} for details of the format of a point pattern object. The marks are numeric. These data have been analysed by Fiksel (1984, 1988), Stoyan et al (1987), Penttinen et al (1992) and Goulard et al (1996). } \usage{data(spruces)} \source{Stoyan et al (1987). Original source unknown.} \examples{ data(spruces) plot(spruces) # To reproduce Goulard et al. Figure 3 plot(spruces, maxsize=5*max(spruces$marks)) plot(unmark(spruces), add=TRUE) } \references{ Fiksel, T. (1984) Estimation of parameterized pair potentials of marked and nonmarked Gibbsian point processes. \emph{Elektron. Informationsverarb. u. Kybernet.} \bold{20}, 270--278. Fiksel, T. (1988) Estimation of interaction potentials of Gibbsian point processes. \emph{Statistics} \bold{19}, 77-86 Goulard, M., S\"arkk\"a, A. and Grabarnik, P. (1996) Parameter estimation for marked Gibbs point processes through the maximum pseudolikelihood method. \emph{Scandinavian Journal of Statistics} \bold{23}, 365--379. Penttinen, A., Stoyan, D. and Henttonen, H. (1992) Marked point processes in forest statistics. \emph{Forest Science} \bold{38}, 806--824. Stoyan, D., Kendall, W.S. and Mecke, J. (1987) \emph{Stochastic Geometry and its Applications}. Wiley. 
} \keyword{datasets} \keyword{spatial} spatstat/man/urkiola.Rd0000755000176000001440000000263512237642734014700 0ustar ripleyusers\encoding{latin1} \name{urkiola} \alias{urkiola} \docType{data} \title{Urkiola Woods Point Pattern} \description{ Locations of birch (\emph{Betula celtiberica}) and oak (\emph{Quercus robur}) trees in a secondary wood in Urkiola Natural Park (Basque country, northern Spain). They are part of a more extensive dataset collected and analysed by Laskurain (2008). The coordinates of the trees are given in meters. } \usage{data(urkiola)} \format{ An object of class \code{"ppp"} representing the point pattern of tree locations. Entries include \describe{ \item{x}{Cartesian x-coordinate of tree} \item{y}{Cartesian y-coordinate of tree } \item{marks}{factor indicating species of each tree} } The levels of \code{marks} are \code{birch} and \code{oak}. See \code{\link{ppp.object}} for details of the format of a ppp object. } \source{N.A. Laskurain. Kindly formatted and communicated by M. de la Cruz Rot} \references{ \enc{Laskurain, N. A. (2008) \emph{Dinmica espacio-temporal de un bosque secundario en el Parque Natural de Urkiola (Bizkaia).} Tesis Doctoral. Universidad del Pas Vasco /Euskal Herriko Unibertsitatea. }{ Laskurain, N. A. (2008) \emph{Dinamica espacio-temporal de un bosque secundario en el Parque Natural de Urkiola (Bizkaia).} Tesis Doctoral. Universidad del Pais Vasco /Euskal Herriko Unibertsitatea. } } \keyword{datasets} spatstat/man/rounding.Rd0000644000176000001440000000441012237642734015045 0ustar ripleyusers\name{rounding} %DontDeclareMethods \alias{rounding} \alias{rounding.default} \alias{rounding.ppp} \alias{rounding.pp3} \alias{rounding.ppx} \title{ Detect Numerical Rounding } \description{ Given a numeric vector, or an object containing numeric spatial coordinates, determine whether the values have been rounded to a certain number of decimal places. 
} \usage{ rounding(x) \method{rounding}{default}(x) \method{rounding}{ppp}(x) \method{rounding}{pp3}(x) \method{rounding}{ppx}(x) } \arguments{ \item{x}{ A numeric vector, or an object containing numeric spatial coordinates. } } \details{ For a numeric vector \code{x}, this function determines whether the values have been rounded to a certain number of decimal places. \itemize{ \item If the entries of \code{x} are not all integers, then \code{rounding(x)} returns the smallest number of digits \code{d} after the decimal point such that \code{\link[base]{round}(x, digits=d)} is identical to \code{x}. For example if \code{rounding(x) = 2} then the entries of \code{x} are rounded to 2 decimal places, and are multiples of 0.01. \item If all the entries of \code{x} are integers, then \code{rounding(x)} returns \code{-d}, where \code{d} is the smallest number of digits \emph{before} the decimal point such that \code{\link[base]{round}(x, digits=-d)} is identical to \code{x}. For example if \code{rounding(x) = -3} then the entries of \code{x} are multiples of 1000. If \code{rounding(x) = 0} then the entries of \code{x} are integers but not multiples of 10. \item If all entries of \code{x} are equal to 0, the rounding is not determined, and a value of \code{NULL} is returned. } For a point pattern (object of class \code{"ppp"}) or similar object \code{x} containing numeric spatial coordinates, this procedure is applied to the spatial coordinates. } \value{ An integer. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{round.ppp}} } \examples{ rounding(c(0.1, 0.3, 1.2)) rounding(c(1940, 1880, 2010)) rounding(0) rounding(cells) } \keyword{spatial} \keyword{math} spatstat/man/clusterset.Rd0000644000176000001440000001064112237642732015416 0ustar ripleyusers\name{clusterset} \alias{clusterset} \title{ Allard-Fraley Estimator of Cluster Feature } \description{ Detect high-density features in a spatial point pattern using the (unrestricted) Allard-Fraley estimator. } \usage{ clusterset(X, result=c("marks", "domain"), \dots, verbose=TRUE, fast=FALSE, exact=!fast) } \arguments{ \item{X}{ A dimensional spatial point pattern (object of class \code{"ppp"}). } \item{result}{ Character string specifying the type of result. See Details. } \item{verbose}{ Logical value indicating whether to print progress reports. } \item{fast}{ Logical. If \code{FALSE} (the default), the Dirichlet tile areas will be computed exactly using polygonal geometry, so that the optimal choice of tiles will be computed exactly. If \code{TRUE}, the Dirichlet tile areas will be approximated using pixel counting, so the optimal choice will be approximate. } \item{exact}{ Logical. If \code{TRUE}, the Allard-Fraley estimator of the domain will be computed exactly using polygonal geometry. If \code{FALSE}, the Allard-Fraley estimator of the domain will be approximated by a binary pixel mask. The default is initially set to \code{FALSE}. } \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} to control the pixel resolution if \code{exact=FALSE}. } } \details{ Allard and Fraley (1997) developed a technique for recognising features of high density in a spatial point pattern in the presence of random clutter. This algorithm computes the \emph{unrestricted} Allard-Fraley estimator. 
The Dirichlet (Voronoi) tessellation of the point pattern \code{X} is computed. The smallest \code{m} Dirichlet cells are selected, where the number \code{m} is determined by a maximum likelihood criterion. \itemize{ \item If \code{fast=FALSE} (the default), the areas of the tiles of the Dirichlet tessellation will be computed exactly using polygonal geometry. This ensures that the optimal selection of tiles is computed exactly. \item If \code{fast=TRUE}, the Dirichlet tile areas will be approximated by counting pixels. This is faster, and is usually correct (depending on the pixel resolution, which is controlled by the arguments \code{\dots}). } If \code{result="marks"} the result is the point pattern \code{X} with a vector of marks labelling each point with a value \code{yes} or \code{no} depending on whether the corresponding Dirichlet cell is selected by the Allard-Fraley estimator. In other words each point of \code{X} is labelled as either a cluster point or a non-cluster point. If \code{result="domain"}, the result is the Allard-Fraley estimator of the cluster feature set, which is the union of all the selected Dirichlet cells, represented as a window (object of class \code{"owin"}). \itemize{ \item If \code{exact=TRUE}, the Allard-Fraley set estimator will be computed exactly using polygonal geometry. The result is a polygonal window. \item If \code{exact=FALSE}, the Allard-Fraley set estimator will be approximated by a binary pixel mask. This is faster than the exact computation. The result is a binary mask. } When \pkg{spatstat} is initialised, the default is \code{exact=FALSE}. } \value{ If \code{result="marks"}, a multitype point pattern (object of class \code{"ppp"}). If \code{result="domain"}, a window (object of class \code{"owin"}). } \references{ Allard, D. and Fraley, C. (1997) Nonparametric maximum likelihood estimation of features in spatial point processes using Voronoi tessellation. 
\emph{Journal of the American Statistical Association} \bold{92}, 1485--1493. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{nnclean}}, \code{\link{sharpen}} } \examples{ opa <- par(mfrow=c(1,2)) W <- grow.rectangle(as.rectangle(letterR), 1) X <- superimpose(runifpoint(300, letterR), runifpoint(50, W), W=W) plot(W, main="clusterset(X)") plot(clusterset(X, fast=TRUE), add=TRUE, chars=c("o", "+"), cols=1:2) plot(letterR, add=TRUE) plot(W, main="clusterset(X, 'd')") plot(clusterset(X, "d", exact=FALSE), add=TRUE) plot(letterR, add=TRUE) par(opa) } \keyword{spatial} \keyword{classif} spatstat/man/pcf3est.Rd0000755000176000001440000001034212237642733014572 0ustar ripleyusers\name{pcf3est} \Rdversion{1.1} \alias{pcf3est} \title{ Pair Correlation Function of a Three-Dimensional Point Pattern } \description{ Estimates the pair correlation function from a three-dimensional point pattern. } \usage{ pcf3est(X, ..., rmax = NULL, nrval = 128, correction = c("translation", "isotropic"), delta=NULL, adjust=1, biascorrect=TRUE) } \arguments{ \item{X}{ Three-dimensional point pattern (object of class \code{"pp3"}). } \item{\dots}{ Ignored. } \item{rmax}{ Optional. Maximum value of argument \eqn{r} for which \eqn{g_3(r)}{g3(r)} will be estimated. } \item{nrval}{ Optional. Number of values of \eqn{r} for which \eqn{g_3(r)}{g3(r)} will be estimated. } \item{correction}{ Optional. Character vector specifying the edge correction(s) to be applied. See Details. } \item{delta}{ Optional. Half-width of the Epanechnikov smoothing kernel. } \item{adjust}{ Optional. Adjustment factor for the default value of \code{delta}. } \item{biascorrect}{ Logical value. Whether to correct for underestimation due to truncation of the kernel near \eqn{r=0}. 
} } \details{ For a stationary point process \eqn{\Phi}{Phi} in three-dimensional space, the pair correlation function is \deqn{ g_3(r) = \frac{K_3'(r)}{4\pi r^2} }{ g3(r) = K3'(r)/(4 * pi * r^2) } where \eqn{K_3'}{K3'} is the derivative of the three-dimensional \eqn{K}-function (see \code{\link{K3est}}). The three-dimensional point pattern \code{X} is assumed to be a partial realisation of a stationary point process \eqn{\Phi}{Phi}. The distance between each pair of distinct points is computed. Kernel smoothing is applied to these distance values (weighted by an edge correction factor) and the result is renormalised to give the estimate of \eqn{g_3(r)}{g3(r)}. The available edge corrections are: \describe{ \item{\code{"translation"}:}{ the Ohser translation correction estimator (Ohser, 1983; Baddeley et al, 1993) } \item{\code{"isotropic"}:}{ the three-dimensional counterpart of Ripley's isotropic edge correction (Ripley, 1977; Baddeley et al, 1993). } } Kernel smoothing is performed using the Epanechnikov kernel with half-width \code{delta}. If \code{delta} is missing, the default is to use the rule-of-thumb \eqn{\delta = 0.26/\lambda^{1/3}}{delta = 0.26/lambda^(1/3)} where \eqn{\lambda = n/v}{lambda = n/v} is the estimated intensity, computed from the number \eqn{n} of data points and the volume \eqn{v} of the enclosing box. This default value of \code{delta} is multiplied by the factor \code{adjust}. The smoothing estimate of the pair correlation \eqn{g_3(r)}{g3(r)} is typically an underestimate when \eqn{r} is small, due to truncation of the kernel at \eqn{r=0}. If \code{biascorrect=TRUE}, the smoothed estimate is approximately adjusted for this bias. This is advisable whenever the dataset contains a sufficiently large number of points. } \value{ A function value table (object of class \code{"fv"}) that can be plotted, printed or coerced to a data frame containing the function values. 
Additionally the value of \code{delta} is returned as an attribute of this object. } \references{ Baddeley, A.J, Moyeed, R.A., Howard, C.V. and Boyde, A. (1993) Analysis of a three-dimensional point pattern with replication. \emph{Applied Statistics} \bold{42}, 641--668. Ohser, J. (1983) On estimators for the reduced second moment measure of point processes. \emph{Mathematische Operationsforschung und Statistik, series Statistics}, \bold{14}, 63 -- 71. Ripley, B.D. (1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rana Moyeed. } \seealso{ \code{\link{K3est}}, \code{\link{pcf}} } \examples{ X <- rpoispp3(250) Z <- pcf3est(X) Zbias <- pcf3est(X, biascorrect=FALSE) if(interactive()) { opa <- par(mfrow=c(1,2)) plot(Z, ylim.covers=c(0, 1.2)) plot(Zbias, ylim.covers=c(0, 1.2)) par(opa) } attr(Z, "delta") } \keyword{spatial} \keyword{nonparametric} spatstat/man/affine.lpp.Rd0000644000176000001440000000411612237642732015243 0ustar ripleyusers\name{affine.lpp} %DontDeclareMethods \alias{affine.lpp} \alias{shift.lpp} \alias{rotate.lpp} \alias{rescale.lpp} \alias{scalardilate.lpp} \title{Apply Geometrical Transformations to Point Pattern on a Linear Network} \description{ Apply geometrical transformations to a point pattern on a linear network. 
} \usage{ \method{affine}{lpp}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) \method{shift}{lpp}(X, \dots) \method{rotate}{lpp}(X, angle=pi/2, \dots) \method{scalardilate}{lpp}(X, f, \dots) \method{rescale}{lpp}(X, s) } \arguments{ \item{X}{Point pattern on a linear network (object of class \code{"lpp"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{angle}{Rotation angle in radians.} \item{f}{Scalar dilation factor.} \item{s}{ Unit conversion factor: the new units are \code{s} times the old units. } \item{\dots}{ Arguments passed to other methods. } } \value{ Another point pattern on a linear network (object of class \code{"lpp"}) representing the result of applying the geometrical transformation. } \details{ These functions are methods for the generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{rescale}} and \code{\link{scalardilate}} applicable to objects of class \code{"lpp"}. All of these functions perform geometrical transformations on the object \code{X}, except for \code{rescale}, which simply rescales the units of length. } \seealso{ \code{\link{lpp}}. Generic functions \code{\link{affine}}, \code{\link{shift}}, \code{\link{rotate}}, \code{\link{scalardilate}}, \code{\link{rescale}}. 
} \examples{ X <- rpoislpp(2, simplenet) U <- rotate(X, pi) stretch <- diag(c(2,3)) Y <- affine(X, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(X, mat=shear, vec=c(0, 1)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/eval.linim.Rd0000644000176000001440000000550512237642732015262 0ustar ripleyusers\name{eval.linim} \alias{eval.linim} \title{Evaluate Expression Involving Pixel Images on Linear Network} \description{ Evaluates any expression involving one or more pixel images on a linear network, and returns a pixel image on the same linear network. } \usage{ eval.linim(expr, envir, harmonize=TRUE) } \arguments{ \item{expr}{An expression in the \R language, involving the names of objects of class \code{"linim"}.} \item{envir}{Optional. The environment in which to evaluate the expression.} \item{harmonize}{ Logical. Whether to resolve inconsistencies between the pixel grids. } } \details{ This function a wrapper to make it easier to perform pixel-by-pixel calculations. It is one of several functions whose names begin with \code{eval} which work on objects of different types. This particular function is designed to work with objects of class \code{"linim"} which represent pixel images on a linear network. Suppose \code{X} is a pixel image on a linear network (object of class \code{"linim"}. Then \code{eval.linim(X+3)} will add 3 to the value of every pixel in \code{X}, and return the resulting pixel image on the same linear network. Suppose \code{X} and \code{Y} are two pixel images on the same linear network, with compatible pixel dimensions. Then \code{eval.linim(X + Y)} will add the corresponding pixel values in \code{X} and \code{Y}, and return the resulting pixel image on the same linear network. 
In general, \code{expr} can be any expression in the R language involving (a) the \emph{names} of pixel images, (b) scalar constants, and (c) functions which are vectorised. See the Examples. First \code{eval.linim} determines which of the \emph{variable names} in the expression \code{expr} refer to pixel images. Each such name is replaced by a matrix containing the pixel values. The expression is then evaluated. The result should be a matrix; it is taken as the matrix of pixel values. The expression \code{expr} must be vectorised. There must be at least one linear pixel image in the expression. All images must have compatible dimensions. If \code{harmonize=TRUE}, images that have incompatible dimensions will be resampled so that they are compatible. If \code{harmonize=FALSE}, images that are incompatible will cause an error. } \value{ An image object of class \code{"linim"}. } \seealso{ \code{\link{eval.im}}, \code{\link{linim}} } \examples{ example(linim) Y <- linim(letterA, as.im(function(x,y){y^2+x}, W=M)) eval.linim(X + 3) eval.linim(X - Y) eval.linim(abs(X - Y)) Z <- eval.linim(sin(X * pi) + Y) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{programming} spatstat/man/lgcp.estK.Rd0000755000176000001440000002104112237642732015052 0ustar ripleyusers\name{lgcp.estK} \alias{lgcp.estK} \title{Fit a Log-Gaussian Cox Point Process by Minimum Contrast} \description{ Fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast. } \usage{ lgcp.estK(X, startpar=c(sigma2=1,alpha=1), covmodel=list(model="exponential"), lambda=NULL, q = 1/4, p = 2, rmin = NULL, rmax = NULL, ...) } \arguments{ \item{X}{ Data to which the model will be fitted. Either a point pattern or a summary statistic. See Details. 
} \item{startpar}{ Vector of starting values for the parameters of the log-Gaussian Cox process model. } \item{covmodel}{ Specification of the covariance model for the log-Gaussian field. See Details. } \item{lambda}{ Optional. An estimate of the intensity of the point process. } \item{q,p}{ Optional. Exponents for the contrast criterion. } \item{rmin, rmax}{ Optional. The interval of \eqn{r} values for the contrast criterion. } \item{\dots}{ Optional arguments passed to \code{\link[stats]{optim}} to control the optimisation algorithm. See Details. } } \details{ This algorithm fits a log-Gaussian Cox point process model to a point pattern dataset by the Method of Minimum Contrast, using the K function. The argument \code{X} can be either \describe{ \item{a point pattern:}{An object of class \code{"ppp"} representing a point pattern dataset. The \eqn{K} function of the point pattern will be computed using \code{\link{Kest}}, and the method of minimum contrast will be applied to this. } \item{a summary statistic:}{An object of class \code{"fv"} containing the values of a summary statistic, computed for a point pattern dataset. The summary statistic should be the \eqn{K} function, and this object should have been obtained by a call to \code{\link{Kest}} or one of its relatives. } } The algorithm fits a log-Gaussian Cox point process (LGCP) model to \code{X}, by finding the parameters of the LGCP model which give the closest match between the theoretical \eqn{K} function of the LGCP model and the observed \eqn{K} function. For a more detailed explanation of the Method of Minimum Contrast, see \code{\link{mincontrast}}. The model fitted is a stationary, isotropic log-Gaussian Cox process (Moller and Waagepetersen, 2003, pp. 72-76). To define this process we start with a stationary Gaussian random field \eqn{Z} in the two-dimensional plane, with constant mean \eqn{\mu}{mu} and covariance function \eqn{C(r)}. 
Given \eqn{Z}, we generate a Poisson point process \eqn{Y} with intensity function \eqn{\lambda(u) = \exp(Z(u))}{lambda(u) = exp(Z(u))} at location \eqn{u}. Then \eqn{Y} is a log-Gaussian Cox process. The \eqn{K}-function of the LGCP is \deqn{ K(r) = \int_0^r 2\pi s \exp(C(s)) \, {\rm d}s. }{ K(r) = integral from 0 to r of (2 * pi * s * exp(C(s))) ds. } The intensity of the LGCP is \deqn{ \lambda = \exp(\mu + \frac{C(0)}{2}). }{ lambda= exp(mu + C(0)/2). } The covariance function \eqn{C(r)} is parametrised in the form \deqn{ C(r) = \sigma^2 c(r/\alpha) }{ C(r) = sigma^2 * c(-r/alpha) } where \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} are parameters controlling the strength and the scale of autocorrelation, respectively, and \eqn{c(r)} is a known covariance function determining the shape of the covariance. The strength and scale parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha} will be estimated by the algorithm. The template covariance function \eqn{c(r)} must be specified as explained below. In this algorithm, the Method of Minimum Contrast is first used to find optimal values of the parameters \eqn{\sigma^2}{sigma^2} and \eqn{\alpha}{alpha^2}. Then the remaining parameter \eqn{\mu}{mu} is inferred from the estimated intensity \eqn{\lambda}{lambda}. The template covariance function \eqn{c(r)} is specified using the argument \code{covmodel}. It may be any of the covariance functions recognised by the command \code{\link[RandomFields:CovarianceFct]{Covariance}} in the \pkg{RandomFields} package. The default is the exponential covariance \eqn{c(r) = e^{-r}}{c(r) = e^(-r)} so that the scaled covariance is \deqn{ C(r) = \sigma^2 e^{-r/\alpha}. }{ C(r) = sigma^2 * exp(-r/alpha). 
} The argument \code{covmodel} should be of the form \code{list(model="modelname", \dots)} where \code{modelname} is the string name of one of the covariance models recognised by the command \code{\link[RandomFields:CovarianceFct]{Covariance}} in the \pkg{RandomFields} package, and \code{\dots} are arguments of the form \code{tag=value} giving the values of parameters controlling the shape of these models. For example the exponential covariance is specified by \code{covmodel=list(model="exponential")} while the Matern covariance with exponent \eqn{\nu=0.3}{nu = 0.3} is specified by \code{covmodel=list(model="matern", nu=0.3)}. If the argument \code{lambda} is provided, then this is used as the value of \eqn{\lambda}{lambda}. Otherwise, if \code{X} is a point pattern, then \eqn{\lambda}{lambda} will be estimated from \code{X}. If \code{X} is a summary statistic and \code{lambda} is missing, then the intensity \eqn{\lambda}{lambda} cannot be estimated, and the parameter \eqn{\mu}{mu} will be returned as \code{NA}. The remaining arguments \code{rmin,rmax,q,p} control the method of minimum contrast; see \code{\link{mincontrast}}. The optimisation algorithm can be controlled through the additional arguments \code{"..."} which are passed to the optimisation function \code{\link[stats]{optim}}. For example, to constrain the parameter values to a certain range, use the argument \code{method="L-BFGS-B"} to select an optimisation algorithm that respects box constraints, and use the arguments \code{lower} and \code{upper} to specify (vectors of) minimum and maximum values for each parameter. } \value{ An object of class \code{"minconfit"}. There are methods for printing and plotting this object. 
It contains the following main components: \item{par }{Vector of fitted parameter values.} \item{fit }{Function value table (object of class \code{"fv"}) containing the observed values of the summary statistic (\code{observed}) and the theoretical values of the summary statistic computed from the fitted model parameters. } } \note{ This function is considerably slower than \code{\link{lgcp.estpcf}} because of the computation time required for the integral in the \eqn{K}-function. Computation can be accelerated, at the cost of less accurate results, by setting \code{spatstat.options(fastK.lgcp=TRUE)}. } \references{ Moller, J, Syversveen, A. and Waagepetersen, R. (1998) Log Gaussian Cox Processes. \emph{Scandinavian Journal of Statistics} \bold{25}, 451--482. Moller, J. and Waagepetersen, R. (2003). Statistical Inference and Simulation for Spatial Point Processes. Chapman and Hall/CRC, Boca Raton. Waagepetersen, R. (2007) An estimating function approach to inference for inhomogeneous Neyman-Scott processes. \emph{Biometrics} \bold{63}, 252--258. } \author{ Rasmus Waagepetersen \email{rw@math.auc.dk}. Adapted for \pkg{spatstat} by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} Further modifications by Rasmus Waagepetersen and Shen Guochun. } \seealso{ \code{\link{lgcp.estpcf}} for alternative method of fitting LGCP. \code{\link{matclust.estK}}, \code{\link{thomas.estK}} for other models. \code{\link{mincontrast}} for the generic minimum contrast fitting algorithm, including important parameters that affect the accuracy of the fit. \code{\link[RandomFields:CovarianceFct]{Covariance}} in the \pkg{RandomFields} package, for covariance function models. \code{\link{Kest}} for the \eqn{K} function. 
} \examples{ \donttest{ u <- lgcp.estK(redwood) } u <- lgcp.estK(redwood, c(sigma2=1, alpha=0.1)) u if(interactive()) plot(u) \testonly{ if(require(RandomFields)) { K <- Kest(redwood, r=seq(0, 0.1, length=9)) op <- spatstat.options(fastK.lgcp=TRUE) lgcp.estK(K, covmodel=list(model="matern", nu=0.3), control=list(maxit=2)) spatstat.options(op) } } \donttest{ if(require(RandomFields)) { lgcp.estK(redwood, covmodel=list(model="matern", nu=0.3)) } } } \keyword{spatial} \keyword{models} spatstat/man/will.expand.Rd0000644000176000001440000000210512237642735015445 0ustar ripleyusers\name{will.expand} \alias{will.expand} \title{ Test Expansion Rule } \description{ Determines whether an expansion rule will actually expand the window or not. } \usage{ will.expand(x) } \arguments{ \item{x}{ Expansion rule. An object of class \code{"rmhexpand"}. } } \details{ An object of class \code{"rmhexpand"} describes a rule for expanding a simulation window. See \code{\link{rmhexpand}} for details. One possible expansion rule is to do nothing, i.e. not to expand the window. This command inspects the expansion rule \code{x} and determines whether it will or will not actually expand the window. It returns \code{TRUE} if the window will be expanded. } \value{ Logical value. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{rmhexpand}}, \code{\link{expand.owin}} } \examples{ x <- rmhexpand(distance=0.2) y <- rmhexpand(area=1) will.expand(x) will.expand(y) } \keyword{spatial} \keyword{manip} spatstat/man/Replace.im.Rd0000755000176000001440000000740312237642734015207 0ustar ripleyusers\name{Replace.im} \alias{[<-.im} \title{Reset Values in Subset of Image} \description{ Reset the values in a subset of a pixel image. } \usage{ \method{[}{im}(x, i, j) <- value } \arguments{ \item{x}{ A two-dimensional pixel image. An object of class \code{"im"}. 
} \item{i}{ Object defining the subregion or subset to be replaced. Either a spatial window (an object of class \code{"owin"}), or a pixel image with logical values, or a point pattern (an object of class \code{"ppp"}), or any type of index that applies to a matrix, or something that can be converted to a point pattern by \code{\link{as.ppp}} (using the window of \code{x}). } \item{j}{ An integer or logical vector serving as the column index if matrix indexing is being used. Ignored if \code{i} is appropriate to some sort of replacement \emph{other than} matrix indexing. } \item{value}{ Vector, matrix, factor or pixel image containing the replacement values. Short vectors will be recycled. } } \value{ The image \code{x} with the values replaced. } \details{ This function changes some of the pixel values in a pixel image. The image \code{x} must be an object of class \code{"im"} representing a pixel image defined inside a rectangle in two-dimensional space (see \code{\link{im.object}}). The subset to be changed is determined by the arguments \code{i,j} according to the following rules (which are checked in this order): \enumerate{ \item \code{i} is a spatial object such as a window, a pixel image with logical values, or a point pattern; or \item \code{i,j} are indices for the matrix \code{as.matrix(x)}; or \item \code{i} can be converted to a point pattern by \code{\link{as.ppp}(i, W=as.owin(x))}, and \code{i} is not a matrix. } If \code{i} is a spatial window (an object of class \code{"owin"}), the values of the image inside this window are changed. If \code{i} is a point pattern (an object of class \code{"ppp"}), then the values of the pixel image at the points of this pattern are changed. If \code{i} does not satisfy any of the conditions above, then the algorithm tries to interpret \code{i,j} as indices for the matrix \code{as.matrix(x)}. Either \code{i} or \code{j} may be missing or blank. 
If none of the conditions above are met, and if \code{i} is not a matrix, then \code{i} is converted into a point pattern by \code{\link{as.ppp}(i, W=as.owin(x))}. Again the values of the pixel image at the points of this pattern are changed. } \section{Warning}{ If you have a 2-column matrix containing the \eqn{x,y} coordinates of point locations, then to prevent this being interpreted as an array index, you should convert it to a \code{data.frame} or to a point pattern. } \seealso{ \code{\link{im.object}}, \code{\link{[.im}}, \code{\link{[}}, \code{\link{ppp.object}}, \code{\link{as.ppp}}, \code{\link{owin.object}} } \examples{ # make up an image X <- setcov(unit.square()) plot(X) # a rectangular subset W <- owin(c(0,0.5),c(0.2,0.8)) X[W] <- 2 plot(X) # a polygonal subset data(letterR) R <- affine(letterR, diag(c(1,1)/2), c(-2,-0.7)) X[R] <- 3 plot(X) # a point pattern P <- rpoispp(20) X[P] <- 10 plot(X) # change pixel value at a specific location X[list(x=0.1,y=0.2)] <- 7 # matrix indexing --- single vector index X[1:2570] <- 10 plot(X) # matrix indexing using double indices X[1:257,1:10] <- 5 plot(X) # matrix indexing using a matrix of indices X[cbind(1:257,1:257)] <- 10 X[cbind(257:1,1:257)] <- 10 plot(X) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/ants.Rd0000755000176000001440000001436612237642732014201 0ustar ripleyusers\name{ants} \alias{ants} \alias{ants.extra} \docType{data} \title{ Harkness-Isham ants' nests data } \description{ These data give the spatial locations of nests of two species of ants, \emph{Messor wasmanni} and \emph{Cataglyphis bicolor}, recorded by Professor R.D. Harkness at a site in northern Greece, and described in Harkness \& Isham (1983). 
The full dataset (supplied here) has an irregular polygonal boundary, while most analyses have been confined to two rectangular subsets of the pattern (also supplied here). The harvester ant \emph{M. wasmanni} collects seeds for food and builds a nest composed mainly of seed husks. \emph{C. bicolor} is a heat-tolerant desert foraging ant which eats dead insects and other arthropods. Interest focuses on whether there is evidence in the data for intra-species competition between \emph{Messor} nests (i.e. competition for resources) and for preferential placement of \emph{Cataglyphis} nests in the vicinity of \emph{Messor} nests. The full dataset is displayed in Figure 1 of Harkness \& Isham (1983). See \bold{Usage} below to produce a comparable plot. It comprises 97 nests (68 Messor and 29 Cataglyphis) inside an irregular convex polygonal boundary, together with annotations showing a foot track through the region, the boundary between field and scrub areas inside the region, and indicating the two rectangular subregions A and B used in their analysis. Rectangular subsets of the data were analysed by Harkness \& Isham (1983), Isham (1984), Takacs \& Fiksel (1986), S\"arkk\"a (1993, section 5.3), H\"ogmander and S\"arkk\"a (1999) and Baddeley \& Turner (2000). The full dataset (inside its irregular boundary) was first analysed by Baddeley \& Turner (2005b). The dataset \code{ants} is the full point pattern enclosed by the irregular polygonal boundary. The \eqn{x} and \eqn{y} coordinates are eastings (E-W) and northings (N-S) scaled so that 1 unit equals 0.5 feet. This is a multitype point pattern object, each point carrying a mark indicating the ant species (with levels \code{Cataglyphis} and \code{Messor}). The dataset \code{ants.extra} is a list of auxiliary information: \describe{ \item{\code{A} and \code{B}}{The subsets of the pattern within the rectangles A and B demarcated in Figure 1 of Harkness \& Isham (1983). These are multitype point pattern objects. 
} \item{\code{trackNE} and \code{trackSW}}{ coordinates of two straight lines bounding the foot track. } \item{\code{fieldscrub}}{The endpoints of a straight line separating the regions of `field' and `scrub': scrub to the North and field to the South. } \item{\code{side}}{ A \code{function(x,y)} that determines whether the location \code{(x,y)} is in the scrub or the field. The function can be applied to numeric vectors \code{x} and \code{y}, and returns a factor with levels \code{"scrub"} and \code{"field"}. This function is useful as a spatial covariate. } \item{\code{plotit}}{A function which produces a plot of the full dataset. } } } \format{ \code{ants} is an object of class \code{"ppp"} representing the full point pattern of ants' nests. See \code{\link{ppp.object}} for details of the format. The coordinates are scaled so that 1 unit equals 0.5 feet. The points are marked by species (with levels \code{Cataglyphis} and \code{Messor}). \code{ants.extra} is a list with entries \describe{ \item{A}{point pattern of class \code{"ppp"}} \item{B}{point pattern of class \code{"ppp"}} \item{trackNE}{data in format \code{list(x=numeric(2),y=numeric(2))} giving the two endpoints of line markings} \item{trackSW}{data in format \code{list(x=numeric(2),y=numeric(2))} giving the two endpoints of line markings} \item{fieldscrub}{data in format \code{list(x=numeric(2),y=numeric(2))} giving the two endpoints of line markings} \item{side}{Function with arguments \code{x,y}} \item{plotit}{Function} } } \usage{data(ants)} \examples{ # Equivalent to Figure 1 of Harkness and Isham (1983) data(ants) ants.extra$plotit() # Data in subrectangle A, rotated # Approximate data used by Sarkka (1993) angle <- atan(diff(ants.extra$fieldscrub$y)/diff(ants.extra$fieldscrub$x)) plot(rotate(ants.extra$A, -angle)) # Approximate window used by Takacs and Fiksel (1986) tfwindow <- bounding.box(ants$window) antsTF <- ppp(ants$x, ants$y, window=tfwindow) plot(antsTF) } \source{Harkness and Isham 
(1983). Nest coordinates kindly provided by Prof Valerie Isham. Polygon coordinates digitised by Adrian Baddeley from a reprint of Harkness \& Isham (1983). } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Baddeley, A. and Turner, R. (2005a) Spatstat: an R package for analyzing spatial point patterns. \emph{Journal of Statistical Software} \bold{12}:6, 1--42. URL: \code{www.jstatsoft.org}, ISSN: 1548-7660. Baddeley, A. and Turner, R. (2005b) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. Harkness, R.D. and Isham, V. (1983) A bivariate spatial point pattern of ants' nests. \emph{Applied Statistics} \bold{32}, 293--303. H\"ogmander, H. and S\"arkk\"a, A. (1999) Multitype spatial point patterns with hierarchical interactions. \emph{Biometrics} \bold{55}, 1051--1058. Isham, V.S. (1984) Multitype Markov point processes: some approximations. \emph{Proceedings of the Royal Society of London, Series A}, \bold{391}, 39--53. Takacs, R. and Fiksel, T. (1986) Interaction pair-potentials for a system of ants' nests. \emph{Biometrical Journal} \bold{28}, 1007--1013. S\"arkk\"a, A. (1993) \emph{Pseudo-likelihood approach for pair potential estimation of Gibbs processes}. Number 22 in Jyv\"askyl\"a Studies in Computer Science, Economics and Statistics. University of Jyv\"askyl\"a, Finland. 
} \keyword{datasets} \keyword{spatial} spatstat/man/pyramidal.Rd0000644000176000001440000000267212252274531015203 0ustar ripleyusers\name{pyramidal} \alias{pyramidal} \docType{data} \title{ Pyramidal Neurons in Cingulate Cortex } \description{ Point patterns giving the locations of pyramidal neurons in micrographs from area 24, layer 2 of the cingulate cortex in the human brain. There is one point pattern from each of 31 human subjects. The subjects are divided into three groups: controls (12 subjects), schizoaffective (9 subjects) and schizophrenic (10 subjects). Each point pattern is recorded in a unit square region; the unit of measurement is unknown. These data were introduced and analysed by Diggle, Lange and Benes (1991). } \format{ \code{pyramidal} is a hyperframe with 31 rows, one row for each subject. It has a column named \code{Neurons} containing the point patterns of neuron locations, and a column named \code{group} which is a factor with levels \code{"control", "schizoaffective", "schizophrenic"} identifying the grouping of subjects. } \usage{data(pyramidal)} \source{ Peter Diggle's website. } \references{ Diggle, P.J., Lange, N. and Benes, F.M. (1991). Analysis of variance for replicated spatial point patterns in clinical neuroanatomy. \emph{Journal of the American Statistical Association} \bold{86}, 618--625. } \examples{ pyr <- pyramidal pyr$grp <- abbreviate(pyramidal$group, minlength=7) plot(pyr, quote(plot(Neurons, pch=16, main=grp)), main="Pyramidal Neurons") } \keyword{datasets} \keyword{spatial} spatstat/man/border.Rd0000755000176000001440000000361612237642732014505 0ustar ripleyusers\name{border} \alias{border} \title{Border Region of a Window} \description{ Computes the border region of a window, that is, the region lying within a specified distance of the boundary of a window. } \usage{ border(w, r, outside=FALSE, ...) } \arguments{ \item{w}{A window (object of class \code{"owin"}) or something acceptable to \code{\link{as.owin}}. 
} \item{r}{Numerical value.} \item{outside}{Logical value determining whether to compute the border outside or inside \code{w}.} \item{\dots}{ Optional arguments passed to \code{\link{erosion}} (if \code{outside=FALSE}) or to \code{\link{dilation}} (if \code{outside=TRUE}). } } \value{ A window (object of class \code{"owin"}). } \details{ By default (if \code{outside=FALSE}), the border region is the subset of \code{w} lying within a distance \code{r} of the boundary of \code{w}. It is computed by eroding \code{w} by the distance \code{r} (using \code{\link{erosion}}) and subtracting this eroded window from the original window \code{w}. If \code{outside=TRUE}, the border region is the set of locations outside \code{w} lying within a distance \code{r} of \code{w}. It is computed by dilating \code{w} by the distance \code{r} (using \code{\link{dilation}}) and subtracting the original window \code{w} from the dilated window. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{erosion}}, \code{\link{dilation}} } \examples{ # rectangle u <- unit.square() border(u, 0.1) border(u, 0.1, outside=TRUE) # polygon \testonly{opa <- spatstat.options(npixel=32)} data(letterR) plot(letterR) plot(border(letterR, 0.1), add=TRUE) plot(border(letterR, 0.1, outside=TRUE), add=TRUE) \testonly{spatstat.options(opa)} } \keyword{spatial} \keyword{math} spatstat/man/bw.scott.Rd0000644000176000001440000000275512237642732014773 0ustar ripleyusers\name{bw.scott} \alias{bw.scott} \title{ Scott's Rule for Bandwidth Selection for Kernel Density } \description{ Use Scott's rule of thumb to determine the smoothing bandwidth for the kernel estimation of point process intensity. } \usage{ bw.scott(X) } \arguments{ \item{X}{ A point pattern (object of class \code{"ppp"}). 
} } \details{ This function selects a bandwidth \code{sigma} for the kernel estimator of point process intensity computed by \code{\link{density.ppp}}. The bandwidth \eqn{\sigma}{sigma} is computed by the rule of thumb of Scott (1992, page 152). It is very fast to compute. This rule is designed for density estimation, and typically produces a larger bandwidth than \code{\link{bw.diggle}}. It is useful for estimating gradual trend. } \value{ A numerical vector of two elements giving the selected bandwidths in the \code{x} and \code{y} directions. } \seealso{ \code{\link{density.ppp}}, \code{\link{bw.diggle}}, \code{\link{bw.ppl}}, \code{\link{bw.frac}}. } \examples{ data(lansing) attach(split(lansing)) b <- bw.scott(hickory) b \donttest{ plot(density(hickory, b)) } } \references{ Scott, D.W. (1992) \emph{Multivariate Density Estimation. Theory, Practice and Visualization}. New York: Wiley. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/is.lpp.Rd0000644000176000001440000000126212237642732014425 0ustar ripleyusers\name{is.lpp} \alias{is.lpp} \title{Test Whether An Object Is A Point Pattern on a Linear Network} \description{ Checks whether its argument is a point pattern on a linear network (object of class \code{"lpp"}). } \usage{ is.lpp(x) } \arguments{ \item{x}{Any object.} } \details{ This function tests whether the object \code{x} is a point pattern object of class \code{"lpp"}. } \value{ \code{TRUE} if \code{x} is a point pattern of class \code{"lpp"}, otherwise \code{FALSE}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/methods.rhohat.Rd0000755000176000001440000000443312237642733016156 0ustar ripleyusers\name{methods.rhohat} \alias{methods.rhohat} %DoNotExport \alias{print.rhohat} \alias{plot.rhohat} \alias{predict.rhohat} \title{ Methods for Intensity Functions of Spatial Covariate } \description{ These are methods for the class \code{"rhohat"}. } \usage{ \method{print}{rhohat}(x, ...) \method{plot}{rhohat}(x, ..., do.rug=TRUE) \method{predict}{rhohat}(object, ..., relative=FALSE) } \arguments{ \item{x,object}{ An object of class \code{"rhohat"} representing a smoothed estimate of the intensity function of a point process. } \item{\dots}{ Arguments passed to other methods. } \item{do.rug}{ Logical value indicating whether to plot the observed values of the covariate as a rug plot along the horizontal axis. } \item{relative}{ Logical value indicating whether to compute the estimated point process intensity (\code{relative=FALSE}) or the relative risk (\code{relative=TRUE}) in the case of a relative risk estimate. } } \details{ These functions are methods for the generic commands \code{\link{print}}, \code{\link{plot}} and \code{\link{predict}} for the class \code{"rhohat"}. An object of class \code{"rhohat"} is an estimate of the intensity of a point process, as a function of a given spatial covariate. See \code{\link{rhohat}}. The method \code{plot.rhohat} displays the estimated function \eqn{\rho}{rho} using \code{\link{plot.fv}}, and optionally adds a \code{\link{rug}} plot of the observed values of the covariate. The method \code{predict.rhohat} computes a pixel image of the intensity \eqn{\rho(Z(u))}{rho(Z(u))} at each spatial location \eqn{u}, where \eqn{Z} is the spatial covariate. } \value{ For \code{predict.rhohat} the value is a pixel image (object of class \code{"im"}). 
For other functions, the value is \code{NULL}. } \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{rhohat}} } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) rho <- rhohat(X, function(x,y){x}) rho plot(rho) Y <- predict(rho) plot(Y) # fit <- ppm(X, ~x) rho <- rhohat(fit, "y") opa <- par(mfrow=c(1,2)) plot(predict(rho)) plot(predict(rho, relative=TRUE)) par(opa) } \keyword{spatial} \keyword{methods} spatstat/man/clarkevans.Rd0000755000176000001440000001064512237642732015361 0ustar ripleyusers\name{clarkevans} \alias{clarkevans} \title{Clark and Evans Aggregation Index} \description{ Computes the Clark and Evans aggregation index \eqn{R} for a spatial point pattern. } \usage{ clarkevans(X, correction=c("none", "Donnelly", "cdf"), clipregion=NULL) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{correction}{ Character vector. The type of edge correction(s) to be applied. } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). See Details. } } \details{ The Clark and Evans (1954) aggregation index \eqn{R} is a crude measure of clustering or ordering of a point pattern. It is the ratio of the observed mean nearest neighbour distance in the pattern to that expected for a Poisson point process of the same intensity. A value \eqn{R>1} suggests ordering, while \eqn{R<1} suggests clustering. Without correction for edge effects, the value of \code{R} will be positively biased. Edge effects arise because, for a point of \code{X} close to the edge of the window, the true nearest neighbour may actually lie outside the window. Hence observed nearest neighbour distances tend to be larger than the true nearest neighbour distances. The argument \code{correction} specifies an edge correction or several edge corrections to be applied. 
It is a character vector containing one or more of the options \code{"none"}, \code{"Donnelly"}, \code{"guard"} and \code{"cdf"} (which are recognised by partial matching). These edge corrections are: \describe{ \item{"none":}{ No edge correction is applied. } \item{"Donnelly":}{ Edge correction of Donnelly (1978), available for rectangular windows only. The theoretical expected value of mean nearest neighbour distance under a Poisson process is adjusted for edge effects by the edge correction of Donnelly (1978). The value of \eqn{R} is the ratio of the observed mean nearest neighbour distance to this adjusted theoretical mean. } \item{"guard":}{ Guard region or buffer area method. The observed mean nearest neighbour distance for the point pattern \code{X} is re-defined by averaging only over those points of \code{X} that fall inside the sub-window \code{clipregion}. } \item{"cdf":}{ Cumulative Distribution Function method. The nearest neighbour distance distribution function \eqn{G(r)} of the stationary point process is estimated by \code{\link{Gest}} using the Kaplan-Meier type edge correction. Then the mean of the distribution is calculated from the cdf. } } If the argument \code{clipregion} is given, then the selected edge corrections will be assumed to include \code{correction="guard"}. To perform a test based on the Clark-Evans index, see \code{\link{clarkevans.test}}. } \value{ A numeric value or numeric vector, with named components \item{naive}{\eqn{R} without edge correction} \item{Donnelly}{\eqn{R} using Donnelly edge correction} \item{guard}{\eqn{R} using guard region} \item{cdf}{\eqn{R} using cdf method} (as selected by \code{correction}). The value of the \code{Donnelly} component will be \code{NA} if the window of \code{X} is not a rectangle. } \references{ Clark, P.J. and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations \emph{Ecology} \bold{35}, 445--453. Donnelly, K. 
(1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In I. Hodder (ed.) \emph{Simulation studies in archaeology}, Cambridge/New York: Cambridge University Press, pp 91--95. } \author{ John Rudge \email{rudge@esc.cam.ac.uk} with modifications by Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{clarkevans.test}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ # Example of a clustered pattern data(redwood) clarkevans(redwood) # Example of an ordered pattern data(cells) clarkevans(cells) # Random pattern X <- rpoispp(100) clarkevans(X) # How to specify a clipping region clip1 <- owin(c(0.1,0.9),c(0.1,0.9)) clip2 <- erosion(cells$window, 0.1) clarkevans(cells, clipregion=clip1) clarkevans(cells, clipregion=clip2) } \keyword{spatial} \keyword{nonparametric} spatstat/man/bei.Rd0000755000176000001440000000534712237642732013772 0ustar ripleyusers\name{bei} \alias{bei} \alias{bei.extra} \docType{data} \title{Tropical rain forest trees} \description{ A point pattern giving the locations of 3605 trees in a tropical rain forest. Accompanied by covariate data giving the elevation (altitude) and slope of elevation in the study region. } \format{ \code{bei} is an object of class \code{"ppp"} representing the point pattern of tree locations. See \code{\link{ppp.object}} for details of the format. \code{bei.extra} is a list containing two pixel images, \code{elev} (elevation in metres) and \code{grad} (norm of elevation gradient). These pixel images are objects of class \code{"im"}, see \code{\link{im.object}}. } \usage{data(bei)} \source{ Hubbell and Foster (1983), Condit, Hubbell and Foster (1996) and Condit (1998). Data files kindly supplied by Rasmus Waagepetersen. The data were collected in the forest dynamics plot of Barro Colorado Island. The study was made possible through the generous support of the U.S. National Science Foundation, the John D. 
and Catherine T. MacArthur Foundation, and the Smithsonian Tropical Research Institute. } \section{Notes}{ The dataset \code{bei} gives the positions of 3605 trees of the species \emph{Beilschmiedia pendula} (Lauraceae) in a 1000 by 500 metre rectangular sampling region in the tropical rainforest of Barro Colorado Island. The accompanying dataset \code{bei.extra} gives information about the altitude (elevation) in the study region. It is a list containing two pixel images, \code{elev} (elevation in metres) and \code{grad} (norm of elevation gradient). These data are part of a much larger dataset containing the positions of hundreds of thousands of trees belong to thousands of species; see Hubbell and Foster (1983), Condit, Hubbell and Foster (1996) and Condit (1998). The present data were analysed by Moller and Waagepetersen (2007). } \references{ Condit, R. (1998) \emph{Tropical Forest Census Plots}. Springer-Verlag, Berlin and R.G. Landes Company, Georgetown, Texas. Condit, R., Hubbell, S.P and Foster, R.B. (1996) Changes in tree species abundance in a neotropical forest: impact of climate change. \emph{Journal of Tropical Ecology} \bold{12}, 231--256. Hubbell, S.P and Foster, R.B. (1983) Diversity of canopy trees in a neotropical forest and implications for conservation. In: \emph{Tropical Rain Forest: Ecology and Management} (eds. S.L. Sutton, T.C. Whitmore and A.C. Chadwick), Blackwell Scientific Publications, Oxford, 25--41. Moller, J. and Waagepetersen, R.P. (2007) Modern spatial point process modelling and inference (with discussion). \emph{Scandinavian Journal of Statistics} \bold{34}, 643--711. } \keyword{datasets} \keyword{spatial} spatstat/man/as.data.frame.im.Rd0000755000176000001440000000160712237642732016236 0ustar ripleyusers\name{as.data.frame.im} \alias{as.data.frame.im} \title{Convert Pixel Image to Data Frame} \description{ Convert a pixel image to a data frame } \usage{ \method{as.data.frame}{im}(x, ...) 
} \arguments{ \item{x}{A pixel image (object of class \code{"im"}).} \item{\dots}{Further arguments passed to \code{\link[base:as.data.frame]{as.data.frame.default}} to determine the row names and other features.} } \details{ This function takes the pixel image \code{x} and returns a data frame with three columns containing the pixel coordinates and the pixel values. } \value{ A data frame } \examples{ # artificial image Z <- setcov(square(1)) Y <- as.data.frame(Z) head(Y) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/summary.owin.Rd0000755000176000001440000000165312237642734015701 0ustar ripleyusers\name{summary.owin} \alias{summary.owin} \title{Summary of a Spatial Window} \description{ Prints a useful description of a window object. } \usage{ \method{summary}{owin}(object, \dots) } \arguments{ \item{object}{Window (object of class \code{"owin"}).} \item{\dots}{Ignored.} } \details{ A useful description of the window \code{object} is printed. This is a method for the generic function \code{\link{summary}}. } \seealso{ \code{\link{summary}}, \code{\link{summary.ppp}}, \code{\link{print.owin}} } \examples{ summary(owin()) # the unit square data(demopat) W <- demopat$window # weird polygonal window summary(W) # describes it summary(as.mask(W)) # demonstrates current pixel resolution } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} spatstat/man/relrisk.Rd0000755000176000001440000001070612237642733014702 0ustar ripleyusers\name{relrisk} \alias{relrisk} \title{ Nonparametric Estimate of Spatially-Varying Relative Risk } \description{ Given a multitype point pattern, this function estimates the spatially-varying probability of each type of point, using kernel smoothing. 
The default smoothing bandwidth is selected by cross-validation. } \usage{ relrisk(X, sigma = NULL, ..., varcov = NULL, at = "pixels", casecontrol=TRUE) } \arguments{ \item{X}{ A multitype point pattern (object of class \code{"ppp"} which has factor valued marks). } \item{sigma}{ Optional. Standard deviation of isotropic Gaussian smoothing kernel. } \item{\dots}{ Arguments passed to \code{\link{bw.relrisk}} to select the bandwidth, or passed to \code{\link{density.ppp}} to control the pixel resolution. } \item{varcov}{ Optional. Variance-covariance matrix of anisotopic Gaussian smoothing kernel. Incompatible with \code{sigma}. } \item{at}{ String specifying whether to compute the probability values at a grid of pixel locations (\code{at="pixels"}) or only at the points of \code{X} (\code{at="points"}). } \item{casecontrol}{ Logical. Whether to treat a bivariate point pattern as consisting of cases and controls. See Details. } } \details{ If \code{X} is a bivariate point pattern (a multitype point pattern consisting of two types of points) then by default, the points of the first type (the first level of \code{marks(X)}) are treated as controls or non-events, and points of the second type are treated as cases or events. Then this command computes the spatially-varying risk of an event, i.e. the probability \eqn{p(u)} that a point at spatial location \eqn{u} will be a case. If \code{X} is a multitype point pattern with \eqn{m > 2} types, or if \code{X} is a bivariate point pattern and \code{casecontrol=FALSE}, then this command computes, for each type \eqn{j}, a nonparametric estimate of the spatially-varying risk of an event of type \eqn{j}. This is the probability \eqn{p_j(u)}{p[j](u)} that a point at spatial location \eqn{u} will belong to type \eqn{j}. 
If \code{at = "pixels"} the calculation is performed for every spatial location \eqn{u} on a fine pixel grid, and the result is a pixel image representing the function \eqn{p(u)} or a list of pixel images representing the functions \eqn{p_j(u)}{p[j](u)} for \eqn{j = 1,\ldots,m}{j = 1,...,m}. If \code{at = "points"} the calculation is performed only at the data points \eqn{x_i}{x[i]}. The result is a vector of values \eqn{p(x_i)}{p(x[i])} giving the estimated probability of a case at each data point, or a matrix of values \eqn{p_j(x_i)}{p[j](x[i])} giving the estimated probability of each possible type \eqn{j} at each data point. Estimation is performed by a simple Nadaraja-Watson type kernel smoother (Diggle, 2003). If \code{sigma} and \code{varcov} are both missing or null, then the smoothing bandwidth \code{sigma} is selected by cross-validation using \code{\link{bw.relrisk}}. } \value{ If \code{X} consists of only two types of points, the result is a pixel image (if \code{at="pixels"}) or a vector of probabilities (if \code{at="points"}). If \code{X} consists of more than two types of points, the result is: \itemize{ \item (if \code{at="pixels"}) a list of pixel images, with one image for each possible type of point. The result also belongs to the class \code{"listof"} so that it can be printed and plotted. \item (if \code{at="points"}) a matrix of probabilities, with rows corresponding to data points \eqn{x_i}{x[i]}, and columns corresponding to types \eqn{j}. } } \seealso{ \code{\link{bw.relrisk}}, \code{\link{density.ppp}}, \code{\link{Smooth.ppp}}, \code{\link{eval.im}}. \code{\link{which.max.im}}. } \examples{ data(urkiola) p <- relrisk(urkiola, 20) if(interactive()) { plot(p, main="proportion of oak") plot(eval.im(p > 0.3), main="More than 30 percent oak") data(lansing) z <- relrisk(lansing) plot(z, main="Lansing Woods") plot(which.max.im(z), main="Most common species") } } \references{ Diggle, P.J. 
(2003) \emph{Statistical analysis of spatial point patterns}, Second edition. Arnold. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{methods} \keyword{smooth} spatstat/man/as.hyperframe.Rd0000755000176000001440000000503112237642732015765 0ustar ripleyusers\name{as.hyperframe} %DontDeclareMethods \Rdversion{1.1} \alias{as.hyperframe} \alias{as.hyperframe.default} \alias{as.hyperframe.data.frame} \alias{as.hyperframe.hyperframe} \alias{as.hyperframe.listof} \title{ Convert Data to Hyperframe } \description{ Converts data from any suitable format into a hyperframe. } \usage{ as.hyperframe(x, \dots) \method{as.hyperframe}{default}(x, \dots) \method{as.hyperframe}{data.frame}(x, \dots, stringsAsFactors=FALSE) \method{as.hyperframe}{hyperframe}(x, \dots) \method{as.hyperframe}{listof}(x, \dots) } \arguments{ \item{x}{ Data in some other format. } \item{\dots}{ Optional arguments passed to \code{\link{hyperframe}}. } \item{stringsAsFactors}{ Logical. If \code{TRUE}, any column of the data frame \code{x} that contains character strings will be converted to a \code{factor}. If \code{FALSE}, no such conversion will occur. } } \details{ A hyperframe is like a data frame, except that its entries can be objects of any kind. The generic function \code{as.hyperframe} converts any suitable kind of data into a hyperframe. There are methods for the classes \code{data.frame} and \code{listof}, and a default method, all of which convert data that is like a hyperframe into a hyperframe object. (The method for the class \code{listof} converts a list of objects, of arbitrary type, into a hyperframe with one column.) These methods do not discard any information. There are also methods for other classes (see \code{\link{as.hyperframe.ppx}}) which extract the coordinates from a spatial dataset. These methods do discard some information. 
} \section{Conversion of Strings to Factors}{ Note that \code{as.hyperframe.default} will convert a character vector to a factor. It behaves like \code{\link{as.data.frame}}. However \code{as.hyperframe.data.frame} does not convert strings to factors; it respects the structure of the data frame \code{x}. The behaviour can be changed using the argument \code{stringsAsFactors}. } \value{ An object of class \code{"hyperframe"} created by \code{\link{hyperframe}}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{hyperframe}}, \code{\link{as.hyperframe.ppx}} } \examples{ df <- data.frame(x=runif(4),y=letters[1:4]) as.hyperframe(df) sims <- list() for(i in 1:3) sims[[i]] <- rpoispp(42) as.hyperframe(as.listof(sims)) } \keyword{spatial} \keyword{manip} spatstat/man/rhohat.Rd0000755000176000001440000002130512252245773014511 0ustar ripleyusers\name{rhohat} %DontDeclareMethods \alias{rhohat} \alias{rhohat.ppp} \alias{rhohat.quad} \alias{rhohat.ppm} \alias{rhohat.lpp} \alias{rhohat.lppm} \title{ Smoothing Estimate of Covariate Transformation } \description{ Computes a smoothing estimate of the intensity of a point process, as a function of a (continuous) spatial covariate. } \usage{ rhohat(object, covariate, ...) 
\method{rhohat}{ppp}(object, covariate, ..., method=c("ratio", "reweight", "transform"), smoother=c("kernel", "local"), dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) \method{rhohat}{quad}(object, covariate, ..., method=c("ratio", "reweight", "transform"), smoother=c("kernel", "local"), dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) \method{rhohat}{ppm}(object, covariate, ..., method=c("ratio", "reweight", "transform"), smoother=c("kernel", "local"), dimyx=NULL, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) \method{rhohat}{lpp}(object, covariate, ..., method=c("ratio", "reweight", "transform"), smoother=c("kernel", "local"), nd=1000, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) \method{rhohat}{lppm}(object, covariate, ..., method=c("ratio", "reweight", "transform"), smoother=c("kernel", "local"), nd=1000, eps=NULL, n = 512, bw = "nrd0", adjust=1, from = NULL, to = NULL, bwref=bw, covname, confidence=0.95) } \arguments{ \item{object}{ A point pattern (object of class \code{"ppp"} or \code{"lpp"}), a quadrature scheme (object of class \code{"quad"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{covariate}{ Either a \code{function(x,y)} or a pixel image (object of class \code{"im"}) providing the values of the covariate at any location. Alternatively one of the strings \code{"x"} or \code{"y"} signifying the Cartesian coordinates. } \item{method}{ Character string determining the smoothing method. See Details. } \item{smoother}{ Character string determining the smoothing algorithm. See Details. } \item{dimyx,eps,nd}{ Arguments controlling the pixel resolution at which the covariate will be evaluated. See Details. 
} \item{bw}{ Smoothing bandwidth or bandwidth rule (passed to \code{\link{density.default}}). } \item{adjust}{ Smoothing bandwidth adjustment factor (passed to \code{\link{density.default}}). } \item{n, from, to}{ Arguments passed to \code{\link{density.default}} to control the number and range of values at which the function will be estimated. } \item{bwref}{ Optional. An alternative value of \code{bw} to use when smoothing the reference density (the density of the covariate values observed at all locations in the window). } \item{\dots}{ Additional arguments passed to \code{\link{density.default}} or \code{\link[locfit]{locfit}}. } \item{covname}{ Optional. Character string to use as the name of the covariate. } \item{confidence}{ Confidence level for confidence intervals. A number between 0 and 1. } } \details{ If \code{object} is a point pattern, this command assumes that \code{object} is a realisation of a Poisson point process with intensity function \eqn{\lambda(u)}{lambda(u)} of the form \deqn{\lambda(u) = \rho(Z(u))}{lambda(u) = rho(Z(u))} where \eqn{Z} is the spatial covariate function given by \code{covariate}, and \eqn{\rho(z)}{rho(z)} is a function to be estimated. This command computes estimators of \eqn{\rho(z)}{rho(z)} proposed by Baddeley and Turner (2005) and Baddeley et al (2012). The covariate \eqn{Z} must have continuous values. If \code{object} is a fitted point process model, suppose \code{X} is the original data point pattern to which the model was fitted. Then this command assumes \code{X} is a realisation of a Poisson point process with intensity function of the form \deqn{ \lambda(u) = \rho(Z(u)) \kappa(u) }{ lambda(u) = rho(Z(u)) * kappa(u) } where \eqn{\kappa(u)}{kappa(u)} is the intensity of the fitted model \code{object}. A smoothing estimator of \eqn{\rho(z)}{rho(z)} is computed. The estimation procedure is determined by the character strings \code{method} and \code{smoother}. 
The estimation procedure involves computing several density estimates and combining them. The algorithm used to compute density estimates is determined by \code{smoother}: \itemize{ \item If \code{smoother="kernel"}, the smoothing procedure is based on fixed-bandwidth kernel density estimation, performed by \code{\link{density.default}}. \item If \code{smoother="local"}, the smoothing procedure is based on local likelihood density estimation, performed by \code{\link[locfit]{locfit}}. } The \code{method} determines how the density estimates will be combined to obtain an estimate of \eqn{\rho(z)}{rho(z)}: \itemize{ \item If \code{method="ratio"}, then \eqn{\rho(z)}{rho(z)} is estimated by the ratio of two density estimates. The numerator is a (rescaled) density estimate obtained by smoothing the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}. The denominator is a density estimate of the reference distribution of \eqn{Z}. \item If \code{method="reweight"}, then \eqn{\rho(z)}{rho(z)} is estimated by applying density estimation to the values \eqn{Z(y_i)}{Z(y[i])} of the covariate \eqn{Z} observed at the data points \eqn{y_i}{y[i]}, with weights inversely proportional to the reference density of \eqn{Z}. \item If \code{method="transform"}, the smoothing method is variable-bandwidth kernel smoothing, implemented by applying the Probability Integral Transform to the covariate values, yielding values in the range 0 to 1, then applying edge-corrected density estimation on the interval \eqn{[0,1]}, and back-transforming. } The covariate will be evaluated on a fine grid of locations, with spatial resolution controlled by the arguments \code{dimyx,eps,nd}. In two dimensions (i.e. if \code{object} is of class \code{"ppp"}, \code{"ppm"} or \code{"quad"}) the arguments \code{dimyx, eps} are passed to \code{\link{as.mask}} to control the pixel resolution. On a linear network (i.e. 
if \code{object} is of class \code{"lpp"} or \code{"lppm"}) the argument \code{nd} specifies the total number of test locations on the linear network, and \code{eps} specifies the linear separation between test locations. } \value{ A function value table (object of class \code{"fv"}) containing the estimated values of \eqn{\rho}{rho} for a sequence of values of \eqn{Z}. Also belongs to the class \code{"rhohat"} which has special methods for \code{print}, \code{plot} and \code{predict}. } \section{Categorical and discrete covariates}{ This technique assumes that the covariate has continuous values. It is not applicable to covariates with categorical (factor) values or discrete values such as small integers. For a categorical covariate, use \code{\link{quadratcount}(X, tess=covariate)} } \references{ Baddeley, A., Chang, Y.-M., Song, Y. and Turner, R. (2012) Nonparametric estimation of the dependence of a point process on spatial covariates. \emph{Statistics and Its Interface} \bold{5} (2), 221--236. Baddeley, A. and Turner, R. (2005) Modelling spatial point patterns in R. In: A. Baddeley, P. Gregori, J. Mateu, R. Stoica, and D. Stoyan, editors, \emph{Case Studies in Spatial Point Pattern Modelling}, Lecture Notes in Statistics number 185. Pages 23--74. Springer-Verlag, New York, 2006. ISBN: 0-387-28311-0. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{rho2hat}}, \code{\link{methods.rhohat}}, \code{\link{parres}} } \examples{ X <- rpoispp(function(x,y){exp(3+3*x)}) rho <- rhohat(X, "x") rho <- rhohat(X, function(x,y){x}) plot(rho) curve(exp(3+3*x), lty=3, col=2, add=TRUE) rhoB <- rhohat(X, "x", method="reweight") rhoC <- rhohat(X, "x", method="transform") \testonly{rh <- rhohat(X, "x", dimyx=32)} fit <- ppm(X, ~x) rr <- rhohat(fit, "y") # linear network Y <- runiflpp(30, simplenet) rhoY <- rhohat(Y, "y") } \keyword{spatial} \keyword{models} spatstat/man/AreaInter.Rd0000755000176000001440000001734612237642731015106 0ustar ripleyusers\name{AreaInter} \alias{AreaInter} \title{The Area Interaction Point Process Model} \description{ Creates an instance of the Area Interaction point process model (Widom-Rowlinson penetrable spheres model) which can then be fitted to point pattern data. } \usage{ AreaInter(r) } \arguments{ \item{r}{The radius of the discs in the area interaction process} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of the area-interaction process with disc radius \eqn{r}. } \details{ This function defines the interpoint interaction structure of a point process called the Widom-Rowlinson penetrable sphere model or area-interaction process. It can be used to fit this model to point pattern data. The function \code{\link{ppm}()}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of the area interaction structure is yielded by the function \code{AreaInter()}. See the examples below. 
In \bold{standard form}, the area-interaction process (Widom and Rowlinson, 1970; Baddeley and Van Lieshout, 1995) with disc radius \eqn{r}, intensity parameter \eqn{\kappa}{kappa} and interaction parameter \eqn{\gamma}{gamma} is a point process with probability density \deqn{ f(x_1,\ldots,x_n) = \alpha \kappa^{n(x)} \gamma^{-A(x)} }{ f(x[1],\ldots,x[n]) = alpha . kappa^n(x) . gamma^(-A(x)) } for a point pattern \eqn{x}, where \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]} represent the points of the pattern, \eqn{n(x)} is the number of points in the pattern, and \eqn{A(x)} is the area of the region formed by the union of discs of radius \eqn{r} centred at the points \eqn{x_1,\ldots,x_n}{x[1],\ldots,x[n]}. Here \eqn{\alpha}{alpha} is a normalising constant. The interaction parameter \eqn{\gamma}{gamma} can be any positive number. If \eqn{\gamma = 1}{gamma = 1} then the model reduces to a Poisson process with intensity \eqn{\kappa}{kappa}. If \eqn{\gamma < 1}{gamma < 1} then the process is regular, while if \eqn{\gamma > 1}{gamma > 1} the process is clustered. Thus, an area interaction process can be used to model either clustered or regular point patterns. Two points interact if the distance between them is less than \eqn{2r}{2 * r}. The standard form of the model, shown above, is a little complicated to interpret in practical applications. For example, each isolated point of the pattern \eqn{x} contributes a factor \eqn{\kappa \gamma^{-\pi r^2}}{kappa * gamma^(-pi * r^2)} to the probability density. In \pkg{spatstat}, the model is parametrised in a different form, which is easier to interpret. In \bold{canonical scale-free form}, the probability density is rewritten as \deqn{ f(x_1,\ldots,x_n) = \alpha \beta^{n(x)} \eta^{-C(x)} }{ f(x_1,\ldots,x_n) = alpha . beta^n(x) eta^(-C(x)) } where \eqn{\beta}{beta} is the new intensity parameter, \eqn{\eta}{eta} is the new interaction parameter, and \eqn{C(x) = B(x) - n(x)} is the interaction potential. 
Here \deqn{ B(x) = \frac{A(x)}{\pi r^2} }{ B(x) = A(x)/(pi * r^2) } is the normalised area (so that the discs have unit area). In this formulation, each isolated point of the pattern contributes a factor \eqn{\beta}{beta} to the probability density (so the first order trend is \eqn{\beta}{beta}). The quantity \eqn{C(x)} is a true interaction potential, in the sense that \eqn{C(x) = 0} if the point pattern \eqn{x} does not contain any points that lie close together (closer than \eqn{2r}{2*r} units apart). When a new point \eqn{u} is added to an existing point pattern \eqn{x}, the rescaled potential \eqn{-C(x)} increases by a value between 0 and 1. The increase is zero if \eqn{u} is not close to any point of \eqn{x}. The increase is 1 if the disc of radius \eqn{r} centred at \eqn{u} is completely contained in the union of discs of radius \eqn{r} centred at the data points \eqn{x_i}{x[i]}. Thus, the increase in potential is a measure of how close the new point \eqn{u} is to the existing pattern \eqn{x}. Addition of the point \eqn{u} contributes a factor \eqn{\beta \eta^\delta}{beta * eta^delta} to the probability density, where \eqn{\delta}{delta} is the increase in potential. The old parameters \eqn{\kappa,\gamma}{kappa,gamma} of the standard form are related to the new parameters \eqn{\beta,\eta}{beta,eta} of the canonical scale-free form, by \deqn{ \beta = \kappa \gamma^{-\pi r^2} = \kappa /\eta }{ beta = kappa * gamma^(-pi * r^2)= kappa / eta } and \deqn{ \eta = \gamma^{\pi r^2} }{ eta = gamma^(pi * r^2) } provided \eqn{\gamma}{gamma} and \eqn{\kappa}{kappa} are positive and finite. In the canonical scale-free form, the parameter \eqn{\eta}{eta} can take any nonnegative value. The value \eqn{\eta = 1}{eta = 1} again corresponds to a Poisson process, with intensity \eqn{\beta}{beta}. If \eqn{\eta < 1}{eta < 1} then the process is regular, while if \eqn{\eta > 1}{eta > 1} the process is clustered. 
The value \eqn{\eta = 0}{eta = 0} corresponds to a hard core process with hard core radius \eqn{r} (interaction distance \eqn{2r}). The \emph{nonstationary} area interaction process is similar except that the contribution of each individual point \eqn{x_i}{x[i]} is a function \eqn{\beta(x_i)}{beta(x[i])} of location, rather than a constant beta. Note the only argument of \code{AreaInter()} is the disc radius \code{r}. When \code{r} is fixed, the model becomes an exponential family. The canonical parameters \eqn{\log(\beta)}{log(beta)} and \eqn{\log(\eta)}{log(eta)} are estimated by \code{\link{ppm}()}, not fixed in \code{AreaInter()}. } \seealso{ \code{\link{ppm}}, \code{\link{pairwise.family}}, \code{\link{ppm.object}} } \section{Warnings}{ The interaction distance of this process is equal to \code{2 * r}. Two discs of radius \code{r} overlap if their centres are closer than \code{2 * r} units apart. The estimate of the interaction parameter \eqn{\eta}{eta} is unreliable if the interaction radius \code{r} is too small or too large. In these situations the model is approximately Poisson so that \eqn{\eta}{eta} is unidentifiable. As a rule of thumb, one can inspect the empty space function of the data, computed by \code{\link{Fest}}. The value \eqn{F(r)} of the empty space function at the interaction radius \code{r} should be between 0.2 and 0.8. } \examples{ \testonly{op <- spatstat.options(ngrid.disc=8)} # prints a sensible description of itself AreaInter(r=0.1) # Note the reach is twice the radius reach(AreaInter(r=1)) # Fit the stationary area interaction process to Swedish Pines data data(swedishpines) ppm(swedishpines, ~1, AreaInter(r=7)) # Fit the stationary area interaction process to `cells' data(cells) ppm(cells, ~1, AreaInter(r=0.06)) # eta=0 indicates hard core process. 
# Fit a nonstationary area interaction with log-cubic polynomial trend \dontrun{ ppm(swedishpines, ~polynom(x/10,y/10,3), AreaInter(r=7)) } \testonly{spatstat.options(op)} } \references{ Baddeley, A.J. and Van Lieshout, M.N.M. (1995). Area-interaction point processes. \emph{Annals of the Institute of Statistical Mathematics} \bold{47} (1995) 601--619. Widom, B. and Rowlinson, J.S. (1970). New model for the study of liquid-vapor phase transitions. \emph{The Journal of Chemical Physics} \bold{52} (1970) 1670--1684. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/ippm.Rd0000755000176000001440000001401312237642732014166 0ustar ripleyusers\name{ippm} \alias{ippm} \title{ Optimise Irregular Trend Parameters in Point Process Model } \description{ Experimental extension to \code{ppm}. Find optimal values of the irregular trend parameters in a point process model using Fisher scoring algorithm. } \usage{ ippm(..., iScore=NULL, start=list(), covfunargs=start, maxiter=20, tol=1e-4, progress=TRUE, stepfactor=1, dbug=FALSE) } \arguments{ \item{\dots}{ Arguments passed to \code{\link[spatstat]{ppm}} to fit the point process model. } \item{iScore}{ A named list of \R functions that compute the partial derivatives of \code{logf} with respect to each irregular parameter. See Details. } \item{start}{ Named list containing initial values of the irregular parameters over which to optimise. } \item{covfunargs}{ Argument passed to \code{\link[spatstat]{ppm}}. A named list containing values for \emph{all} irregular parameters required by the covariates in the model. Must include all the parameters named in \code{start}. } \item{maxiter}{ Integer. Maximum number of iterations of Fisher scoring algorithm. } \item{tol}{ Numeric value or vector. 
The algorithm stops when the difference between two successive estimates of the irregular parameter is less than \code{tol}. } \item{progress}{ Logical. Whether to print progress reports. } \item{stepfactor}{ Numeric value between 0 and 1 indicating that the change in the parameter between successive iterations is only a specified fraction of the step computed by the Newton-Raphson algorithm. } \item{dbug}{ Logical. Whether to print debugging output. } } \details{ This function is an experimental extension to the point process model fitting command \code{\link[spatstat]{ppm}}. The extension allows the trend of the model to include irregular parameters, which will be maximised by a Fisher scoring method. For the sake of explanation, consider a Poisson point process with intensity function \eqn{\lambda(u)}{lambda(u)} at location \eqn{u}. Assume that \deqn{ \lambda(u) = \exp(\alpha + \beta Z(u)) \, f(u, \gamma) }{ lambda(u) = \exp(alpha + beta * Z(u)) * f(u, gamma) } where \eqn{\alpha,\beta,\gamma}{alpha,beta,gamma} are parameters to be estimated, \eqn{Z(u)} is a spatial covariate function, and \eqn{f} is some known function. Then the parameters \eqn{\alpha,\beta}{alpha,beta} are called \emph{regular} because they appear in a loglinear form; the parameter \eqn{\gamma}{gamma} is called \emph{irregular}. To fit this model using \code{ippm}, we specify the intensity using the \code{trend} formula in the same way as usual for \code{\link[spatstat]{ppm}}. The trend formula is a representation of the log intensity. In the above example the log intensity is \deqn{ \log\lambda(u) = \alpha + \beta Z(u) + \log f(u, \gamma) }{ log(lambda(u)) = alpha + beta * Z(u) + log(f(u, gamma)) } So the model above would be encoded with the trend formula \code{~Z + offset(log(f))}. Note that the irregular part of the model is an \emph{offset} term, which means that it is included in the log trend as it is, without being multiplied by another regular parameter. 
To perform Fisher scoring we also need the derivative of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with respect to \eqn{\gamma}{gamma}. We call this the \emph{irregular score}. The user must write an \R function that computes the irregular score for any value of \eqn{\gamma}{gamma} at any location \code{(x,y)}. Thus, to code such a problem, \enumerate{ \item The argument \code{trend} should define the log intensity, with the irregular part as an offset; \item The argument \code{start} should be a list containing initial values of each of the irregular parameters; \item The argument \code{iScore} must be a list (with one entry for each entry of \code{start}) of functions with arguments \code{x,y,\dots}, that evaluate the partial derivatives of \eqn{\log f(u,\gamma)}{log(f(u,gamma))} with respect to each irregular parameter. } The coded example below illustrates the model with two irregular parameters \eqn{\gamma,\delta}{gamma,delta} and irregular term \deqn{ f((x,y), (\gamma, \delta)) = 1 + \exp(\gamma - \delta x^3) }{ f((x,y), (gamma, delta)) = 1 + \exp(gamma - delta * x^3) } Arguments \code{\dots} passed to \code{\link[spatstat]{ppm}} may also include \code{interaction}. In this case the model is not a Poisson point process but a more general Gibbs point process; the trend formula \code{trend} determines the first-order trend of the model (the first order component of the conditional intensity), not the intensity. } \value{ A fitted point process model (object of class \code{"ppm"}). 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link[spatstat]{ppm}} } \examples{ nd <- 32 \testonly{nd <- 10} gamma0 <- 3 delta0 <- 5 POW <- 3 # Terms in intensity Z <- function(x,y) { -2*y } f <- function(x,y,gamma,delta) { 1 + exp(gamma - delta * x^POW) } # True intensity lamb <- function(x,y,gamma,delta) { 200 * exp(Z(x,y)) * f(x,y,gamma,delta) } # Simulate realisation lmax <- max(lamb(0,0,gamma0,delta0), lamb(1,1,gamma0,delta0)) set.seed(42) X <- rpoispp(lamb, lmax=lmax, win=owin(), gamma=gamma0, delta=delta0) # Partial derivatives of log f DlogfDgamma <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) topbit/(1 + topbit) } DlogfDdelta <- function(x,y, gamma, delta) { topbit <- exp(gamma - delta * x^POW) - (x^POW) * topbit/(1 + topbit) } # irregular score Dlogf <- list(gamma=DlogfDgamma, delta=DlogfDdelta) # fit model ippm(X, ~Z + offset(log(f)), covariates=list(Z=Z, f=f), iScore=Dlogf, start=list(gamma=1, delta=1), tol=0.01, nd=nd) } \keyword{spatial} \keyword{models} spatstat/man/shift.owin.Rd0000755000176000001440000000365312237642734015323 0ustar ripleyusers\name{shift.owin} \alias{shift.owin} \title{Apply Vector Translation To Window} \description{ Applies a vector shift to a window } \usage{ \method{shift}{owin}(X, vec=c(0,0), \dots, origin=NULL) } \arguments{ \item{X}{Window (object of class \code{"owin"}).} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{Ignored} \item{origin}{Character string determining a location that will be shifted to the origin. Options are \code{"centroid"}, \code{"midpoint"} and \code{"bottomleft"}. Partially matched. } } \value{ Another window (of class \code{"owin"}) representing the result of applying the vector shift. } \details{ The window is translated by the vector \code{vec}. This is a method for the generic function \code{\link{shift}}. 
If \code{origin} is given, then it should be one of the character strings \code{"centroid"}, \code{"midpoint"} or \code{"bottomleft"}. The argument \code{vec} will be ignored; instead the shift will be performed so that the specified geometric location is shifted to the origin. If \code{origin="centroid"} then the centroid of the window will be shifted to the origin. If \code{origin="midpoint"} then the centre of the bounding rectangle of the window will be shifted to the origin. If \code{origin="bottomleft"} then the bottom left corner of the bounding rectangle of the window will be shifted to the origin. } \seealso{ \code{\link{shift}}, \code{\link{shift.ppp}}, \code{\link{periodify}}, \code{\link{rotate}}, \code{\link{affine}}, \code{\link{centroid.owin}} } \examples{ W <- owin(c(0,1),c(0,1)) X <- shift(W, c(2,3)) \dontrun{ plot(W) # no discernible difference except coordinates are different } shift(W, origin="mid") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/compatible.fasp.Rd0000755000176000001440000000233712237642732016276 0ustar ripleyusers\name{compatible.fasp} %DontDeclareMethods \alias{compatible.fasp} \title{Test Whether Function Arrays Are Compatible} \description{ Tests whether two or more function arrays (class \code{"fasp"}) are compatible. } \usage{ \method{compatible}{fasp}(A, B, \dots) } \arguments{ \item{A,B,\dots}{Two or more function arrays (object of class \code{"fasp"}).} } \details{ An object of class \code{"fasp"} can be regarded as an array of functions. Such objects are returned by the command \code{\link{alltypes}}. This command tests whether such objects are compatible (so that, for example, they could be added or subtracted). It is a method for the generic command \code{\link{compatible}}. 
The function arrays are compatible if the arrays have the same dimensions, and the corresponding elements in each cell of the array are compatible as defined by \code{\link{compatible.fv}}. } \value{ Logical value: \code{TRUE} if the objects are compatible, and \code{FALSE} if they are not. } \seealso{ \code{\link{eval.fasp}} } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/run.simplepanel.Rd0000644000176000001440000000710412250734313016325 0ustar ripleyusers\name{run.simplepanel} \alias{clear.simplepanel} \alias{redraw.simplepanel} \alias{run.simplepanel} \title{ Run Point-and-Click Interface } \description{ Execute various operations in a simple point-and-click user interface. } \usage{ run.simplepanel(P, popup=TRUE, verbose = FALSE) clear.simplepanel(P) redraw.simplepanel(P, verbose = FALSE) } \arguments{ \item{P}{ An interaction panel (object of class \code{"simplepanel"}, created by \code{\link{simplepanel}} or \code{\link{grow.simplepanel}}). } \item{popup}{ Logical. If \code{popup=TRUE} (the default), the panel will be displayed in a new popup window. If \code{popup=FALSE}, the panel will be displayed on the current graphics window if it already exists, and on a new window otherwise. } \item{verbose}{ Logical. If \code{TRUE}, debugging information will be printed. } } \details{ These commands enable the user to run a simple, robust, point-and-click interface to any \R code. The interface is implemented using only the basic graphics package in \R. The argument \code{P} is an object of class \code{"simplepanel"}, created by \code{\link{simplepanel}} or \code{\link{grow.simplepanel}}, which specifies the graphics to be displayed and the actions to be performed when the user interacts with the panel. 
The command \code{run.simplepanel(P)} activates the panel: the display is initialised and the graphics system waits for the user to click the panel. While the panel is active, the user can only interact with the panel; the \R command line interface and the \R GUI cannot be used. When the panel terminates (typically because the user clicked a button labelled Exit), control returns to the \R command line interface and the \R GUI. The command \code{clear.simplepanel(P)} clears all the display elements in the panel, resulting in a blank display except for the title of the panel. The command \code{redraw.simplepanel(P)} redraws all the buttons of the panel, according to the \code{redraw} functions contained in the panel. If \code{popup=TRUE} (the default), \code{run.simplepanel} begins by calling \code{\link[grDevices]{dev.new}} so that a new popup window is created; this window is closed using \code{\link[grDevices]{dev.off}} when \code{run.simplepanel} terminates. If \code{popup=FALSE}, the panel will be displayed on the current graphics window if it already exists, and on a new window otherwise; this window is not closed when \code{run.simplepanel} terminates. For more sophisticated control of the graphics focus (for example, to use the panel to control the display on another window), initialise the graphics devices yourself using \code{\link[grDevices]{dev.new}} or similar commands; save these devices in the shared environment \code{env} of the panel \code{P}; and write the click/redraw functions of \code{P} in such a way that they access these devices using \code{\link[grDevices]{dev.set}}. Then use \code{run.simplepanel} with \code{popup=FALSE}. } \value{ The return value of \code{run.simplepanel(P)} is the value returned by the \code{exit} function of \code{P}. See \code{\link{simplepanel}}. The functions \code{clear.simplepanel} and \code{redraw.simplepanel} return \code{NULL}. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{simplepanel}} } \examples{ if(interactive()) { example(simplepanel) run.simplepanel(P) } } \keyword{iplot} \keyword{utilities} spatstat/man/plot.im.Rd0000755000176000001440000002374312237642733014616 0ustar ripleyusers\name{plot.im} \alias{plot.im} \alias{image.im} \title{Plot a Pixel Image} \description{ Plot a pixel image. } \usage{ \method{plot}{im}(x, \dots, col=NULL, valuesAreColours=NULL, log=FALSE, ribbon=TRUE, ribside=c("right", "left", "bottom", "top"), ribsep=0.15, ribwid=0.05, ribn=1024, ribscale=1, ribargs=list(), colargs=list()) } \arguments{ \item{x}{ The pixel image to be plotted. An object of class \code{"im"} (see \code{\link{im.object}}). } \item{\dots}{ Extra arguments passed to \code{\link[graphics]{image.default}} to control the plot. See Details. } \item{col}{ Colours for displaying the pixel values. Either a character vector of colour values, an object of class \code{\link{colourmap}}, or a \code{function} as described under Details. } \item{valuesAreColours}{ Logical value. If \code{TRUE}, the pixel values of \code{x} are to be interpreted as colour values. } \item{log}{ Logical value. If \code{TRUE}, the colour map will be evenly-spaced on a logarithmic scale. } \item{ribbon}{ Logical flag indicating whether to display a ribbon showing the colour map. } \item{ribside}{ Character string indicating where to display the ribbon relative to the main image. } \item{ribsep}{ Factor controlling the space between the ribbon and the image. } \item{ribwid}{ Factor controlling the width of the ribbon. } \item{ribn}{ Number of different values to display in the ribbon. } \item{ribscale}{ Rescaling factor for tick marks. The values on the numerical scale printed beside the ribbon will be multiplied by this rescaling factor. 
} \item{ribargs}{ List of additional arguments passed to \code{\link[graphics]{image.default}} and \code{\link[graphics]{axis}} to control the display of the ribbon and its scale axis. These may override the \code{\dots} arguments. } \item{colargs}{ List of additional arguments passed to \code{col} if it is a function. } } \value{ The colour map used. An object of class \code{"colourmap"}. } \details{ This is the \code{plot} method for the class \code{"im"}. [It is also the \code{image} method for \code{"im"}.] The pixel image \code{x} is displayed on the current plot device, using equal scales on the \code{x} and \code{y} axes. If \code{ribbon=TRUE}, a legend will be plotted. The legend consists of a colour ribbon and an axis with tick-marks, showing the correspondence between the pixel values and the colour map. By default, the ribbon is placed at the right of the main image. This can be changed using the argument \code{ribside}. Arguments \code{ribsep, ribwid, ribn} control the appearance of the ribbon. The width of the ribbon is \code{ribwid} times the size of the pixel image, where `size' means the larger of the width and the height. The distance separating the ribbon and the image is \code{ribsep} times the size of the pixel image. The ribbon contains \code{ribn} different numerical values, evenly spaced between the minimum and maximum pixel values in the image \code{x}, rendered according to the chosen colour map. Arguments \code{ribscale, ribargs} control the annotation of the colour ribbon. To plot the colour ribbon without the axis and tick-marks, use \code{ribargs=list(axes=FALSE)}. Normally the pixel values are displayed using the colours given in the argument \code{col}. This may be either \itemize{ \item an explicit colour map (an object of class \code{"colourmap"}, created by the command \code{\link{colourmap}}). This is the best way to ensure that when we plot different images, the colour maps are consistent. 
\item a character vector or integer vector that specifies a set of colours. The colour mapping will be stretched to match the range of pixel values in the image \code{x}. The mapping of pixel values to colours is determined as follows. \describe{ \item{logical-valued images:}{the values \code{FALSE} and \code{TRUE} are mapped to the colours \code{col[1]} and \code{col[2]} respectively. The vector \code{col} should have length 2. } \item{factor-valued images:}{the factor levels \code{levels(x)} are mapped to the entries of \code{col} in order. The vector \code{col} should have the same length as \code{levels(x)}. } \item{numeric-valued images:}{ By default, the range of pixel values in \code{x} is divided into \code{n = length(col)} equal subintervals, which are mapped to the colours in \code{col}. (If \code{col} was not specified, it defaults to a vector of 255 colours.) Alternatively if the argument \code{zlim} is given, it should be a vector of length 2 specifying an interval of real numbers. This interval will be used instead of the range of pixel values. The interval from \code{zlim[1]} to \code{zlim[2]} will be mapped to the colours in \code{col}. This facility enables the user to plot several images using a consistent colour map. Alternatively if the argument \code{breaks} is given, then this specifies the endpoints of the subintervals that are mapped to each colour. This is incompatible with \code{zlim}. The arguments \code{col} and \code{zlim} or \code{breaks} are then passed to the function \code{\link{image.default}}. For examples of the use of these arguments, see \code{\link{image.default}}. } } \item a \code{function} in the \R language with an argument named \code{range} or \code{inputs}. If \code{col} is a function with an argument named \code{range}, and if the pixel values of \code{x} are numeric values, then the colour values will be determined by evaluating \code{col(range=range(x))}. 
The result should be a character vector containing colour values. If \code{col} is a function with an argument named \code{inputs}, and if the pixel values of \code{x} are discrete values (integer, logical, factor or character), then the colour values will be determined by evaluating \code{col(inputs=p)} where \code{p} is the set of possible pixel values. The result should be a character vector containing colour values. } Other graphical parameters controlling the display of both the pixel image and the ribbon can be passed through the \code{...} arguments to the function \code{\link[graphics]{image.default}}. A parameter is handled only if it is one of the following: \itemize{ \item a formal argument of \code{\link[graphics]{image.default}} that is operative when \code{add=TRUE}. \item one of the parameters \code{"main", "asp", "sub", "axes", "ann", "cex", "font", "cex.axis", "cex.lab", "cex.main", "cex.sub", "col.axis", "col.lab", "col.main", "col.sub", "font.axis", "font.lab", "font.main", "font.sub"} described in \code{\link[graphics]{par}}. \item the argument \code{box}, a logical value specifying whether a box should be drawn. } By default, images are plotted using image rasters rather than polygons, by setting \code{useRaster=TRUE} in \code{image.default}. Alternatively, the pixel values could be directly interpretable as colour values in \R. That is, the pixel values could be character strings that represent colours, or values of a factor whose levels are character strings representing colours. \itemize{ \item If \code{valuesAreColours=TRUE}, then the pixel values will be interpreted as colour values and displayed using these colours. \item If \code{valuesAreColours=FALSE}, then the pixel values will \emph{not} be interpreted as colour values, even if they could be. \item If \code{valuesAreColours=NULL}, the algorithm will guess what it should do. If the argument \code{col} is given, the pixel values will \emph{not} be interpreted as colour values. 
Otherwise, if all the pixel values are strings that represent colours, then they will be interpreted and displayed as colours. } If pixel values are interpreted as colours, the arguments \code{col} and \code{ribbon} will be ignored, and a ribbon will not be plotted. } \section{Image Rendering Errors and Problems}{ The help for \code{\link[graphics]{image.default}} explains that errors may occur, or images may be rendered incorrectly, on some devices, depending on the availability of colours and other device-specific constraints. An error may occur on some graphics devices if the image is very large. If this happens, try setting \code{useRaster=FALSE} in the call to \code{plot.im}. The error message \code{useRaster=TRUE can only be used with a regular grid} means that the \eqn{x} and \eqn{y} coordinates of the pixels in the image are not perfectly equally spaced, due to numerical rounding. This occurs with some images created by earlier versions of \pkg{spatstat}. To repair the coordinates in an image \code{X}, type \code{X <- as.im(X)}. 
} \seealso{ \code{\link{im.object}}, \code{\link{colourmap}}, \code{\link{contour.im}}, \code{\link{persp.im}}, \code{\link[graphics]{image.default}}, \code{\link{spatstat.options}} } \examples{ # an image Z <- setcov(owin()) plot(Z) plot(Z, ribside="bottom") # stretchable colour map plot(Z, col=terrain.colors(128), axes=FALSE) # fixed colour map tc <- colourmap(rainbow(128), breaks=seq(-1,2,length=129)) plot(Z, col=tc) # colour map function, with argument 'range' plot(Z, col=beachcolours, colargs=list(sealevel=0.5)) # tweaking the plot plot(Z, main="La vie en bleu", col.main="blue", cex.main=1.5, box=FALSE, ribargs=list(col.axis="blue", col.ticks="blue", cex.axis=0.75)) # log scale V <- eval.im(exp(exp(Z+2))/1e4) plot(V, log=TRUE, main="Log scale") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/rshift.ppp.Rd0000755000176000001440000001627512237642734015334 0ustar ripleyusers\name{rshift.ppp} %DontDeclareMethods \alias{rshift.ppp} \title{Randomly Shift a Point Pattern} \description{ Randomly shifts the points of a point pattern. } \usage{ \method{rshift}{ppp}(X, \dots, which=NULL, group) } \arguments{ \item{X}{Point pattern to be subjected to a random shift. An object of class \code{"ppp"} } \item{\dots}{ Arguments that determine the random shift. See Details. } \item{group}{ Optional. Factor specifying a grouping of the points of \code{X}, or \code{NULL} indicating that all points belong to the same group. Each group will be shifted together, and separately from other groups. By default, points in a marked point pattern are grouped according to their mark values, while points in an unmarked point pattern are treated as a single group. } \item{which}{ Optional. Identifies which groups of the pattern will be shifted, while other groups are not shifted. A vector of levels of \code{group}. 
} } \value{ A point pattern (object of class \code{"ppp"}). } \details{ This operation randomly shifts the locations of the points in a point pattern. The function \code{rshift} is generic. This function \code{rshift.ppp} is the method for point patterns. The most common use of this function is to shift the points in a multitype point pattern. By default, points of the same type are shifted in parallel (i.e. points of a common type are shifted by a common displacement vector), and independently of other types. This is useful for testing the hypothesis of independence of types (the null hypothesis that the sub-patterns of points of each type are independent point processes). In general the points of \code{X} are divided into groups, then the points within a group are shifted by a common random displacement vector. Different groups of points are shifted independently. The grouping is determined as follows: \itemize{ \item If the argument \code{group} is present, then this determines the grouping. \item Otherwise, if \code{X} is a multitype point pattern, the marks determine the grouping. \item Otherwise, all points belong to a single group. } The argument \code{group} should be a factor, of length equal to the number of points in \code{X}. Alternatively \code{group} may be \code{NULL}, which specifies that all points of \code{X} belong to a single group. By default, every group of points will be shifted. The argument \code{which} indicates that only some of the groups should be shifted, while other groups should be left unchanged. \code{which} must be a vector of levels of \code{group} (for example, a vector of types in a multitype pattern) indicating which groups are to be shifted. The displacement vector, i.e. the vector by which the data points are shifted, is generated at random. Parameters that control the randomisation and the handling of edge effects are passed through the \code{\dots} argument. 
They are \describe{ \item{radius,width,height}{ Parameters of the random shift vector. } \item{edge}{ String indicating how to deal with edges of the pattern. Options are \code{"torus"}, \code{"erode"} and \code{"none"}. } \item{clip}{ Optional. Window to which the final point pattern should be clipped. } } If the window is a rectangle, the \emph{default} behaviour is to generate a displacement vector at random with equal probability for all possible displacements. This means that the \eqn{x} and \eqn{y} coordinates of the displacement vector are independent random variables, uniformly distributed over the range of possible coordinates. Alternatively, the displacement vector can be generated by another random mechanism, controlled by the arguments \code{radius}, \code{width} and \code{height}. \describe{ \item{rectangular:}{ if \code{width} and \code{height} are given, then the displacement vector is uniformly distributed in a rectangle of these dimensions, centred at the origin. The maximum possible displacement in the \eqn{x} direction is \code{width/2}. The maximum possible displacement in the \eqn{y} direction is \code{height/2}. The \eqn{x} and \eqn{y} displacements are independent. (If \code{width} and \code{height} are actually equal to the dimensions of the observation window, then this is equivalent to the default.) } \item{radial:}{ if \code{radius} is given, then the displacement vector is generated by choosing a random point inside a disc of the given radius, centred at the origin, with uniform probability density over the disc. Thus the argument \code{radius} determines the maximum possible displacement distance. The argument \code{radius} is incompatible with the arguments \code{width} and \code{height}. } } The argument \code{edge} controls what happens when a shifted point lies outside the window of \code{X}. Options are: \describe{ \item{"none":}{ Points shifted outside the window of \code{X} simply disappear. 
} \item{"torus":}{ Toroidal or periodic boundary. Treat opposite edges of the window as identical, so that a point which disappears off the right-hand edge will re-appear at the left-hand edge. This is called a ``toroidal shift'' because it makes the rectangle topologically equivalent to the surface of a torus (doughnut). The window must be a rectangle. Toroidal shifts are undefined if the window is non-rectangular. } \item{"erode":}{ Clip the point pattern to a smaller window. If the random displacements are generated by a radial mechanism (see above), then the window of \code{X} is eroded by a distance equal to the value of the argument \code{radius}, using \code{\link{erosion}}. If the random displacements are generated by a rectangular mechanism, then the window of \code{X} is (if it is not rectangular) eroded by a distance \code{max(height,width)} using \code{\link{erosion}}; or (if it is rectangular) trimmed by a margin of width \code{width} at the left and right sides and trimmed by a margin of height \code{height} at the top and bottom. The rationale for this is that the clipping window is the largest window for which edge effects can be ignored. } } The optional argument \code{clip} specifies a smaller window to which the pattern should be restricted. 
} \seealso{ \code{\link{rshift}}, \code{\link{rshift.psp}} } \examples{ data(amacrine) # random toroidal shift # shift "on" and "off" points separately X <- rshift(amacrine) # shift "on" points and leave "off" points fixed X <- rshift(amacrine, which="on") # shift all points simultaneously X <- rshift(amacrine, group=NULL) # maximum displacement distance 0.1 units X <- rshift(amacrine, radius=0.1) # shift with erosion X <- rshift(amacrine, radius=0.1, edge="erode") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/reach.Rd0000755000176000001440000001147512237642733014315 0ustar ripleyusers\name{reach} \alias{reach} \alias{reach.ppm} \alias{reach.interact} \alias{reach.fii} \alias{reach.rmhmodel} \title{Interaction Distance of a Point Process} \description{ Computes the interaction distance of a point process. } \usage{ reach(x, \dots) \method{reach}{ppm}(x, \dots, epsilon=0) \method{reach}{interact}(x, \dots) \method{reach}{rmhmodel}(x, \dots) \method{reach}{fii}(x, \dots, epsilon) } \arguments{ \item{x}{Either a fitted point process model (object of class \code{"ppm"}), an interpoint interaction (object of class \code{"interact"}), a fitted interpoint interaction (object of class \code{"fii"}) or a point process model for simulation (object of class \code{"rmhmodel"}). } \item{epsilon}{ Numerical threshold below which interaction is treated as zero. See details. } \item{\dots}{ Other arguments are ignored. } } \value{ The interaction distance, or \code{NA} if this cannot be computed from the information given. } \details{ The `interaction distance' or `interaction range' of a point process model is the smallest distance \eqn{D} such that any two points in the process which are separated by a distance greater than \eqn{D} do not interact with each other. 
For example, the interaction range of a Strauss process (see \code{\link{Strauss}}) with parameters \eqn{\beta,\gamma,r}{beta,gamma,r} is equal to \eqn{r}, unless \eqn{\gamma=1}{gamma=1} in which case the model is Poisson and the interaction range is \eqn{0}. The interaction range of a Poisson process is zero. The interaction range of the Ord threshold process (see \code{\link{OrdThresh}}) is infinite, since two points \emph{may} interact at any distance apart. The function \code{reach(x)} is generic, with methods for the case where \code{x} is \itemize{ \item a fitted point process model (object of class \code{"ppm"}, usually obtained from the model-fitting function \code{\link{ppm}}); \item an interpoint interaction structure (object of class \code{"interact"}), created by one of the functions \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}} or \code{\link{Ord}}; \item a fitted interpoint interaction (object of class \code{"fii"}) extracted from a fitted point process model by the command \code{\link{fitin}}; \item a point process model for simulation (object of class \code{"rmhmodel"}), usually obtained from \code{\link{rmhmodel}}. } When \code{x} is an \code{"interact"} object, \code{reach(x)} returns the maximum possible interaction range for any point process model with interaction structure given by \code{x}. For example, \code{reach(Strauss(0.2))} returns \code{0.2}. When \code{x} is a \code{"ppm"} object, \code{reach(x)} returns the interaction range for the point process model represented by \code{x}. 
For example, a fitted Strauss process model with parameters \code{beta,gamma,r} will return either \code{0} or \code{r}, depending on whether the fitted interaction parameter \code{gamma} is equal or not equal to 1. For some point process models, such as the soft core process (see \code{\link{Softcore}}), the interaction distance is infinite, because the interaction terms are positive for all pairs of points. A practical solution is to compute the distance at which the interaction contribution from a pair of points falls below a threshold \code{epsilon}, on the scale of the log conditional intensity. This is done by setting the argument \code{epsilon} to a positive value. } \seealso{ \code{\link{ppm}}, \code{\link{Poisson}}, \code{\link{Strauss}}, \code{\link{StraussHard}}, \code{\link{MultiStrauss}}, \code{\link{MultiStraussHard}}, \code{\link{Softcore}}, \code{\link{DiggleGratton}}, \code{\link{Pairwise}}, \code{\link{PairPiece}}, \code{\link{Geyer}}, \code{\link{LennardJones}}, \code{\link{Saturated}}, \code{\link{OrdThresh}}, \code{\link{Ord}}, \code{\link{rmhmodel}} } \examples{ reach(Poisson()) # returns 0 reach(Strauss(r=7)) # returns 7 data(swedishpines) fit <- ppm(swedishpines, ~1, Strauss(r=7)) reach(fit) # returns 7 reach(OrdThresh(42)) # returns Inf reach(MultiStrauss(1:2, matrix(c(1,3,3,1),2,2))) # returns 3 } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/integral.im.Rd0000755000176000001440000000352212237642732015435 0ustar ripleyusers\name{integral.im} \alias{integral.im} \title{ Integral of a Pixel Image } \description{ Computes the integral of a pixel image. } \usage{ integral.im(x, ...) } \arguments{ \item{x}{ A pixel image (object of class \code{"im"}) with pixel values that can be treated as numeric or complex values. } \item{\dots}{ Ignored. 
} } \details{ This function treats the pixel image \code{x} as a function of the spatial coordinates, and computes its integral. The integral is calculated by summing the pixel values and multiplying by the area of one pixel. The pixel values of \code{x} may be numeric, integer, logical or complex. They cannot be factor or character values. The logical values \code{TRUE} and \code{FALSE} are converted to \code{1} and \code{0} respectively, so that the integral of a logical image is the total area of the \code{TRUE} pixels, in the same units as \code{unitname(x)}. For more complicated integration tasks such as computing the integral of an image over a specified subset, use \code{\link{eval.im}} to construct an integrand or \code{\link{[.im}} to extract a subset of the image. } \value{ A single numeric or complex value. } \seealso{ \code{\link{eval.im}}, \code{\link{[.im}} } \examples{ # approximate integral of f(x,y) dx dy f <- function(x,y){3*x^2 + 2*y} Z <- as.im(f, square(1)) integral.im(Z) # correct answer is 2 data(cells) D <- density(cells) integral.im(D) # should be approximately equal to number of points = 42 # integrate over the subset [0.1,0.9] x [0.2,0.8] W <- owin(c(0.1,0.9), c(0.2,0.8)) DW <- D[W, drop=FALSE] integral.im(DW) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/beachcolours.Rd0000644000176000001440000000561412237642732015676 0ustar ripleyusers\name{beachcolours} \alias{beachcolours} \alias{beachcolourmap} \title{ Create Colour Scheme for a Range of Numbers } \description{ Given a range of numerical values, this command creates a colour scheme that would be appropriate if the numbers were altitudes (elevation above or below sea level). } \usage{ beachcolours(range, sealevel = 0, monochrome = FALSE, ncolours = if (monochrome) 16 else 64, nbeach = 1) beachcolourmap(range, ...) 
} \arguments{ \item{range}{ Range of numerical values to be mapped. A numeric vector of length 2. } \item{sealevel}{ Value that should be treated as zero. A single number, lying between \code{range[1]} and \code{range[2]}. } \item{monochrome}{ Logical. If \code{TRUE} then a greyscale colour map is constructed. } \item{ncolours}{ Number of distinct colours to use. } \item{nbeach}{ Number of colours that will be yellow. } \item{\dots}{Arguments passed to \code{beachcolours}.} } \details{ Given a range of numerical values, these commands create a colour scheme that would be appropriate if the numbers were altitudes (elevation above or below sea level). Numerical values close to zero are portrayed in green (representing the waterline). Negative values are blue (representing water) and positive values are yellow to red (representing land). At least, these are the colours of land and sea in Western Australia. This colour scheme was proposed by Baddeley et al (2005). The function \code{beachcolours} returns these colours as a character vector, while \code{beachcolourmap} returns a colourmap object. The argument \code{range} should be a numeric vector of length 2 giving a range of numerical values. The argument \code{sealevel} specifies the height value that will be treated as zero, and mapped to the colour green. A vector of \code{ncolours} colours will be created, of which \code{nbeach} colours will be green. The argument \code{monochrome} is included for convenience when preparing publications. If \code{monochrome=TRUE} the colour map will be a simple grey scale containing \code{ncolours} shades from black to white. } \value{ For \code{beachcolours}, a character vector of length \code{ncolours} specifying colour values. For \code{beachcolourmap}, a colour map (object of class \code{"colourmap"}). } \references{ Baddeley, A., Turner, R., Moller, J. and Hazelton, M. (2005) Residual analysis for spatial point processes. 
\emph{Journal of the Royal Statistical Society, Series B} \bold{67}, 617--666. } \seealso{ \code{\link{colourmap}}, \code{\link[spatstat:colourtools]{colourtools}}. } \examples{ plot(beachcolourmap(c(-2,2))) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{color} spatstat/man/OrdThresh.Rd0000755000176000001440000000355412237642731015132 0ustar ripleyusers\name{OrdThresh} \alias{OrdThresh} \title{Ord's Interaction model} \description{ Creates an instance of Ord's point process model which can then be fitted to point pattern data. } \usage{ OrdThresh(r) } \arguments{ \item{r}{Positive number giving the threshold value for Ord's model.} } \value{ An object of class \code{"interact"} describing the interpoint interaction structure of a point process. } \details{ Ord's point process model (Ord, 1977) is a Gibbs point process of infinite order. Each point \eqn{x_i}{x[i]} in the point pattern \eqn{x} contributes a factor \eqn{g(a_i)}{g(a[i])} where \eqn{a_i = a(x_i, x)}{a[i] = a(x[i], x)} is the area of the tile associated with \eqn{x_i}{x[i]} in the Dirichlet tessellation of \eqn{x}. The function \eqn{g} is simply \eqn{g(a) = 1} if \eqn{a \ge r}{a >= r} and \eqn{g(a) = \gamma < 1}{g(a) = gamma < 1} if \eqn{a < r}{a < r}, where \eqn{r} is called the threshold value. This function creates an instance of Ord's model with a given value of \eqn{r}. It can then be fitted to point process data using \code{\link{ppm}}. } \seealso{ \code{\link{ppm}}, \code{\link{ppm.object}} } \references{ Baddeley, A. and Turner, R. (2000) Practical maximum pseudolikelihood for spatial point patterns. \emph{Australian and New Zealand Journal of Statistics} \bold{42}, 283--322. Ord, J.K. (1977) Contribution to the discussion of Ripley (1977). Ord, J.K. (1978) How many trees in a forest? \emph{Mathematical Scientist} \bold{3}, 23--33. Ripley, B.D. 
(1977) Modelling spatial patterns (with discussion). \emph{Journal of the Royal Statistical Society, Series B}, \bold{39}, 172 -- 212. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/crossdist.ppp.Rd0000755000176000001440000000511112237642732016033 0ustar ripleyusers\name{crossdist.ppp} %DontDeclareMethods \alias{crossdist.ppp} \title{Pairwise distances between two different point patterns} \description{ Computes the distances between pairs of points taken from two different point patterns. } \usage{ \method{crossdist}{ppp}(X, Y, \dots, periodic=FALSE, method="C", squared=FALSE) } \arguments{ \item{X,Y}{ Point patterns (objects of class \code{"ppp"}). } \item{\dots}{ Ignored. } \item{periodic}{ Logical. Specifies whether to apply a periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A matrix whose \code{[i,j]} entry is the distance from the \code{i}-th point in \code{X} to the \code{j}-th point in \code{Y}. } \details{ Given two point patterns, this function computes the Euclidean distance from each point in the first pattern to each point in the second pattern, and returns a matrix containing these distances. This is a method for the generic function \code{\link{crossdist}} for point patterns (objects of class \code{"ppp"}). This function expects two point patterns \code{X} and \code{Y}, and returns the matrix whose \code{[i,j]} entry is the distance from \code{X[i]} to \code{Y[j]}. 
Alternatively if \code{periodic=TRUE}, then provided the windows containing \code{X} and \code{Y} are identical and are rectangular, then the distances will be computed in the `periodic' sense (also known as `torus' distance): opposite edges of the rectangle are regarded as equivalent. This is meaningless if the window is not a rectangle. The argument \code{method} is not normally used. It is retained only for checking the validity of the software. If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is faster by a factor of 4. } \seealso{ \code{\link{crossdist}}, \code{\link{crossdist.default}}, \code{\link{crossdist.psp}}, \code{\link{pairdist}}, \code{\link{nndist}}, \code{\link{Gest}} } \examples{ data(cells) d <- crossdist(cells, runifpoint(6)) d <- crossdist(cells, runifpoint(6), periodic=TRUE) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/Hybrid.Rd0000755000176000001440000000653512237642731014453 0ustar ripleyusers\name{Hybrid} \alias{Hybrid} \title{ Hybrid Interaction Point Process Model } \description{ Creates an instance of a hybrid point process model which can then be fitted to point pattern data. } \usage{ Hybrid(...) } \arguments{ \item{\dots}{ Two or more interactions (objects of class \code{"interact"}) or objects which can be converted to interactions. See Details. } } \details{ A \emph{hybrid} (Baddeley, Turner, Mateu and Bevan, 2013) is a point process model created by combining two or more point process models, or an interpoint interaction created by combining two or more interpoint interactions. 
The \emph{hybrid} of two point processes, with probability densities \eqn{f(x)} and \eqn{g(x)} respectively, is the point process with probability density \deqn{h(x) = c \, f(x) \, g(x)}{h(x) = c * f(x) * g(x)} where \eqn{c} is a normalising constant. Equivalently, the hybrid of two point processes with conditional intensities \eqn{\lambda(u,x)}{lambda(u,x)} and \eqn{\kappa(u,x)}{kappa(u,x)} is the point process with conditional intensity \deqn{ \phi(u,x) = \lambda(u,x) \, \kappa(u,x). }{ phi(u,x) = lambda(u,x) * kappa(u,x). } The hybrid of \eqn{m > 2} point processes is defined in a similar way. The function \code{\link{ppm}}, which fits point process models to point pattern data, requires an argument of class \code{"interact"} describing the interpoint interaction structure of the model to be fitted. The appropriate description of a hybrid interaction is yielded by the function \code{Hybrid()}. The arguments \code{\dots} will be interpreted as interpoint interactions (objects of class \code{"interact"}) and the result will be the hybrid of these interactions. Each argument must either be an interpoint interaction (object of class \code{"interact"}), or a point process model (object of class \code{"ppm"}) from which the interpoint interaction will be extracted. The arguments \code{\dots} may also be given in the form \code{name=value}. This is purely cosmetic: it can be used to attach simple mnemonic names to the component interactions, and makes the printed output from \code{\link{print.ppm}} neater. } \value{ An object of class \code{"interact"} describing an interpoint interaction structure. } \references{ Baddeley, A., Turner, R., Mateu, J. and Bevan, A. (2013) Hybrids of Gibbs point process models and their implementation. \emph{Journal of Statistical Software} \bold{55}:11, 1--43. 
\url{http://www.jstatsoft.org/v55/i11/} } \seealso{ \code{\link{ppm}} } \examples{ Hybrid(Strauss(0.1), Geyer(0.2, 3)) Hybrid(Ha=Hardcore(0.05), St=Strauss(0.1), Ge=Geyer(0.2, 3)) fit <- ppm(redwood, ~1, Hybrid(A=Strauss(0.02), B=Geyer(0.1, 2))) fit ctr <- rmhcontrol(nrep=5e4, expand=1) plot(simulate(fit, control=ctr)) # hybrid components can be models (including hybrid models) Hybrid(fit, S=Softcore(0.5)) # plot.fii only works if every component is a pairwise interaction data(swedishpines) fit2 <- ppm(swedishpines, ~1, Hybrid(DG=DiggleGratton(2,10), S=Strauss(5))) plot(fitin(fit2)) plot(fitin(fit2), separate=TRUE, mar.panel=rep(4,4)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/affine.im.Rd0000644000176000001440000000314412237642732015055 0ustar ripleyusers\name{affine.im} %DontDeclareMethods \alias{affine.im} \title{Apply Affine Transformation To Pixel Image} \description{ Applies any affine transformation of the plane (linear transformation plus vector shift) to a pixel image. } \usage{ \method{affine}{im}(X, mat=diag(c(1,1)), vec=c(0,0), \dots) } \arguments{ \item{X}{Pixel image (object of class \code{"im"}).} \item{mat}{Matrix representing a linear transformation.} \item{vec}{Vector of length 2 representing a translation.} \item{\dots}{ Optional arguments passed to \code{\link{as.mask}} controlling the pixel resolution of the transformed image. } } \value{ Another pixel image (of class \code{"im"}) representing the result of applying the affine transformation. } \details{ The image is subjected first to the linear transformation represented by \code{mat} (multiplying on the left by \code{mat}), and then the result is translated by the vector \code{vec}. The argument \code{mat} must be a nonsingular \eqn{2 \times 2}{2 * 2} matrix. This is a method for the generic function \code{\link{affine}}. 
} \seealso{ \code{\link{affine}}, \code{\link{affine.ppp}}, \code{\link{affine.psp}}, \code{\link{affine.owin}}, \code{\link{rotate}}, \code{\link{shift}} } \examples{ X <- setcov(owin()) stretch <- diag(c(2,3)) Y <- affine(X, mat=stretch) shear <- matrix(c(1,0,0.6,1),ncol=2, nrow=2) Z <- affine(X, mat=shear) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/localpcf.Rd0000755000176000001440000001311712237642733015011 0ustar ripleyusers\name{localpcf} \alias{localpcf} \alias{localpcfinhom} \title{Local pair correlation function} \description{ Computes individual contributions to the pair correlation function from each data point. } \usage{ localpcf(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15) localpcfinhom(X, ..., delta=NULL, rmax=NULL, nr=512, stoyan=0.15, lambda=NULL, sigma=NULL, varcov=NULL) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} \item{delta}{ Smoothing bandwidth for pair correlation. The halfwidth of the Epanechnikov kernel. } \item{rmax}{ Optional. Maximum value of distance \eqn{r} for which pair correlation values \eqn{g(r)} should be computed. } \item{nr}{ Optional. Number of values of distance \eqn{r} for which pair correlation \eqn{g(r)} should be computed. } \item{stoyan}{ Optional. The value of the constant \eqn{c} in Stoyan's rule of thumb for selecting the smoothing bandwidth \code{delta}. } \item{lambda}{ Optional. Values of the estimated intensity function, for the inhomogeneous pair correlation. Either a vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. 
} \item{sigma,varcov,\dots}{ These arguments are ignored by \code{localpcf} but are passed by \code{localpcfinhom} (when \code{lambda=NULL}) to the function \code{\link{density.ppp}} to control the kernel smoothing estimation of \code{lambda}. } } \details{ \code{localpcf} computes the contribution, from each individual data point in a point pattern \code{X}, to the empirical pair correlation function of \code{X}. These contributions are sometimes known as LISA (local indicator of spatial association) functions based on pair correlation. \code{localpcfinhom} computes the corresponding contribution to the \emph{inhomogeneous} empirical pair correlation function of \code{X}. Given a spatial point pattern \code{X}, the local pcf \eqn{g_i(r)}{g[i](r)} associated with the \eqn{i}th point in \code{X} is computed by \deqn{ g_i(r) = \frac a {2 \pi n} \sum_j k(d_{i,j} - r) }{ g[i](r) = (a/(2 * pi * n) * sum[j] k(d[i,j] - r) } where the sum is over all points \eqn{j \neq i}{j != i}, \eqn{a} is the area of the observation window, \eqn{n} is the number of points in \code{X}, and \eqn{d_{ij}}{d[i,j]} is the distance between points \code{i} and \code{j}. Here \code{k} is the Epanechnikov kernel, \deqn{ k(t) = \frac 3 { 4\delta} \max(0, 1 - \frac{t^2}{\delta^2}). }{ k(t) = (3/(4*delta)) * max(0, 1 - t^2/delta^2). } Edge correction is performed using the border method (for the sake of computational efficiency): the estimate \eqn{g_i(r)}{g[i](r)} is set to \code{NA} if \eqn{r > b_i}{r > b[i]}, where \eqn{b_i}{b[i]} is the distance from point \eqn{i} to the boundary of the observation window. The smoothing bandwidth \eqn{\delta}{delta} may be specified. If not, it is chosen by Stoyan's rule of thumb \eqn{\delta = c/\hat\lambda}{delta = c/lambda} where \eqn{\hat\lambda = n/a}{lambda = n/a} is the estimated intensity and \eqn{c} is a constant, usually taken to be 0.15. The value of \eqn{c} is controlled by the argument \code{stoyan}. 
For \code{localpcfinhom}, the optional argument \code{lambda} specifies the values of the estimated intensity function. If \code{lambda} is given, it should be either a numeric vector giving the intensity values at the points of the pattern \code{X}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, a fitted point process model (object of class \code{"ppm"}) or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. If \code{lambda} is not given, then it will be estimated using a leave-one-out kernel density smoother as described in \code{\link{pcfinhom}}. } \value{ An object of class \code{"fv"}, see \code{\link{fv.object}}, which can be plotted directly using \code{\link{plot.fv}}. Essentially a data frame containing columns \item{r}{the vector of values of the argument \eqn{r} at which the function \eqn{K} has been estimated } \item{theo}{the theoretical value \eqn{K(r) = \pi r^2}{K(r) = pi * r^2} or \eqn{L(r)=r} for a stationary Poisson process } together with columns containing the values of the local pair correlation function for each point in the pattern. Column \code{i} corresponds to the \code{i}th point. The last two columns contain the \code{r} and \code{theo} values. 
} \seealso{ \code{\link{localK}}, \code{\link{localKinhom}}, \code{\link{pcf}}, \code{\link{pcfinhom}} } \examples{ data(ponderosa) X <- ponderosa g <- localpcf(X, stoyan=0.5) colo <- c(rep("grey", npoints(X)), "blue") a <- plot(g, main=c("local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) # plot only the local pair correlation function for point number 7 plot(g, est007 ~ r) gi <- localpcfinhom(X, stoyan=0.5) a <- plot(gi, main=c("inhomogeneous local pair correlation functions", "Ponderosa pines"), legend=FALSE, col=colo, lty=1) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/bdist.points.Rd0000755000176000001440000000260112237642732015641 0ustar ripleyusers\name{bdist.points} \alias{bdist.points} \title{Distance to Boundary of Window} \description{ Computes the distances from each point of a point pattern to the boundary of the window. } \usage{ bdist.points(X) } \arguments{ \item{X}{A point pattern (object of class \code{"ppp"}).} } \value{ A numeric vector, giving the distances from each point of the pattern to the boundary of the window. } \details{ This function computes, for each point \eqn{x_i}{x[i]} in the point pattern \code{X}, the shortest distance \eqn{d(x_i, W^c)}{dist(x[i], W')} from \eqn{x_i}{x[i]} to the boundary of the window \eqn{W} of observation. If the window \code{X$window} is of type \code{"rectangle"} or \code{"polygonal"}, then these distances are computed by analytic geometry and are exact, up to rounding errors. If the window is of type \code{"mask"} then the distances are computed using the real-valued distance transform, which is an approximation with maximum error equal to the width of one pixel in the mask. 
} \seealso{ \code{\link{bdist.pixels}}, \code{\link{bdist.tiles}}, \code{\link{ppp.object}}, \code{\link{erosion}} } \examples{ data(cells) d <- bdist.points(cells) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/amacrine.Rd0000755000176000001440000000255412237642732015007 0ustar ripleyusers\name{amacrine} \alias{amacrine} \docType{data} \title{Hughes' Amacrine Cell Data} \description{ Austin Hughes' data: a point pattern of displaced amacrine cells in the retina of a rabbit. A marked point pattern. } \format{ An object of class \code{"ppp"} representing the point pattern of cell locations. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of cell \cr \code{y} \tab Cartesian \eqn{y}-coordinate of cell \cr \code{marks} \tab factor with levels \code{off} and \code{on} \cr \tab indicating ``off'' and ``on'' cells } See \code{\link{ppp.object}} for details of the format. } \usage{data(amacrine)} \source{Peter Diggle, personal communication} \section{Notes}{ Austin Hughes' data: a point pattern of displaced amacrine cells in the retina of a rabbit. 152 ``on'' cells and 142 ``off'' cells in a rectangular sampling frame. The true dimensions of the rectangle are 1060 by 662 microns. The coordinates here are scaled to a rectangle of height 1 and width \eqn{1060/662 = 1.601} so the unit of measurement is approximately 662 microns. The data were analysed by Diggle (1986). } \references{ Diggle, P. J. (1986). Displaced amacrine cells in the retina of a rabbit: analysis of a bivariate spatial point pattern. \emph{J. Neurosci. Meth.} \bold{18}, 115--125. 
} \keyword{datasets} \keyword{spatial} spatstat/man/model.matrix.ppm.Rd0000755000176000001440000000624412237642733016427 0ustar ripleyusers\name{model.matrix.ppm} \alias{model.matrix.ppm} \alias{model.matrix.kppm} \alias{model.matrix.lppm} \title{Extract Design Matrix from Point Process Model} \description{ Given a point process model that has been fitted to spatial point pattern data, this function extracts the design matrix of the model. } \usage{ \method{model.matrix}{ppm}(object, data=model.frame(object), ..., keepNA=TRUE) \method{model.matrix}{kppm}(object, data=model.frame(object), ..., keepNA=TRUE) \method{model.matrix}{lppm}(object, data=model.frame(object), ..., keepNA=TRUE) } \arguments{ \item{object}{ The fitted point process model. An object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}. } \item{data}{ A model frame, containing the data required for the Berman-Turner device. } \item{keepNA}{ Logical. Determines whether rows containing NA values will be deleted or retained. } \item{\dots}{ Other arguments (such as \code{na.action}) passed to \code{\link{model.matrix.lm}}. } } \details{ These commands are methods for the generic function \code{\link{model.matrix}}. They extracts the design matrix of a spatial point process model (class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}). More precisely, this command extracts the design matrix of the generalised linear model associated with a spatial point process model. The \code{object} must be a fitted point process model (object of class \code{"ppm"} or \code{"kppm"} or \code{"lppm"}) fitted to spatial point pattern data. Such objects are produced by the model-fitting functions \code{\link{ppm}}, \code{\link{kppm}} and \code{\link{lppm}}. The methods \code{model.matrix.ppm}, \code{model.matrix.kppm} and \code{model.matrix.lppm} extract the model matrix for the GLM. 
The result is a matrix, with one row for every quadrature point in the fitting procedure, and one column for every constructed covariate in the design matrix. If there are \code{NA} values in the covariates, the argument \code{keepNA} determines whether to retain or delete the corresponding rows of the model matrix. The default \code{keepNA=TRUE} is to retain them. Note that this differs from the default behaviour of many other methods for \code{model.matrix}, which typically delete rows containing \code{NA}. The quadrature points themselves can be extracted using \code{\link{quad.ppm}}. } \value{ A matrix. Columns of the matrix are covariates in the model. Rows of the matrix correspond to quadrature points in the fitting procedure (provided \code{keepNA=TRUE}). } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{model.matrix}}, \code{\link{model.images}}, \code{\link{ppm}}, \code{\link{kppm}}, \code{\link{lppm}}, \code{\link{ppm.object}}, \code{\link{quad.ppm}}, \code{\link{residuals.ppm}} } \examples{ fit <- ppm(cells, ~x) head(model.matrix(fit)) # matrix with two columns: '(Intercept)' and 'x' kfit <- kppm(redwood, ~x, "Thomas") m <- model.matrix(kfit) } \keyword{spatial} \keyword{models} spatstat/man/contour.listof.Rd0000644000176000001440000000417212237642732016213 0ustar ripleyusers\name{contour.listof} \alias{contour.listof} \alias{image.listof} \title{Plot a List of Things} \description{ Plots a list of things, either as an array of contour plots, or as an array of images. } \usage{ \method{contour}{listof}(x, \dots) \method{image}{listof}(x, \dots, equal.ribbon=FALSE) } \arguments{ \item{x}{ An object of the class \code{"listof"}. Essentially a list of objects. } \item{\dots}{ Arguments passed to \code{\link{plot.listof}} to control the plot, and passed to the appropriate plot command for each element of the list. 
} \item{equal.ribbon}{ Logical. If \code{TRUE}, the colour maps of all the images are the same. If \code{FALSE}, the colour map of each image is adjusted to the range of values of that image. Applies only when all the elements of \code{x} are pixel images (objects of class \code{"im"}). } } \value{ Null. } \details{ These are methods for the generic plot commands \code{contour} and \code{image}, for the class \code{"listof"}. The commands will display each object in the list \code{x}, either as a contour plot (\code{contour.listof}) or a pixel image plot (\code{image.listof}), with the displays laid out in a grid. See \code{\link{plot.listof}} for more information. For the image plot, if \code{equal.ribbon=FALSE}, the images are rendered using different colour maps, which are displayed as colour ribbons beside each image. If \code{equal.ribbon=TRUE}, the images are rendered using the same colour map, and a single colour ribbon will be displayed at the right side of the array. The colour maps and the placement of the colour ribbons are controlled by arguments \code{\dots} passed to \code{\link{plot.im}}. } \seealso{ \code{\link{plot.listof}}, \code{\link{plot.im}} } \examples{ # Multitype point pattern contour(D <- density(split(amacrine))) image(D, equal.ribbon=TRUE, main="", col.ticks="red", col.axis="red") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{hplot} spatstat/man/timed.Rd0000644000176000001440000000605612237642734014332 0ustar ripleyusers\name{timed} \alias{timed} \title{ Record the Computation Time } \description{ Saves the result of a calculation as an object of class \code{"timed"} which includes information about the time taken to compute the result. The computation time is printed when the object is printed. 
} \usage{ timed(x, ..., starttime = NULL, timetaken = NULL) } \arguments{ \item{x}{ An expression to be evaluated, or an object that has already been evaluated. } \item{starttime}{ The time at which the computation is defined to have started. The default is the current time. Ignored if \code{timetaken} is given. } \item{timetaken}{ The length of time taken to perform the computation. The default is the time taken to evaluate \code{x}. } \item{\dots}{ Ignored. } } \details{ This is a simple mechanism for recording how long it takes to perform complicated calculations (usually for the purposes of reporting in a publication). If \code{x} is an expression to be evaluated, \code{timed(x)} evaluates the expression and measures the time taken to evaluate it. The result is saved as an object of the class \code{"timed"}. Printing this object displays the computation time. If \code{x} is an object which has already been computed, then the time taken to compute the object can be specified either directly by the argument \code{timetaken}, or indirectly by the argument \code{starttime}. \itemize{ \item \code{timetaken} is the duration of time taken to perform the computation. It should be the difference of two clock times returned by \code{\link{proc.time}}. Typically the user sets \code{begin <- proc.time()} before commencing the calculations, then \code{end <- proc.time()} after completing the calculations, and then sets \code{timetaken <- end - begin}. \item \code{starttime} is the clock time at which the computation started. It should be a value that was returned by \code{\link{proc.time}} at some earlier time when the calculations commenced. When \code{timed} is called, the computation time will be taken as the difference between the current clock time and \code{starttime}. Typically the user sets \code{begin <- proc.time()} before commencing the calculations, and when the calculations are completed, the user calls \code{result <- timed(result, starttime=begin)}. 
} If the result of evaluating \code{x} belongs to other S3 classes, then the result of \code{timed(x, \dots)} also inherits these classes, and printing the object will display the appropriate information for these classes as well. } \value{ An object inheriting the class \code{"timed"}. } \examples{ timed(envelope(cells, nsim=19)) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line spatstat/man/latest.news.Rd0000755000176000001440000000246412237642732015477 0ustar ripleyusers\name{latest.news} \alias{latest.news} \title{ Print News About Latest Version of Package } \description{ Prints the news documentation for the current version of \code{spatstat} or another specified package. } \usage{ latest.news(package = "spatstat") } \arguments{ \item{package}{ Name of package for which the latest news should be printed. } } \details{ By default, this function prints the news documentation about changes in the current installed version of the \pkg{spatstat} package. The function can be called simply by typing its name without parentheses (see the Examples). If \code{package} is given, then the function reads the news for the specified package from its \code{NEWS} file (if it has one) and prints only the entries that refer to the current version of the package. To see the news for all previous versions as well as the current version, use the \R utility \code{\link[utils]{news}}. See the Examples. } \value{ Null. 
} \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link[utils]{news}} } \examples{ if(interactive()) { # current news latest.news # all news news(package="spatstat") } } \keyword{documentation} spatstat/man/lpp.Rd0000755000176000001440000000565312237642733014027 0ustar ripleyusers\name{lpp} \alias{lpp} \title{ Create Point Pattern on Linear Network } \description{ Creates an object of class \code{"lpp"} that represents a point pattern on a linear network. } \usage{ lpp(X, L) } \arguments{ \item{X}{ Locations of the points. A matrix or data frame of coordinates, or a point pattern object (of class \code{"ppp"}) or other data acceptable to \code{\link{as.ppp}}. } \item{L}{ Linear network (object of class \code{"linnet"}). } } \details{ This command creates an object of class \code{"lpp"} that represents a point pattern on a linear network. Normally \code{X} is a point pattern. The points of \code{X} should lie on the lines of \code{L}. Alternatively \code{X} may be a matrix or data frame containing at least two columns. \itemize{ \item Usually the first two columns of \code{X} will be interpreted as spatial coordinates, and any remaining columns as marks. \item The exception occurs if \code{X} is a data frame with columns named \code{x}, \code{y}, \code{seg} and \code{tp}. Then \code{x} and \code{y} will be interpreted as spatial coordinates, and \code{seg} and \code{tp} as local coordinates, with \code{seg} indicating which line segment of \code{L} the point lies on, and \code{tp} indicating how far along the segment the point lies (normalised to 1). Any remaining columns will be interpreted as marks. } } \section{Note on changed format}{ The internal format of \code{"lpp"} objects was changed in \pkg{spatstat} version \code{1.28-0}. Objects in the old format are still handled correctly, but computations are faster in the new format. 
To convert an object \code{X} from the old format to the new format, use \code{X <- lpp(as.ppp(X), as.linnet(X))}. } \value{ An object of class \code{"lpp"}. Also inherits the class \code{"ppx"}. } \author{ Ang Qi Wei \email{aqw07398@hotmail.com} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{as.lpp}} for converting data to an \code{lpp} object. See \code{\link{methods.lpp}} and \code{\link{methods.ppx}} for other methods applicable to \code{lpp} objects. Calculations on an \code{lpp} object: \code{\link{intensity.lpp}}, \code{\link{distfun.lpp}}, \code{\link{nndist.lpp}}, \code{\link{nnwhich.lpp}}, \code{\link{nncross.lpp}}, \code{\link{nnfun.lpp}}. Summary functions: \code{\link{linearK}}, \code{\link{linearKinhom}}, \code{\link{linearpcf}}, \code{\link{linearKdot}}, \code{\link{linearKcross}}, \code{\link{linearmarkconnect}}, etc. Random point patterns on a linear network can be generated by \code{\link{rpoislpp}} or \code{\link{runiflpp}}. See \code{\link{linnet}} for linear networks. } \examples{ example(linnet) xx <- list(x=c(-1.5,0,0.5,1.5), y=c(1.5,3,4.5,1.5)) X <- lpp(xx, letterA) plot(X) X summary(X) } \keyword{spatial} spatstat/man/print.owin.Rd0000755000176000001440000000161212237642733015332 0ustar ripleyusers\name{print.owin} \alias{print.owin} \title{Print Brief Details of a Spatial Window} \description{ Prints a very brief description of a window object. } \usage{ \method{print}{owin}(x, \dots) } \arguments{ \item{x}{Window (object of class \code{"owin"}).} \item{\dots}{Ignored.} } \details{ A very brief description of the window \code{x} is printed. This is a method for the generic function \code{\link{print}}. 
} \seealso{ \code{\link{print}}, \code{\link{print.ppp}}, \code{\link{summary.owin}} } \examples{ owin() # the unit square data(demopat) W <- demopat$window W # just says it is polygonal as.mask(W) # just says it is a binary image } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{print} spatstat/man/rpoispp.Rd0000755000176000001440000001042512237642734014722 0ustar ripleyusers\name{rpoispp} \alias{rpoispp} \title{Generate Poisson Point Pattern} \description{ Generate a random point pattern using the (homogeneous or inhomogeneous) Poisson process. Includes CSR (complete spatial randomness). } \usage{ rpoispp(lambda, lmax, win, \dots) } \arguments{ \item{lambda}{ Intensity of the Poisson process. Either a single positive number, a \code{function(x,y, \dots)}, or a pixel image. } \item{lmax}{ An upper bound for the value of \code{lambda(x,y)}, if \code{lambda} is a function. } \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. Ignored if \code{lambda} is a pixel image. } \item{\dots}{ Arguments passed to \code{lambda} if it is a function. } } \value{ The simulated point pattern (an object of class \code{"ppp"}). } \details{ If \code{lambda} is a single number, then this algorithm generates a realisation of the uniform Poisson process (also known as Complete Spatial Randomness, CSR) inside the window \code{win} with intensity \code{lambda} (points per unit area). If \code{lambda} is a function, then this algorithm generates a realisation of the inhomogeneous Poisson process with intensity function \code{lambda(x,y,\dots)} at spatial location \code{(x,y)} inside the window \code{win}. The function \code{lambda} must work correctly with vectors \code{x} and \code{y}. 
The value \code{lmax} must be given and must be an upper bound on the values of \code{lambda(x,y,\dots)} for all locations \code{(x, y)} inside the window \code{win}. If \code{lambda} is a pixel image object of class \code{"im"} (see \code{\link{im.object}}), this algorithm generates a realisation of the inhomogeneous Poisson process with intensity equal to the pixel values of the image. (The value of the intensity function at an arbitrary location is the pixel value of the nearest pixel.) The argument \code{win} is ignored; the window of the pixel image is used instead. To generate an inhomogeneous Poisson process the algorithm uses ``thinning'': it first generates a uniform Poisson process of intensity \code{lmax}, then randomly deletes or retains each point, independently of other points, with retention probability \eqn{p(x,y) = \lambda(x,y)/\mbox{lmax}}{p(x,y) = lambda(x,y)/lmax}. For \emph{marked} point patterns, use \code{\link{rmpoispp}}. } \section{Warning}{ Note that \code{lambda} is the \bold{intensity}, that is, the expected number of points \bold{per unit area}. The total number of points in the simulated pattern will be random with expected value \code{mu = lambda * a} where \code{a} is the area of the window \code{win}. } \seealso{ \code{\link{rmpoispp}} for Poisson \emph{marked} point patterns, \code{\link{runifpoint}} for a fixed number of independent uniform random points; \code{\link{rpoint}}, \code{\link{rmpoint}} for a fixed number of independent random points with any distribution; \code{\link{rMaternI}}, \code{\link{rMaternII}}, \code{\link{rSSI}}, \code{\link{rStrauss}}, \code{\link{rstrat}} for random point processes with spatial inhibition or regularity; \code{\link{rThomas}}, \code{\link{rGaussPoisson}}, \code{\link{rMatClust}}, \code{\link{rcell}} for random point processes exhibiting clustering; \code{\link{rmh.default}} for Gibbs processes. See also \code{\link{ppp.object}}, \code{\link{owin.object}}. 
} \examples{ # uniform Poisson process with intensity 100 in the unit square pp <- rpoispp(100) # uniform Poisson process with intensity 1 in a 10 x 10 square pp <- rpoispp(1, win=owin(c(0,10),c(0,10))) # plots should look similar ! # inhomogeneous Poisson process in unit square # with intensity lambda(x,y) = 100 * exp(-3*x) # Intensity is bounded by 100 pp <- rpoispp(function(x,y) {100 * exp(-3*x)}, 100) # How to tune the coefficient of x lamb <- function(x,y,a) { 100 * exp( - a * x)} pp <- rpoispp(lamb, 100, a=3) # pixel image Z <- as.im(function(x,y){100 * sqrt(x+y)}, unit.square()) pp <- rpoispp(Z) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/Kmulti.inhom.Rd0000644000176000001440000002226412237642731015602 0ustar ripleyusers\name{Kmulti.inhom} \alias{Kmulti.inhom} \title{ Inhomogeneous Marked K-Function } \description{ For a marked point pattern, estimate the inhomogeneous version of the multitype \eqn{K} function which counts the expected number of points of subset \eqn{J} within a given distance from a typical point in subset \code{I}, adjusted for spatially varying intensity. } \usage{ Kmulti.inhom(X, I, J, lambdaI=NULL, lambdaJ=NULL, \dots, r=NULL, breaks=NULL, correction=c("border", "isotropic", "Ripley", "translate"), lambdaIJ=NULL, sigma=NULL, varcov=NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} will be computed. It must be a marked point pattern. See under Details. } \item{I}{Subset index specifying the points of \code{X} from which distances are measured. See Details. } \item{J}{Subset index specifying the points in \code{X} to which distances are measured. See Details. } \item{lambdaI}{ Optional. Values of the estimated intensity of the sub-process \code{X[I]}. 
Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[I]}, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity of the sub-process \code{X[J]}. Either a pixel image (object of class \code{"im"}), a numeric vector containing the intensity values at each of the points in \code{X[J]}, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{\dots}{Ignored.} \item{r}{Optional. Numeric vector. The values of the argument \eqn{r} at which the multitype \eqn{K} function \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{breaks}{An alternative to the argument \code{r}. Not normally invoked by the user. See the \bold{Details} section. } \item{correction}{ A character vector containing any selection of the options \code{"border"}, \code{"bord.modif"}, \code{"isotropic"}, \code{"Ripley"}, \code{"translate"}, \code{"none"} or \code{"best"}. It specifies the edge correction(s) to be applied. } \item{lambdaIJ}{ Optional. A matrix containing estimates of the product of the intensities \code{lambdaI} and \code{lambdaJ} for each pair of points, the first point } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambda} is estimated by kernel smoothing. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). 
Essentially a data frame containing numeric columns \item{r}{the values of the argument \eqn{r} at which the function \eqn{K_{IJ}(r)}{KIJ(r)} has been estimated } \item{theo}{the theoretical value of \eqn{K_{IJ}(r)}{KIJ(r)} for a marked Poisson process, namely \eqn{\pi r^2}{pi * r^2} } together with a column or columns named \code{"border"}, \code{"bord.modif"}, \code{"iso"} and/or \code{"trans"}, according to the selected edge corrections. These columns contain estimates of the function \eqn{K_{IJ}(r)}{KIJ(r)} obtained by the edge corrections named. } \details{ The function \code{Kmulti.inhom} is the counterpart, for spatially-inhomogeneous marked point patterns, of the multitype \eqn{K} function \code{\link{Kmulti}}. Suppose \eqn{X} is a marked point process, with marks of any kind. Suppose \eqn{X_I}{X[I]}, \eqn{X_J}{X[J]} are two sub-processes, possibly overlapping. Typically \eqn{X_I}{X[I]} would consist of those points of \eqn{X} whose marks lie in a specified range of mark values, and similarly for \eqn{X_J}{X[J]}. Suppose that \eqn{\lambda_I(u)}{lambdaI(u)}, \eqn{\lambda_J(u)}{lambdaJ(u)} are the spatially-varying intensity functions of \eqn{X_I}{X[I]} and \eqn{X_J}{X[J]} respectively. Consider all the pairs of points \eqn{(u,v)} in the point process \eqn{X} such that the first point \eqn{u} belongs to \eqn{X_I}{X[I]}, the second point \eqn{v} belongs to \eqn{X_J}{X[J]}, and the distance between \eqn{u} and \eqn{v} is less than a specified distance \eqn{r}. Give this pair \eqn{(u,v)} the numerical weight \eqn{1/(\lambda_I(u)\lambda_J(u))}{1/(lambdaI(u) lambdaJ(u))}. Calculate the sum of these weights over all pairs of points as described. This sum (after appropriate edge-correction and normalisation) is the estimated inhomogeneous multitype \eqn{K} function. The argument \code{X} must be a point pattern (object of class \code{"ppp"}) or any data that are acceptable to \code{\link{as.ppp}}. 
The arguments \code{I} and \code{J} specify two subsets of the point pattern. They may be any type of subset indices, for example, logical vectors of length equal to \code{npoints(X)}, or integer vectors with entries in the range 1 to \code{npoints(X)}, or negative integer vectors. Alternatively, \code{I} and \code{J} may be \bold{functions} that will be applied to the point pattern \code{X} to obtain index vectors. If \code{I} is a function, then evaluating \code{I(X)} should yield a valid subset index. This option is useful when generating simulation envelopes using \code{\link{envelope}}. The argument \code{lambdaI} supplies the values of the intensity of the sub-process identified by index \code{I}. It may be either \describe{ \item{a pixel image}{(object of class \code{"im"}) which gives the values of the intensity of \code{X[I]} at all locations in the window containing \code{X}; } \item{a numeric vector}{containing the values of the intensity of \code{X[I]} evaluated only at the data points of \code{X[I]}. The length of this vector must equal the number of points in \code{X[I]}. } \item{a function}{ of the form \code{function(x,y)} which can be evaluated to give values of the intensity at any locations. } \item{omitted:}{ if \code{lambdaI} is omitted then it will be estimated using a leave-one-out kernel smoother. } } If \code{lambdaI} is omitted, then it will be estimated using a `leave-one-out' kernel smoother, as described in Baddeley, Moller and Waagepetersen (2000). The estimate of \code{lambdaI} for a given point is computed by removing the point from the point pattern, applying kernel smoothing to the remaining points using \code{\link{density.ppp}}, and evaluating the smoothed intensity at the point in question. The smoothing kernel bandwidth is controlled by the arguments \code{sigma} and \code{varcov}, which are passed to \code{\link{density.ppp}} along with any extra arguments. 
Similarly \code{lambdaJ} supplies the values of the intensity of the sub-process identified by index \code{J}. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{K_{IJ}(r)}{KIJ(r)} should be evaluated. It is also used to determine the breakpoints (in the sense of \code{\link{hist}}) for the computation of histograms of distances. First-time users would be strongly advised not to specify \code{r}. However, if it is specified, \code{r} must satisfy \code{r[1] = 0}, and \code{max(r)} must be larger than the radius of the largest disc contained in the window. Biases due to edge effects are treated in the same manner as in \code{\link{Kinhom}}. The edge corrections implemented here are \describe{ \item{border}{the border method or ``reduced sample'' estimator (see Ripley, 1988). This is the least efficient (statistically) and the fastest to compute. It can be computed for a window of arbitrary shape. } \item{isotropic/Ripley}{Ripley's isotropic correction (see Ripley, 1988; Ohser, 1983). This is currently implemented only for rectangular windows. } \item{translate}{Translation correction (Ohser, 1983). Implemented for all window geometries. } } The pair correlation function \code{\link{pcf}} can also be applied to the result of \code{Kmulti.inhom}. } \references{ Baddeley, A., Moller, J. and Waagepetersen, R. (2000) Non- and semiparametric estimation of interaction in inhomogeneous point patterns. \emph{Statistica Neerlandica} \bold{54}, 329--350. 
} \seealso{ \code{\link{Kmulti}}, \code{\link{Kdot.inhom}}, \code{\link{Kcross.inhom}}, \code{\link{pcf}} } \examples{ # Finnish Pines data: marked by diameter and height plot(finpines, which.marks="height") I <- (marks(finpines)$height <= 2) J <- (marks(finpines)$height > 3) K <- Kmulti.inhom(finpines, I, J) plot(K) # functions determining subsets f1 <- function(X) { marks(X)$height <= 2 } f2 <- function(X) { marks(X)$height > 3 } K <- Kmulti.inhom(finpines, f1, f2) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/pointsOnLines.Rd0000755000176000001440000000337212237642733016034 0ustar ripleyusers\name{pointsOnLines} \alias{pointsOnLines} \title{Place Points Evenly Along Specified Lines} \description{ Given a line segment pattern, place a series of points at equal distances along each line segment. } \usage{ pointsOnLines(X, eps = NULL, np = 1000, shortok=TRUE) } \arguments{ \item{X}{A line segment pattern (object of class \code{"psp"}).} \item{eps}{Spacing between successive points.} \item{np}{Approximate total number of points (incompatible with \code{eps}).} \item{shortok}{ Logical. If \code{FALSE}, very short segments (of length shorter than \code{eps}) will not generate any points. If \code{TRUE}, a very short segment will be represented by its midpoint. } } \details{ For each line segment in the pattern \code{X}, a succession of points is placed along the line segment. These points are equally spaced at a distance \code{eps}, except for the first and last points in the sequence. The spacing \code{eps} is measured in coordinate units of \code{X}. If \code{eps} is not given, then it is determined by \code{eps = len/np} where \code{len} is the total length of the segments in \code{X}. The actual number of points will then be slightly larger than \code{np}. 
} \value{ A point pattern (object of class \code{"ppp"}) in the same window as \code{X}. } \seealso{ \code{\link{psp}}, \code{\link{ppp}}, \code{\link{runifpointOnLines}} } \examples{ X <- psp(runif(20), runif(20), runif(20), runif(20), window=owin()) Y <- pointsOnLines(X, eps=0.05) plot(X, main="") plot(Y, add=TRUE, pch="+") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/matchingdist.Rd0000755000176000001440000000751612237642733015712 0ustar ripleyusers\name{matchingdist} \alias{matchingdist} \title{Distance for a Point Pattern Matching} \description{ Computes the distance associated with a matching between two point patterns. } \usage{ matchingdist(matching, type = NULL, cutoff = NULL, q = NULL) } \arguments{ \item{matching}{A point pattern matching (an object of class \code{"pppmatching"}).} \item{type}{ A character string giving the type of distance to be computed. One of \code{"spa"}, \code{"ace"} or \code{"mat"}. See details below. } \item{cutoff}{ The value \eqn{> 0} at which interpoint distances are cut off. } \item{q}{ The order of the average that is applied to the interpoint distances. May be \code{Inf}, in which case the maximum of the interpoint distances is taken. } } \details{ Computes the distance specified by \code{type}, \code{cutoff}, and \code{order} for a point matching. If any of these arguments are not provided, the function uses the corresponding elements of \code{matching} (if available). For the type \code{"spa"} (subpattern assignment) it is assumed that the points of the point pattern with the smaller cardinality \eqn{m} are matched to a \eqn{m}-point subpattern of the point pattern with the larger cardinality \eqn{n} in a 1-1 way. 
The distance is then given as the \code{q}-th order average of the \eqn{m} distances between matched points (minimum of Euclidean distance and \code{cutoff}) and \eqn{n-m} "penalty distances" of value \code{cutoff}. For the type \code{"ace"} (assignment only if cardinalities equal) the matching is assumed to be 1-1 if the cardinalities of the point patterns are the same, in which case the \code{q}-th order average of the matching distances (minimum of Euclidean distance and \code{cutoff}) is taken. If the cardinalities are different, the matching may be arbitrary and the distance returned is always equal to \code{cutoff}. For the type \code{"mat"} (mass transfer) it is assumed that each point of the point pattern with the smaller cardinality \eqn{m} has mass \eqn{1}, each point of the point pattern with the larger cardinality \eqn{n} has mass \eqn{m/n}, and fractions of these masses are matched in such a way that each point contributes exactly its mass. The distance is then given as the \code{q}-th order weighted average of all distances (minimum of Euclidean distance and \code{cutoff}) of (partially) matched points with weights equal to the fractional masses divided by \eqn{m}. If the cardinalities of the two point patterns are equal, \code{matchingdist(m, type, cutoff, q)} yields the same result regardless of whether \code{type} is \code{"spa"}, \code{"ace"} or \code{"mat"}. } \value{ Numeric value of the distance associated with the matching. 
} \author{ Dominic Schuhmacher \email{dominic.schuhmacher@stat.unibe.ch} \url{http://www.dominic.schuhmacher.name} } \seealso{ \code{\link{pppdist}} \code{\link{pppmatching.object}} } \examples{ # an optimal matching X <- runifpoint(20) Y <- runifpoint(20) m.opt <- pppdist(X, Y) summary(m.opt) matchingdist(m.opt) # is the same as the distance given by summary(m.opt) # sequential nearest neighbour matching # (go through all points of point pattern X in sequence # and match each point with the closest point of Y that is # still unmatched) am <- matrix(0, 20, 20) h <- matrix(c(1:20, rep(0,20)), 20, 2) h[1,2] = nncross(X[1],Y)[1,2] for (i in 2:20) { nn <- nncross(X[i],Y[-h[1:(i-1),2]])[1,2] h[i,2] <- ((1:20)[-h[1:(i-1),2]])[nn] } am[h] <- 1 m.nn <- pppmatching(X, Y, am) matchingdist(m.nn, type="spa", cutoff=1, q=1) # is >= the distance obtained for m.opt # in most cases strictly > \dontrun{ par(mfrow=c(1,2)) plot(m.opt) plot(m.nn) text(X$x, X$y, 1:20, pos=1, offset=0.3, cex=0.8) } } \keyword{spatial} \keyword{math} spatstat/man/diameter.box3.Rd0000755000176000001440000000404512237642732015671 0ustar ripleyusers\name{diameter.box3} %DontDeclareMethods \Rdversion{1.1} \alias{diameter.box3} \alias{volume.box3} \alias{shortside.box3} \alias{sidelengths.box3} \alias{eroded.volumes.box3} \alias{shortside} \alias{sidelengths} \alias{eroded.volumes} \title{ Geometrical Calculations for Three-Dimensional Box } \description{ Calculates the volume, diameter, shortest side, side lengths, or eroded volume of a three-dimensional box. } \usage{ \method{diameter}{box3}(x) \method{volume}{box3}(x) shortside(x) sidelengths(x) eroded.volumes(x, r) \method{shortside}{box3}(x) \method{sidelengths}{box3}(x) \method{eroded.volumes}{box3}(x, r) } \arguments{ \item{x}{ Three-dimensional box (object of class \code{"box3"}). } \item{r}{ Numeric value or vector of numeric values for which eroded volumes should be calculated. } } \details{ \code{diameter.box3} computes the diameter of the box. 
\code{volume.box3} computes the volume of the box. \code{shortside.box3} finds the shortest of the three side lengths of the box. \code{sidelengths.box3} returns all three side lengths of the box. \code{eroded.volumes} computes, for each entry \code{r[i]}, the volume of the smaller box obtained by removing a slab of thickness \code{r[i]} from each face of the box. This smaller box is the subset consisting of points that lie at least \code{r[i]} units away from the boundary of the box. } \value{ For \code{diameter.box3}, \code{shortside.box3} and \code{volume.box3}, a single numeric value. For \code{sidelengths.box3}, a vector of three numbers. For \code{eroded.volumes}, a numeric vector of the same length as \code{r}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \seealso{ \code{\link{as.box3}} } \examples{ X <- box3(c(0,10),c(0,10),c(0,5)) diameter(X) volume(X) sidelengths(X) shortside(X) hd <- shortside(X)/2 eroded.volumes(X, seq(0,hd, length=10)) } \keyword{spatial} \keyword{math} spatstat/man/rshift.splitppp.Rd0000755000176000001440000000434212237642734016400 0ustar ripleyusers\name{rshift.splitppp} %DontDeclareMethods \alias{rshift.splitppp} \title{Randomly Shift a List of Point Patterns} \description{ Randomly shifts each point pattern in a list of point patterns. } \usage{ \method{rshift}{splitppp}(X, \dots, which=seq_along(X)) } \arguments{ \item{X}{ An object of class \code{"splitppp"}. Basically a list of point patterns. } \item{\dots}{ Parameters controlling the generation of the random shift vector and the handling of edge effects. See \code{\link{rshift.ppp}}. } \item{which}{ Optional. Identifies which patterns will be shifted, while other patterns are not shifted. Any valid subset index for \code{X}. } } \value{ Another object of class \code{"splitppp"}. 
} \details{ This operation applies a random shift to each of the point patterns in the list \code{X}. The function \code{\link{rshift}} is generic. This function \code{rshift.splitppp} is the method for objects of class \code{"splitppp"}, which are essentially lists of point patterns, created by the function \code{\link{split.ppp}}. By default, every pattern in the list \code{X} will be shifted. The argument \code{which} indicates that only some of the patterns should be shifted, while other groups should be left unchanged. \code{which} can be any valid subset index for \code{X}. Each point pattern in the list \code{X} (or each pattern in \code{X[which]}) is shifted by a random displacement vector. The shifting is performed by \code{\link{rshift.ppp}}. See the help page for \code{\link{rshift.ppp}} for details of the other arguments. } \seealso{ \code{\link{rshift}}, \code{\link{rshift.ppp}} } \examples{ data(amacrine) Y <- split(amacrine) # random toroidal shift # shift "on" and "off" points separately X <- rshift(Y) # shift "on" points and leave "off" points fixed X <- rshift(Y, which="on") # maximum displacement distance 0.1 units X <- rshift(Y, radius=0.1) # shift with erosion X <- rshift(Y, radius=0.1, edge="erode") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/rMaternI.Rd0000755000176000001440000000377412237642734014760 0ustar ripleyusers\name{rMaternI} \alias{rMaternI} \title{Simulate Matern Model I} \description{ Generate a random point pattern, a simulated realisation of the \ifelse{latex}{\out{Mat\'ern}}{Matern} Model I inhibition process model. } \usage{ rMaternI(kappa, r, win = owin(c(0,1),c(0,1)), stationary=TRUE) } \arguments{ \item{kappa}{ Intensity of the Poisson process of proposal points. A single positive number. } \item{r}{ Inhibition distance. 
} \item{win}{ Window in which to simulate the pattern. An object of class \code{"owin"} or something acceptable to \code{\link{as.owin}}. } \item{stationary}{ Logical. Whether to start with a stationary process of proposal points (\code{stationary=TRUE}) or to generate the proposal points only inside the window (\code{stationary=FALSE}). } } \value{ The simulated point pattern (an object of class \code{"ppp"}). } \details{ This algorithm generates a realisation of \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model I inhibition process inside the window \code{win}. The process is constructed by first generating a uniform Poisson point process of ``proposal'' points with intensity \code{kappa}. If \code{stationary = TRUE} (the default), the proposal points are generated in a window larger than \code{win} that effectively means the proposals are stationary. If \code{stationary=FALSE} then the proposal points are only generated inside the window \code{win}. A proposal point is then deleted if it lies within \code{r} units' distance of another proposal point. Otherwise it is retained. The retained points constitute \ifelse{latex}{\out{Mat\'ern}}{Matern}'s Model I. } \seealso{ \code{\link{rpoispp}}, \code{\link{rMatClust}} } \examples{ X <- rMaternI(20, 0.05) Y <- rMaternI(20, 0.05, stationary=FALSE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/}, Ute Hahn, and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{datagen} spatstat/man/intensity.Rd0000644000176000001440000000266212237642732015253 0ustar ripleyusers\name{intensity} \alias{intensity} \title{ Intensity of a Dataset or a Model } \description{ Generic function for computing the intensity of a spatial dataset or spatial point process model. } \usage{ intensity(X, ...) } \arguments{ \item{X}{ A spatial dataset or a spatial point process model. } \item{\dots}{ Further arguments depending on the class of \code{X}. 
} } \details{ This is a generic function for computing the intensity of a spatial dataset or spatial point process model. There are methods for point patterns (objects of class \code{"ppp"}) and fitted point process models (objects of class \code{"ppm"}). The empirical intensity of a dataset is the average density (the average amount of \sQuote{stuff} per unit area or volume). The empirical intensity of a point pattern is computed by the method \code{\link{intensity.ppp}}. The theoretical intensity of a stochastic model is the expected density (expected amount of \sQuote{stuff} per unit area or volume). The theoretical intensity of a fitted point process model is computed by the method \code{\link{intensity.ppm}}. } \value{ Usually a numeric value or vector. } \seealso{ \code{\link{intensity.ppp}}, \code{\link{intensity.ppm}}. } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/rStraussHard.Rd0000644000176000001440000000544512237642734015656 0ustar ripleyusers\name{rStraussHard} \alias{rStraussHard} \title{Perfect Simulation of the Strauss-Hardcore Process} \description{ Generate a random pattern of points, a simulated realisation of the Strauss-Hardcore process, using a perfect simulation algorithm. } \usage{ rStraussHard(beta, gamma = 1, R = 0, H = 0, W = owin()) } \arguments{ \item{beta}{ intensity parameter (a positive number). } \item{gamma}{ interaction parameter (a number between 0 and 1, inclusive). } \item{R}{ interaction radius (a non-negative number). } \item{H}{ hard core distance (a non-negative number smaller than \code{R}). } \item{W}{ window (object of class \code{"owin"}) in which to generate the random pattern. Currently this must be a rectangular window. 
} } \details{ This function generates a realisation of the Strauss-Hardcore point process in the window \code{W} using a \sQuote{perfect simulation} algorithm. The Strauss-Hardcore process is described in \code{\link{StraussHard}}. The simulation algorithm used to generate the point pattern is \sQuote{dominated coupling from the past} as implemented by Berthelsen and Moller (2002, 2003). This is a \sQuote{perfect simulation} or \sQuote{exact simulation} algorithm, so called because the output of the algorithm is guaranteed to have the correct probability distribution exactly (unlike the Metropolis-Hastings algorithm used in \code{\link{rmh}}, whose output is only approximately correct). A limitation of the perfect simulation algorithm is that the interaction parameter \eqn{\gamma}{gamma} must be less than or equal to \eqn{1}. To simulate a Strauss-hardcore process with \eqn{\gamma > 1}{gamma > 1}, use \code{\link{rmh}}. There is a tiny chance that the algorithm will run out of space before it has terminated. If this occurs, an error message will be generated. } \value{ A point pattern (object of class \code{"ppp"}). } \references{ Berthelsen, K.K. and Moller, J. (2002) A primer on perfect simulation for spatial point processes. \emph{Bulletin of the Brazilian Mathematical Society} 33, 351-367. Berthelsen, K.K. and Moller, J. (2003) Likelihood and non-parametric Bayesian MCMC inference for spatial point processes based on perfect simulation and path sampling. \emph{Scandinavian Journal of Statistics} 30, 549-564. Moller, J. and Waagepetersen, R. (2003). \emph{Statistical Inference and Simulation for Spatial Point Processes.} Chapman and Hall/CRC. } \author{ Kasper Klitgaard Berthelsen and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \examples{ Z <- rStraussHard(100,0.7,0.05,0.02) } \seealso{ \code{\link{rmh}}, \code{\link{rStrauss}}, \code{\link{StraussHard}}. 
} \keyword{spatial} \keyword{datagen} spatstat/man/raster.x.Rd0000755000176000001440000000503012237642733014767 0ustar ripleyusers\name{raster.x} \alias{raster.x} \alias{raster.y} \alias{raster.xy} \title{Cartesian Coordinates for a Pixel Raster} \description{ Return the \eqn{x} and \eqn{y} coordinates of each pixel in a binary pixel image. } \usage{ raster.x(w, drop=FALSE) raster.y(w, drop=FALSE) raster.xy(w, drop=FALSE) } \arguments{ \item{w}{ A window (object of class \code{"owin"}) of type \code{"mask"} representing a binary pixel image. } \item{drop}{ Logical. If \code{TRUE}, then coordinates of pixels that lie outside the window are removed. If \code{FALSE} (the default) then the coordinates of every pixel in the containing rectangle are retained. } } \value{ \code{raster.xy} returns a list with components \code{x} and \code{y} which are numeric vectors of equal length containing the pixel coordinates. If \code{drop=FALSE}, \code{raster.x} and \code{raster.y} return a matrix of the same dimensions as the pixel grid in \code{w}, and giving the value of the \eqn{x} (or \eqn{y}) coordinate of each pixel in the raster. If \code{drop=TRUE}, \code{raster.x} and \code{raster.y} return numeric vectors. } \details{ The argument \code{w} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details). A window of type \code{"mask"} represents a binary pixel image. If \code{drop=FALSE} (the default), the functions \code{raster.x} and \code{raster.y} return a matrix of the same dimensions as the binary pixel image itself, with entries giving the \eqn{x} coordinate (for \code{raster.x}) or \eqn{y} coordinate (for \code{raster.y}) of each pixel in the binary image. If \code{drop=TRUE}, pixels that lie outside the window \code{w} are removed, and \code{raster.x} and \code{raster.y} return numeric vectors containing the coordinates of the pixels that are inside the window \code{w}. 
The function \code{raster.xy} returns a list with components \code{x} and \code{y} which are numeric vectors of equal length containing the pixel coordinates. } \seealso{ \code{\link{owin}}, \code{\link{as.mask}} } \examples{ u <- owin(c(-1,1),c(-1,1)) # square of side 2 w <- as.mask(u, eps=0.01) # 200 x 200 grid X <- raster.x(w) Y <- raster.y(w) disc <- owin(c(-1,1), c(-1,1), mask=(X^2 + Y^2 <= 1)) \dontrun{plot(disc)} # approximation to the unit disc } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} spatstat/man/distmap.Rd0000755000176000001440000000312412237642732014663 0ustar ripleyusers\name{distmap} \alias{distmap} \title{ Distance Map } \description{ Compute the distance map of an object, and return it as a pixel image. Generic. } \usage{ distmap(X, \dots) } \arguments{ \item{X}{Any suitable dataset representing a two-dimensional object, such as a point pattern (object of class \code{"ppp"}), a window (object of class \code{"owin"}) or a line segment pattern (object of class \code{"psp"}). } \item{\dots}{Arguments passed to \code{\link{as.mask}} to control pixel resolution. } } \value{ A pixel image (object of class \code{"im"}) whose grey scale values are the values of the distance map. } \details{ The \dQuote{distance map} of a set of points \eqn{A} is the function \eqn{f} whose value \code{f(x)} is defined for any two-dimensional location \eqn{x} as the shortest distance from \eqn{x} to \eqn{A}. This function computes the distance map of the set \code{X} and returns the distance map as a pixel image. This is generic. Methods are provided for point patterns (\code{\link{distmap.ppp}}), line segment patterns (\code{\link{distmap.psp}}) and windows (\code{\link{distmap.owin}}). 
} \seealso{ \code{\link{distmap.ppp}}, \code{\link{distmap.psp}}, \code{\link{distmap.owin}}, \code{\link{distfun}} } \examples{ data(cells) U <- distmap(cells) data(letterR) V <- distmap(letterR) \dontrun{ plot(U) plot(V) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/bounding.box.xy.Rd0000755000176000001440000000262312237642732016260 0ustar ripleyusers\name{bounding.box.xy} \alias{bounding.box.xy} \title{Convex Hull of Points} \description{ Computes the smallest rectangle containing a set of points. } \usage{ bounding.box.xy(x, y=NULL) } \arguments{ \item{x}{ vector of \code{x} coordinates of observed points, or a 2-column matrix giving \code{x,y} coordinates, or a list with components \code{x,y} giving coordinates (such as a point pattern object of class \code{"ppp"}.) } \item{y}{(optional) vector of \code{y} coordinates of observed points, if \code{x} is a vector.} } \value{ A window (an object of class \code{"owin"}). } \details{ Given an observed pattern of points with coordinates given by \code{x} and \code{y}, this function finds the smallest rectangle, with sides parallel to the coordinate axes, that contains all the points, and returns it as a window. 
} \seealso{ \code{\link{owin}}, \code{\link{as.owin}}, \code{\link{convexhull.xy}}, \code{\link{ripras}} } \examples{ x <- runif(30) y <- runif(30) w <- bounding.box.xy(x,y) plot(owin(), main="bounding.box.xy(x,y)") plot(w, add=TRUE) points(x,y) X <- rpoispp(30) plot(X, main="bounding.box.xy(X)") plot(bounding.box.xy(X), add=TRUE) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{utilities} spatstat/man/is.marked.ppm.Rd0000755000176000001440000000452012237642732015673 0ustar ripleyusers\name{is.marked.ppm} \alias{is.marked.ppm} \alias{is.marked.lppm} \title{Test Whether A Point Process Model is Marked} \description{ Tests whether a fitted point process model involves ``marks'' attached to the points. } \usage{ \method{is.marked}{ppm}(X, \dots) } \arguments{ \item{X}{ Fitted point process model (object of class \code{"ppm"}) usually obtained from \code{\link{ppm}}. Alternatively, a model of class \code{"lppm"}. } \item{\dots}{ Ignored. } } \value{ Logical value, equal to \code{TRUE} if \code{X} is a model that was fitted to a marked point pattern dataset. } \details{ ``Marks'' are observations attached to each point of a point pattern. For example the \code{\link{longleaf}} dataset contains the locations of trees, each tree being marked by its diameter; the \code{\link{amacrine}} dataset gives the locations of cells of two types (on/off) and the type of cell may be regarded as a mark attached to the location of the cell. The argument \code{X} is a fitted point process model (an object of class \code{"ppm"}) typically obtained by fitting a model to point pattern data using \code{\link{ppm}}. This function returns \code{TRUE} if the \emph{original data} (to which the model \code{X} was fitted) were a marked point pattern. Note that this is not the same as testing whether the model involves terms that depend on the marks (i.e. 
whether the fitted model makes use of the marks in the data). Currently we have not implemented a test for this. If this function returns \code{TRUE}, the implications are (for example) that any simulation of this model will require simulation of random marks as well as random point locations. } \seealso{ \code{\link{is.marked}}, \code{\link{is.marked.ppp}} } \examples{ X <- lansing # Multitype point pattern --- trees marked by species \testonly{ # Smaller dataset X <- amacrine } fit1 <- ppm(X, ~ marks, Poisson()) is.marked(fit1) # TRUE fit2 <- ppm(X, ~ 1, Poisson()) is.marked(fit2) # TRUE # Unmarked point pattern fit3 <- ppm(cells, ~ 1, Poisson()) is.marked(fit3) # FALSE } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{manip} \keyword{models} spatstat/man/linearpcfdot.inhom.Rd0000644000176000001440000001017712237642732017010 0ustar ripleyusers\name{linearpcfdot.inhom} \alias{linearpcfdot.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Dot-type) for Linear Point Pattern } \description{ For a multitype point pattern on a linear network, estimate the inhomogeneous multitype pair correlation function from points of type \eqn{i} to points of any type. } \usage{ linearpcfdot.inhom(X, i, lambdaI, lambdadot, r=NULL, \dots, correction="Ang", normalise=TRUE) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the \eqn{i}-to-any pair correlation function \eqn{g_{i\bullet}(r)}{g[i.](r)} will be computed. An object of class \code{"lpp"} which must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{Number or character string identifying the type (mark value) of the points in \code{X} from which distances are measured. Defaults to the first level of \code{marks(X)}. } \item{lambdaI}{ Intensity values for the points of type \code{i}. 
Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{lambdadot}{ Intensity values for all points of \code{X}. Either a numeric vector, a \code{function}, a pixel image (object of class \code{"im"} or \code{"linim"}) or a fitted point process model (object of class \code{"ppm"} or \code{"lppm"}). } \item{r}{numeric vector. The values of the argument \eqn{r} at which the function \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. There is a sensible default. First-time users are strongly advised not to specify this argument. See below for important conditions on \eqn{r}. } \item{correction}{ Geometry correction. Either \code{"none"} or \code{"Ang"}. See Details. } \item{\dots}{ Arguments passed to \code{\link[stats]{density.default}} to control the kernel smoothing. } \item{normalise}{ Logical. If \code{TRUE} (the default), the denominator of the estimator is data-dependent (equal to the sum of the reciprocal intensities at the points of type \code{i}), which reduces the sampling variability. If \code{FALSE}, the denominator is the length of the network. } } \value{ An object of class \code{"fv"} (see \code{\link{fv.object}}). } \details{ This is a counterpart of the function \code{\link[spatstat]{pcfdot.inhom}} for a point pattern on a linear network (object of class \code{"lpp"}). The argument \code{i} will be interpreted as levels of the factor \code{marks(X)}. If \code{i} is missing, it defaults to the first level of the marks factor. The argument \code{r} is the vector of values for the distance \eqn{r} at which \eqn{g_{i\bullet}(r)}{g[i.](r)} should be evaluated. The values of \eqn{r} must be increasing nonnegative numbers and the maximum \eqn{r} value must not exceed the radius of the largest disc contained in the window. } \references{ Baddeley, A, Jammalamadaka, A. and Nair, G. 
(to appear) Multitype point process analysis of spines on the dendrite network of a neuron. \emph{Applied Statistics} (Journal of the Royal Statistical Society, Series C), In press. } \section{Warnings}{ The argument \code{i} is interpreted as a level of the factor \code{marks(X)}. Beware of the usual trap with factors: numerical values are not interpreted in the same way as character values. } \seealso{ \code{\link{linearpcfcross.inhom}}, \code{\link{linearpcfcross}}, \code{\link[spatstat]{pcfcross.inhom}}. } \examples{ lam <- table(marks(chicago))/(summary(chicago)$totlength) lamI <- function(x,y,const=lam[["assault"]]){ rep(const, length(x)) } lam. <- function(x,y,const=sum(lam)){ rep(const, length(x)) } g <- linearpcfdot.inhom(chicago, "assault", lamI, lam.) \dontrun{ fit <- lppm(chicago, ~marks + x) linearpcfdot.inhom(chicago, "assault", fit, fit) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{nonparametric} spatstat/man/profilepl.Rd0000755000176000001440000001310312237642733015215 0ustar ripleyusers\name{profilepl} \alias{profilepl} \title{Profile Maximum Pseudolikelihood} \description{ Fits point process models by profile maximum pseudolikelihood } \usage{ profilepl(s, f, \dots, rbord = NULL, verbose = TRUE) } \arguments{ \item{s}{ Data frame containing values of the irregular parameters over which the profile pseudolikelihood will be computed. } \item{f}{ Function (such as \code{\link{Strauss}}) that generates an interpoint interaction object, given values of the irregular parameters. } \item{\dots}{ Data passed to \code{\link{ppm}} to fit the model. } \item{rbord}{ Radius for border correction (same for all models). If omitted, this will be computed from the interactions. } \item{verbose}{ Logical flag indicating whether to print progress reports. } } \details{ The model-fitting function \code{\link{ppm}} fits point process models to point pattern data. 
However, only the \sQuote{regular} parameters of the model can be fitted by \code{\link{ppm}}. The model may also depend on \sQuote{irregular} parameters that must be fixed in any call to \code{\link{ppm}}. This function \code{profilepl} is a wrapper which finds the values of the irregular parameters that give the best fit. It uses the method of maximum profile pseudolikelihood. The argument \code{s} must be a data frame whose columns contain values of the irregular parameters over which the maximisation is to be performed. An irregular parameter may affect either the interpoint interaction or the spatial trend. \describe{ \item{interaction parameters:}{ in a call to \code{\link{ppm}}, the argument \code{interaction} determines the interaction between points. It is usually a call to a function such as \code{\link{Strauss}}. The arguments of this call are irregular parameters. For example, the interaction radius parameter \eqn{r} of the Strauss process, determined by the argument \code{r} to the function \code{\link{Strauss}}, is an irregular parameter. } \item{trend parameters:}{ in a call to \code{\link{ppm}}, the spatial trend may depend on covariates, which are supplied by the argument \code{covariates}. These covariates may be functions written by the user, of the form \code{function(x,y,...)}, and the extra arguments \code{\dots} are irregular parameters. } } The argument \code{f} determines the interaction for each model to be fitted. It would typically be one of the functions \code{\link{Poisson}}, \code{\link{AreaInter}}, \code{\link{BadGey}}, \code{\link{DiggleGatesStibbard}}, \code{\link{DiggleGratton}}, \code{\link{Fiksel}}, \code{\link{Geyer}}, \code{\link{Hardcore}}, \code{\link{LennardJones}}, \code{\link{OrdThresh}}, \code{\link{Softcore}}, \code{\link{Strauss}} or \code{\link{StraussHard}}. Alternatively it could be a function written by the user. 
Columns of \code{s} which match the names of arguments of \code{f} will be interpreted as interaction parameters. Other columns will be interpreted as trend parameters. To apply the method of profile maximum pseudolikelihood, each row of \code{s} will be taken in turn. Interaction parameters in this row will be passed to \code{f}, resulting in an interaction object. Then \code{\link{ppm}} will be applied to the data \code{...} using this interaction. Any trend parameters will be passed to \code{\link{ppm}} through the argument \code{covfunargs}. This results in a fitted point process model. The value of the log pseudolikelihood from this model is stored. After all rows of \code{s} have been processed in this way, the row giving the maximum value of log pseudolikelihood will be found. The object returned by \code{profilepl} contains the profile pseudolikelihood function, the best fitting model, and other data. It can be plotted (yielding a plot of the log pseudolikelihood values against the irregular parameters) or printed (yielding information about the best fitting values of the irregular parameters). In general, \code{f} may be any function that will return an interaction object (object of class \code{"interact"}) that can be used in a call to \code{\link{ppm}}. Each argument of \code{f} must be a single value. } \value{ An object of class \code{"profilepl"}. There are methods for \code{\link{plot}} and \code{\link{print}} for this class. The components of the object include \item{fit}{Best-fitting model} \item{param}{The data frame \code{s}} \item{iopt}{Row index of the best-fitting parameters in \code{s}} To extract the best fitting model you can also use \code{\link{as.ppm}}. 
} \examples{ # one irregular parameter s <- data.frame(r=seq(0.05,0.15, by=0.01)) \testonly{ s <- data.frame(r=c(0.05,0.1,0.15)) } ps <- profilepl(s, Strauss, cells) ps if(interactive()) plot(ps) # two irregular parameters s <- expand.grid(r=seq(0.05,0.15, by=0.01),sat=1:3) \testonly{ s <- expand.grid(r=c(0.07,0.12),sat=1:2) } pg <- profilepl(s, Geyer, cells) pg if(interactive()) { plot(pg) as.ppm(pg) } # multitype pattern with a common interaction radius \dontrun{ s <- data.frame(R=seq(0.03,0.05,by=0.01)) MS <- function(R) { MultiStrauss(radii=diag(c(R,R))) } pm <- profilepl(s, MS, amacrine, ~marks) } } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} spatstat/man/clarkevans.test.Rd0000755000176000001440000000656012237642732016340 0ustar ripleyusers\name{clarkevans.test} \alias{clarkevans.test} \title{Clark and Evans Test} \description{ Performs the Clark-Evans test of aggregation for a spatial point pattern. } \usage{ clarkevans.test(X, ..., correction="none", clipregion=NULL, alternative=c("two.sided", "less", "greater"), nsim=1000) } \arguments{ \item{X}{ A spatial point pattern (object of class \code{"ppp"}). } \item{\dots}{Ignored.} \item{correction}{ Character string. The type of edge correction to be applied. See \code{\link{clarkevans}} } \item{clipregion}{ Clipping region for the guard area correction. A window (object of class \code{"owin"}). See \code{\link{clarkevans}} } \item{alternative}{ String indicating the type of alternative for the hypothesis test. } \item{nsim}{ Number of Monte Carlo simulations to perform, if a Monte Carlo p-value is required. } } \details{ This command uses the Clark and Evans (1954) aggregation index \eqn{R} as the basis for a crude test of clustering or ordering of a point pattern. The Clark-Evans index is computed by the function \code{\link{clarkevans}}. 
See the help for \code{\link{clarkevans}} for information about the Clark-Evans index \eqn{R} and about the arguments \code{correction} and \code{clipregion}. This command performs a hypothesis test of clustering or ordering of the point pattern \code{X}. The null hypothesis is Complete Spatial Randomness, i.e.\ a uniform Poisson process. The alternative hypothesis is specified by the argument \code{alternative}: \itemize{ \item \code{alternative="less"} or \code{alternative="clustered"}: the alternative hypothesis is that \eqn{R < 1} corresponding to a clustered point pattern; \item \code{alternative="greater"} or \code{alternative="regular"}: the alternative hypothesis is that \eqn{R > 1} corresponding to a regular or ordered point pattern; \item \code{alternative="two.sided"}: the alternative hypothesis is that \eqn{R \neq 1}{R != 1} corresponding to a clustered or regular pattern. } The Clark-Evans index \eqn{R} is computed for the data as described in \code{\link{clarkevans}}. If \code{correction="none"}, the \eqn{p}-value for the test is computed by standardising \eqn{R} as proposed by Clark and Evans (1954) and referring the statistic to the standard Normal distribution. For other edge corrections, the \eqn{p}-value for the test is computed by Monte Carlo simulation of \code{nsim} realisations of Complete Spatial Randomness. } \value{ An object of class \code{"htest"} representing the result of the test. } \references{ Clark, P.J. and Evans, F.C. (1954) Distance to nearest neighbour as a measure of spatial relationships in populations \emph{Ecology} \bold{35}, 445--453. Donnelly, K. (1978) Simulations to determine the variance and edge-effect of total nearest neighbour distance. In \emph{Simulation methods in archaeology}, Cambridge University Press, pp 91--95. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{clarkevans}} } \examples{ # Example of a clustered pattern data(redwood) clarkevans.test(redwood) clarkevans.test(redwood, alternative="less") } \keyword{spatial} \keyword{nonparametric} spatstat/man/rotate.psp.Rd0000755000176000001440000000247712237642734015335 0ustar ripleyusers\name{rotate.psp} \alias{rotate.psp} \title{Rotate a Line Segment Pattern} \description{ Rotates a line segment pattern } \usage{ \method{rotate}{psp}(X, angle=pi/2, \dots) } \arguments{ \item{X}{A line segment pattern (object of class \code{"psp"}).} \item{angle}{Angle of rotation.} \item{\dots}{Arguments passed to \code{\link{rotate.owin}} affecting the handling of the observation window, if it is a binary pixel mask. } } \value{ Another object of class \code{"psp"} representing the rotated line segment pattern. } \details{ The line segments of the pattern, and the window of observation, are rotated about the origin by the angle specified. Angles are measured in radians, anticlockwise. The default is to rotate the pattern 90 degrees anticlockwise. If the line segments carry marks, these are preserved. 
} \seealso{ \code{\link{psp.object}}, \code{\link{rotate.owin}}, \code{\link{rotate.ppp}} } \examples{ oldpar <- par(mfrow=c(2,1)) X <- psp(runif(10), runif(10), runif(10), runif(10), window=owin()) plot(X, main="original") Y <- rotate(X, pi/4) plot(Y, main="rotated") par(oldpar) } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{math} spatstat/man/betacells.Rd0000755000176000001440000000753212237642732015167 0ustar ripleyusers\name{betacells} \alias{betacells} \docType{data} \title{Beta Ganglion Cells in Cat Retina} \description{ Point pattern of cells in the retina, each cell classified as `on' or `off' and labelled with the cell profile area. } \format{ \code{betacells} is an object of class \code{"ppp"} representing the point pattern of cell locations. Entries include \tabular{ll}{ \code{x} \tab Cartesian \eqn{x}-coordinate of cell \cr \code{y} \tab Cartesian \eqn{y}-coordinate of cell \cr \code{marks} \tab data frame of marks } Cartesian coordinates are given in microns. The data frame of marks has two columns: \tabular{ll}{ \code{type} \tab factor with levels \code{off} and \code{on} \cr \tab indicating ``off'' and ``on'' cells\cr \code{area} \tab numeric vector giving the \cr \tab areas of cell profiles (in square microns) } See \code{\link{ppp.object}} for details of the format. } \usage{data(betacells)} \source{ W\"assle et al (1981), Figure 6(a), scanned and processed by Stephen Eglen \email{S.J.Eglen@damtp.cam.ac.uk} } \section{Notes}{ This is a new, corrected version of the old dataset \code{\link{ganglia}}. See below. These data represent a pattern of beta-type ganglion cells in the retina of a cat recorded by W\"assle et al. (1981). Beta cells are associated with the resolution of fine detail in the cat's visual system. They can be classified anatomically as ``on'' or ``off''. 
Statistical independence of the arrangement of the ``on''- and ``off''-components would strengthen the evidence for Hering's (1878) `opponent theory' that there are two separate channels for sensing ``brightness'' and ``darkness''. See W\"assle et al (1981). There is considerable current interest in the arrangement of cell mosaics in the retina, see Rockhill et al (2000). The dataset is a marked point pattern giving the locations, types (``on'' or ``off''), and profile areas of beta cells observed in a rectangle of dimensions \eqn{750 \times 990}{750 x 990} microns. Coordinates are given in microns (thousandths of a millimetre) and areas are given in square microns. The original source is Figure 6 of W\"assle et al (1981), which is a manual drawing of the beta mosaic observed in a microscope field-of-view of a whole mount of the retina. Thus, all beta cells in the retina were effectively projected onto the same two-dimensional plane. The data were scanned in 2004 by Stephen Eglen from Figure 6(a) of W\"assle et al (1981). Image analysis software was used to identify the soma (cell body). The \eqn{x,y} location of each cell was taken to be the centroid of the soma. The type of each cell (``on'' or `off'') was identified by referring to Figures 6(b) and 6(d). The area of each soma (in square microns) was also computed. Note that this is a corrected version of the \code{\link{ganglia}} dataset provided in earlier versions of \pkg{spatstat}. The earlier data \code{\link{ganglia}} were not faithful to the scale in the original paper and contain some scanning errors. } \examples{ plot(betacells) area <- marks(betacells)$area plot(betacells \%mark\% sqrt(area/pi), markscale=1) } \references{ Hering, E. (1878) Zur Lehre von Lichtsinn. Vienna. Van Lieshout, M.N.M. and Baddeley, A.J. (1999) Indices of dependence between types in multivariate point patterns. \emph{Scandinavian Journal of Statistics} \bold{26}, 511--532. Rockhill, R.L., Euler, T. and Masland, R.H. 
(2000) Spatial order within but not between types of retinal neurons. \emph{Proc. Nat. Acad. Sci. USA} \bold{97}(5), 2303--2307. W\"assle, H., Boycott, B. B. & Illing, R.-B. (1981). Morphology and mosaic of on- and off-beta cells in the cat retina and some functional considerations. \emph{Proc. Roy. Soc. London Ser. B} \bold{212}, 177--195. } \keyword{datasets} \keyword{spatial} spatstat/man/pcfcross.inhom.Rd0000755000176000001440000001174612237642733016167 0ustar ripleyusers\name{pcfcross.inhom} \alias{pcfcross.inhom} \title{ Inhomogeneous Multitype Pair Correlation Function (Cross-Type) } \description{ Estimates the inhomogeneous cross-type pair correlation function for a multitype point pattern. } \usage{ pcfcross.inhom(X, i, j, lambdaI = NULL, lambdaJ = NULL, ..., r = NULL, breaks = NULL, kernel="epanechnikov", bw=NULL, stoyan=0.15, correction = c("isotropic", "Ripley", "translate"), sigma = NULL, varcov = NULL) } \arguments{ \item{X}{The observed point pattern, from which an estimate of the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} will be computed. It must be a multitype point pattern (a marked point pattern whose marks are a factor). } \item{i}{The type (mark value) of the points in \code{X} from which distances are measured. A character string (or something that will be converted to a character string). Defaults to the first level of \code{marks(X)}. } \item{j}{The type (mark value) of the points in \code{X} to which distances are measured. A character string (or something that will be converted to a character string). Defaults to the second level of \code{marks(X)}. } \item{lambdaI}{ Optional. Values of the estimated intensity function of the points of type \code{i}. 
Either a vector giving the intensity values at the points of type \code{i}, a pixel image (object of class \code{"im"}) giving the intensity values at all locations, or a \code{function(x,y)} which can be evaluated to give the intensity value at any location. } \item{lambdaJ}{ Optional. Values of the estimated intensity function of the points of type \code{j}. A numeric vector, pixel image or \code{function(x,y)}. } \item{r}{ Vector of values for the argument \eqn{r} at which \eqn{g_{ij}(r)}{g[i,j](r)} should be evaluated. There is a sensible default. } \item{breaks}{ Optional. An alternative to the argument \code{r}. Not normally invoked by the user. } \item{kernel}{ Choice of smoothing kernel, passed to \code{\link{density.default}}. } \item{bw}{ Bandwidth for smoothing kernel, passed to \code{\link{density.default}}. } \item{\dots}{ Other arguments passed to the kernel density estimation function \code{\link{density.default}}. } \item{stoyan}{ Bandwidth coefficient; see Details. } \item{correction}{ Choice of edge correction. } \item{sigma,varcov}{ Optional arguments passed to \code{\link{density.ppp}} to control the smoothing bandwidth, when \code{lambdaI} or \code{lambdaJ} is estimated by kernel smoothing. } } \details{ The inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} is a summary of the dependence between two types of points in a multitype spatial point process that does not have a uniform density of points. The best intuitive interpretation is the following: the probability \eqn{p(r)} of finding two points, of types \eqn{i} and \eqn{j} respectively, at locations \eqn{x} and \eqn{y} separated by a distance \eqn{r} is equal to \deqn{ p(r) = \lambda_i(x) \lambda_j(y) g(r) \,{\rm d}x \, {\rm d}y }{ p(r) = lambda[i](x) * lambda[j](y) * g(r) dx dy } where \eqn{\lambda_i}{lambda[i]} is the intensity function of the process of points of type \eqn{i}. 
For a multitype Poisson point process, this probability is \eqn{p(r) = \lambda_i(x) \lambda_j(y)}{p(r) = lambda[i](x) * lambda[j](y)} so \eqn{g_{ij}(r) = 1}{g[i,j](r) = 1}. The command \code{pcfcross.inhom} estimates the inhomogeneous pair correlation using a modified version of the algorithm in \code{\link{pcf.ppp}}. If the arguments \code{lambdaI} and \code{lambdaJ} are missing or null, they are estimated from \code{X} by kernel smoothing using a leave-one-out estimator. } \value{ A function value table (object of class \code{"fv"}). Essentially a data frame containing the variables \item{r}{ the vector of values of the argument \eqn{r} at which the inhomogeneous cross-type pair correlation function \eqn{g_{ij}(r)}{g[i,j](r)} has been estimated } \item{theo}{vector of values equal to 1, the theoretical value of \eqn{g_{ij}(r)}{g[i,j](r)} for the Poisson process } \item{trans}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by translation correction } \item{iso}{vector of values of \eqn{g_{ij}(r)}{g[i,j](r)} estimated by Ripley isotropic correction } as required. } \seealso{ \code{\link{pcf.ppp}}, \code{\link{pcfinhom}}, \code{\link{pcfcross}}, \code{\link{pcfdot.inhom}} } \examples{ data(amacrine) plot(pcfcross.inhom(amacrine, "on", "off", stoyan=0.1), legendpos="bottom") } \author{Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{nonparametric} spatstat/man/pairdist.default.Rd0000755000176000001440000000577312237642733016501 0ustar ripleyusers\name{pairdist.default} \alias{pairdist.default} \title{Pairwise distances} \description{ Computes the matrix of distances between all pairs of points in a set of points } \usage{ \method{pairdist}{default}(X, Y=NULL, \dots, period=NULL, method="C", squared=FALSE) } \arguments{ \item{X,Y}{ Arguments specifying the coordinates of a set of points. 
Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components \code{x} and \code{y}, or a matrix with two columns. } \item{\dots}{ Ignored. } \item{period}{ Optional. Dimensions for periodic edge correction. } \item{method}{ String specifying which method of calculation to use. Values are \code{"C"} and \code{"interpreted"}. Usually not specified. } \item{squared}{ Logical. If \code{squared=TRUE}, the squared distances are returned instead (this computation is faster). } } \value{ A square matrix whose \code{[i,j]} entry is the distance between the points numbered \code{i} and \code{j}. } \details{ Given the coordinates of a set of points, this function computes the Euclidean distances between all pairs of points, and returns the matrix of distances. It is a method for the generic function \code{pairdist}. The arguments \code{X} and \code{Y} must determine the coordinates of a set of points. Typically \code{X} and \code{Y} would be numeric vectors of equal length. Alternatively \code{Y} may be omitted and \code{X} may be a list with two components named \code{x} and \code{y}, or a matrix or data frame with two columns. Alternatively if \code{period} is given, then the distances will be computed in the `periodic' sense (also known as `torus' distance). The points will be treated as if they are in a rectangle of width \code{period[1]} and height \code{period[2]}. Opposite edges of the rectangle are regarded as equivalent. If \code{squared=TRUE} then the \emph{squared} Euclidean distances \eqn{d^2} are returned, instead of the Euclidean distances \eqn{d}. The squared distances are faster to calculate, and are sufficient for many purposes (such as finding the nearest neighbour of a point). The argument \code{method} is not normally used. It is retained only for checking the validity of the software. 
If \code{method = "interpreted"} then the distances are computed using interpreted R code only. If \code{method="C"} (the default) then C code is used. The C code is somewhat faster. } \seealso{ \code{\link{crossdist}}, \code{\link{nndist}}, \code{\link{Kest}} } \examples{ x <- runif(100) y <- runif(100) d <- pairdist(x, y) d <- pairdist(cbind(x,y)) d <- pairdist(x, y, period=c(1,1)) d <- pairdist(x, y, squared=TRUE) } \author{Pavel Grabarnik \email{pavel.grabar@issp.serpukhov.su} and Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \keyword{spatial} \keyword{math} spatstat/man/plot.leverage.ppm.Rd0000755000176000001440000000240612237642733016567 0ustar ripleyusers\name{plot.leverage.ppm} \alias{plot.leverage.ppm} \title{ Plot Leverage Function } \description{ Plots a leverage function that has been computed by \code{\link{leverage.ppm}}. } \usage{ \method{plot}{leverage.ppm}(x, ..., showcut=TRUE) } \arguments{ \item{x}{ Leverage measure (object of class \code{"leverage.ppm"}) computed by \code{\link{leverage.ppm}}. } \item{\dots}{ Arguments passed to \code{\link[spatstat]{plot.im}} controlling the image plot. } \item{showcut}{ Logical. If \code{TRUE}, a contour line is plotted at the level equal to the theoretical mean of the leverage. } } \details{ This is the plot method for objects of class \code{"leverage.ppm"}. These objects are computed by the command \code{\link{leverage.ppm}}. } \value{ None. } \references{ Baddeley, A. and Chang, Y.M. and Song, Y. (2011) Leverage and influence diagnostics for spatial point process models. \emph{Scandinavian Journal of Statistics}, in press. 
} \author{ Adrian Baddeley \email{Adrian.Baddeley@uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} } \seealso{ \code{\link{leverage.ppm}} } \examples{ X <- rpoispp(function(x,y) { exp(3+3*x) }) fit <- ppm(X, ~x+y) plot(leverage(fit)) } \keyword{spatial} \keyword{models} spatstat/man/predict.slrm.Rd0000755000176000001440000000553012237642733015634 0ustar ripleyusers\name{predict.slrm} \Rdversion{1.1} \alias{predict.slrm} \title{ Predicted or Fitted Values from Spatial Logistic Regression } \description{ Given a fitted Spatial Logistic Regression model, this function computes the fitted probabilities for each pixel, or the fitted point process intensity, or the values of the linear predictor in each pixel. } \usage{ \method{predict}{slrm}(object, ..., type = "intensity", newdata=NULL, window=NULL) } \arguments{ \item{object}{ a fitted spatial logistic regression model. An object of class \code{"slrm"}. } \item{\dots}{ Optional arguments passed to \code{\link[spatstat]{pixellate}} determining the pixel resolution for the discretisation of the point pattern. } \item{type}{ Character string (partially) matching one of \code{"probabilities"}, \code{"intensity"} or \code{"link"}. } \item{newdata}{ Optional. List containing new covariate values for the prediction. See Details. } \item{window}{ Optional. New window in which to predict. An object of class \code{"owin"}. } } \details{ This is a method for \code{\link[stats]{predict}} for spatial logistic regression models (objects of class \code{"slrm"}, usually obtained from the function \code{\link{slrm}}). The argument \code{type} determines which quantity is computed. If \code{type="intensity"}, the value of the point process intensity is computed at each pixel. If \code{type="probabilities"}, the probability of the presence of a random point in each pixel is computed. If \code{type="link"}, the value of the linear predictor is computed at each pixel. 
If \code{newdata = NULL} (the default), the algorithm computes fitted values of the model (based on the data that was originally used to fit the model \code{object}). If \code{newdata} is given, the algorithm computes predicted values of the model, using the new values of the covariates provided by \code{newdata}. The argument \code{newdata} should be a list; names of entries in the list should correspond to variables appearing in the model formula of the \code{object}. Each list entry may be a pixel image or a single numeric value. } \value{ A pixel image (object of class \code{"im"}) containing the predicted values for each pixel. } \seealso{ \code{\link{slrm}} } \examples{ X <- rpoispp(42) fit <- slrm(X ~ x+y) plot(predict(fit)) data(copper) X <- copper$SouthPoints Y <- copper$SouthLines Z <- distmap(Y) fitc <- slrm(X ~ Z) pc <- predict(fitc) Znew <- distmap(copper$Lines)[copper$SouthWindow] pcnew <- predict(fitc, newdata=list(Z=Znew)) } \author{Adrian Baddeley \email{adrian@maths.uwa.edu.au} \url{http://www.maths.uwa.edu.au/~adrian/} and Rolf Turner \email{r.turner@auckland.ac.nz} } \keyword{spatial} \keyword{models} \keyword{methods}